Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_rtl.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_rtl.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_rtl.cc (revision 351984)
@@ -0,0 +1,626 @@
+//===-- asan_rtl.cc -------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Main file of the ASan run-time library.
+//===----------------------------------------------------------------------===//
+
+#include "asan_activation.h"
+#include "asan_allocator.h"
+#include "asan_interceptors.h"
+#include "asan_interface_internal.h"
+#include "asan_internal.h"
+#include "asan_mapping.h"
+#include "asan_poisoning.h"
+#include "asan_report.h"
+#include "asan_stack.h"
+#include "asan_stats.h"
+#include "asan_suppressions.h"
+#include "asan_thread.h"
+#include "sanitizer_common/sanitizer_atomic.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_symbolizer.h"
+#include "lsan/lsan_common.h"
+#include "ubsan/ubsan_init.h"
+#include "ubsan/ubsan_platform.h"
+
+uptr __asan_shadow_memory_dynamic_address; // Global interface symbol.
+int __asan_option_detect_stack_use_after_return; // Global interface symbol.
+uptr *__asan_test_only_reported_buggy_pointer; // Used only for testing asan.
+
+namespace __asan {
+
+uptr AsanMappingProfile[kAsanMappingProfileSize];
+
+static void AsanDie() {
+ static atomic_uint32_t num_calls;
+ if (atomic_fetch_add(&num_calls, 1, memory_order_relaxed) != 0) {
+ // Don't die twice - run a busy loop.
+ while (1) { }
+ }
+ if (common_flags()->print_module_map >= 1) PrintModuleMap();
+ if (flags()->sleep_before_dying) {
+ Report("Sleeping for %d second(s)\n", flags()->sleep_before_dying);
+ SleepForSeconds(flags()->sleep_before_dying);
+ }
+ if (flags()->unmap_shadow_on_exit) {
+ if (kMidMemBeg) {
+ UnmapOrDie((void*)kLowShadowBeg, kMidMemBeg - kLowShadowBeg);
+ UnmapOrDie((void*)kMidMemEnd, kHighShadowEnd - kMidMemEnd);
+ } else {
+ if (kHighShadowEnd)
+ UnmapOrDie((void*)kLowShadowBeg, kHighShadowEnd - kLowShadowBeg);
+ }
+ }
+}
+
+static void AsanCheckFailed(const char *file, int line, const char *cond,
+ u64 v1, u64 v2) {
+ Report("AddressSanitizer CHECK failed: %s:%d \"%s\" (0x%zx, 0x%zx)\n", file,
+ line, cond, (uptr)v1, (uptr)v2);
+
+ // Print a stack trace the first time we come here. Otherwise, we probably
+ // failed a CHECK during symbolization.
+ static atomic_uint32_t num_calls;
+ if (atomic_fetch_add(&num_calls, 1, memory_order_relaxed) == 0) {
+ PRINT_CURRENT_STACK_CHECK();
+ }
+
+ Die();
+}
+
+// -------------------------- Globals --------------------- {{{1
+int asan_inited;
+bool asan_init_is_running;
+
+#if !ASAN_FIXED_MAPPING
+uptr kHighMemEnd, kMidMemBeg, kMidMemEnd;
+#endif
+
+// -------------------------- Misc ---------------- {{{1
+void ShowStatsAndAbort() {
+ __asan_print_accumulated_stats();
+ Die();
+}
+
+// --------------- LowLevelAllocateCallback ---------- {{{1
+static void OnLowLevelAllocate(uptr ptr, uptr size) {
+ PoisonShadow(ptr, size, kAsanInternalHeapMagic);
+}
+
+// -------------------------- Run-time entry ------------------- {{{1
+// exported functions
+#define ASAN_REPORT_ERROR(type, is_write, size) \
+extern "C" NOINLINE INTERFACE_ATTRIBUTE \
+void __asan_report_ ## type ## size(uptr addr) { \
+ GET_CALLER_PC_BP_SP; \
+ ReportGenericError(pc, bp, sp, addr, is_write, size, 0, true); \
+} \
+extern "C" NOINLINE INTERFACE_ATTRIBUTE \
+void __asan_report_exp_ ## type ## size(uptr addr, u32 exp) { \
+ GET_CALLER_PC_BP_SP; \
+ ReportGenericError(pc, bp, sp, addr, is_write, size, exp, true); \
+} \
+extern "C" NOINLINE INTERFACE_ATTRIBUTE \
+void __asan_report_ ## type ## size ## _noabort(uptr addr) { \
+ GET_CALLER_PC_BP_SP; \
+ ReportGenericError(pc, bp, sp, addr, is_write, size, 0, false); \
+} \
+
+ASAN_REPORT_ERROR(load, false, 1)
+ASAN_REPORT_ERROR(load, false, 2)
+ASAN_REPORT_ERROR(load, false, 4)
+ASAN_REPORT_ERROR(load, false, 8)
+ASAN_REPORT_ERROR(load, false, 16)
+ASAN_REPORT_ERROR(store, true, 1)
+ASAN_REPORT_ERROR(store, true, 2)
+ASAN_REPORT_ERROR(store, true, 4)
+ASAN_REPORT_ERROR(store, true, 8)
+ASAN_REPORT_ERROR(store, true, 16)
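+// For illustration: each ASAN_REPORT_ERROR(type, is_write, size) line above
+// expands to three entry points; e.g. ASAN_REPORT_ERROR(load, false, 1)
+// defines __asan_report_load1, __asan_report_exp_load1 and
+// __asan_report_load1_noabort, all forwarding to ReportGenericError.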
+
+#define ASAN_REPORT_ERROR_N(type, is_write) \
+extern "C" NOINLINE INTERFACE_ATTRIBUTE \
+void __asan_report_ ## type ## _n(uptr addr, uptr size) { \
+ GET_CALLER_PC_BP_SP; \
+ ReportGenericError(pc, bp, sp, addr, is_write, size, 0, true); \
+} \
+extern "C" NOINLINE INTERFACE_ATTRIBUTE \
+void __asan_report_exp_ ## type ## _n(uptr addr, uptr size, u32 exp) { \
+ GET_CALLER_PC_BP_SP; \
+ ReportGenericError(pc, bp, sp, addr, is_write, size, exp, true); \
+} \
+extern "C" NOINLINE INTERFACE_ATTRIBUTE \
+void __asan_report_ ## type ## _n_noabort(uptr addr, uptr size) { \
+ GET_CALLER_PC_BP_SP; \
+ ReportGenericError(pc, bp, sp, addr, is_write, size, 0, false); \
+} \
+
+ASAN_REPORT_ERROR_N(load, false)
+ASAN_REPORT_ERROR_N(store, true)
+
+#define ASAN_MEMORY_ACCESS_CALLBACK_BODY(type, is_write, size, exp_arg, fatal) \
+ if (SANITIZER_MYRIAD2 && !AddrIsInMem(addr) && !AddrIsInShadow(addr)) \
+ return; \
+ uptr sp = MEM_TO_SHADOW(addr); \
+ uptr s = size <= SHADOW_GRANULARITY ? *reinterpret_cast<u8 *>(sp) \
+ : *reinterpret_cast<u16 *>(sp); \
+ if (UNLIKELY(s)) { \
+ if (UNLIKELY(size >= SHADOW_GRANULARITY || \
+ ((s8)((addr & (SHADOW_GRANULARITY - 1)) + size - 1)) >= \
+ (s8)s)) { \
+ if (__asan_test_only_reported_buggy_pointer) { \
+ *__asan_test_only_reported_buggy_pointer = addr; \
+ } else { \
+ GET_CALLER_PC_BP_SP; \
+ ReportGenericError(pc, bp, sp, addr, is_write, size, exp_arg, \
+ fatal); \
+ } \
+ } \
+ }
+
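+// A worked example of the check above, assuming the typical x86_64 mapping
+// (SHADOW_SCALE == 3, SHADOW_GRANULARITY == 8, SHADOW_OFFSET == 0x7fff8000),
+// so that MEM_TO_SHADOW(addr) == (addr >> 3) + 0x7fff8000:
+//   - a 4-byte load whose shadow byte s is 0 passes silently;
+//   - with s == 5, only the first 5 bytes of the 8-byte granule are
+//     addressable, so the access is reported iff (addr & 7) + 4 - 1 >= 5.
+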
+#define ASAN_MEMORY_ACCESS_CALLBACK(type, is_write, size) \
+ extern "C" NOINLINE INTERFACE_ATTRIBUTE \
+ void __asan_##type##size(uptr addr) { \
+ ASAN_MEMORY_ACCESS_CALLBACK_BODY(type, is_write, size, 0, true) \
+ } \
+ extern "C" NOINLINE INTERFACE_ATTRIBUTE \
+ void __asan_exp_##type##size(uptr addr, u32 exp) { \
+ ASAN_MEMORY_ACCESS_CALLBACK_BODY(type, is_write, size, exp, true) \
+ } \
+ extern "C" NOINLINE INTERFACE_ATTRIBUTE \
+ void __asan_##type##size ## _noabort(uptr addr) { \
+ ASAN_MEMORY_ACCESS_CALLBACK_BODY(type, is_write, size, 0, false) \
+ } \
+
+ASAN_MEMORY_ACCESS_CALLBACK(load, false, 1)
+ASAN_MEMORY_ACCESS_CALLBACK(load, false, 2)
+ASAN_MEMORY_ACCESS_CALLBACK(load, false, 4)
+ASAN_MEMORY_ACCESS_CALLBACK(load, false, 8)
+ASAN_MEMORY_ACCESS_CALLBACK(load, false, 16)
+ASAN_MEMORY_ACCESS_CALLBACK(store, true, 1)
+ASAN_MEMORY_ACCESS_CALLBACK(store, true, 2)
+ASAN_MEMORY_ACCESS_CALLBACK(store, true, 4)
+ASAN_MEMORY_ACCESS_CALLBACK(store, true, 8)
+ASAN_MEMORY_ACCESS_CALLBACK(store, true, 16)
+
+extern "C"
+NOINLINE INTERFACE_ATTRIBUTE
+void __asan_loadN(uptr addr, uptr size) {
+ if (__asan_region_is_poisoned(addr, size)) {
+ GET_CALLER_PC_BP_SP;
+ ReportGenericError(pc, bp, sp, addr, false, size, 0, true);
+ }
+}
+
+extern "C"
+NOINLINE INTERFACE_ATTRIBUTE
+void __asan_exp_loadN(uptr addr, uptr size, u32 exp) {
+ if (__asan_region_is_poisoned(addr, size)) {
+ GET_CALLER_PC_BP_SP;
+ ReportGenericError(pc, bp, sp, addr, false, size, exp, true);
+ }
+}
+
+extern "C"
+NOINLINE INTERFACE_ATTRIBUTE
+void __asan_loadN_noabort(uptr addr, uptr size) {
+ if (__asan_region_is_poisoned(addr, size)) {
+ GET_CALLER_PC_BP_SP;
+ ReportGenericError(pc, bp, sp, addr, false, size, 0, false);
+ }
+}
+
+extern "C"
+NOINLINE INTERFACE_ATTRIBUTE
+void __asan_storeN(uptr addr, uptr size) {
+ if (__asan_region_is_poisoned(addr, size)) {
+ GET_CALLER_PC_BP_SP;
+ ReportGenericError(pc, bp, sp, addr, true, size, 0, true);
+ }
+}
+
+extern "C"
+NOINLINE INTERFACE_ATTRIBUTE
+void __asan_exp_storeN(uptr addr, uptr size, u32 exp) {
+ if (__asan_region_is_poisoned(addr, size)) {
+ GET_CALLER_PC_BP_SP;
+ ReportGenericError(pc, bp, sp, addr, true, size, exp, true);
+ }
+}
+
+extern "C"
+NOINLINE INTERFACE_ATTRIBUTE
+void __asan_storeN_noabort(uptr addr, uptr size) {
+ if (__asan_region_is_poisoned(addr, size)) {
+ GET_CALLER_PC_BP_SP;
+ ReportGenericError(pc, bp, sp, addr, true, size, 0, false);
+ }
+}
+
+// Force the linker to keep the symbols for various ASan interface functions.
+// We want to keep those in the executable in order to let the instrumented
+// dynamic libraries access the symbol even if it is not used by the executable
+// itself. This should help if the build system is removing dead code at link
+// time.
+static NOINLINE void force_interface_symbols() {
+ volatile int fake_condition = 0; // prevent dead condition elimination.
+ // __asan_report_* functions are noreturn, so we need a switch to prevent
+ // the compiler from removing any of them.
+ // clang-format off
+ switch (fake_condition) {
+ case 1: __asan_report_load1(0); break;
+ case 2: __asan_report_load2(0); break;
+ case 3: __asan_report_load4(0); break;
+ case 4: __asan_report_load8(0); break;
+ case 5: __asan_report_load16(0); break;
+ case 6: __asan_report_load_n(0, 0); break;
+ case 7: __asan_report_store1(0); break;
+ case 8: __asan_report_store2(0); break;
+ case 9: __asan_report_store4(0); break;
+ case 10: __asan_report_store8(0); break;
+ case 11: __asan_report_store16(0); break;
+ case 12: __asan_report_store_n(0, 0); break;
+ case 13: __asan_report_exp_load1(0, 0); break;
+ case 14: __asan_report_exp_load2(0, 0); break;
+ case 15: __asan_report_exp_load4(0, 0); break;
+ case 16: __asan_report_exp_load8(0, 0); break;
+ case 17: __asan_report_exp_load16(0, 0); break;
+ case 18: __asan_report_exp_load_n(0, 0, 0); break;
+ case 19: __asan_report_exp_store1(0, 0); break;
+ case 20: __asan_report_exp_store2(0, 0); break;
+ case 21: __asan_report_exp_store4(0, 0); break;
+ case 22: __asan_report_exp_store8(0, 0); break;
+ case 23: __asan_report_exp_store16(0, 0); break;
+ case 24: __asan_report_exp_store_n(0, 0, 0); break;
+ case 25: __asan_register_globals(nullptr, 0); break;
+ case 26: __asan_unregister_globals(nullptr, 0); break;
+ case 27: __asan_set_death_callback(nullptr); break;
+ case 28: __asan_set_error_report_callback(nullptr); break;
+ case 29: __asan_handle_no_return(); break;
+ case 30: __asan_address_is_poisoned(nullptr); break;
+ case 31: __asan_poison_memory_region(nullptr, 0); break;
+ case 32: __asan_unpoison_memory_region(nullptr, 0); break;
+ case 34: __asan_before_dynamic_init(nullptr); break;
+ case 35: __asan_after_dynamic_init(); break;
+ case 36: __asan_poison_stack_memory(0, 0); break;
+ case 37: __asan_unpoison_stack_memory(0, 0); break;
+ case 38: __asan_region_is_poisoned(0, 0); break;
+ case 39: __asan_describe_address(0); break;
+ case 40: __asan_set_shadow_00(0, 0); break;
+ case 41: __asan_set_shadow_f1(0, 0); break;
+ case 42: __asan_set_shadow_f2(0, 0); break;
+ case 43: __asan_set_shadow_f3(0, 0); break;
+ case 44: __asan_set_shadow_f5(0, 0); break;
+ case 45: __asan_set_shadow_f8(0, 0); break;
+ }
+ // clang-format on
+}
+
+static void asan_atexit() {
+ Printf("AddressSanitizer exit stats:\n");
+ __asan_print_accumulated_stats();
+ // Print AsanMappingProfile.
+ for (uptr i = 0; i < kAsanMappingProfileSize; i++) {
+ if (AsanMappingProfile[i] == 0) continue;
+ Printf("asan_mapping.h:%zd -- %zd\n", i, AsanMappingProfile[i]);
+ }
+}
+
+static void InitializeHighMemEnd() {
+#if !SANITIZER_MYRIAD2
+#if !ASAN_FIXED_MAPPING
+ kHighMemEnd = GetMaxUserVirtualAddress();
+ // Increase kHighMemEnd to make sure it's properly
+ // aligned together with kHighMemBeg:
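+ // E.g. with SHADOW_GRANULARITY == 8 and a 4K mmap granularity this ORs in
+ // the low 15 bits, so kHighMemEnd + 1 becomes a multiple of 0x8000 and its
+ // shadow address stays mmap-aligned.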
+ kHighMemEnd |= SHADOW_GRANULARITY * GetMmapGranularity() - 1;
+#endif // !ASAN_FIXED_MAPPING
+ CHECK_EQ((kHighMemBeg % GetMmapGranularity()), 0);
+#endif // !SANITIZER_MYRIAD2
+}
+
+void PrintAddressSpaceLayout() {
+ if (kHighMemBeg) {
+ Printf("|| `[%p, %p]` || HighMem ||\n",
+ (void*)kHighMemBeg, (void*)kHighMemEnd);
+ Printf("|| `[%p, %p]` || HighShadow ||\n",
+ (void*)kHighShadowBeg, (void*)kHighShadowEnd);
+ }
+ if (kMidMemBeg) {
+ Printf("|| `[%p, %p]` || ShadowGap3 ||\n",
+ (void*)kShadowGap3Beg, (void*)kShadowGap3End);
+ Printf("|| `[%p, %p]` || MidMem ||\n",
+ (void*)kMidMemBeg, (void*)kMidMemEnd);
+ Printf("|| `[%p, %p]` || ShadowGap2 ||\n",
+ (void*)kShadowGap2Beg, (void*)kShadowGap2End);
+ Printf("|| `[%p, %p]` || MidShadow ||\n",
+ (void*)kMidShadowBeg, (void*)kMidShadowEnd);
+ }
+ Printf("|| `[%p, %p]` || ShadowGap ||\n",
+ (void*)kShadowGapBeg, (void*)kShadowGapEnd);
+ if (kLowShadowBeg) {
+ Printf("|| `[%p, %p]` || LowShadow ||\n",
+ (void*)kLowShadowBeg, (void*)kLowShadowEnd);
+ Printf("|| `[%p, %p]` || LowMem ||\n",
+ (void*)kLowMemBeg, (void*)kLowMemEnd);
+ }
+ Printf("MemToShadow(shadow): %p %p",
+ (void*)MEM_TO_SHADOW(kLowShadowBeg),
+ (void*)MEM_TO_SHADOW(kLowShadowEnd));
+ if (kHighMemBeg) {
+ Printf(" %p %p",
+ (void*)MEM_TO_SHADOW(kHighShadowBeg),
+ (void*)MEM_TO_SHADOW(kHighShadowEnd));
+ }
+ if (kMidMemBeg) {
+ Printf(" %p %p",
+ (void*)MEM_TO_SHADOW(kMidShadowBeg),
+ (void*)MEM_TO_SHADOW(kMidShadowEnd));
+ }
+ Printf("\n");
+ Printf("redzone=%zu\n", (uptr)flags()->redzone);
+ Printf("max_redzone=%zu\n", (uptr)flags()->max_redzone);
+ Printf("quarantine_size_mb=%zuM\n", (uptr)flags()->quarantine_size_mb);
+ Printf("thread_local_quarantine_size_kb=%zuK\n",
+ (uptr)flags()->thread_local_quarantine_size_kb);
+ Printf("malloc_context_size=%zu\n",
+ (uptr)common_flags()->malloc_context_size);
+
+ Printf("SHADOW_SCALE: %d\n", (int)SHADOW_SCALE);
+ Printf("SHADOW_GRANULARITY: %d\n", (int)SHADOW_GRANULARITY);
+ Printf("SHADOW_OFFSET: 0x%zx\n", (uptr)SHADOW_OFFSET);
+ CHECK(SHADOW_SCALE >= 3 && SHADOW_SCALE <= 7);
+ if (kMidMemBeg)
+ CHECK(kMidShadowBeg > kLowShadowEnd &&
+ kMidMemBeg > kMidShadowEnd &&
+ kHighShadowBeg > kMidMemEnd);
+}
+
+#if defined(__thumb__) && defined(__linux__)
+#define START_BACKGROUND_THREAD_IN_ASAN_INTERNAL
+#endif
+
+#ifndef START_BACKGROUND_THREAD_IN_ASAN_INTERNAL
+static bool UNUSED __local_asan_dyninit = [] {
+ MaybeStartBackgroudThread();
+ SetSoftRssLimitExceededCallback(AsanSoftRssLimitExceededCallback);
+
+ return false;
+}();
+#endif
+
+static void AsanInitInternal() {
+ if (LIKELY(asan_inited)) return;
+ SanitizerToolName = "AddressSanitizer";
+ CHECK(!asan_init_is_running && "ASan init calls itself!");
+ asan_init_is_running = true;
+
+ CacheBinaryName();
+
+ // Initialize flags. This must be done early, because most of the
+ // initialization steps look at flags().
+ InitializeFlags();
+
+ // Stop performing init at this point if we are being loaded via
+ // dlopen() and the platform supports it.
+ if (SANITIZER_SUPPORTS_INIT_FOR_DLOPEN && UNLIKELY(HandleDlopenInit())) {
+ asan_init_is_running = false;
+ VReport(1, "AddressSanitizer init is being performed for dlopen().\n");
+ return;
+ }
+
+ AsanCheckIncompatibleRT();
+ AsanCheckDynamicRTPrereqs();
+ AvoidCVE_2016_2143();
+
+ SetCanPoisonMemory(flags()->poison_heap);
+ SetMallocContextSize(common_flags()->malloc_context_size);
+
+ InitializePlatformExceptionHandlers();
+
+ InitializeHighMemEnd();
+
+ // Make sure we are not statically linked.
+ AsanDoesNotSupportStaticLinkage();
+
+ // Install tool-specific callbacks in sanitizer_common.
+ AddDieCallback(AsanDie);
+ SetCheckFailedCallback(AsanCheckFailed);
+ SetPrintfAndReportCallback(AppendToErrorMessageBuffer);
+
+ __sanitizer_set_report_path(common_flags()->log_path);
+
+ __asan_option_detect_stack_use_after_return =
+ flags()->detect_stack_use_after_return;
+
+ __sanitizer::InitializePlatformEarly();
+
+ // Re-exec ourselves if we need to set additional env or command line args.
+ MaybeReexec();
+
+ // Setup internal allocator callback.
+ SetLowLevelAllocateMinAlignment(SHADOW_GRANULARITY);
+ SetLowLevelAllocateCallback(OnLowLevelAllocate);
+
+ InitializeAsanInterceptors();
+ CheckASLR();
+
+ // Enable system log ("adb logcat") on Android.
+ // Doing this before interceptors are initialized crashes in:
+ // AsanInitInternal -> android_log_write -> __interceptor_strcmp
+ AndroidLogInit();
+
+ ReplaceSystemMalloc();
+
+ DisableCoreDumperIfNecessary();
+
+ InitializeShadowMemory();
+
+ AsanTSDInit(PlatformTSDDtor);
+ InstallDeadlySignalHandlers(AsanOnDeadlySignal);
+
+ AllocatorOptions allocator_options;
+ allocator_options.SetFrom(flags(), common_flags());
+ InitializeAllocator(allocator_options);
+
+#ifdef START_BACKGROUND_THREAD_IN_ASAN_INTERNAL
+ MaybeStartBackgroudThread();
+ SetSoftRssLimitExceededCallback(AsanSoftRssLimitExceededCallback);
+#endif
+
+ // On Linux, AsanThread::ThreadStart() calls malloc(), which is why
+ // asan_inited must be set to 1 prior to initializing the threads.
+ asan_inited = 1;
+ asan_init_is_running = false;
+
+ if (flags()->atexit)
+ Atexit(asan_atexit);
+
+ InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir);
+
+ // Now that ASan runtime is (mostly) initialized, deactivate it if
+ // necessary, so that it can be re-activated when requested.
+ if (flags()->start_deactivated)
+ AsanDeactivate();
+
+ // interceptors
+ InitTlsSize();
+
+ // Create main thread.
+ AsanThread *main_thread = CreateMainThread();
+ CHECK_EQ(0, main_thread->tid());
+ force_interface_symbols(); // no-op.
+ SanitizerInitializeUnwinder();
+
+ if (CAN_SANITIZE_LEAKS) {
+ __lsan::InitCommonLsan();
+ if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit) {
+ if (flags()->halt_on_error)
+ Atexit(__lsan::DoLeakCheck);
+ else
+ Atexit(__lsan::DoRecoverableLeakCheckVoid);
+ }
+ }
+
+#if CAN_SANITIZE_UB
+ __ubsan::InitAsPlugin();
+#endif
+
+ InitializeSuppressions();
+
+ if (CAN_SANITIZE_LEAKS) {
+ // LateInitialize() calls dlsym, which can allocate an error string buffer
+ // in the TLS. Let's ignore the allocation to avoid reporting a leak.
+ __lsan::ScopedInterceptorDisabler disabler;
+ Symbolizer::LateInitialize();
+ } else {
+ Symbolizer::LateInitialize();
+ }
+
+ VReport(1, "AddressSanitizer Init done\n");
+
+ if (flags()->sleep_after_init) {
+ Report("Sleeping for %d second(s)\n", flags()->sleep_after_init);
+ SleepForSeconds(flags()->sleep_after_init);
+ }
+}
+
+// Initialize as requested from some part of ASan runtime library (interceptors,
+// allocator, etc).
+void AsanInitFromRtl() {
+ AsanInitInternal();
+}
+
+#if ASAN_DYNAMIC
+// Initialize runtime in case it's LD_PRELOAD-ed into unsanitized executable
+// (and thus normal initializers from .preinit_array or modules haven't run).
+
+class AsanInitializer {
+public: // NOLINT
+ AsanInitializer() {
+ AsanInitFromRtl();
+ }
+};
+
+static AsanInitializer asan_initializer;
+#endif // ASAN_DYNAMIC
+
+} // namespace __asan
+
+// ---------------------- Interface ---------------- {{{1
+using namespace __asan; // NOLINT
+
+void NOINLINE __asan_handle_no_return() {
+ if (asan_init_is_running)
+ return;
+
+ int local_stack;
+ AsanThread *curr_thread = GetCurrentThread();
+ uptr PageSize = GetPageSizeCached();
+ uptr top, bottom;
+ if (curr_thread) {
+ top = curr_thread->stack_top();
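+ // Round the address of a local down to a page boundary one page below
+ // it; e.g. with PageSize == 0x1000 and &local_stack == 0x7ffee3a51234,
+ // bottom == 0x7ffee3a50000 (an illustrative address).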
+ bottom = ((uptr)&local_stack - PageSize) & ~(PageSize - 1);
+ } else if (SANITIZER_RTEMS) {
+ // Give up on RTEMS.
+ return;
+ } else {
+ CHECK(!SANITIZER_FUCHSIA);
+ // If we haven't seen this thread, try asking the OS for stack bounds.
+ uptr tls_addr, tls_size, stack_size;
+ GetThreadStackAndTls(/*main=*/false, &bottom, &stack_size, &tls_addr,
+ &tls_size);
+ top = bottom + stack_size;
+ }
+ static const uptr kMaxExpectedCleanupSize = 64 << 20; // 64M
+ if (top - bottom > kMaxExpectedCleanupSize) {
+ static bool reported_warning = false;
+ if (reported_warning)
+ return;
+ reported_warning = true;
+ Report("WARNING: ASan is ignoring requested __asan_handle_no_return: "
+ "stack top: %p; bottom %p; size: %p (%zd)\n"
+ "False positive error reports may follow\n"
+ "For details see "
+ "https://github.com/google/sanitizers/issues/189\n",
+ top, bottom, top - bottom, top - bottom);
+ return;
+ }
+ PoisonShadow(bottom, top - bottom, 0);
+ if (curr_thread && curr_thread->has_fake_stack())
+ curr_thread->fake_stack()->HandleNoReturn();
+}
+
+extern "C" void *__asan_extra_spill_area() {
+ AsanThread *t = GetCurrentThread();
+ CHECK(t);
+ return t->extra_spill_area();
+}
+
+void __asan_handle_vfork(void *sp) {
+ AsanThread *t = GetCurrentThread();
+ CHECK(t);
+ uptr bottom = t->stack_bottom();
+ PoisonShadow(bottom, (uptr)sp - bottom, 0);
+}
+
+void NOINLINE __asan_set_death_callback(void (*callback)(void)) {
+ SetUserDieCallback(callback);
+}
+
+// Initialize as requested from instrumented application code.
+// We use this call as a trigger to wake up ASan from deactivated state.
+void __asan_init() {
+ AsanActivate();
+ AsanInitInternal();
+}
+
+void __asan_version_mismatch_check() {
+ // Do nothing.
+}
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_posix.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_posix.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_posix.cc (revision 351984)
@@ -0,0 +1,117 @@
+//===-- asan_posix.cc -----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// POSIX-specific details.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#if SANITIZER_POSIX
+
+#include "asan_internal.h"
+#include "asan_interceptors.h"
+#include "asan_mapping.h"
+#include "asan_report.h"
+#include "asan_stack.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_posix.h"
+#include "sanitizer_common/sanitizer_procmaps.h"
+
+#include <pthread.h>
+#include <stdlib.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <unistd.h>
+
+namespace __asan {
+
+void AsanOnDeadlySignal(int signo, void *siginfo, void *context) {
+ StartReportDeadlySignal();
+ SignalContext sig(siginfo, context);
+ ReportDeadlySignal(sig);
+}
+
+// ---------------------- TSD ---------------- {{{1
+
+#if SANITIZER_NETBSD && !ASAN_DYNAMIC
+// Thread Static Data cannot be used in early static ASan init on NetBSD.
+// Reuse the ASan TSD API for compatibility with existing code,
+// backed by an alternative implementation.
+
+static void (*tsd_destructor)(void *tsd) = nullptr;
+
+struct tsd_key {
+ tsd_key() : key(nullptr) {}
+ ~tsd_key() {
+ CHECK(tsd_destructor);
+ if (key)
+ (*tsd_destructor)(key);
+ }
+ void *key;
+};
+
+static thread_local struct tsd_key key;
+
+void AsanTSDInit(void (*destructor)(void *tsd)) {
+ CHECK(!tsd_destructor);
+ tsd_destructor = destructor;
+}
+
+void *AsanTSDGet() {
+ CHECK(tsd_destructor);
+ return key.key;
+}
+
+void AsanTSDSet(void *tsd) {
+ CHECK(tsd_destructor);
+ CHECK(tsd);
+ CHECK(!key.key);
+ key.key = tsd;
+}
+
+void PlatformTSDDtor(void *tsd) {
+ CHECK(tsd_destructor);
+ CHECK_EQ(key.key, tsd);
+ key.key = nullptr;
+ // Make sure the signal handler cannot see a stale current thread pointer.
+ atomic_signal_fence(memory_order_seq_cst);
+ AsanThread::TSDDtor(tsd);
+}
+#else
+static pthread_key_t tsd_key;
+static bool tsd_key_inited = false;
+void AsanTSDInit(void (*destructor)(void *tsd)) {
+ CHECK(!tsd_key_inited);
+ tsd_key_inited = true;
+ CHECK_EQ(0, pthread_key_create(&tsd_key, destructor));
+}
+
+void *AsanTSDGet() {
+ CHECK(tsd_key_inited);
+ return pthread_getspecific(tsd_key);
+}
+
+void AsanTSDSet(void *tsd) {
+ CHECK(tsd_key_inited);
+ pthread_setspecific(tsd_key, tsd);
+}
+
+void PlatformTSDDtor(void *tsd) {
+ AsanThreadContext *context = (AsanThreadContext*)tsd;
+ if (context->destructor_iterations > 1) {
+ context->destructor_iterations--;
+ CHECK_EQ(0, pthread_setspecific(tsd_key, tsd));
+ return;
+ }
+ AsanThread::TSDDtor(tsd);
+}
+#endif
+} // namespace __asan
+
+#endif // SANITIZER_POSIX
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_activation.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_activation.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_activation.cc (revision 351984)
@@ -0,0 +1,143 @@
+//===-- asan_activation.cc --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// ASan activation/deactivation logic.
+//===----------------------------------------------------------------------===//
+
+#include "asan_activation.h"
+#include "asan_allocator.h"
+#include "asan_flags.h"
+#include "asan_internal.h"
+#include "asan_mapping.h"
+#include "asan_poisoning.h"
+#include "asan_stack.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_flags.h"
+
+namespace __asan {
+
+static struct AsanDeactivatedFlags {
+ AllocatorOptions allocator_options;
+ int malloc_context_size;
+ bool poison_heap;
+ bool coverage;
+ const char *coverage_dir;
+
+ void RegisterActivationFlags(FlagParser *parser, Flags *f, CommonFlags *cf) {
+#define ASAN_ACTIVATION_FLAG(Type, Name) \
+ RegisterFlag(parser, #Name, "", &f->Name);
+#define COMMON_ACTIVATION_FLAG(Type, Name) \
+ RegisterFlag(parser, #Name, "", &cf->Name);
+#include "asan_activation_flags.inc"
+#undef ASAN_ACTIVATION_FLAG
+#undef COMMON_ACTIVATION_FLAG
+
+ RegisterIncludeFlags(parser, cf);
+ }
+
+ void OverrideFromActivationFlags() {
+ Flags f;
+ CommonFlags cf;
+ FlagParser parser;
+ RegisterActivationFlags(&parser, &f, &cf);
+
+ cf.SetDefaults();
+ // Copy the current activation flags.
+ allocator_options.CopyTo(&f, &cf);
+ cf.malloc_context_size = malloc_context_size;
+ f.poison_heap = poison_heap;
+ cf.coverage = coverage;
+ cf.coverage_dir = coverage_dir;
+ cf.verbosity = Verbosity();
+ cf.help = false; // this is activation-specific help
+
+    // Check if activation flags need to be overridden.
+ if (const char *env = GetEnv("ASAN_ACTIVATION_OPTIONS")) {
+ parser.ParseString(env);
+ }
+
+ InitializeCommonFlags(&cf);
+
+ if (Verbosity()) ReportUnrecognizedFlags();
+
+ if (cf.help) parser.PrintFlagDescriptions();
+
+ allocator_options.SetFrom(&f, &cf);
+ malloc_context_size = cf.malloc_context_size;
+ poison_heap = f.poison_heap;
+ coverage = cf.coverage;
+ coverage_dir = cf.coverage_dir;
+ }
+
+ void Print() {
+ Report(
+ "quarantine_size_mb %d, thread_local_quarantine_size_kb %d, "
+ "max_redzone %d, poison_heap %d, malloc_context_size %d, "
+ "alloc_dealloc_mismatch %d, allocator_may_return_null %d, coverage %d, "
+ "coverage_dir %s, allocator_release_to_os_interval_ms %d\n",
+ allocator_options.quarantine_size_mb,
+ allocator_options.thread_local_quarantine_size_kb,
+ allocator_options.max_redzone, poison_heap, malloc_context_size,
+ allocator_options.alloc_dealloc_mismatch,
+ allocator_options.may_return_null, coverage, coverage_dir,
+ allocator_options.release_to_os_interval_ms);
+ }
+} asan_deactivated_flags;
+
+static bool asan_is_deactivated;
+
+void AsanDeactivate() {
+ CHECK(!asan_is_deactivated);
+ VReport(1, "Deactivating ASan\n");
+
+ // Stash runtime state.
+ GetAllocatorOptions(&asan_deactivated_flags.allocator_options);
+ asan_deactivated_flags.malloc_context_size = GetMallocContextSize();
+ asan_deactivated_flags.poison_heap = CanPoisonMemory();
+ asan_deactivated_flags.coverage = common_flags()->coverage;
+ asan_deactivated_flags.coverage_dir = common_flags()->coverage_dir;
+
+ // Deactivate the runtime.
+ SetCanPoisonMemory(false);
+ SetMallocContextSize(1);
+
+ AllocatorOptions disabled = asan_deactivated_flags.allocator_options;
+ disabled.quarantine_size_mb = 0;
+ disabled.thread_local_quarantine_size_kb = 0;
+ // Redzone must be at least Max(16, granularity) bytes long.
+ disabled.min_redzone = Max(16, (int)SHADOW_GRANULARITY);
+ disabled.max_redzone = disabled.min_redzone;
+ disabled.alloc_dealloc_mismatch = false;
+ disabled.may_return_null = true;
+ ReInitializeAllocator(disabled);
+
+ asan_is_deactivated = true;
+}
+
+void AsanActivate() {
+ if (!asan_is_deactivated) return;
+ VReport(1, "Activating ASan\n");
+
+ UpdateProcessName();
+
+ asan_deactivated_flags.OverrideFromActivationFlags();
+
+ SetCanPoisonMemory(asan_deactivated_flags.poison_heap);
+ SetMallocContextSize(asan_deactivated_flags.malloc_context_size);
+ ReInitializeAllocator(asan_deactivated_flags.allocator_options);
+
+ asan_is_deactivated = false;
+ if (Verbosity()) {
+ Report("Activated with flags:\n");
+ asan_deactivated_flags.Print();
+ }
+}
+
+} // namespace __asan
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_activation.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_activation.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_activation.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_activation.h (revision 351984)
@@ -0,0 +1,22 @@
+//===-- asan_activation.h ---------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// ASan activation/deactivation logic.
+//===----------------------------------------------------------------------===//
+
+#ifndef ASAN_ACTIVATION_H
+#define ASAN_ACTIVATION_H
+
+namespace __asan {
+void AsanDeactivate();
+void AsanActivate();
+} // namespace __asan
+
+#endif // ASAN_ACTIVATION_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_activation.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_activation_flags.inc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_activation_flags.inc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_activation_flags.inc (revision 351984)
@@ -0,0 +1,36 @@
+//===-- asan_activation_flags.inc -------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// A subset of ASan (and common) runtime flags supported at activation time.
+//
+//===----------------------------------------------------------------------===//
+#ifndef ASAN_ACTIVATION_FLAG
+# error "Define ASAN_ACTIVATION_FLAG prior to including this file!"
+#endif
+
+#ifndef COMMON_ACTIVATION_FLAG
+# error "Define COMMON_ACTIVATION_FLAG prior to including this file!"
+#endif
+
+// ASAN_ACTIVATION_FLAG(Type, Name)
+// See COMMON_FLAG in sanitizer_flags.inc for more details.
+
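+// A consumer defines both macros before including this file, as
+// RegisterActivationFlags() in asan_activation.cc does, e.g.:
+//   #define ASAN_ACTIVATION_FLAG(Type, Name) \
+//     RegisterFlag(parser, #Name, "", &f->Name);
+//   #include "asan_activation_flags.inc"
+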
+ASAN_ACTIVATION_FLAG(int, redzone)
+ASAN_ACTIVATION_FLAG(int, max_redzone)
+ASAN_ACTIVATION_FLAG(int, quarantine_size_mb)
+ASAN_ACTIVATION_FLAG(int, thread_local_quarantine_size_kb)
+ASAN_ACTIVATION_FLAG(bool, alloc_dealloc_mismatch)
+ASAN_ACTIVATION_FLAG(bool, poison_heap)
+
+COMMON_ACTIVATION_FLAG(bool, allocator_may_return_null)
+COMMON_ACTIVATION_FLAG(int, malloc_context_size)
+COMMON_ACTIVATION_FLAG(bool, coverage)
+COMMON_ACTIVATION_FLAG(const char *, coverage_dir)
+COMMON_ACTIVATION_FLAG(int, verbosity)
+COMMON_ACTIVATION_FLAG(bool, help)
+COMMON_ACTIVATION_FLAG(s32, allocator_release_to_os_interval_ms)
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_activation_flags.inc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_allocator.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_allocator.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_allocator.cc (revision 351984)
@@ -0,0 +1,1119 @@
+//===-- asan_allocator.cc -------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Implementation of ASan's memory allocator, second version.
+// This variant uses the allocator from sanitizer_common, i.e. the one shared
+// with ThreadSanitizer and MemorySanitizer.
+//
+//===----------------------------------------------------------------------===//
+
+#include "asan_allocator.h"
+#include "asan_mapping.h"
+#include "asan_poisoning.h"
+#include "asan_report.h"
+#include "asan_stack.h"
+#include "asan_thread.h"
+#include "sanitizer_common/sanitizer_allocator_checks.h"
+#include "sanitizer_common/sanitizer_allocator_interface.h"
+#include "sanitizer_common/sanitizer_errno.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_list.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "sanitizer_common/sanitizer_quarantine.h"
+#include "lsan/lsan_common.h"
+
+namespace __asan {
+
+// Valid redzone sizes are 16, 32, 64, ... 2048, so we encode them in 3 bits.
+// We use adaptive redzones: larger allocations get larger redzones.
+static u32 RZLog2Size(u32 rz_log) {
+ CHECK_LT(rz_log, 8);
+ return 16 << rz_log;
+}
+
+static u32 RZSize2Log(u32 rz_size) {
+ CHECK_GE(rz_size, 16);
+ CHECK_LE(rz_size, 2048);
+ CHECK(IsPowerOfTwo(rz_size));
+ u32 res = Log2(rz_size) - 4;
+ CHECK_EQ(rz_size, RZLog2Size(res));
+ return res;
+}
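+// For example, RZSize2Log(32) == Log2(32) - 4 == 1, and RZLog2Size(1)
+// == 16 << 1 == 32, so the two functions are inverses on valid sizes.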
+
+static AsanAllocator &get_allocator();
+
+// The memory chunk allocated from the underlying allocator looks like this:
+// L L L L L L H H U U U U U U R R
+// L -- left redzone words (0 or more bytes)
+// H -- ChunkHeader (16 bytes), which is also a part of the left redzone.
+// U -- user memory.
+// R -- right redzone (0 or more bytes)
+// ChunkBase consists of ChunkHeader and other bytes that overlap with user
+// memory.
+
+// If the left redzone is greater than the ChunkHeader size, we store a magic
+// value in the first uptr word of the memory block and store the address of
+// ChunkBase in the next uptr.
+// M B L L L L L L L L L H H U U U U U U
+// | ^
+// ---------------------|
+// M -- magic value kAllocBegMagic
+// B -- address of ChunkHeader pointing to the first 'H'
+static const uptr kAllocBegMagic = 0xCC6E96B9;
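+// For illustration, on a 64-bit target: a chunk with a 32-byte left redzone
+// stores kAllocBegMagic in bytes [0,8) of the block, the ChunkHeader address
+// in bytes [8,16), and the header itself in the redzone's last 16 bytes.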
+
+struct ChunkHeader {
+ // First 8 bytes.
+ u32 chunk_state : 8; // Must be first.
+ u32 alloc_tid : 24;
+
+ u32 free_tid : 24;
+ u32 from_memalign : 1;
+ u32 alloc_type : 2;
+ u32 rz_log : 3;
+ u32 lsan_tag : 2;
+ // Second 8 bytes.
+ // This field is used for small sizes. For large sizes it is equal to
+ // SizeClassMap::kMaxSize and the actual size is stored in the
+ // SecondaryAllocator's metadata.
+ u32 user_requested_size : 29;
+ // align < 8 -> 0
+ // else -> log2(min(align, 512)) - 2
+ u32 user_requested_alignment_log : 3;
+ u32 alloc_context_id;
+};
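+// Example of the alignment encoding above: a requested alignment of 16 is
+// stored as Log2(16) - 2 == 2 and decoded back (see ComputeUserAlignment
+// later in this file) as 1 << (2 + 2) == 16; alignments above 512 are
+// clamped to 512.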
+
+struct ChunkBase : ChunkHeader {
+ // Header2, intersects with user memory.
+ u32 free_context_id;
+};
+
+static const uptr kChunkHeaderSize = sizeof(ChunkHeader);
+static const uptr kChunkHeader2Size = sizeof(ChunkBase) - kChunkHeaderSize;
+COMPILER_CHECK(kChunkHeaderSize == 16);
+COMPILER_CHECK(kChunkHeader2Size <= 16);
+
+// Every chunk of memory allocated by this allocator can be in one of 3 states:
+// CHUNK_AVAILABLE: the chunk is in the free list and ready to be allocated.
+// CHUNK_ALLOCATED: the chunk is allocated and not yet freed.
+// CHUNK_QUARANTINE: the chunk was freed and put into quarantine zone.
+enum {
+ CHUNK_AVAILABLE = 0, // 0 is the default value even if we didn't set it.
+ CHUNK_ALLOCATED = 2,
+ CHUNK_QUARANTINE = 3
+};
+
+struct AsanChunk: ChunkBase {
+ uptr Beg() { return reinterpret_cast<uptr>(this) + kChunkHeaderSize; }
+ uptr UsedSize(bool locked_version = false) {
+ if (user_requested_size != SizeClassMap::kMaxSize)
+ return user_requested_size;
+ return *reinterpret_cast<uptr *>(
+ get_allocator().GetMetaData(AllocBeg(locked_version)));
+ }
+ void *AllocBeg(bool locked_version = false) {
+ if (from_memalign) {
+ if (locked_version)
+ return get_allocator().GetBlockBeginFastLocked(
+ reinterpret_cast<void *>(this));
+ return get_allocator().GetBlockBegin(reinterpret_cast<void *>(this));
+ }
+ return reinterpret_cast<void*>(Beg() - RZLog2Size(rz_log));
+ }
+ bool AddrIsInside(uptr addr, bool locked_version = false) {
+ return (addr >= Beg()) && (addr < Beg() + UsedSize(locked_version));
+ }
+};
+
+struct QuarantineCallback {
+ QuarantineCallback(AllocatorCache *cache, BufferedStackTrace *stack)
+ : cache_(cache),
+ stack_(stack) {
+ }
+
+ void Recycle(AsanChunk *m) {
+ CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
+ atomic_store((atomic_uint8_t*)m, CHUNK_AVAILABLE, memory_order_relaxed);
+ CHECK_NE(m->alloc_tid, kInvalidTid);
+ CHECK_NE(m->free_tid, kInvalidTid);
+ PoisonShadow(m->Beg(),
+ RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
+ kAsanHeapLeftRedzoneMagic);
+ void *p = reinterpret_cast<void *>(m->AllocBeg());
+ if (p != m) {
+ uptr *alloc_magic = reinterpret_cast<uptr *>(p);
+ CHECK_EQ(alloc_magic[0], kAllocBegMagic);
+ // Clear the magic value, as allocator internals may overwrite the
+ // contents of the deallocated chunk, confusing the GetAsanChunk lookup.
+ alloc_magic[0] = 0;
+ CHECK_EQ(alloc_magic[1], reinterpret_cast<uptr>(m));
+ }
+
+ // Statistics.
+ AsanStats &thread_stats = GetCurrentThreadStats();
+ thread_stats.real_frees++;
+ thread_stats.really_freed += m->UsedSize();
+
+ get_allocator().Deallocate(cache_, p);
+ }
+
+ void *Allocate(uptr size) {
+ void *res = get_allocator().Allocate(cache_, size, 1);
+ // TODO(alekseys): Consider making quarantine OOM-friendly.
+ if (UNLIKELY(!res))
+ ReportOutOfMemory(size, stack_);
+ return res;
+ }
+
+ void Deallocate(void *p) {
+ get_allocator().Deallocate(cache_, p);
+ }
+
+ private:
+ AllocatorCache* const cache_;
+ BufferedStackTrace* const stack_;
+};
+
+typedef Quarantine<QuarantineCallback, AsanChunk> AsanQuarantine;
+typedef AsanQuarantine::Cache QuarantineCache;
+
+void AsanMapUnmapCallback::OnMap(uptr p, uptr size) const {
+ PoisonShadow(p, size, kAsanHeapLeftRedzoneMagic);
+ // Statistics.
+ AsanStats &thread_stats = GetCurrentThreadStats();
+ thread_stats.mmaps++;
+ thread_stats.mmaped += size;
+}
+void AsanMapUnmapCallback::OnUnmap(uptr p, uptr size) const {
+ PoisonShadow(p, size, 0);
+ // We are about to unmap a chunk of user memory.
+ // Mark the corresponding shadow memory as not needed.
+ FlushUnneededASanShadowMemory(p, size);
+ // Statistics.
+ AsanStats &thread_stats = GetCurrentThreadStats();
+ thread_stats.munmaps++;
+ thread_stats.munmaped += size;
+}
+
+// We cannot use THREADLOCAL because it is not supported on some of the
+// platforms we care about (OSX 10.6, Android).
+// static THREADLOCAL AllocatorCache cache;
+AllocatorCache *GetAllocatorCache(AsanThreadLocalMallocStorage *ms) {
+ CHECK(ms);
+ return &ms->allocator_cache;
+}
+
+QuarantineCache *GetQuarantineCache(AsanThreadLocalMallocStorage *ms) {
+ CHECK(ms);
+ CHECK_LE(sizeof(QuarantineCache), sizeof(ms->quarantine_cache));
+ return reinterpret_cast<QuarantineCache *>(ms->quarantine_cache);
+}
+
+void AllocatorOptions::SetFrom(const Flags *f, const CommonFlags *cf) {
+ quarantine_size_mb = f->quarantine_size_mb;
+ thread_local_quarantine_size_kb = f->thread_local_quarantine_size_kb;
+ min_redzone = f->redzone;
+ max_redzone = f->max_redzone;
+ may_return_null = cf->allocator_may_return_null;
+ alloc_dealloc_mismatch = f->alloc_dealloc_mismatch;
+ release_to_os_interval_ms = cf->allocator_release_to_os_interval_ms;
+}
+
+void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) {
+ f->quarantine_size_mb = quarantine_size_mb;
+ f->thread_local_quarantine_size_kb = thread_local_quarantine_size_kb;
+ f->redzone = min_redzone;
+ f->max_redzone = max_redzone;
+ cf->allocator_may_return_null = may_return_null;
+ f->alloc_dealloc_mismatch = alloc_dealloc_mismatch;
+ cf->allocator_release_to_os_interval_ms = release_to_os_interval_ms;
+}
+
+struct Allocator {
+ static const uptr kMaxAllowedMallocSize =
+ FIRST_32_SECOND_64(3UL << 30, 1ULL << 40);
+
+ AsanAllocator allocator;
+ AsanQuarantine quarantine;
+ StaticSpinMutex fallback_mutex;
+ AllocatorCache fallback_allocator_cache;
+ QuarantineCache fallback_quarantine_cache;
+
+ atomic_uint8_t rss_limit_exceeded;
+
+ // ------------------- Options --------------------------
+ atomic_uint16_t min_redzone;
+ atomic_uint16_t max_redzone;
+ atomic_uint8_t alloc_dealloc_mismatch;
+
+ // ------------------- Initialization ------------------------
+ explicit Allocator(LinkerInitialized)
+ : quarantine(LINKER_INITIALIZED),
+ fallback_quarantine_cache(LINKER_INITIALIZED) {}
+
+ void CheckOptions(const AllocatorOptions &options) const {
+ CHECK_GE(options.min_redzone, 16);
+ CHECK_GE(options.max_redzone, options.min_redzone);
+ CHECK_LE(options.max_redzone, 2048);
+ CHECK(IsPowerOfTwo(options.min_redzone));
+ CHECK(IsPowerOfTwo(options.max_redzone));
+ }
+
+ void SharedInitCode(const AllocatorOptions &options) {
+ CheckOptions(options);
+ quarantine.Init((uptr)options.quarantine_size_mb << 20,
+ (uptr)options.thread_local_quarantine_size_kb << 10);
+ atomic_store(&alloc_dealloc_mismatch, options.alloc_dealloc_mismatch,
+ memory_order_release);
+ atomic_store(&min_redzone, options.min_redzone, memory_order_release);
+ atomic_store(&max_redzone, options.max_redzone, memory_order_release);
+ }
+
+ void InitLinkerInitialized(const AllocatorOptions &options) {
+ SetAllocatorMayReturnNull(options.may_return_null);
+ allocator.InitLinkerInitialized(options.release_to_os_interval_ms);
+ SharedInitCode(options);
+ }
+
+ bool RssLimitExceeded() {
+ return atomic_load(&rss_limit_exceeded, memory_order_relaxed);
+ }
+
+ void SetRssLimitExceeded(bool limit_exceeded) {
+ atomic_store(&rss_limit_exceeded, limit_exceeded, memory_order_relaxed);
+ }
+
+ void RePoisonChunk(uptr chunk) {
+ // This could be a user-facing chunk (with redzones), or some internal
+ // housekeeping chunk, like TransferBatch. Start by assuming the former.
+ AsanChunk *ac = GetAsanChunk((void *)chunk);
+ uptr allocated_size = allocator.GetActuallyAllocatedSize((void *)ac);
+ uptr beg = ac->Beg();
+ uptr end = ac->Beg() + ac->UsedSize(true);
+ uptr chunk_end = chunk + allocated_size;
+ if (chunk < beg && beg < end && end <= chunk_end &&
+ ac->chunk_state == CHUNK_ALLOCATED) {
+ // Looks like a valid AsanChunk in use, poison redzones only.
+ PoisonShadow(chunk, beg - chunk, kAsanHeapLeftRedzoneMagic);
+ uptr end_aligned_down = RoundDownTo(end, SHADOW_GRANULARITY);
+ FastPoisonShadowPartialRightRedzone(
+ end_aligned_down, end - end_aligned_down,
+ chunk_end - end_aligned_down, kAsanHeapLeftRedzoneMagic);
+ } else {
+ // This is either not an AsanChunk or freed or quarantined AsanChunk.
+ // In either case, poison everything.
+ PoisonShadow(chunk, allocated_size, kAsanHeapLeftRedzoneMagic);
+ }
+ }
+
+ void ReInitialize(const AllocatorOptions &options) {
+ SetAllocatorMayReturnNull(options.may_return_null);
+ allocator.SetReleaseToOSIntervalMs(options.release_to_os_interval_ms);
+ SharedInitCode(options);
+
+ // Poison all existing allocation's redzones.
+ if (CanPoisonMemory()) {
+ allocator.ForceLock();
+ allocator.ForEachChunk(
+ [](uptr chunk, void *alloc) {
+ ((Allocator *)alloc)->RePoisonChunk(chunk);
+ },
+ this);
+ allocator.ForceUnlock();
+ }
+ }
+
+ void GetOptions(AllocatorOptions *options) const {
+ options->quarantine_size_mb = quarantine.GetSize() >> 20;
+ options->thread_local_quarantine_size_kb = quarantine.GetCacheSize() >> 10;
+ options->min_redzone = atomic_load(&min_redzone, memory_order_acquire);
+ options->max_redzone = atomic_load(&max_redzone, memory_order_acquire);
+ options->may_return_null = AllocatorMayReturnNull();
+ options->alloc_dealloc_mismatch =
+ atomic_load(&alloc_dealloc_mismatch, memory_order_acquire);
+ options->release_to_os_interval_ms = allocator.ReleaseToOSIntervalMs();
+ }
+
+ // -------------------- Helper methods. -------------------------
+ uptr ComputeRZLog(uptr user_requested_size) {
+ u32 rz_log =
+ user_requested_size <= 64 - 16 ? 0 :
+ user_requested_size <= 128 - 32 ? 1 :
+ user_requested_size <= 512 - 64 ? 2 :
+ user_requested_size <= 4096 - 128 ? 3 :
+ user_requested_size <= (1 << 14) - 256 ? 4 :
+ user_requested_size <= (1 << 15) - 512 ? 5 :
+ user_requested_size <= (1 << 16) - 1024 ? 6 : 7;
+ u32 min_rz = atomic_load(&min_redzone, memory_order_acquire);
+ u32 max_rz = atomic_load(&max_redzone, memory_order_acquire);
+ return Min(Max(rz_log, RZSize2Log(min_rz)), RZSize2Log(max_rz));
+ }
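+ // E.g. a 100-byte request falls into the "<= 512 - 64" bucket above,
+ // giving rz_log == 2, i.e. a 64-byte redzone, before clamping to the
+ // [min_redzone, max_redzone] range configured at run time.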
+
+ static uptr ComputeUserRequestedAlignmentLog(uptr user_requested_alignment) {
+ if (user_requested_alignment < 8)
+ return 0;
+ if (user_requested_alignment > 512)
+ user_requested_alignment = 512;
+ return Log2(user_requested_alignment) - 2;
+ }
+
+ static uptr ComputeUserAlignment(uptr user_requested_alignment_log) {
+ if (user_requested_alignment_log == 0)
+ return 0;
+ return 1LL << (user_requested_alignment_log + 2);
+ }
+
+ // We have an address between two chunks, and we want to report just one.
+ AsanChunk *ChooseChunk(uptr addr, AsanChunk *left_chunk,
+ AsanChunk *right_chunk) {
+ // Prefer an allocated chunk over a freed chunk, and a freed chunk
+ // over an available chunk.
+ if (left_chunk->chunk_state != right_chunk->chunk_state) {
+ if (left_chunk->chunk_state == CHUNK_ALLOCATED)
+ return left_chunk;
+ if (right_chunk->chunk_state == CHUNK_ALLOCATED)
+ return right_chunk;
+ if (left_chunk->chunk_state == CHUNK_QUARANTINE)
+ return left_chunk;
+ if (right_chunk->chunk_state == CHUNK_QUARANTINE)
+ return right_chunk;
+ }
+ // Same chunk_state: choose based on offset.
+ sptr l_offset = 0, r_offset = 0;
+ CHECK(AsanChunkView(left_chunk).AddrIsAtRight(addr, 1, &l_offset));
+ CHECK(AsanChunkView(right_chunk).AddrIsAtLeft(addr, 1, &r_offset));
+ if (l_offset < r_offset)
+ return left_chunk;
+ return right_chunk;
+ }
+
+ // -------------------- Allocation/Deallocation routines ---------------
+ void *Allocate(uptr size, uptr alignment, BufferedStackTrace *stack,
+ AllocType alloc_type, bool can_fill) {
+ if (UNLIKELY(!asan_inited))
+ AsanInitFromRtl();
+ if (RssLimitExceeded()) {
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportRssLimitExceeded(stack);
+ }
+ Flags &fl = *flags();
+ CHECK(stack);
+ const uptr min_alignment = SHADOW_GRANULARITY;
+ const uptr user_requested_alignment_log =
+ ComputeUserRequestedAlignmentLog(alignment);
+ if (alignment < min_alignment)
+ alignment = min_alignment;
+ if (size == 0) {
+ // We'd be happy to avoid allocating memory for zero-size requests, but
+ // some programs/tests depend on this behavior and assume that malloc
+ // would not return NULL even for zero-size allocations. Moreover, it
+ // looks like operator new should never return NULL, and results of
+ // consecutive "new" calls must be different even if the allocated size
+ // is zero.
+ size = 1;
+ }
+ CHECK(IsPowerOfTwo(alignment));
+ uptr rz_log = ComputeRZLog(size);
+ uptr rz_size = RZLog2Size(rz_log);
+ uptr rounded_size = RoundUpTo(Max(size, kChunkHeader2Size), alignment);
+ uptr needed_size = rounded_size + rz_size;
+ if (alignment > min_alignment)
+ needed_size += alignment;
+ bool using_primary_allocator = true;
+ // If we are allocating from the secondary allocator, there will be no
+ // automatic right redzone, so add the right redzone manually.
+ if (!PrimaryAllocator::CanAllocate(needed_size, alignment)) {
+ needed_size += rz_size;
+ using_primary_allocator = false;
+ }
+ CHECK(IsAligned(needed_size, min_alignment));
+ if (size > kMaxAllowedMallocSize || needed_size > kMaxAllowedMallocSize) {
+ if (AllocatorMayReturnNull()) {
+ Report("WARNING: AddressSanitizer failed to allocate 0x%zx bytes\n",
+ (void*)size);
+ return nullptr;
+ }
+ ReportAllocationSizeTooBig(size, needed_size, kMaxAllowedMallocSize,
+ stack);
+ }
+
+ AsanThread *t = GetCurrentThread();
+ void *allocated;
+ if (t) {
+ AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
+ allocated = allocator.Allocate(cache, needed_size, 8);
+ } else {
+ SpinMutexLock l(&fallback_mutex);
+ AllocatorCache *cache = &fallback_allocator_cache;
+ allocated = allocator.Allocate(cache, needed_size, 8);
+ }
+ if (UNLIKELY(!allocated)) {
+ SetAllocatorOutOfMemory();
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportOutOfMemory(size, stack);
+ }
+
+ if (*(u8 *)MEM_TO_SHADOW((uptr)allocated) == 0 && CanPoisonMemory()) {
+ // Heap poisoning is enabled, but the allocator provides an unpoisoned
+ // chunk. This is possible if CanPoisonMemory() was false for some
+ // time, for example, due to flags()->start_disabled.
+ // Anyway, poison the block before using it for anything else.
+ uptr allocated_size = allocator.GetActuallyAllocatedSize(allocated);
+ PoisonShadow((uptr)allocated, allocated_size, kAsanHeapLeftRedzoneMagic);
+ }
+
+ uptr alloc_beg = reinterpret_cast<uptr>(allocated);
+ uptr alloc_end = alloc_beg + needed_size;
+ uptr beg_plus_redzone = alloc_beg + rz_size;
+ uptr user_beg = beg_plus_redzone;
+ if (!IsAligned(user_beg, alignment))
+ user_beg = RoundUpTo(user_beg, alignment);
+ uptr user_end = user_beg + size;
+ CHECK_LE(user_end, alloc_end);
+ uptr chunk_beg = user_beg - kChunkHeaderSize;
+ AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
+ m->alloc_type = alloc_type;
+ m->rz_log = rz_log;
+ u32 alloc_tid = t ? t->tid() : 0;
+ m->alloc_tid = alloc_tid;
+ CHECK_EQ(alloc_tid, m->alloc_tid); // Does alloc_tid fit into the bitfield?
+ m->free_tid = kInvalidTid;
+ m->from_memalign = user_beg != beg_plus_redzone;
+ if (alloc_beg != chunk_beg) {
+ CHECK_LE(alloc_beg + 2 * sizeof(uptr), chunk_beg);
+ reinterpret_cast<uptr *>(alloc_beg)[0] = kAllocBegMagic;
+ reinterpret_cast<uptr *>(alloc_beg)[1] = chunk_beg;
+ }
+ if (using_primary_allocator) {
+ CHECK(size);
+ m->user_requested_size = size;
+ CHECK(allocator.FromPrimary(allocated));
+ } else {
+ CHECK(!allocator.FromPrimary(allocated));
+ m->user_requested_size = SizeClassMap::kMaxSize;
+ uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(allocated));
+ meta[0] = size;
+ meta[1] = chunk_beg;
+ }
+ m->user_requested_alignment_log = user_requested_alignment_log;
+
+ m->alloc_context_id = StackDepotPut(*stack);
+
+ uptr size_rounded_down_to_granularity =
+ RoundDownTo(size, SHADOW_GRANULARITY);
+ // Unpoison the bulk of the memory region.
+ if (size_rounded_down_to_granularity)
+ PoisonShadow(user_beg, size_rounded_down_to_granularity, 0);
+ // Deal with the end of the region if size is not aligned to granularity.
+ if (size != size_rounded_down_to_granularity && CanPoisonMemory()) {
+ u8 *shadow =
+ (u8 *)MemToShadow(user_beg + size_rounded_down_to_granularity);
+ *shadow = fl.poison_partial ? (size & (SHADOW_GRANULARITY - 1)) : 0;
+ }
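+ // E.g. with SHADOW_GRANULARITY == 8 and size == 13, bytes [0,8) are
+ // unpoisoned above and the shadow of the last granule is set to
+ // 13 & 7 == 5, marking only its first 5 bytes addressable.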
+
+ AsanStats &thread_stats = GetCurrentThreadStats();
+ thread_stats.mallocs++;
+ thread_stats.malloced += size;
+ thread_stats.malloced_redzones += needed_size - size;
+ if (needed_size > SizeClassMap::kMaxSize)
+ thread_stats.malloc_large++;
+ else
+ thread_stats.malloced_by_size[SizeClassMap::ClassID(needed_size)]++;
+
+ void *res = reinterpret_cast<void *>(user_beg);
+ if (can_fill && fl.max_malloc_fill_size) {
+ uptr fill_size = Min(size, (uptr)fl.max_malloc_fill_size);
+ REAL(memset)(res, fl.malloc_fill_byte, fill_size);
+ }
+#if CAN_SANITIZE_LEAKS
+ m->lsan_tag = __lsan::DisabledInThisThread() ? __lsan::kIgnored
+ : __lsan::kDirectlyLeaked;
+#endif
+ // Must be the last mutation of metadata in this function.
+ atomic_store((atomic_uint8_t *)m, CHUNK_ALLOCATED, memory_order_release);
+ ASAN_MALLOC_HOOK(res, size);
+ return res;
+ }
+
+ // Set the quarantine flag if the chunk is allocated; issue an ASan error
+ // report for available and quarantined chunks. Returns true on success,
+ // false otherwise.
+ bool AtomicallySetQuarantineFlagIfAllocated(AsanChunk *m, void *ptr,
+ BufferedStackTrace *stack) {
+ u8 old_chunk_state = CHUNK_ALLOCATED;
+ // Flip the chunk_state atomically to avoid race on double-free.
+ if (!atomic_compare_exchange_strong((atomic_uint8_t *)m, &old_chunk_state,
+ CHUNK_QUARANTINE,
+ memory_order_acquire)) {
+ ReportInvalidFree(ptr, old_chunk_state, stack);
+ // It's not safe to push a chunk in quarantine on invalid free.
+ return false;
+ }
+ CHECK_EQ(CHUNK_ALLOCATED, old_chunk_state);
+ return true;
+ }
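+ // E.g. on a racy double free, exactly one of the two frees wins the CAS
+ // above; the loser observes old_chunk_state == CHUNK_QUARANTINE and
+ // reports a double-free instead of corrupting the quarantine.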
+
+ // Expects the chunk to already be marked as quarantined by using
+ // AtomicallySetQuarantineFlagIfAllocated.
+ void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack) {
+ CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
+ CHECK_GE(m->alloc_tid, 0);
+ if (SANITIZER_WORDSIZE == 64) // On 32-bit targets this resides in the user area.
+ CHECK_EQ(m->free_tid, kInvalidTid);
+ AsanThread *t = GetCurrentThread();
+ m->free_tid = t ? t->tid() : 0;
+ m->free_context_id = StackDepotPut(*stack);
+
+ Flags &fl = *flags();
+ if (fl.max_free_fill_size > 0) {
+ // We have to skip the chunk header, it contains free_context_id.
+ uptr scribble_start = (uptr)m + kChunkHeaderSize + kChunkHeader2Size;
+ if (m->UsedSize() >= kChunkHeader2Size) { // Skip Header2 in user area.
+ uptr size_to_fill = m->UsedSize() - kChunkHeader2Size;
+ size_to_fill = Min(size_to_fill, (uptr)fl.max_free_fill_size);
+ REAL(memset)((void *)scribble_start, fl.free_fill_byte, size_to_fill);
+ }
+ }
+
+ // Poison the region.
+ PoisonShadow(m->Beg(),
+ RoundUpTo(m->UsedSize(), SHADOW_GRANULARITY),
+ kAsanHeapFreeMagic);
+
+ AsanStats &thread_stats = GetCurrentThreadStats();
+ thread_stats.frees++;
+ thread_stats.freed += m->UsedSize();
+
+ // Push into quarantine.
+ if (t) {
+ AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
+ AllocatorCache *ac = GetAllocatorCache(ms);
+ quarantine.Put(GetQuarantineCache(ms), QuarantineCallback(ac, stack), m,
+ m->UsedSize());
+ } else {
+ SpinMutexLock l(&fallback_mutex);
+ AllocatorCache *ac = &fallback_allocator_cache;
+ quarantine.Put(&fallback_quarantine_cache, QuarantineCallback(ac, stack),
+ m, m->UsedSize());
+ }
+ }
+
+ void Deallocate(void *ptr, uptr delete_size, uptr delete_alignment,
+ BufferedStackTrace *stack, AllocType alloc_type) {
+ uptr p = reinterpret_cast<uptr>(ptr);
+ if (p == 0) return;
+
+ uptr chunk_beg = p - kChunkHeaderSize;
+ AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
+
+ // On Windows, uninstrumented DLLs may allocate memory before ASan hooks
+ // malloc. Don't report an invalid free in this case.
+ if (SANITIZER_WINDOWS &&
+ !get_allocator().PointerIsMine(ptr)) {
+ if (!IsSystemHeapAddress(p))
+ ReportFreeNotMalloced(p, stack);
+ return;
+ }
+
+ ASAN_FREE_HOOK(ptr);
+
+ // Must mark the chunk as quarantined before any changes to its metadata.
+ // Do not quarantine the chunk if we failed to set the CHUNK_QUARANTINE flag.
+ if (!AtomicallySetQuarantineFlagIfAllocated(m, ptr, stack)) return;
+
+ if (m->alloc_type != alloc_type) {
+ if (atomic_load(&alloc_dealloc_mismatch, memory_order_acquire)) {
+ ReportAllocTypeMismatch((uptr)ptr, stack, (AllocType)m->alloc_type,
+ (AllocType)alloc_type);
+ }
+ } else {
+ if (flags()->new_delete_type_mismatch &&
+ (alloc_type == FROM_NEW || alloc_type == FROM_NEW_BR) &&
+ ((delete_size && delete_size != m->UsedSize()) ||
+ ComputeUserRequestedAlignmentLog(delete_alignment) !=
+ m->user_requested_alignment_log)) {
+ ReportNewDeleteTypeMismatch(p, delete_size, delete_alignment, stack);
+ }
+ }
+
+ QuarantineChunk(m, ptr, stack);
+ }
+
+ void *Reallocate(void *old_ptr, uptr new_size, BufferedStackTrace *stack) {
+ CHECK(old_ptr && new_size);
+ uptr p = reinterpret_cast<uptr>(old_ptr);
+ uptr chunk_beg = p - kChunkHeaderSize;
+ AsanChunk *m = reinterpret_cast<AsanChunk *>(chunk_beg);
+
+ AsanStats &thread_stats = GetCurrentThreadStats();
+ thread_stats.reallocs++;
+ thread_stats.realloced += new_size;
+
+ void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
+ if (new_ptr) {
+ u8 chunk_state = m->chunk_state;
+ if (chunk_state != CHUNK_ALLOCATED)
+ ReportInvalidFree(old_ptr, chunk_state, stack);
+ CHECK_NE(REAL(memcpy), nullptr);
+ uptr memcpy_size = Min(new_size, m->UsedSize());
+ // If realloc() races with free(), we may start copying freed memory.
+ // However, we will report racy double-free later anyway.
+ REAL(memcpy)(new_ptr, old_ptr, memcpy_size);
+ Deallocate(old_ptr, 0, 0, stack, FROM_MALLOC);
+ }
+ return new_ptr;
+ }
+
+ void *Calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
+ if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportCallocOverflow(nmemb, size, stack);
+ }
+ void *ptr = Allocate(nmemb * size, 8, stack, FROM_MALLOC, false);
+    // If the memory comes from the secondary allocator, there is no need to
+    // clear it: it comes directly from mmap() and is already zeroed.
+ if (ptr && allocator.FromPrimary(ptr))
+ REAL(memset)(ptr, 0, nmemb * size);
+ return ptr;
+ }
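+  // Hedged sketch of the condition CheckForCallocOverflow (defined in
+  // sanitizer_common) guards against before the nmemb * size multiply:
+  //
+  //   bool WouldOverflow(uptr nmemb, uptr size) {
+  //     return nmemb != 0 && size > (~(uptr)0) / nmemb;  // product wraps
+  //   }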
+
+ void ReportInvalidFree(void *ptr, u8 chunk_state, BufferedStackTrace *stack) {
+ if (chunk_state == CHUNK_QUARANTINE)
+ ReportDoubleFree((uptr)ptr, stack);
+ else
+ ReportFreeNotMalloced((uptr)ptr, stack);
+ }
+
+ void CommitBack(AsanThreadLocalMallocStorage *ms, BufferedStackTrace *stack) {
+ AllocatorCache *ac = GetAllocatorCache(ms);
+ quarantine.Drain(GetQuarantineCache(ms), QuarantineCallback(ac, stack));
+ allocator.SwallowCache(ac);
+ }
+
+ // -------------------------- Chunk lookup ----------------------
+
+ // Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
+ AsanChunk *GetAsanChunk(void *alloc_beg) {
+ if (!alloc_beg) return nullptr;
+ if (!allocator.FromPrimary(alloc_beg)) {
+ uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg));
+ AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]);
+ return m;
+ }
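+    // A primary-allocator block either begins with the AsanChunk itself or,
+    // when the user region had to be shifted (e.g. for over-aligned
+    // requests), with the pair (kAllocBegMagic, chunk pointer) that
+    // Allocate() wrote at alloc_beg.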
+ uptr *alloc_magic = reinterpret_cast<uptr *>(alloc_beg);
+ if (alloc_magic[0] == kAllocBegMagic)
+ return reinterpret_cast<AsanChunk *>(alloc_magic[1]);
+ return reinterpret_cast<AsanChunk *>(alloc_beg);
+ }
+
+ AsanChunk *GetAsanChunkByAddr(uptr p) {
+ void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
+ return GetAsanChunk(alloc_beg);
+ }
+
+ // Allocator must be locked when this function is called.
+ AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) {
+ void *alloc_beg =
+ allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p));
+ return GetAsanChunk(alloc_beg);
+ }
+
+ uptr AllocationSize(uptr p) {
+ AsanChunk *m = GetAsanChunkByAddr(p);
+ if (!m) return 0;
+ if (m->chunk_state != CHUNK_ALLOCATED) return 0;
+ if (m->Beg() != p) return 0;
+ return m->UsedSize();
+ }
+
+ AsanChunkView FindHeapChunkByAddress(uptr addr) {
+ AsanChunk *m1 = GetAsanChunkByAddr(addr);
+ if (!m1) return AsanChunkView(m1);
+ sptr offset = 0;
+ if (AsanChunkView(m1).AddrIsAtLeft(addr, 1, &offset)) {
+      // The address is in the chunk's left redzone, so it may actually be a
+      // buffer overflow to the right, off the end of the chunk to the left.
+      // Search a bit to the left to see if there is another chunk.
+ AsanChunk *m2 = nullptr;
+ for (uptr l = 1; l < GetPageSizeCached(); l++) {
+ m2 = GetAsanChunkByAddr(addr - l);
+ if (m2 == m1) continue; // Still the same chunk.
+ break;
+ }
+ if (m2 && AsanChunkView(m2).AddrIsAtRight(addr, 1, &offset))
+ m1 = ChooseChunk(addr, m2, m1);
+ }
+ return AsanChunkView(m1);
+ }
+
+ void Purge(BufferedStackTrace *stack) {
+ AsanThread *t = GetCurrentThread();
+ if (t) {
+ AsanThreadLocalMallocStorage *ms = &t->malloc_storage();
+ quarantine.DrainAndRecycle(GetQuarantineCache(ms),
+ QuarantineCallback(GetAllocatorCache(ms),
+ stack));
+ }
+ {
+ SpinMutexLock l(&fallback_mutex);
+ quarantine.DrainAndRecycle(&fallback_quarantine_cache,
+ QuarantineCallback(&fallback_allocator_cache,
+ stack));
+ }
+
+ allocator.ForceReleaseToOS();
+ }
+
+ void PrintStats() {
+ allocator.PrintStats();
+ quarantine.PrintStats();
+ }
+
+ void ForceLock() {
+ allocator.ForceLock();
+ fallback_mutex.Lock();
+ }
+
+ void ForceUnlock() {
+ fallback_mutex.Unlock();
+ allocator.ForceUnlock();
+ }
+};
+
+static Allocator instance(LINKER_INITIALIZED);
+
+static AsanAllocator &get_allocator() {
+ return instance.allocator;
+}
+
+bool AsanChunkView::IsValid() const {
+ return chunk_ && chunk_->chunk_state != CHUNK_AVAILABLE;
+}
+bool AsanChunkView::IsAllocated() const {
+ return chunk_ && chunk_->chunk_state == CHUNK_ALLOCATED;
+}
+bool AsanChunkView::IsQuarantined() const {
+ return chunk_ && chunk_->chunk_state == CHUNK_QUARANTINE;
+}
+uptr AsanChunkView::Beg() const { return chunk_->Beg(); }
+uptr AsanChunkView::End() const { return Beg() + UsedSize(); }
+uptr AsanChunkView::UsedSize() const { return chunk_->UsedSize(); }
+u32 AsanChunkView::UserRequestedAlignment() const {
+ return Allocator::ComputeUserAlignment(chunk_->user_requested_alignment_log);
+}
+uptr AsanChunkView::AllocTid() const { return chunk_->alloc_tid; }
+uptr AsanChunkView::FreeTid() const { return chunk_->free_tid; }
+AllocType AsanChunkView::GetAllocType() const {
+ return (AllocType)chunk_->alloc_type;
+}
+
+static StackTrace GetStackTraceFromId(u32 id) {
+ CHECK(id);
+ StackTrace res = StackDepotGet(id);
+ CHECK(res.trace);
+ return res;
+}
+
+u32 AsanChunkView::GetAllocStackId() const { return chunk_->alloc_context_id; }
+u32 AsanChunkView::GetFreeStackId() const { return chunk_->free_context_id; }
+
+StackTrace AsanChunkView::GetAllocStack() const {
+ return GetStackTraceFromId(GetAllocStackId());
+}
+
+StackTrace AsanChunkView::GetFreeStack() const {
+ return GetStackTraceFromId(GetFreeStackId());
+}
+
+void InitializeAllocator(const AllocatorOptions &options) {
+ instance.InitLinkerInitialized(options);
+}
+
+void ReInitializeAllocator(const AllocatorOptions &options) {
+ instance.ReInitialize(options);
+}
+
+void GetAllocatorOptions(AllocatorOptions *options) {
+ instance.GetOptions(options);
+}
+
+AsanChunkView FindHeapChunkByAddress(uptr addr) {
+ return instance.FindHeapChunkByAddress(addr);
+}
+AsanChunkView FindHeapChunkByAllocBeg(uptr addr) {
+ return AsanChunkView(instance.GetAsanChunk(reinterpret_cast<void*>(addr)));
+}
+
+void AsanThreadLocalMallocStorage::CommitBack() {
+ GET_STACK_TRACE_MALLOC;
+ instance.CommitBack(this, &stack);
+}
+
+void PrintInternalAllocatorStats() {
+ instance.PrintStats();
+}
+
+void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type) {
+ instance.Deallocate(ptr, 0, 0, stack, alloc_type);
+}
+
+void asan_delete(void *ptr, uptr size, uptr alignment,
+ BufferedStackTrace *stack, AllocType alloc_type) {
+ instance.Deallocate(ptr, size, alignment, stack, alloc_type);
+}
+
+void *asan_malloc(uptr size, BufferedStackTrace *stack) {
+ return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
+}
+
+void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack) {
+ return SetErrnoOnNull(instance.Calloc(nmemb, size, stack));
+}
+
+void *asan_reallocarray(void *p, uptr nmemb, uptr size,
+ BufferedStackTrace *stack) {
+ if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
+ errno = errno_ENOMEM;
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportReallocArrayOverflow(nmemb, size, stack);
+ }
+ return asan_realloc(p, nmemb * size, stack);
+}
+
+void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack) {
+ if (!p)
+ return SetErrnoOnNull(instance.Allocate(size, 8, stack, FROM_MALLOC, true));
+ if (size == 0) {
+ if (flags()->allocator_frees_and_returns_null_on_realloc_zero) {
+ instance.Deallocate(p, 0, 0, stack, FROM_MALLOC);
+ return nullptr;
+ }
+    // If we aren't supposed to free() on realloc(p, 0), allocate 1 byte.
+ size = 1;
+ }
+ return SetErrnoOnNull(instance.Reallocate(p, size, stack));
+}
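+// Caller-visible sketch of the realloc(p, 0) contract: with
+// allocator_frees_and_returns_null_on_realloc_zero (on by default),
+//
+//   void *p = malloc(16);
+//   void *q = realloc(p, 0);  // frees p and yields q == nullptr
+//
+// whereas with the flag off the request is bumped to 1 byte and a fresh
+// allocation is returned.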
+
+void *asan_valloc(uptr size, BufferedStackTrace *stack) {
+ return SetErrnoOnNull(
+ instance.Allocate(size, GetPageSizeCached(), stack, FROM_MALLOC, true));
+}
+
+void *asan_pvalloc(uptr size, BufferedStackTrace *stack) {
+ uptr PageSize = GetPageSizeCached();
+ if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
+ errno = errno_ENOMEM;
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportPvallocOverflow(size, stack);
+ }
+ // pvalloc(0) should allocate one page.
+ size = size ? RoundUpTo(size, PageSize) : PageSize;
+ return SetErrnoOnNull(
+ instance.Allocate(size, PageSize, stack, FROM_MALLOC, true));
+}
+
+void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
+ AllocType alloc_type) {
+ if (UNLIKELY(!IsPowerOfTwo(alignment))) {
+ errno = errno_EINVAL;
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportInvalidAllocationAlignment(alignment, stack);
+ }
+ return SetErrnoOnNull(
+ instance.Allocate(size, alignment, stack, alloc_type, true));
+}
+
+void *asan_aligned_alloc(uptr alignment, uptr size, BufferedStackTrace *stack) {
+ if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
+ errno = errno_EINVAL;
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportInvalidAlignedAllocAlignment(size, alignment, stack);
+ }
+ return SetErrnoOnNull(
+ instance.Allocate(size, alignment, stack, FROM_MALLOC, true));
+}
+
+int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
+ BufferedStackTrace *stack) {
+ if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
+ if (AllocatorMayReturnNull())
+ return errno_EINVAL;
+ ReportInvalidPosixMemalignAlignment(alignment, stack);
+ }
+ void *ptr = instance.Allocate(size, alignment, stack, FROM_MALLOC, true);
+ if (UNLIKELY(!ptr))
+ // OOM error is already taken care of by Allocate.
+ return errno_ENOMEM;
+ CHECK(IsAligned((uptr)ptr, alignment));
+ *memptr = ptr;
+ return 0;
+}
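+// Note that, matching POSIX, asan_posix_memalign reports failure through its
+// return value (errno_EINVAL / errno_ENOMEM) and leaves *memptr and errno
+// untouched, unlike the malloc-style entry points above, which signal
+// failure through SetErrnoOnNull.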
+
+uptr asan_malloc_usable_size(const void *ptr, uptr pc, uptr bp) {
+ if (!ptr) return 0;
+ uptr usable_size = instance.AllocationSize(reinterpret_cast<uptr>(ptr));
+ if (flags()->check_malloc_usable_size && (usable_size == 0)) {
+ GET_STACK_TRACE_FATAL(pc, bp);
+ ReportMallocUsableSizeNotOwned((uptr)ptr, &stack);
+ }
+ return usable_size;
+}
+
+uptr asan_mz_size(const void *ptr) {
+ return instance.AllocationSize(reinterpret_cast<uptr>(ptr));
+}
+
+void asan_mz_force_lock() {
+ instance.ForceLock();
+}
+
+void asan_mz_force_unlock() {
+ instance.ForceUnlock();
+}
+
+void AsanSoftRssLimitExceededCallback(bool limit_exceeded) {
+ instance.SetRssLimitExceeded(limit_exceeded);
+}
+
+} // namespace __asan
+
+// --- Implementation of LSan-specific functions --- {{{1
+namespace __lsan {
+void LockAllocator() {
+ __asan::get_allocator().ForceLock();
+}
+
+void UnlockAllocator() {
+ __asan::get_allocator().ForceUnlock();
+}
+
+void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
+ *begin = (uptr)&__asan::get_allocator();
+ *end = *begin + sizeof(__asan::get_allocator());
+}
+
+uptr PointsIntoChunk(void* p) {
+ uptr addr = reinterpret_cast<uptr>(p);
+ __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(addr);
+ if (!m) return 0;
+ uptr chunk = m->Beg();
+ if (m->chunk_state != __asan::CHUNK_ALLOCATED)
+ return 0;
+ if (m->AddrIsInside(addr, /*locked_version=*/true))
+ return chunk;
+ if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(/*locked_version*/ true),
+ addr))
+ return chunk;
+ return 0;
+}
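+// A hedged reading of the special case above: "new T[0]" for T with a
+// destructor allocates a single word holding the array cookie (length 0)
+// and hands the program a pointer just past it, i.e. exactly at the chunk
+// end, so that pointer must still count as pointing into the chunk.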
+
+uptr GetUserBegin(uptr chunk) {
+ __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(chunk);
+ CHECK(m);
+ return m->Beg();
+}
+
+LsanMetadata::LsanMetadata(uptr chunk) {
+ metadata_ = reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize);
+}
+
+bool LsanMetadata::allocated() const {
+ __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
+ return m->chunk_state == __asan::CHUNK_ALLOCATED;
+}
+
+ChunkTag LsanMetadata::tag() const {
+ __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
+ return static_cast<ChunkTag>(m->lsan_tag);
+}
+
+void LsanMetadata::set_tag(ChunkTag value) {
+ __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
+ m->lsan_tag = value;
+}
+
+uptr LsanMetadata::requested_size() const {
+ __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
+ return m->UsedSize(/*locked_version=*/true);
+}
+
+u32 LsanMetadata::stack_trace_id() const {
+ __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
+ return m->alloc_context_id;
+}
+
+void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+ __asan::get_allocator().ForEachChunk(callback, arg);
+}
+
+IgnoreObjectResult IgnoreObjectLocked(const void *p) {
+ uptr addr = reinterpret_cast<uptr>(p);
+ __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddr(addr);
+ if (!m) return kIgnoreObjectInvalid;
+ if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr)) {
+ if (m->lsan_tag == kIgnored)
+ return kIgnoreObjectAlreadyIgnored;
+ m->lsan_tag = __lsan::kIgnored;
+ return kIgnoreObjectSuccess;
+ } else {
+ return kIgnoreObjectInvalid;
+ }
+}
+} // namespace __lsan
+
+// ---------------------- Interface ---------------- {{{1
+using namespace __asan; // NOLINT
+
+// The ASan allocator doesn't reserve extra bytes, so normally we would just
+// return "size". We don't want to expose our redzone sizes, etc., here.
+uptr __sanitizer_get_estimated_allocated_size(uptr size) {
+ return size;
+}
+
+int __sanitizer_get_ownership(const void *p) {
+ uptr ptr = reinterpret_cast<uptr>(p);
+ return instance.AllocationSize(ptr) > 0;
+}
+
+uptr __sanitizer_get_allocated_size(const void *p) {
+ if (!p) return 0;
+ uptr ptr = reinterpret_cast<uptr>(p);
+ uptr allocated_size = instance.AllocationSize(ptr);
+ // Die if p is not malloced or if it is already freed.
+ if (allocated_size == 0) {
+ GET_STACK_TRACE_FATAL_HERE;
+ ReportSanitizerGetAllocatedSizeNotOwned(ptr, &stack);
+ }
+ return allocated_size;
+}
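+// Usage sketch for the two queries above (client code; declarations come
+// from sanitizer/allocator_interface.h):
+//
+//   void *p = malloc(40);
+//   assert(__sanitizer_get_ownership(p));             // true for live ptrs
+//   assert(__sanitizer_get_allocated_size(p) == 40);  // user-requested size
+//   free(p);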
+
+void __sanitizer_purge_allocator() {
+ GET_STACK_TRACE_MALLOC;
+ instance.Purge(&stack);
+}
+
+#if !SANITIZER_SUPPORTS_WEAK_HOOKS
+// Provide default (no-op) implementation of malloc hooks.
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_malloc_hook,
+ void *ptr, uptr size) {
+ (void)ptr;
+ (void)size;
+}
+
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_free_hook, void *ptr) {
+ (void)ptr;
+}
+#endif
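+// Where weak hooks are supported, a client can observe every allocation by
+// providing strong definitions instead; a hedged sketch using the
+// user-facing declarations from sanitizer/allocator_interface.h (their
+// qualifiers differ slightly from the internal ones above):
+//
+//   extern "C" void __sanitizer_malloc_hook(const volatile void *ptr,
+//                                           size_t size) {
+//     fprintf(stderr, "alloc %p (%zu bytes)\n", (void *)ptr, size);
+//   }
+//   extern "C" void __sanitizer_free_hook(const volatile void *ptr) {
+//     fprintf(stderr, "free %p\n", (void *)ptr);
+//   }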
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_allocator.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_allocator.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_allocator.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_allocator.h (revision 351984)
@@ -0,0 +1,230 @@
+//===-- asan_allocator.h ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// ASan-private header for asan_allocator.cc.
+//===----------------------------------------------------------------------===//
+
+#ifndef ASAN_ALLOCATOR_H
+#define ASAN_ALLOCATOR_H
+
+#include "asan_flags.h"
+#include "asan_internal.h"
+#include "asan_interceptors.h"
+#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_list.h"
+
+namespace __asan {
+
+enum AllocType {
+ FROM_MALLOC = 1, // Memory block came from malloc, calloc, realloc, etc.
+ FROM_NEW = 2, // Memory block came from operator new.
+ FROM_NEW_BR = 3 // Memory block came from operator new [ ]
+};
+
+struct AsanChunk;
+
+struct AllocatorOptions {
+ u32 quarantine_size_mb;
+ u32 thread_local_quarantine_size_kb;
+ u16 min_redzone;
+ u16 max_redzone;
+ u8 may_return_null;
+ u8 alloc_dealloc_mismatch;
+ s32 release_to_os_interval_ms;
+
+ void SetFrom(const Flags *f, const CommonFlags *cf);
+ void CopyTo(Flags *f, CommonFlags *cf);
+};
+
+void InitializeAllocator(const AllocatorOptions &options);
+void ReInitializeAllocator(const AllocatorOptions &options);
+void GetAllocatorOptions(AllocatorOptions *options);
+
+class AsanChunkView {
+ public:
+ explicit AsanChunkView(AsanChunk *chunk) : chunk_(chunk) {}
+ bool IsValid() const; // Checks if AsanChunkView points to a valid
+ // allocated or quarantined chunk.
+ bool IsAllocated() const; // Checks if the memory is currently allocated.
+ bool IsQuarantined() const; // Checks if the memory is currently quarantined.
+ uptr Beg() const; // First byte of user memory.
+  uptr End() const;        // One byte past the end of user memory.
+ uptr UsedSize() const; // Size requested by the user.
+ u32 UserRequestedAlignment() const; // Originally requested alignment.
+ uptr AllocTid() const;
+ uptr FreeTid() const;
+ bool Eq(const AsanChunkView &c) const { return chunk_ == c.chunk_; }
+ u32 GetAllocStackId() const;
+ u32 GetFreeStackId() const;
+ StackTrace GetAllocStack() const;
+ StackTrace GetFreeStack() const;
+ AllocType GetAllocType() const;
+ bool AddrIsInside(uptr addr, uptr access_size, sptr *offset) const {
+ if (addr >= Beg() && (addr + access_size) <= End()) {
+ *offset = addr - Beg();
+ return true;
+ }
+ return false;
+ }
+ bool AddrIsAtLeft(uptr addr, uptr access_size, sptr *offset) const {
+ (void)access_size;
+ if (addr < Beg()) {
+ *offset = Beg() - addr;
+ return true;
+ }
+ return false;
+ }
+ bool AddrIsAtRight(uptr addr, uptr access_size, sptr *offset) const {
+ if (addr + access_size > End()) {
+ *offset = addr - End();
+ return true;
+ }
+ return false;
+ }
+
+ private:
+ AsanChunk *const chunk_;
+};
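+// Worked example of the offset conventions above for a hypothetical chunk
+// with Beg() == 0x1000 and UsedSize() == 16, so End() == 0x1010:
+//
+//   AddrIsInside(0x1008, 1, &off)  -> true, off == 8
+//   AddrIsAtLeft(0x0ffc, 1, &off)  -> true, off == 4  (bytes left of Beg())
+//   AddrIsAtRight(0x1012, 1, &off) -> true, off == 2  (bytes past End())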
+
+AsanChunkView FindHeapChunkByAddress(uptr address);
+AsanChunkView FindHeapChunkByAllocBeg(uptr address);
+
+// List of AsanChunks with total size.
+class AsanChunkFifoList: public IntrusiveList<AsanChunk> {
+ public:
+ explicit AsanChunkFifoList(LinkerInitialized) { }
+ AsanChunkFifoList() { clear(); }
+ void Push(AsanChunk *n);
+ void PushList(AsanChunkFifoList *q);
+ AsanChunk *Pop();
+ uptr size() { return size_; }
+ void clear() {
+ IntrusiveList<AsanChunk>::clear();
+ size_ = 0;
+ }
+ private:
+ uptr size_;
+};
+
+struct AsanMapUnmapCallback {
+ void OnMap(uptr p, uptr size) const;
+ void OnUnmap(uptr p, uptr size) const;
+};
+
+#if SANITIZER_CAN_USE_ALLOCATOR64
+# if SANITIZER_FUCHSIA
+const uptr kAllocatorSpace = ~(uptr)0;
+const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
+typedef DefaultSizeClassMap SizeClassMap;
+# elif defined(__powerpc64__)
+const uptr kAllocatorSpace = ~(uptr)0;
+const uptr kAllocatorSize = 0x20000000000ULL; // 2T.
+typedef DefaultSizeClassMap SizeClassMap;
+# elif defined(__aarch64__) && SANITIZER_ANDROID
+// Android needs to support 39, 42 and 48 bit VMA.
+const uptr kAllocatorSpace = ~(uptr)0;
+const uptr kAllocatorSize = 0x2000000000ULL; // 128G.
+typedef VeryCompactSizeClassMap SizeClassMap;
+# elif defined(__aarch64__)
+// AArch64/SANITIZER_CAN_USE_ALLOCATOR64 is only used for 42-bit VMA,
+// so there is no need for different values for different VMA sizes.
+const uptr kAllocatorSpace = 0x10000000000ULL;
+const uptr kAllocatorSize = 0x10000000000ULL; // 1T.
+typedef DefaultSizeClassMap SizeClassMap;
+# elif defined(__sparc__)
+const uptr kAllocatorSpace = ~(uptr)0;
+const uptr kAllocatorSize = 0x20000000000ULL; // 2T.
+typedef DefaultSizeClassMap SizeClassMap;
+# elif SANITIZER_WINDOWS
+const uptr kAllocatorSpace = ~(uptr)0;
+const uptr kAllocatorSize = 0x8000000000ULL; // 512G.
+typedef DefaultSizeClassMap SizeClassMap;
+# else
+const uptr kAllocatorSpace = 0x600000000000ULL;
+const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
+typedef DefaultSizeClassMap SizeClassMap;
+# endif
+template <typename AddressSpaceViewTy>
+struct AP64 { // Allocator64 parameters. Deliberately using a short name.
+ static const uptr kSpaceBeg = kAllocatorSpace;
+ static const uptr kSpaceSize = kAllocatorSize;
+ static const uptr kMetadataSize = 0;
+ typedef __asan::SizeClassMap SizeClassMap;
+ typedef AsanMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+ using AddressSpaceView = AddressSpaceViewTy;
+};
+
+template <typename AddressSpaceView>
+using PrimaryAllocatorASVT = SizeClassAllocator64<AP64<AddressSpaceView>>;
+using PrimaryAllocator = PrimaryAllocatorASVT<LocalAddressSpaceView>;
+#else // Fallback to SizeClassAllocator32.
+typedef CompactSizeClassMap SizeClassMap;
+template <typename AddressSpaceViewTy>
+struct AP32 {
+ static const uptr kSpaceBeg = 0;
+ static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
+ static const uptr kMetadataSize = 16;
+ typedef __asan::SizeClassMap SizeClassMap;
+ static const uptr kRegionSizeLog = 20;
+ using AddressSpaceView = AddressSpaceViewTy;
+ typedef AsanMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+};
+template <typename AddressSpaceView>
+using PrimaryAllocatorASVT = SizeClassAllocator32<AP32<AddressSpaceView> >;
+using PrimaryAllocator = PrimaryAllocatorASVT<LocalAddressSpaceView>;
+#endif // SANITIZER_CAN_USE_ALLOCATOR64
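+// For example, with the generic 64-bit parameters above the primary
+// allocator manages the fixed range [0x600000000000, 0x640000000000) (4T
+// from kAllocatorSpace); requests that don't fit a size class fall through
+// to the secondary, mmap-based allocator inside CombinedAllocator below.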
+
+static const uptr kNumberOfSizeClasses = SizeClassMap::kNumClasses;
+
+template <typename AddressSpaceView>
+using AsanAllocatorASVT =
+ CombinedAllocator<PrimaryAllocatorASVT<AddressSpaceView>>;
+using AsanAllocator = AsanAllocatorASVT<LocalAddressSpaceView>;
+using AllocatorCache = AsanAllocator::AllocatorCache;
+
+struct AsanThreadLocalMallocStorage {
+ uptr quarantine_cache[16];
+ AllocatorCache allocator_cache;
+ void CommitBack();
+ private:
+ // These objects are allocated via mmap() and are zero-initialized.
+ AsanThreadLocalMallocStorage() {}
+};
+
+void *asan_memalign(uptr alignment, uptr size, BufferedStackTrace *stack,
+ AllocType alloc_type);
+void asan_free(void *ptr, BufferedStackTrace *stack, AllocType alloc_type);
+void asan_delete(void *ptr, uptr size, uptr alignment,
+ BufferedStackTrace *stack, AllocType alloc_type);
+
+void *asan_malloc(uptr size, BufferedStackTrace *stack);
+void *asan_calloc(uptr nmemb, uptr size, BufferedStackTrace *stack);
+void *asan_realloc(void *p, uptr size, BufferedStackTrace *stack);
+void *asan_reallocarray(void *p, uptr nmemb, uptr size,
+ BufferedStackTrace *stack);
+void *asan_valloc(uptr size, BufferedStackTrace *stack);
+void *asan_pvalloc(uptr size, BufferedStackTrace *stack);
+
+void *asan_aligned_alloc(uptr alignment, uptr size, BufferedStackTrace *stack);
+int asan_posix_memalign(void **memptr, uptr alignment, uptr size,
+ BufferedStackTrace *stack);
+uptr asan_malloc_usable_size(const void *ptr, uptr pc, uptr bp);
+
+uptr asan_mz_size(const void *ptr);
+void asan_mz_force_lock();
+void asan_mz_force_unlock();
+
+void PrintInternalAllocatorStats();
+void AsanSoftRssLimitExceededCallback(bool exceeded);
+
+} // namespace __asan
+#endif // ASAN_ALLOCATOR_H
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_debugging.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_debugging.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_debugging.cc (revision 351984)
@@ -0,0 +1,146 @@
+//===-- asan_debugging.cc -------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// This file contains various functions that are generally useful to call when
+// using a debugger (LLDB, GDB).
+//===----------------------------------------------------------------------===//
+
+#include "asan_allocator.h"
+#include "asan_descriptions.h"
+#include "asan_flags.h"
+#include "asan_internal.h"
+#include "asan_mapping.h"
+#include "asan_report.h"
+#include "asan_thread.h"
+
+namespace {
+using namespace __asan;
+
+static void FindInfoForStackVar(uptr addr, const char *frame_descr, uptr offset,
+ char *name, uptr name_size,
+ uptr &region_address, uptr &region_size) {
+ InternalMmapVector<StackVarDescr> vars;
+ vars.reserve(16);
+ if (!ParseFrameDescription(frame_descr, &vars)) {
+ return;
+ }
+
+ for (uptr i = 0; i < vars.size(); i++) {
+ if (offset <= vars[i].beg + vars[i].size) {
+    // Pass name_len + 1 as the copy size: internal_strlcpy reserves one
+    // byte of it for the terminating '\0', so the whole name is copied
+    // whenever name_size allows.
+ internal_strlcpy(name, vars[i].name_pos,
+ Min(name_size, vars[i].name_len + 1));
+ region_address = addr - (offset - vars[i].beg);
+ region_size = vars[i].size;
+ return;
+ }
+ }
+}
+
+uptr AsanGetStack(uptr addr, uptr *trace, u32 size, u32 *thread_id,
+ bool alloc_stack) {
+ AsanChunkView chunk = FindHeapChunkByAddress(addr);
+ if (!chunk.IsValid()) return 0;
+
+ StackTrace stack(nullptr, 0);
+ if (alloc_stack) {
+ if (chunk.AllocTid() == kInvalidTid) return 0;
+ stack = chunk.GetAllocStack();
+ if (thread_id) *thread_id = chunk.AllocTid();
+ } else {
+ if (chunk.FreeTid() == kInvalidTid) return 0;
+ stack = chunk.GetFreeStack();
+ if (thread_id) *thread_id = chunk.FreeTid();
+ }
+
+ if (trace && size) {
+ size = Min(size, Min(stack.size, kStackTraceMax));
+ for (uptr i = 0; i < size; i++)
+ trace[i] = StackTrace::GetPreviousInstructionPc(stack.trace[i]);
+
+ return size;
+ }
+
+ return 0;
+}
+
+} // namespace
+
+SANITIZER_INTERFACE_ATTRIBUTE
+const char *__asan_locate_address(uptr addr, char *name, uptr name_size,
+ uptr *region_address_ptr,
+ uptr *region_size_ptr) {
+ AddressDescription descr(addr);
+ uptr region_address = 0;
+ uptr region_size = 0;
+ const char *region_kind = nullptr;
+ if (name && name_size > 0) name[0] = 0;
+
+ if (auto shadow = descr.AsShadow()) {
+ // region_{address,size} are already 0
+ switch (shadow->kind) {
+ case kShadowKindLow:
+ region_kind = "low shadow";
+ break;
+ case kShadowKindGap:
+ region_kind = "shadow gap";
+ break;
+ case kShadowKindHigh:
+ region_kind = "high shadow";
+ break;
+ }
+ } else if (auto heap = descr.AsHeap()) {
+ region_kind = "heap";
+ region_address = heap->chunk_access.chunk_begin;
+ region_size = heap->chunk_access.chunk_size;
+ } else if (auto stack = descr.AsStack()) {
+ region_kind = "stack";
+ if (!stack->frame_descr) {
+ // region_{address,size} are already 0
+ } else {
+ FindInfoForStackVar(addr, stack->frame_descr, stack->offset, name,
+ name_size, region_address, region_size);
+ }
+ } else if (auto global = descr.AsGlobal()) {
+ region_kind = "global";
+ auto &g = global->globals[0];
+ internal_strlcpy(name, g.name, name_size);
+ region_address = g.beg;
+ region_size = g.size;
+ } else {
+ // region_{address,size} are already 0
+ region_kind = "heap-invalid";
+ }
+
+ CHECK(region_kind);
+ if (region_address_ptr) *region_address_ptr = region_address;
+ if (region_size_ptr) *region_size_ptr = region_size;
+ return region_kind;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __asan_get_alloc_stack(uptr addr, uptr *trace, uptr size, u32 *thread_id) {
+ return AsanGetStack(addr, trace, size, thread_id, /* alloc_stack */ true);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __asan_get_free_stack(uptr addr, uptr *trace, uptr size, u32 *thread_id) {
+ return AsanGetStack(addr, trace, size, thread_id, /* alloc_stack */ false);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __asan_get_shadow_mapping(uptr *shadow_scale, uptr *shadow_offset) {
+ if (shadow_scale)
+ *shadow_scale = SHADOW_SCALE;
+ if (shadow_offset)
+ *shadow_offset = SHADOW_OFFSET;
+}
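+// Debugger-oriented usage sketch (declarations from
+// sanitizer/asan_interface.h):
+//
+//   char name[128];
+//   void *region;
+//   size_t size;
+//   const char *kind = __asan_locate_address(p, name, sizeof(name),
+//                                            &region, &size);
+//   // kind is e.g. "heap", "stack", "global", "low shadow", ...
+//
+// The mapping reported by __asan_get_shadow_mapping translates an
+// application address to its shadow byte as:
+//   shadow_addr = (addr >> shadow_scale) + shadow_offset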
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_debugging.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_descriptions.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_descriptions.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_descriptions.cc (revision 351984)
@@ -0,0 +1,501 @@
+//===-- asan_descriptions.cc ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// ASan functions for getting information about an address and/or printing it.
+//===----------------------------------------------------------------------===//
+
+#include "asan_descriptions.h"
+#include "asan_mapping.h"
+#include "asan_report.h"
+#include "asan_stack.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+
+namespace __asan {
+
+AsanThreadIdAndName::AsanThreadIdAndName(AsanThreadContext *t) {
+ Init(t->tid, t->name);
+}
+
+AsanThreadIdAndName::AsanThreadIdAndName(u32 tid) {
+ if (tid == kInvalidTid) {
+ Init(tid, "");
+ } else {
+ asanThreadRegistry().CheckLocked();
+ AsanThreadContext *t = GetThreadContextByTidLocked(tid);
+ Init(tid, t->name);
+ }
+}
+
+void AsanThreadIdAndName::Init(u32 tid, const char *tname) {
+ int len = internal_snprintf(name, sizeof(name), "T%d", tid);
+ CHECK(((unsigned int)len) < sizeof(name));
+ if (tname[0] != '\0')
+ internal_snprintf(&name[len], sizeof(name) - len, " (%s)", tname);
+}
+
+void DescribeThread(AsanThreadContext *context) {
+ CHECK(context);
+ asanThreadRegistry().CheckLocked();
+ // No need to announce the main thread.
+ if (context->tid == 0 || context->announced) {
+ return;
+ }
+ context->announced = true;
+ InternalScopedString str(1024);
+ str.append("Thread %s", AsanThreadIdAndName(context).c_str());
+ if (context->parent_tid == kInvalidTid) {
+ str.append(" created by unknown thread\n");
+ Printf("%s", str.data());
+ return;
+ }
+ str.append(" created by %s here:\n",
+ AsanThreadIdAndName(context->parent_tid).c_str());
+ Printf("%s", str.data());
+ StackDepotGet(context->stack_id).Print();
+  // Recursively describe the parent thread if needed.
+ if (flags()->print_full_thread_history) {
+ AsanThreadContext *parent_context =
+ GetThreadContextByTidLocked(context->parent_tid);
+ DescribeThread(parent_context);
+ }
+}
+
+// Shadow descriptions
+static bool GetShadowKind(uptr addr, ShadowKind *shadow_kind) {
+ CHECK(!AddrIsInMem(addr));
+ if (AddrIsInShadowGap(addr)) {
+ *shadow_kind = kShadowKindGap;
+ } else if (AddrIsInHighShadow(addr)) {
+ *shadow_kind = kShadowKindHigh;
+ } else if (AddrIsInLowShadow(addr)) {
+ *shadow_kind = kShadowKindLow;
+ } else {
+ CHECK(0 && "Address is not in memory and not in shadow?");
+ return false;
+ }
+ return true;
+}
+
+bool DescribeAddressIfShadow(uptr addr) {
+ ShadowAddressDescription descr;
+ if (!GetShadowAddressInformation(addr, &descr)) return false;
+ descr.Print();
+ return true;
+}
+
+bool GetShadowAddressInformation(uptr addr, ShadowAddressDescription *descr) {
+ if (AddrIsInMem(addr)) return false;
+ ShadowKind shadow_kind;
+ if (!GetShadowKind(addr, &shadow_kind)) return false;
+ if (shadow_kind != kShadowKindGap) descr->shadow_byte = *(u8 *)addr;
+ descr->addr = addr;
+ descr->kind = shadow_kind;
+ return true;
+}
+
+// Heap descriptions
+static void GetAccessToHeapChunkInformation(ChunkAccess *descr,
+ AsanChunkView chunk, uptr addr,
+ uptr access_size) {
+ descr->bad_addr = addr;
+ if (chunk.AddrIsAtLeft(addr, access_size, &descr->offset)) {
+ descr->access_type = kAccessTypeLeft;
+ } else if (chunk.AddrIsAtRight(addr, access_size, &descr->offset)) {
+ descr->access_type = kAccessTypeRight;
+ if (descr->offset < 0) {
+ descr->bad_addr -= descr->offset;
+ descr->offset = 0;
+ }
+ } else if (chunk.AddrIsInside(addr, access_size, &descr->offset)) {
+ descr->access_type = kAccessTypeInside;
+ } else {
+ descr->access_type = kAccessTypeUnknown;
+ }
+ descr->chunk_begin = chunk.Beg();
+ descr->chunk_size = chunk.UsedSize();
+ descr->user_requested_alignment = chunk.UserRequestedAlignment();
+ descr->alloc_type = chunk.GetAllocType();
+}
+
+static void PrintHeapChunkAccess(uptr addr, const ChunkAccess &descr) {
+ Decorator d;
+ InternalScopedString str(4096);
+ str.append("%s", d.Location());
+ switch (descr.access_type) {
+ case kAccessTypeLeft:
+ str.append("%p is located %zd bytes to the left of",
+ (void *)descr.bad_addr, descr.offset);
+ break;
+ case kAccessTypeRight:
+ str.append("%p is located %zd bytes to the right of",
+ (void *)descr.bad_addr, descr.offset);
+ break;
+ case kAccessTypeInside:
+ str.append("%p is located %zd bytes inside of", (void *)descr.bad_addr,
+ descr.offset);
+ break;
+    case kAccessTypeUnknown:
+      str.append(
+          "%p is located somewhere around (this is an AddressSanitizer bug!)",
+          (void *)descr.bad_addr);
+ }
+ str.append(" %zu-byte region [%p,%p)\n", descr.chunk_size,
+ (void *)descr.chunk_begin,
+ (void *)(descr.chunk_begin + descr.chunk_size));
+ str.append("%s", d.Default());
+ Printf("%s", str.data());
+}
+
+bool GetHeapAddressInformation(uptr addr, uptr access_size,
+ HeapAddressDescription *descr) {
+ AsanChunkView chunk = FindHeapChunkByAddress(addr);
+ if (!chunk.IsValid()) {
+ return false;
+ }
+ descr->addr = addr;
+ GetAccessToHeapChunkInformation(&descr->chunk_access, chunk, addr,
+ access_size);
+ CHECK_NE(chunk.AllocTid(), kInvalidTid);
+ descr->alloc_tid = chunk.AllocTid();
+ descr->alloc_stack_id = chunk.GetAllocStackId();
+ descr->free_tid = chunk.FreeTid();
+ if (descr->free_tid != kInvalidTid)
+ descr->free_stack_id = chunk.GetFreeStackId();
+ return true;
+}
+
+static StackTrace GetStackTraceFromId(u32 id) {
+ CHECK(id);
+ StackTrace res = StackDepotGet(id);
+ CHECK(res.trace);
+ return res;
+}
+
+bool DescribeAddressIfHeap(uptr addr, uptr access_size) {
+ HeapAddressDescription descr;
+ if (!GetHeapAddressInformation(addr, access_size, &descr)) {
+ Printf(
+ "AddressSanitizer can not describe address in more detail "
+ "(wild memory access suspected).\n");
+ return false;
+ }
+ descr.Print();
+ return true;
+}
+
+// Stack descriptions
+bool GetStackAddressInformation(uptr addr, uptr access_size,
+ StackAddressDescription *descr) {
+ AsanThread *t = FindThreadByStackAddress(addr);
+ if (!t) return false;
+
+ descr->addr = addr;
+ descr->tid = t->tid();
+ // Try to fetch precise stack frame for this access.
+ AsanThread::StackFrameAccess access;
+ if (!t->GetStackFrameAccessByAddr(addr, &access)) {
+ descr->frame_descr = nullptr;
+ return true;
+ }
+
+ descr->offset = access.offset;
+ descr->access_size = access_size;
+ descr->frame_pc = access.frame_pc;
+ descr->frame_descr = access.frame_descr;
+
+#if SANITIZER_PPC64V1
+ // On PowerPC64 ELFv1, the address of a function actually points to a
+ // three-doubleword data structure with the first field containing
+ // the address of the function's code.
+ descr->frame_pc = *reinterpret_cast<uptr *>(descr->frame_pc);
+#endif
+ descr->frame_pc += 16;
+
+ return true;
+}
+
+static void PrintAccessAndVarIntersection(const StackVarDescr &var, uptr addr,
+ uptr access_size, uptr prev_var_end,
+ uptr next_var_beg) {
+ uptr var_end = var.beg + var.size;
+ uptr addr_end = addr + access_size;
+ const char *pos_descr = nullptr;
+ // If the variable [var.beg, var_end) is the nearest variable to the
+ // current memory access, indicate it in the log.
+ if (addr >= var.beg) {
+ if (addr_end <= var_end)
+ pos_descr = "is inside"; // May happen if this is a use-after-return.
+ else if (addr < var_end)
+ pos_descr = "partially overflows";
+ else if (addr_end <= next_var_beg &&
+ next_var_beg - addr_end >= addr - var_end)
+ pos_descr = "overflows";
+ } else {
+ if (addr_end > var.beg)
+ pos_descr = "partially underflows";
+ else if (addr >= prev_var_end && addr - prev_var_end >= var.beg - addr_end)
+ pos_descr = "underflows";
+ }
+ InternalScopedString str(1024);
+ str.append(" [%zd, %zd)", var.beg, var_end);
+ // Render variable name.
+ str.append(" '");
+ for (uptr i = 0; i < var.name_len; ++i) {
+ str.append("%c", var.name_pos[i]);
+ }
+ str.append("'");
+ if (var.line > 0) {
+ str.append(" (line %d)", var.line);
+ }
+ if (pos_descr) {
+ Decorator d;
+ // FIXME: we may want to also print the size of the access here,
+ // but in case of accesses generated by memset it may be confusing.
+ str.append("%s <== Memory access at offset %zd %s this variable%s\n",
+ d.Location(), addr, pos_descr, d.Default());
+ } else {
+ str.append("\n");
+ }
+ Printf("%s", str.data());
+}
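+// For example, given a variable spanning [32, 48): an access at offset 44 of
+// size 8 "partially overflows" it, while an access at offset 52 of size 4
+// plainly "overflows" it, provided the next variable starts far enough to
+// the right.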
+
+bool DescribeAddressIfStack(uptr addr, uptr access_size) {
+ StackAddressDescription descr;
+ if (!GetStackAddressInformation(addr, access_size, &descr)) return false;
+ descr.Print();
+ return true;
+}
+
+// Global descriptions
+static void DescribeAddressRelativeToGlobal(uptr addr, uptr access_size,
+ const __asan_global &g) {
+ InternalScopedString str(4096);
+ Decorator d;
+ str.append("%s", d.Location());
+ if (addr < g.beg) {
+ str.append("%p is located %zd bytes to the left", (void *)addr,
+ g.beg - addr);
+ } else if (addr + access_size > g.beg + g.size) {
+ if (addr < g.beg + g.size) addr = g.beg + g.size;
+ str.append("%p is located %zd bytes to the right", (void *)addr,
+ addr - (g.beg + g.size));
+ } else {
+ // Can it happen?
+ str.append("%p is located %zd bytes inside", (void *)addr, addr - g.beg);
+ }
+ str.append(" of global variable '%s' defined in '",
+ MaybeDemangleGlobalName(g.name));
+ PrintGlobalLocation(&str, g);
+ str.append("' (0x%zx) of size %zu\n", g.beg, g.size);
+ str.append("%s", d.Default());
+ PrintGlobalNameIfASCII(&str, g);
+ Printf("%s", str.data());
+}
+
+bool GetGlobalAddressInformation(uptr addr, uptr access_size,
+ GlobalAddressDescription *descr) {
+ descr->addr = addr;
+ int globals_num = GetGlobalsForAddress(addr, descr->globals, descr->reg_sites,
+ ARRAY_SIZE(descr->globals));
+ descr->size = globals_num;
+ descr->access_size = access_size;
+ return globals_num != 0;
+}
+
+bool DescribeAddressIfGlobal(uptr addr, uptr access_size,
+ const char *bug_type) {
+ GlobalAddressDescription descr;
+ if (!GetGlobalAddressInformation(addr, access_size, &descr)) return false;
+
+ descr.Print(bug_type);
+ return true;
+}
+
+void ShadowAddressDescription::Print() const {
+ Printf("Address %p is located in the %s area.\n", addr, ShadowNames[kind]);
+}
+
+void GlobalAddressDescription::Print(const char *bug_type) const {
+ for (int i = 0; i < size; i++) {
+ DescribeAddressRelativeToGlobal(addr, access_size, globals[i]);
+ if (bug_type &&
+ 0 == internal_strcmp(bug_type, "initialization-order-fiasco") &&
+ reg_sites[i]) {
+ Printf(" registered at:\n");
+ StackDepotGet(reg_sites[i]).Print();
+ }
+ }
+}
+
+bool GlobalAddressDescription::PointsInsideTheSameVariable(
+ const GlobalAddressDescription &other) const {
+ if (size == 0 || other.size == 0) return false;
+
+ for (uptr i = 0; i < size; i++) {
+ const __asan_global &a = globals[i];
+ for (uptr j = 0; j < other.size; j++) {
+ const __asan_global &b = other.globals[j];
+ if (a.beg == b.beg &&
+ a.beg <= addr &&
+ b.beg <= other.addr &&
+ (addr + access_size) < (a.beg + a.size) &&
+ (other.addr + other.access_size) < (b.beg + b.size))
+ return true;
+ }
+ }
+
+ return false;
+}
+
+void StackAddressDescription::Print() const {
+ Decorator d;
+ Printf("%s", d.Location());
+ Printf("Address %p is located in stack of thread %s", addr,
+ AsanThreadIdAndName(tid).c_str());
+
+ if (!frame_descr) {
+ Printf("%s\n", d.Default());
+ return;
+ }
+ Printf(" at offset %zu in frame%s\n", offset, d.Default());
+
+ // Now we print the frame where the alloca has happened.
+ // We print this frame as a stack trace with one element.
+ // The symbolizer may print more than one frame if inlining was involved.
+ // The frame numbers may be different than those in the stack trace printed
+ // previously. That's unfortunate, but I have no better solution,
+ // especially given that the alloca may be from entirely different place
+ // (e.g. use-after-scope, or different thread's stack).
+ Printf("%s", d.Default());
+ StackTrace alloca_stack(&frame_pc, 1);
+ alloca_stack.Print();
+
+ InternalMmapVector<StackVarDescr> vars;
+ vars.reserve(16);
+ if (!ParseFrameDescription(frame_descr, &vars)) {
+ Printf(
+ "AddressSanitizer can't parse the stack frame "
+ "descriptor: |%s|\n",
+ frame_descr);
+    // 'addr' is known to be a stack address, so even without a parsed frame
+    // description this is the most detail we can print.
+ return;
+ }
+ uptr n_objects = vars.size();
+ // Report the number of stack objects.
+ Printf(" This frame has %zu object(s):\n", n_objects);
+
+ // Report all objects in this frame.
+ for (uptr i = 0; i < n_objects; i++) {
+ uptr prev_var_end = i ? vars[i - 1].beg + vars[i - 1].size : 0;
+ uptr next_var_beg = i + 1 < n_objects ? vars[i + 1].beg : ~(0UL);
+ PrintAccessAndVarIntersection(vars[i], offset, access_size, prev_var_end,
+ next_var_beg);
+ }
+ Printf(
+ "HINT: this may be a false positive if your program uses "
+ "some custom stack unwind mechanism, swapcontext or vfork\n");
+ if (SANITIZER_WINDOWS)
+ Printf(" (longjmp, SEH and C++ exceptions *are* supported)\n");
+ else
+ Printf(" (longjmp and C++ exceptions *are* supported)\n");
+
+ DescribeThread(GetThreadContextByTidLocked(tid));
+}
+
+void HeapAddressDescription::Print() const {
+ PrintHeapChunkAccess(addr, chunk_access);
+
+ asanThreadRegistry().CheckLocked();
+ AsanThreadContext *alloc_thread = GetThreadContextByTidLocked(alloc_tid);
+ StackTrace alloc_stack = GetStackTraceFromId(alloc_stack_id);
+
+ Decorator d;
+ AsanThreadContext *free_thread = nullptr;
+ if (free_tid != kInvalidTid) {
+ free_thread = GetThreadContextByTidLocked(free_tid);
+ Printf("%sfreed by thread %s here:%s\n", d.Allocation(),
+ AsanThreadIdAndName(free_thread).c_str(), d.Default());
+ StackTrace free_stack = GetStackTraceFromId(free_stack_id);
+ free_stack.Print();
+ Printf("%spreviously allocated by thread %s here:%s\n", d.Allocation(),
+ AsanThreadIdAndName(alloc_thread).c_str(), d.Default());
+ } else {
+ Printf("%sallocated by thread %s here:%s\n", d.Allocation(),
+ AsanThreadIdAndName(alloc_thread).c_str(), d.Default());
+ }
+ alloc_stack.Print();
+ DescribeThread(GetCurrentThread());
+ if (free_thread) DescribeThread(free_thread);
+ DescribeThread(alloc_thread);
+}
+
+AddressDescription::AddressDescription(uptr addr, uptr access_size,
+ bool shouldLockThreadRegistry) {
+ if (GetShadowAddressInformation(addr, &data.shadow)) {
+ data.kind = kAddressKindShadow;
+ return;
+ }
+ if (GetHeapAddressInformation(addr, access_size, &data.heap)) {
+ data.kind = kAddressKindHeap;
+ return;
+ }
+
+ bool isStackMemory = false;
+ if (shouldLockThreadRegistry) {
+ ThreadRegistryLock l(&asanThreadRegistry());
+ isStackMemory = GetStackAddressInformation(addr, access_size, &data.stack);
+ } else {
+ isStackMemory = GetStackAddressInformation(addr, access_size, &data.stack);
+ }
+ if (isStackMemory) {
+ data.kind = kAddressKindStack;
+ return;
+ }
+
+ if (GetGlobalAddressInformation(addr, access_size, &data.global)) {
+ data.kind = kAddressKindGlobal;
+ return;
+ }
+  data.kind = kAddressKindWild;
+  data.addr = addr;  // Record the wild address so Print() can report it.
+}
+
+void PrintAddressDescription(uptr addr, uptr access_size,
+ const char *bug_type) {
+ ShadowAddressDescription shadow_descr;
+ if (GetShadowAddressInformation(addr, &shadow_descr)) {
+ shadow_descr.Print();
+ return;
+ }
+
+ GlobalAddressDescription global_descr;
+ if (GetGlobalAddressInformation(addr, access_size, &global_descr)) {
+ global_descr.Print(bug_type);
+ return;
+ }
+
+ StackAddressDescription stack_descr;
+ if (GetStackAddressInformation(addr, access_size, &stack_descr)) {
+ stack_descr.Print();
+ return;
+ }
+
+ HeapAddressDescription heap_descr;
+ if (GetHeapAddressInformation(addr, access_size, &heap_descr)) {
+ heap_descr.Print();
+ return;
+ }
+
+ // We exhausted our possibilities. Bail out.
+ Printf(
+ "AddressSanitizer can not describe address in more detail "
+ "(wild memory access suspected).\n");
+}
+} // namespace __asan
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_descriptions.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_descriptions.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_descriptions.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_descriptions.h (revision 351984)
@@ -0,0 +1,262 @@
+//===-- asan_descriptions.h -------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// ASan-private header for asan_descriptions.cc.
+// TODO(filcab): Most struct definitions should move to the interface headers.
+//===----------------------------------------------------------------------===//
+#ifndef ASAN_DESCRIPTIONS_H
+#define ASAN_DESCRIPTIONS_H
+
+#include "asan_allocator.h"
+#include "asan_thread.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_report_decorator.h"
+
+namespace __asan {
+
+void DescribeThread(AsanThreadContext *context);
+static inline void DescribeThread(AsanThread *t) {
+ if (t) DescribeThread(t->context());
+}
+
+class AsanThreadIdAndName {
+ public:
+ explicit AsanThreadIdAndName(AsanThreadContext *t);
+ explicit AsanThreadIdAndName(u32 tid);
+
+ // Contains "T%tid (%name)" or "T%tid" if the name is empty.
+ const char *c_str() const { return &name[0]; }
+
+ private:
+ void Init(u32 tid, const char *tname);
+
+ char name[128];
+};
+
+class Decorator : public __sanitizer::SanitizerCommonDecorator {
+ public:
+ Decorator() : SanitizerCommonDecorator() {}
+ const char *Access() { return Blue(); }
+ const char *Location() { return Green(); }
+ const char *Allocation() { return Magenta(); }
+
+ const char *ShadowByte(u8 byte) {
+ switch (byte) {
+ case kAsanHeapLeftRedzoneMagic:
+ case kAsanArrayCookieMagic:
+ return Red();
+ case kAsanHeapFreeMagic:
+ return Magenta();
+ case kAsanStackLeftRedzoneMagic:
+ case kAsanStackMidRedzoneMagic:
+ case kAsanStackRightRedzoneMagic:
+ return Red();
+ case kAsanStackAfterReturnMagic:
+ return Magenta();
+ case kAsanInitializationOrderMagic:
+ return Cyan();
+ case kAsanUserPoisonedMemoryMagic:
+ case kAsanContiguousContainerOOBMagic:
+ case kAsanAllocaLeftMagic:
+ case kAsanAllocaRightMagic:
+ return Blue();
+ case kAsanStackUseAfterScopeMagic:
+ return Magenta();
+ case kAsanGlobalRedzoneMagic:
+ return Red();
+ case kAsanInternalHeapMagic:
+ return Yellow();
+ case kAsanIntraObjectRedzone:
+ return Yellow();
+ default:
+ return Default();
+ }
+ }
+};
+
+enum ShadowKind : u8 {
+ kShadowKindLow,
+ kShadowKindGap,
+ kShadowKindHigh,
+};
+static const char *const ShadowNames[] = {"low shadow", "shadow gap",
+ "high shadow"};
+
+struct ShadowAddressDescription {
+ uptr addr;
+ ShadowKind kind;
+ u8 shadow_byte;
+
+ void Print() const;
+};
+
+bool GetShadowAddressInformation(uptr addr, ShadowAddressDescription *descr);
+bool DescribeAddressIfShadow(uptr addr);
+
+enum AccessType {
+ kAccessTypeLeft,
+ kAccessTypeRight,
+ kAccessTypeInside,
+ kAccessTypeUnknown, // This means we have an AddressSanitizer bug!
+};
+
+struct ChunkAccess {
+ uptr bad_addr;
+ sptr offset;
+ uptr chunk_begin;
+ uptr chunk_size;
+ u32 user_requested_alignment : 12;
+ u32 access_type : 2;
+ u32 alloc_type : 2;
+};
+
+struct HeapAddressDescription {
+ uptr addr;
+ uptr alloc_tid;
+ uptr free_tid;
+ u32 alloc_stack_id;
+ u32 free_stack_id;
+ ChunkAccess chunk_access;
+
+ void Print() const;
+};
+
+bool GetHeapAddressInformation(uptr addr, uptr access_size,
+ HeapAddressDescription *descr);
+bool DescribeAddressIfHeap(uptr addr, uptr access_size = 1);
+
+struct StackAddressDescription {
+ uptr addr;
+ uptr tid;
+ uptr offset;
+ uptr frame_pc;
+ uptr access_size;
+ const char *frame_descr;
+
+ void Print() const;
+};
+
+bool GetStackAddressInformation(uptr addr, uptr access_size,
+ StackAddressDescription *descr);
+
+struct GlobalAddressDescription {
+ uptr addr;
+ // Assume address is close to at most four globals.
+ static const int kMaxGlobals = 4;
+ __asan_global globals[kMaxGlobals];
+ u32 reg_sites[kMaxGlobals];
+ uptr access_size;
+ u8 size;
+
+ void Print(const char *bug_type = "") const;
+
+  // Returns true when this description points inside the same global
+  // variable as the other one; the two descriptions may have different
+  // addresses within that variable.
+};
+
+bool GetGlobalAddressInformation(uptr addr, uptr access_size,
+ GlobalAddressDescription *descr);
+bool DescribeAddressIfGlobal(uptr addr, uptr access_size, const char *bug_type);
+
+// General function to describe an address. Will try to describe the address as
+// a shadow, global (variable), stack, or heap address.
+// bug_type is optional and is used for checking if we're reporting an
+// initialization-order-fiasco
+// The proper access_size should be passed for stack, global, and heap
+// addresses. Defaults to 1.
+// Each of the *AddressDescription functions has its own Print() member, which
+// may take access_size and bug_type parameters if needed.
+void PrintAddressDescription(uptr addr, uptr access_size = 1,
+ const char *bug_type = "");
+
+enum AddressKind {
+ kAddressKindWild,
+ kAddressKindShadow,
+ kAddressKindHeap,
+ kAddressKindStack,
+ kAddressKindGlobal,
+};
+
+class AddressDescription {
+ struct AddressDescriptionData {
+ AddressKind kind;
+ union {
+ ShadowAddressDescription shadow;
+ HeapAddressDescription heap;
+ StackAddressDescription stack;
+ GlobalAddressDescription global;
+ uptr addr;
+ };
+ };
+
+ AddressDescriptionData data;
+
+ public:
+ AddressDescription() = default;
+  // shouldLockThreadRegistry allows us to skip locking if we're sure we
+  // have already done it.
+ AddressDescription(uptr addr, bool shouldLockThreadRegistry = true)
+ : AddressDescription(addr, 1, shouldLockThreadRegistry) {}
+ AddressDescription(uptr addr, uptr access_size,
+ bool shouldLockThreadRegistry = true);
+
+ uptr Address() const {
+ switch (data.kind) {
+ case kAddressKindWild:
+ return data.addr;
+ case kAddressKindShadow:
+ return data.shadow.addr;
+ case kAddressKindHeap:
+ return data.heap.addr;
+ case kAddressKindStack:
+ return data.stack.addr;
+ case kAddressKindGlobal:
+ return data.global.addr;
+ }
+ UNREACHABLE("AddressInformation kind is invalid");
+ }
+ void Print(const char *bug_descr = nullptr) const {
+ switch (data.kind) {
+ case kAddressKindWild:
+ Printf("Address %p is a wild pointer.\n", data.addr);
+ return;
+ case kAddressKindShadow:
+ return data.shadow.Print();
+ case kAddressKindHeap:
+ return data.heap.Print();
+ case kAddressKindStack:
+ return data.stack.Print();
+ case kAddressKindGlobal:
+ // initialization-order-fiasco has a special Print()
+ return data.global.Print(bug_descr);
+ }
+ UNREACHABLE("AddressInformation kind is invalid");
+ }
+
+ void StoreTo(AddressDescriptionData *dst) const { *dst = data; }
+
+ const ShadowAddressDescription *AsShadow() const {
+ return data.kind == kAddressKindShadow ? &data.shadow : nullptr;
+ }
+ const HeapAddressDescription *AsHeap() const {
+ return data.kind == kAddressKindHeap ? &data.heap : nullptr;
+ }
+ const StackAddressDescription *AsStack() const {
+ return data.kind == kAddressKindStack ? &data.stack : nullptr;
+ }
+ const GlobalAddressDescription *AsGlobal() const {
+ return data.kind == kAddressKindGlobal ? &data.global : nullptr;
+ }
+};
+
+} // namespace __asan
+
+#endif // ASAN_DESCRIPTIONS_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_descriptions.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_errors.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_errors.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_errors.cc (revision 351984)
@@ -0,0 +1,597 @@
+//===-- asan_errors.cc ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// ASan implementation for error structures.
+//===----------------------------------------------------------------------===//
+
+#include "asan_errors.h"
+#include "asan_descriptions.h"
+#include "asan_mapping.h"
+#include "asan_report.h"
+#include "asan_stack.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+
+namespace __asan {
+
+static void OnStackUnwind(const SignalContext &sig,
+ const void *callback_context,
+ BufferedStackTrace *stack) {
+ bool fast = common_flags()->fast_unwind_on_fatal;
+#if SANITIZER_FREEBSD || SANITIZER_NETBSD
+ // On FreeBSD the slow unwinding that leverages _Unwind_Backtrace()
+ // yields the call stack of the signal's handler and not of the code
+ // that raised the signal (as it does on Linux).
+ fast = true;
+#endif
+  // Tests, and perhaps some users, expect the scariness score to be printed
+  // just before the stack. Since only ASan has a scariness score, there is
+  // no corresponding code in sanitizer_common, so we use this callback to
+  // print it.
+ static_cast<const ScarinessScoreBase *>(callback_context)->Print();
+ stack->Unwind(sig.pc, sig.bp, sig.context, fast);
+}
+
+void ErrorDeadlySignal::Print() {
+ ReportDeadlySignal(signal, tid, &OnStackUnwind, &scariness);
+}
+
+void ErrorDoubleFree::Print() {
+ Decorator d;
+ Printf("%s", d.Error());
+ Report(
+ "ERROR: AddressSanitizer: attempting %s on %p in thread %s:\n",
+ scariness.GetDescription(), addr_description.addr,
+ AsanThreadIdAndName(tid).c_str());
+ Printf("%s", d.Default());
+ scariness.Print();
+ GET_STACK_TRACE_FATAL(second_free_stack->trace[0],
+ second_free_stack->top_frame_bp);
+ stack.Print();
+ addr_description.Print();
+ ReportErrorSummary(scariness.GetDescription(), &stack);
+}
+
+void ErrorNewDeleteTypeMismatch::Print() {
+ Decorator d;
+ Printf("%s", d.Error());
+ Report(
+ "ERROR: AddressSanitizer: %s on %p in thread %s:\n",
+ scariness.GetDescription(), addr_description.addr,
+ AsanThreadIdAndName(tid).c_str());
+ Printf("%s object passed to delete has wrong type:\n", d.Default());
+ if (delete_size != 0) {
+ Printf(
+ " size of the allocated type: %zd bytes;\n"
+ " size of the deallocated type: %zd bytes.\n",
+ addr_description.chunk_access.chunk_size, delete_size);
+ }
+ const uptr user_alignment =
+ addr_description.chunk_access.user_requested_alignment;
+ if (delete_alignment != user_alignment) {
+ char user_alignment_str[32];
+ char delete_alignment_str[32];
+ internal_snprintf(user_alignment_str, sizeof(user_alignment_str),
+ "%zd bytes", user_alignment);
+ internal_snprintf(delete_alignment_str, sizeof(delete_alignment_str),
+ "%zd bytes", delete_alignment);
+ static const char *kDefaultAlignment = "default-aligned";
+ Printf(
+ " alignment of the allocated type: %s;\n"
+ " alignment of the deallocated type: %s.\n",
+ user_alignment > 0 ? user_alignment_str : kDefaultAlignment,
+ delete_alignment > 0 ? delete_alignment_str : kDefaultAlignment);
+ }
+ CHECK_GT(free_stack->size, 0);
+ scariness.Print();
+ GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp);
+ stack.Print();
+ addr_description.Print();
+ ReportErrorSummary(scariness.GetDescription(), &stack);
+ Report(
+ "HINT: if you don't care about these errors you may set "
+ "ASAN_OPTIONS=new_delete_type_mismatch=0\n");
+}
+
+void ErrorFreeNotMalloced::Print() {
+ Decorator d;
+ Printf("%s", d.Error());
+ Report(
+ "ERROR: AddressSanitizer: attempting free on address "
+ "which was not malloc()-ed: %p in thread %s\n",
+ addr_description.Address(), AsanThreadIdAndName(tid).c_str());
+ Printf("%s", d.Default());
+ CHECK_GT(free_stack->size, 0);
+ scariness.Print();
+ GET_STACK_TRACE_FATAL(free_stack->trace[0], free_stack->top_frame_bp);
+ stack.Print();
+ addr_description.Print();
+ ReportErrorSummary(scariness.GetDescription(), &stack);
+}
+
+void ErrorAllocTypeMismatch::Print() {
+ static const char *alloc_names[] = {"INVALID", "malloc", "operator new",
+ "operator new []"};
+ static const char *dealloc_names[] = {"INVALID", "free", "operator delete",
+ "operator delete []"};
+ CHECK_NE(alloc_type, dealloc_type);
+ Decorator d;
+ Printf("%s", d.Error());
+ Report("ERROR: AddressSanitizer: %s (%s vs %s) on %p\n",
+ scariness.GetDescription(), alloc_names[alloc_type],
+ dealloc_names[dealloc_type], addr_description.Address());
+ Printf("%s", d.Default());
+ CHECK_GT(dealloc_stack->size, 0);
+ scariness.Print();
+ GET_STACK_TRACE_FATAL(dealloc_stack->trace[0], dealloc_stack->top_frame_bp);
+ stack.Print();
+ addr_description.Print();
+ ReportErrorSummary(scariness.GetDescription(), &stack);
+ Report(
+ "HINT: if you don't care about these errors you may set "
+ "ASAN_OPTIONS=alloc_dealloc_mismatch=0\n");
+}
+
+void ErrorMallocUsableSizeNotOwned::Print() {
+ Decorator d;
+ Printf("%s", d.Error());
+ Report(
+ "ERROR: AddressSanitizer: attempting to call malloc_usable_size() for "
+ "pointer which is not owned: %p\n",
+ addr_description.Address());
+ Printf("%s", d.Default());
+ stack->Print();
+ addr_description.Print();
+ ReportErrorSummary(scariness.GetDescription(), stack);
+}
+
+void ErrorSanitizerGetAllocatedSizeNotOwned::Print() {
+ Decorator d;
+ Printf("%s", d.Error());
+ Report(
+ "ERROR: AddressSanitizer: attempting to call "
+ "__sanitizer_get_allocated_size() for pointer which is not owned: %p\n",
+ addr_description.Address());
+ Printf("%s", d.Default());
+ stack->Print();
+ addr_description.Print();
+ ReportErrorSummary(scariness.GetDescription(), stack);
+}
+
+void ErrorCallocOverflow::Print() {
+ Decorator d;
+ Printf("%s", d.Error());
+ Report(
+ "ERROR: AddressSanitizer: calloc parameters overflow: count * size "
+ "(%zd * %zd) cannot be represented in type size_t (thread %s)\n",
+ count, size, AsanThreadIdAndName(tid).c_str());
+ Printf("%s", d.Default());
+ stack->Print();
+ PrintHintAllocatorCannotReturnNull();
+ ReportErrorSummary(scariness.GetDescription(), stack);
+}
+
+void ErrorReallocArrayOverflow::Print() {
+ Decorator d;
+ Printf("%s", d.Error());
+ Report(
+ "ERROR: AddressSanitizer: reallocarray parameters overflow: count * size "
+ "(%zd * %zd) cannot be represented in type size_t (thread %s)\n",
+ count, size, AsanThreadIdAndName(tid).c_str());
+ Printf("%s", d.Default());
+ stack->Print();
+ PrintHintAllocatorCannotReturnNull();
+ ReportErrorSummary(scariness.GetDescription(), stack);
+}
+
+void ErrorPvallocOverflow::Print() {
+ Decorator d;
+ Printf("%s", d.Error());
+ Report(
+ "ERROR: AddressSanitizer: pvalloc parameters overflow: size 0x%zx "
+ "rounded up to system page size 0x%zx cannot be represented in type "
+ "size_t (thread %s)\n",
+ size, GetPageSizeCached(), AsanThreadIdAndName(tid).c_str());
+ Printf("%s", d.Default());
+ stack->Print();
+ PrintHintAllocatorCannotReturnNull();
+ ReportErrorSummary(scariness.GetDescription(), stack);
+}
+
+void ErrorInvalidAllocationAlignment::Print() {
+ Decorator d;
+ Printf("%s", d.Error());
+ Report(
+ "ERROR: AddressSanitizer: invalid allocation alignment: %zd, "
+ "alignment must be a power of two (thread %s)\n",
+ alignment, AsanThreadIdAndName(tid).c_str());
+ Printf("%s", d.Default());
+ stack->Print();
+ PrintHintAllocatorCannotReturnNull();
+ ReportErrorSummary(scariness.GetDescription(), stack);
+}
+
+void ErrorInvalidAlignedAllocAlignment::Print() {
+ Decorator d;
+ Printf("%s", d.Error());
+#if SANITIZER_POSIX
+ Report("ERROR: AddressSanitizer: invalid alignment requested in "
+ "aligned_alloc: %zd, alignment must be a power of two and the "
+ "requested size 0x%zx must be a multiple of alignment "
+ "(thread %s)\n", alignment, size, AsanThreadIdAndName(tid).c_str());
+#else
+ Report("ERROR: AddressSanitizer: invalid alignment requested in "
+ "aligned_alloc: %zd, the requested size 0x%zx must be a multiple of "
+ "alignment (thread %s)\n", alignment, size,
+ AsanThreadIdAndName(tid).c_str());
+#endif
+ Printf("%s", d.Default());
+ stack->Print();
+ PrintHintAllocatorCannotReturnNull();
+ ReportErrorSummary(scariness.GetDescription(), stack);
+}
+
+void ErrorInvalidPosixMemalignAlignment::Print() {
+ Decorator d;
+ Printf("%s", d.Error());
+ Report(
+ "ERROR: AddressSanitizer: invalid alignment requested in posix_memalign: "
+ "%zd, alignment must be a power of two and a multiple of sizeof(void*) "
+ "== %zd (thread %s)\n",
+ alignment, sizeof(void*), AsanThreadIdAndName(tid).c_str()); // NOLINT
+ Printf("%s", d.Default());
+ stack->Print();
+ PrintHintAllocatorCannotReturnNull();
+ ReportErrorSummary(scariness.GetDescription(), stack);
+}
+
+void ErrorAllocationSizeTooBig::Print() {
+ Decorator d;
+ Printf("%s", d.Error());
+ Report(
+ "ERROR: AddressSanitizer: requested allocation size 0x%zx (0x%zx after "
+ "adjustments for alignment, red zones etc.) exceeds maximum supported "
+ "size of 0x%zx (thread %s)\n",
+ user_size, total_size, max_size, AsanThreadIdAndName(tid).c_str());
+ Printf("%s", d.Default());
+ stack->Print();
+ PrintHintAllocatorCannotReturnNull();
+ ReportErrorSummary(scariness.GetDescription(), stack);
+}
+
+void ErrorRssLimitExceeded::Print() {
+ Decorator d;
+ Printf("%s", d.Error());
+ Report(
+ "ERROR: AddressSanitizer: specified RSS limit exceeded, currently set to "
+ "soft_rss_limit_mb=%zd\n", common_flags()->soft_rss_limit_mb);
+ Printf("%s", d.Default());
+ stack->Print();
+ PrintHintAllocatorCannotReturnNull();
+ ReportErrorSummary(scariness.GetDescription(), stack);
+}
+
+void ErrorOutOfMemory::Print() {
+ Decorator d;
+ Printf("%s", d.Error());
+ Report(
+ "ERROR: AddressSanitizer: allocator is out of memory trying to allocate "
+ "0x%zx bytes\n", requested_size);
+ Printf("%s", d.Default());
+ stack->Print();
+ PrintHintAllocatorCannotReturnNull();
+ ReportErrorSummary(scariness.GetDescription(), stack);
+}
+
+void ErrorStringFunctionMemoryRangesOverlap::Print() {
+ Decorator d;
+ char bug_type[100];
+ internal_snprintf(bug_type, sizeof(bug_type), "%s-param-overlap", function);
+ Printf("%s", d.Error());
+ Report(
+ "ERROR: AddressSanitizer: %s: memory ranges [%p,%p) and [%p, %p) "
+ "overlap\n",
+ bug_type, addr1_description.Address(),
+ addr1_description.Address() + length1, addr2_description.Address(),
+ addr2_description.Address() + length2);
+ Printf("%s", d.Default());
+ scariness.Print();
+ stack->Print();
+ addr1_description.Print();
+ addr2_description.Print();
+ ReportErrorSummary(bug_type, stack);
+}
+
+void ErrorStringFunctionSizeOverflow::Print() {
+ Decorator d;
+ Printf("%s", d.Error());
+ Report("ERROR: AddressSanitizer: %s: (size=%zd)\n",
+ scariness.GetDescription(), size);
+ Printf("%s", d.Default());
+ scariness.Print();
+ stack->Print();
+ addr_description.Print();
+ ReportErrorSummary(scariness.GetDescription(), stack);
+}
+
+void ErrorBadParamsToAnnotateContiguousContainer::Print() {
+ Report(
+ "ERROR: AddressSanitizer: bad parameters to "
+ "__sanitizer_annotate_contiguous_container:\n"
+ " beg : %p\n"
+ " end : %p\n"
+ " old_mid : %p\n"
+ " new_mid : %p\n",
+ beg, end, old_mid, new_mid);
+ uptr granularity = SHADOW_GRANULARITY;
+ if (!IsAligned(beg, granularity))
+ Report("ERROR: beg is not aligned by %d\n", granularity);
+ stack->Print();
+ ReportErrorSummary(scariness.GetDescription(), stack);
+}
+
+void ErrorODRViolation::Print() {
+ Decorator d;
+ Printf("%s", d.Error());
+ Report("ERROR: AddressSanitizer: %s (%p):\n", scariness.GetDescription(),
+ global1.beg);
+ Printf("%s", d.Default());
+ InternalScopedString g1_loc(256), g2_loc(256);
+ PrintGlobalLocation(&g1_loc, global1);
+ PrintGlobalLocation(&g2_loc, global2);
+ Printf(" [1] size=%zd '%s' %s\n", global1.size,
+ MaybeDemangleGlobalName(global1.name), g1_loc.data());
+ Printf(" [2] size=%zd '%s' %s\n", global2.size,
+ MaybeDemangleGlobalName(global2.name), g2_loc.data());
+ if (stack_id1 && stack_id2) {
+ Printf("These globals were registered at these points:\n");
+ Printf(" [1]:\n");
+ StackDepotGet(stack_id1).Print();
+ Printf(" [2]:\n");
+ StackDepotGet(stack_id2).Print();
+ }
+ Report(
+ "HINT: if you don't care about these errors you may set "
+ "ASAN_OPTIONS=detect_odr_violation=0\n");
+ InternalScopedString error_msg(256);
+ error_msg.append("%s: global '%s' at %s", scariness.GetDescription(),
+ MaybeDemangleGlobalName(global1.name), g1_loc.data());
+ ReportErrorSummary(error_msg.data());
+}
+
+void ErrorInvalidPointerPair::Print() {
+ Decorator d;
+ Printf("%s", d.Error());
+ Report("ERROR: AddressSanitizer: %s: %p %p\n", scariness.GetDescription(),
+ addr1_description.Address(), addr2_description.Address());
+ Printf("%s", d.Default());
+ GET_STACK_TRACE_FATAL(pc, bp);
+ stack.Print();
+ addr1_description.Print();
+ addr2_description.Print();
+ ReportErrorSummary(scariness.GetDescription(), &stack);
+}
+
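+// Shadow byte values above 127 are ASan's poison magics (redzones, freed
+// memory, and the like), i.e. fully unaddressable. If both neighboring
+// shadow bytes are fully poisoned, the access landed far from valid bounds.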
+static bool AdjacentShadowValuesAreFullyPoisoned(u8 *s) {
+ return s[-1] > 127 && s[1] > 127;
+}
+
+ErrorGeneric::ErrorGeneric(u32 tid, uptr pc_, uptr bp_, uptr sp_, uptr addr,
+ bool is_write_, uptr access_size_)
+ : ErrorBase(tid),
+ addr_description(addr, access_size_, /*shouldLockThreadRegistry=*/false),
+ pc(pc_),
+ bp(bp_),
+ sp(sp_),
+ access_size(access_size_),
+ is_write(is_write_),
+ shadow_val(0) {
+ scariness.Clear();
+ if (access_size) {
+ if (access_size <= 9) {
+ char desr[] = "?-byte";
+ desr[0] = '0' + access_size;
+ scariness.Scare(access_size + access_size / 2, desr);
+ } else if (access_size >= 10) {
+ scariness.Scare(15, "multi-byte");
+ }
+ is_write ? scariness.Scare(20, "write") : scariness.Scare(1, "read");
+
+ // Determine the error type.
+ bug_descr = "unknown-crash";
+ if (AddrIsInMem(addr)) {
+ u8 *shadow_addr = (u8 *)MemToShadow(addr);
+ // If we are accessing 16 bytes, look at the second shadow byte.
+ if (*shadow_addr == 0 && access_size > SHADOW_GRANULARITY) shadow_addr++;
+ // If we are in the partial right redzone, look at the next shadow byte.
+ if (*shadow_addr > 0 && *shadow_addr < 128) shadow_addr++;
+ bool far_from_bounds = false;
+ shadow_val = *shadow_addr;
+ int bug_type_score = 0;
+ // For use-after-frees reads are almost as bad as writes.
+ int read_after_free_bonus = 0;
+ switch (shadow_val) {
+ case kAsanHeapLeftRedzoneMagic:
+ case kAsanArrayCookieMagic:
+ bug_descr = "heap-buffer-overflow";
+ bug_type_score = 10;
+ far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr);
+ break;
+ case kAsanHeapFreeMagic:
+ bug_descr = "heap-use-after-free";
+ bug_type_score = 20;
+ if (!is_write) read_after_free_bonus = 18;
+ break;
+ case kAsanStackLeftRedzoneMagic:
+ bug_descr = "stack-buffer-underflow";
+ bug_type_score = 25;
+ far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr);
+ break;
+ case kAsanInitializationOrderMagic:
+ bug_descr = "initialization-order-fiasco";
+ bug_type_score = 1;
+ break;
+ case kAsanStackMidRedzoneMagic:
+ case kAsanStackRightRedzoneMagic:
+ bug_descr = "stack-buffer-overflow";
+ bug_type_score = 25;
+ far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr);
+ break;
+ case kAsanStackAfterReturnMagic:
+ bug_descr = "stack-use-after-return";
+ bug_type_score = 30;
+ if (!is_write) read_after_free_bonus = 18;
+ break;
+ case kAsanUserPoisonedMemoryMagic:
+ bug_descr = "use-after-poison";
+ bug_type_score = 20;
+ break;
+ case kAsanContiguousContainerOOBMagic:
+ bug_descr = "container-overflow";
+ bug_type_score = 10;
+ break;
+ case kAsanStackUseAfterScopeMagic:
+ bug_descr = "stack-use-after-scope";
+ bug_type_score = 10;
+ break;
+ case kAsanGlobalRedzoneMagic:
+ bug_descr = "global-buffer-overflow";
+ bug_type_score = 10;
+ far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr);
+ break;
+ case kAsanIntraObjectRedzone:
+ bug_descr = "intra-object-overflow";
+ bug_type_score = 10;
+ break;
+ case kAsanAllocaLeftMagic:
+ case kAsanAllocaRightMagic:
+ bug_descr = "dynamic-stack-buffer-overflow";
+ bug_type_score = 25;
+ far_from_bounds = AdjacentShadowValuesAreFullyPoisoned(shadow_addr);
+ break;
+ }
+ scariness.Scare(bug_type_score + read_after_free_bonus, bug_descr);
+ if (far_from_bounds) scariness.Scare(10, "far-from-bounds");
+ }
+ }
+}
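+
+// Scoring example (illustrative only, assuming Scare() accumulates its score
+// argument and appends its description): an 8-byte write to freed heap
+// memory scores 12 for "8-byte" (8 + 8/2), plus 20 for "write", plus 20 for
+// "heap-use-after-free", for a total of 52.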
+
+static void PrintContainerOverflowHint() {
+ Printf("HINT: if you don't care about these errors you may set "
+ "ASAN_OPTIONS=detect_container_overflow=0.\n"
+ "If you suspect a false positive see also: "
+ "https://github.com/google/sanitizers/wiki/"
+ "AddressSanitizerContainerOverflow.\n");
+}
+
+static void PrintShadowByte(InternalScopedString *str, const char *before,
+ u8 byte, const char *after = "\n") {
+ PrintMemoryByte(str, before, byte, /*in_shadow*/true, after);
+}
+
+static void PrintLegend(InternalScopedString *str) {
+ str->append(
+ "Shadow byte legend (one shadow byte represents %d "
+ "application bytes):\n",
+ (int)SHADOW_GRANULARITY);
+ PrintShadowByte(str, " Addressable: ", 0);
+ str->append(" Partially addressable: ");
+ for (u8 i = 1; i < SHADOW_GRANULARITY; i++) PrintShadowByte(str, "", i, " ");
+ str->append("\n");
+ PrintShadowByte(str, " Heap left redzone: ",
+ kAsanHeapLeftRedzoneMagic);
+ PrintShadowByte(str, " Freed heap region: ", kAsanHeapFreeMagic);
+ PrintShadowByte(str, " Stack left redzone: ",
+ kAsanStackLeftRedzoneMagic);
+ PrintShadowByte(str, " Stack mid redzone: ",
+ kAsanStackMidRedzoneMagic);
+ PrintShadowByte(str, " Stack right redzone: ",
+ kAsanStackRightRedzoneMagic);
+ PrintShadowByte(str, " Stack after return: ",
+ kAsanStackAfterReturnMagic);
+ PrintShadowByte(str, " Stack use after scope: ",
+ kAsanStackUseAfterScopeMagic);
+ PrintShadowByte(str, " Global redzone: ", kAsanGlobalRedzoneMagic);
+ PrintShadowByte(str, " Global init order: ",
+ kAsanInitializationOrderMagic);
+ PrintShadowByte(str, " Poisoned by user: ",
+ kAsanUserPoisonedMemoryMagic);
+ PrintShadowByte(str, " Container overflow: ",
+ kAsanContiguousContainerOOBMagic);
+ PrintShadowByte(str, " Array cookie: ",
+ kAsanArrayCookieMagic);
+ PrintShadowByte(str, " Intra object redzone: ",
+ kAsanIntraObjectRedzone);
+ PrintShadowByte(str, " ASan internal: ", kAsanInternalHeapMagic);
+ PrintShadowByte(str, " Left alloca redzone: ", kAsanAllocaLeftMagic);
+ PrintShadowByte(str, " Right alloca redzone: ", kAsanAllocaRightMagic);
+ PrintShadowByte(str, " Shadow gap: ", kAsanShadowGap);
+}
+
+static void PrintShadowBytes(InternalScopedString *str, const char *before,
+ u8 *bytes, u8 *guilty, uptr n) {
+ Decorator d;
+ if (before) str->append("%s%p:", before, bytes);
+ for (uptr i = 0; i < n; i++) {
+ u8 *p = bytes + i;
+ const char *before =
+ p == guilty ? "[" : (p - 1 == guilty && i != 0) ? "" : " ";
+ const char *after = p == guilty ? "]" : "";
+ PrintShadowByte(str, before, *p, after);
+ }
+ str->append("\n");
+}
+
+static void PrintShadowMemoryForAddress(uptr addr) {
+ if (!AddrIsInMem(addr)) return;
+ uptr shadow_addr = MemToShadow(addr);
+ const uptr n_bytes_per_row = 16;
+ uptr aligned_shadow = shadow_addr & ~(n_bytes_per_row - 1);
+ InternalScopedString str(4096 * 8);
+ str.append("Shadow bytes around the buggy address:\n");
+ for (int i = -5; i <= 5; i++) {
+ uptr row_shadow_addr = aligned_shadow + i * n_bytes_per_row;
+ // Skip rows that would be outside the shadow range. This can happen when
+ // the user address is near the bottom, top, or shadow gap of the address
+ // space.
+ if (!AddrIsInShadow(row_shadow_addr)) continue;
+ const char *prefix = (i == 0) ? "=>" : " ";
+ PrintShadowBytes(&str, prefix, (u8 *)row_shadow_addr, (u8 *)shadow_addr,
+ n_bytes_per_row);
+ }
+ if (flags()->print_legend) PrintLegend(&str);
+ Printf("%s", str.data());
+}
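+
+// Note (illustrative): the dump above prints up to 11 rows of 16 shadow
+// bytes each, i.e. up to 176 shadow bytes covering 176 * SHADOW_GRANULARITY
+// application bytes (1408 at the usual granularity of 8) around the buggy
+// address.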
+
+void ErrorGeneric::Print() {
+ Decorator d;
+ Printf("%s", d.Error());
+ uptr addr = addr_description.Address();
+ Report("ERROR: AddressSanitizer: %s on address %p at pc %p bp %p sp %p\n",
+ bug_descr, (void *)addr, pc, bp, sp);
+ Printf("%s", d.Default());
+
+ Printf("%s%s of size %zu at %p thread %s%s\n", d.Access(),
+ access_size ? (is_write ? "WRITE" : "READ") : "ACCESS", access_size,
+ (void *)addr, AsanThreadIdAndName(tid).c_str(), d.Default());
+
+ scariness.Print();
+ GET_STACK_TRACE_FATAL(pc, bp);
+ stack.Print();
+
+ // Pass bug_descr because we have a special case for
+ // initialization-order-fiasco
+ addr_description.Print(bug_descr);
+ if (shadow_val == kAsanContiguousContainerOOBMagic)
+ PrintContainerOverflowHint();
+ ReportErrorSummary(bug_descr, &stack);
+ PrintShadowMemoryForAddress(addr);
+}
+
+} // namespace __asan
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_errors.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_errors.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_errors.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_errors.h (revision 351984)
@@ -0,0 +1,455 @@
+//===-- asan_errors.h -------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// ASan-private header for error structures.
+//===----------------------------------------------------------------------===//
+#ifndef ASAN_ERRORS_H
+#define ASAN_ERRORS_H
+
+#include "asan_descriptions.h"
+#include "asan_scariness_score.h"
+#include "sanitizer_common/sanitizer_common.h"
+
+namespace __asan {
+
+// (*) VS2013 does not implement unrestricted unions, so we need a trivial
+// default constructor explicitly defined for each particular error.
+
+// None of the error classes own the stack traces mentioned in them.
+
+struct ErrorBase {
+ ScarinessScoreBase scariness;
+ u32 tid;
+
+ ErrorBase() = default; // (*)
+ explicit ErrorBase(u32 tid_) : tid(tid_) {}
+ ErrorBase(u32 tid_, int initial_score, const char *reason) : tid(tid_) {
+ scariness.Clear();
+ scariness.Scare(initial_score, reason);
+ }
+};
+
+struct ErrorDeadlySignal : ErrorBase {
+ SignalContext signal;
+
+ ErrorDeadlySignal() = default; // (*)
+ ErrorDeadlySignal(u32 tid, const SignalContext &sig)
+ : ErrorBase(tid),
+ signal(sig) {
+ scariness.Clear();
+ if (signal.IsStackOverflow()) {
+ scariness.Scare(10, "stack-overflow");
+ } else if (!signal.is_memory_access) {
+ scariness.Scare(10, "signal");
+ } else if (signal.addr < GetPageSizeCached()) {
+ scariness.Scare(10, "null-deref");
+ } else if (signal.addr == signal.pc) {
+ scariness.Scare(60, "wild-jump");
+ } else if (signal.write_flag == SignalContext::WRITE) {
+ scariness.Scare(30, "wild-addr-write");
+ } else if (signal.write_flag == SignalContext::READ) {
+ scariness.Scare(20, "wild-addr-read");
+ } else {
+ scariness.Scare(25, "wild-addr");
+ }
+ }
+ void Print();
+};
+
+struct ErrorDoubleFree : ErrorBase {
+ const BufferedStackTrace *second_free_stack;
+ HeapAddressDescription addr_description;
+
+ ErrorDoubleFree() = default; // (*)
+ ErrorDoubleFree(u32 tid, BufferedStackTrace *stack, uptr addr)
+ : ErrorBase(tid, 42, "double-free"),
+ second_free_stack(stack) {
+ CHECK_GT(second_free_stack->size, 0);
+ GetHeapAddressInformation(addr, 1, &addr_description);
+ }
+ void Print();
+};
+
+struct ErrorNewDeleteTypeMismatch : ErrorBase {
+ const BufferedStackTrace *free_stack;
+ HeapAddressDescription addr_description;
+ uptr delete_size;
+ uptr delete_alignment;
+
+ ErrorNewDeleteTypeMismatch() = default; // (*)
+ ErrorNewDeleteTypeMismatch(u32 tid, BufferedStackTrace *stack, uptr addr,
+ uptr delete_size_, uptr delete_alignment_)
+ : ErrorBase(tid, 10, "new-delete-type-mismatch"),
+ free_stack(stack),
+ delete_size(delete_size_),
+ delete_alignment(delete_alignment_) {
+ GetHeapAddressInformation(addr, 1, &addr_description);
+ }
+ void Print();
+};
+
+struct ErrorFreeNotMalloced : ErrorBase {
+ const BufferedStackTrace *free_stack;
+ AddressDescription addr_description;
+
+ ErrorFreeNotMalloced() = default; // (*)
+ ErrorFreeNotMalloced(u32 tid, BufferedStackTrace *stack, uptr addr)
+ : ErrorBase(tid, 40, "bad-free"),
+ free_stack(stack),
+ addr_description(addr, /*shouldLockThreadRegistry=*/false) {}
+ void Print();
+};
+
+struct ErrorAllocTypeMismatch : ErrorBase {
+ const BufferedStackTrace *dealloc_stack;
+ AllocType alloc_type, dealloc_type;
+ AddressDescription addr_description;
+
+ ErrorAllocTypeMismatch() = default; // (*)
+ ErrorAllocTypeMismatch(u32 tid, BufferedStackTrace *stack, uptr addr,
+ AllocType alloc_type_, AllocType dealloc_type_)
+ : ErrorBase(tid, 10, "alloc-dealloc-mismatch"),
+ dealloc_stack(stack),
+ alloc_type(alloc_type_),
+ dealloc_type(dealloc_type_),
+ addr_description(addr, 1, false) {}
+ void Print();
+};
+
+struct ErrorMallocUsableSizeNotOwned : ErrorBase {
+ const BufferedStackTrace *stack;
+ AddressDescription addr_description;
+
+ ErrorMallocUsableSizeNotOwned() = default; // (*)
+ ErrorMallocUsableSizeNotOwned(u32 tid, BufferedStackTrace *stack_, uptr addr)
+ : ErrorBase(tid, 10, "bad-malloc_usable_size"),
+ stack(stack_),
+ addr_description(addr, /*shouldLockThreadRegistry=*/false) {}
+ void Print();
+};
+
+struct ErrorSanitizerGetAllocatedSizeNotOwned : ErrorBase {
+ const BufferedStackTrace *stack;
+ AddressDescription addr_description;
+
+ ErrorSanitizerGetAllocatedSizeNotOwned() = default; // (*)
+ ErrorSanitizerGetAllocatedSizeNotOwned(u32 tid, BufferedStackTrace *stack_,
+ uptr addr)
+ : ErrorBase(tid, 10, "bad-__sanitizer_get_allocated_size"),
+ stack(stack_),
+ addr_description(addr, /*shouldLockThreadRegistry=*/false) {}
+ void Print();
+};
+
+struct ErrorCallocOverflow : ErrorBase {
+ const BufferedStackTrace *stack;
+ uptr count;
+ uptr size;
+
+ ErrorCallocOverflow() = default; // (*)
+ ErrorCallocOverflow(u32 tid, BufferedStackTrace *stack_, uptr count_,
+ uptr size_)
+ : ErrorBase(tid, 10, "calloc-overflow"),
+ stack(stack_),
+ count(count_),
+ size(size_) {}
+ void Print();
+};
+
+struct ErrorReallocArrayOverflow : ErrorBase {
+ const BufferedStackTrace *stack;
+ uptr count;
+ uptr size;
+
+ ErrorReallocArrayOverflow() = default; // (*)
+ ErrorReallocArrayOverflow(u32 tid, BufferedStackTrace *stack_, uptr count_,
+ uptr size_)
+ : ErrorBase(tid, 10, "reallocarray-overflow"),
+ stack(stack_),
+ count(count_),
+ size(size_) {}
+ void Print();
+};
+
+struct ErrorPvallocOverflow : ErrorBase {
+ const BufferedStackTrace *stack;
+ uptr size;
+
+ ErrorPvallocOverflow() = default; // (*)
+ ErrorPvallocOverflow(u32 tid, BufferedStackTrace *stack_, uptr size_)
+ : ErrorBase(tid, 10, "pvalloc-overflow"),
+ stack(stack_),
+ size(size_) {}
+ void Print();
+};
+
+struct ErrorInvalidAllocationAlignment : ErrorBase {
+ const BufferedStackTrace *stack;
+ uptr alignment;
+
+ ErrorInvalidAllocationAlignment() = default; // (*)
+ ErrorInvalidAllocationAlignment(u32 tid, BufferedStackTrace *stack_,
+ uptr alignment_)
+ : ErrorBase(tid, 10, "invalid-allocation-alignment"),
+ stack(stack_),
+ alignment(alignment_) {}
+ void Print();
+};
+
+struct ErrorInvalidAlignedAllocAlignment : ErrorBase {
+ const BufferedStackTrace *stack;
+ uptr size;
+ uptr alignment;
+
+ ErrorInvalidAlignedAllocAlignment() = default; // (*)
+ ErrorInvalidAlignedAllocAlignment(u32 tid, BufferedStackTrace *stack_,
+ uptr size_, uptr alignment_)
+ : ErrorBase(tid, 10, "invalid-aligned-alloc-alignment"),
+ stack(stack_),
+ size(size_),
+ alignment(alignment_) {}
+ void Print();
+};
+
+struct ErrorInvalidPosixMemalignAlignment : ErrorBase {
+ const BufferedStackTrace *stack;
+ uptr alignment;
+
+ ErrorInvalidPosixMemalignAlignment() = default; // (*)
+ ErrorInvalidPosixMemalignAlignment(u32 tid, BufferedStackTrace *stack_,
+ uptr alignment_)
+ : ErrorBase(tid, 10, "invalid-posix-memalign-alignment"),
+ stack(stack_),
+ alignment(alignment_) {}
+ void Print();
+};
+
+struct ErrorAllocationSizeTooBig : ErrorBase {
+ const BufferedStackTrace *stack;
+ uptr user_size;
+ uptr total_size;
+ uptr max_size;
+
+ ErrorAllocationSizeTooBig() = default; // (*)
+ ErrorAllocationSizeTooBig(u32 tid, BufferedStackTrace *stack_,
+ uptr user_size_, uptr total_size_, uptr max_size_)
+ : ErrorBase(tid, 10, "allocation-size-too-big"),
+ stack(stack_),
+ user_size(user_size_),
+ total_size(total_size_),
+ max_size(max_size_) {}
+ void Print();
+};
+
+struct ErrorRssLimitExceeded : ErrorBase {
+ const BufferedStackTrace *stack;
+
+ ErrorRssLimitExceeded() = default; // (*)
+ ErrorRssLimitExceeded(u32 tid, BufferedStackTrace *stack_)
+ : ErrorBase(tid, 10, "rss-limit-exceeded"),
+ stack(stack_) {}
+ void Print();
+};
+
+struct ErrorOutOfMemory : ErrorBase {
+ const BufferedStackTrace *stack;
+ uptr requested_size;
+
+ ErrorOutOfMemory() = default; // (*)
+ ErrorOutOfMemory(u32 tid, BufferedStackTrace *stack_, uptr requested_size_)
+ : ErrorBase(tid, 10, "out-of-memory"),
+ stack(stack_),
+ requested_size(requested_size_) {}
+ void Print();
+};
+
+struct ErrorStringFunctionMemoryRangesOverlap : ErrorBase {
+ const BufferedStackTrace *stack;
+ uptr length1, length2;
+ AddressDescription addr1_description;
+ AddressDescription addr2_description;
+ const char *function;
+
+ ErrorStringFunctionMemoryRangesOverlap() = default; // (*)
+ ErrorStringFunctionMemoryRangesOverlap(u32 tid, BufferedStackTrace *stack_,
+ uptr addr1, uptr length1_, uptr addr2,
+ uptr length2_, const char *function_)
+ : ErrorBase(tid),
+ stack(stack_),
+ length1(length1_),
+ length2(length2_),
+ addr1_description(addr1, length1, /*shouldLockThreadRegistry=*/false),
+ addr2_description(addr2, length2, /*shouldLockThreadRegistry=*/false),
+ function(function_) {
+ char bug_type[100];
+ internal_snprintf(bug_type, sizeof(bug_type), "%s-param-overlap", function);
+ scariness.Clear();
+ scariness.Scare(10, bug_type);
+ }
+ void Print();
+};
+
+struct ErrorStringFunctionSizeOverflow : ErrorBase {
+ const BufferedStackTrace *stack;
+ AddressDescription addr_description;
+ uptr size;
+
+ ErrorStringFunctionSizeOverflow() = default; // (*)
+ ErrorStringFunctionSizeOverflow(u32 tid, BufferedStackTrace *stack_,
+ uptr addr, uptr size_)
+ : ErrorBase(tid, 10, "negative-size-param"),
+ stack(stack_),
+ addr_description(addr, /*shouldLockThreadRegistry=*/false),
+ size(size_) {}
+ void Print();
+};
+
+struct ErrorBadParamsToAnnotateContiguousContainer : ErrorBase {
+ const BufferedStackTrace *stack;
+ uptr beg, end, old_mid, new_mid;
+
+ ErrorBadParamsToAnnotateContiguousContainer() = default; // (*)
+ // PS4: Do we want an AddressDescription for beg?
+ ErrorBadParamsToAnnotateContiguousContainer(u32 tid,
+ BufferedStackTrace *stack_,
+ uptr beg_, uptr end_,
+ uptr old_mid_, uptr new_mid_)
+ : ErrorBase(tid, 10, "bad-__sanitizer_annotate_contiguous_container"),
+ stack(stack_),
+ beg(beg_),
+ end(end_),
+ old_mid(old_mid_),
+ new_mid(new_mid_) {}
+ void Print();
+};
+
+struct ErrorODRViolation : ErrorBase {
+ __asan_global global1, global2;
+ u32 stack_id1, stack_id2;
+
+ ErrorODRViolation() = default; // (*)
+ ErrorODRViolation(u32 tid, const __asan_global *g1, u32 stack_id1_,
+ const __asan_global *g2, u32 stack_id2_)
+ : ErrorBase(tid, 10, "odr-violation"),
+ global1(*g1),
+ global2(*g2),
+ stack_id1(stack_id1_),
+ stack_id2(stack_id2_) {}
+ void Print();
+};
+
+struct ErrorInvalidPointerPair : ErrorBase {
+ uptr pc, bp, sp;
+ AddressDescription addr1_description;
+ AddressDescription addr2_description;
+
+ ErrorInvalidPointerPair() = default; // (*)
+ ErrorInvalidPointerPair(u32 tid, uptr pc_, uptr bp_, uptr sp_, uptr p1,
+ uptr p2)
+ : ErrorBase(tid, 10, "invalid-pointer-pair"),
+ pc(pc_),
+ bp(bp_),
+ sp(sp_),
+ addr1_description(p1, 1, /*shouldLockThreadRegistry=*/false),
+ addr2_description(p2, 1, /*shouldLockThreadRegistry=*/false) {}
+ void Print();
+};
+
+struct ErrorGeneric : ErrorBase {
+ AddressDescription addr_description;
+ uptr pc, bp, sp;
+ uptr access_size;
+ const char *bug_descr;
+ bool is_write;
+ u8 shadow_val;
+
+ ErrorGeneric() = default; // (*)
+ ErrorGeneric(u32 tid, uptr addr, uptr pc_, uptr bp_, uptr sp_, bool is_write_,
+ uptr access_size_);
+ void Print();
+};
+
+// clang-format off
+#define ASAN_FOR_EACH_ERROR_KIND(macro) \
+ macro(DeadlySignal) \
+ macro(DoubleFree) \
+ macro(NewDeleteTypeMismatch) \
+ macro(FreeNotMalloced) \
+ macro(AllocTypeMismatch) \
+ macro(MallocUsableSizeNotOwned) \
+ macro(SanitizerGetAllocatedSizeNotOwned) \
+ macro(CallocOverflow) \
+ macro(ReallocArrayOverflow) \
+ macro(PvallocOverflow) \
+ macro(InvalidAllocationAlignment) \
+ macro(InvalidAlignedAllocAlignment) \
+ macro(InvalidPosixMemalignAlignment) \
+ macro(AllocationSizeTooBig) \
+ macro(RssLimitExceeded) \
+ macro(OutOfMemory) \
+ macro(StringFunctionMemoryRangesOverlap) \
+ macro(StringFunctionSizeOverflow) \
+ macro(BadParamsToAnnotateContiguousContainer) \
+ macro(ODRViolation) \
+ macro(InvalidPointerPair) \
+ macro(Generic)
+// clang-format on
+
+#define ASAN_DEFINE_ERROR_KIND(name) kErrorKind##name,
+#define ASAN_ERROR_DESCRIPTION_MEMBER(name) Error##name name;
+#define ASAN_ERROR_DESCRIPTION_CONSTRUCTOR(name) \
+ ErrorDescription(Error##name const &e) : kind(kErrorKind##name) { \
+ internal_memcpy(&name, &e, sizeof(name)); \
+ }
+#define ASAN_ERROR_DESCRIPTION_PRINT(name) \
+ case kErrorKind##name: \
+ return name.Print();
+
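+// For reference, ASAN_ERROR_DESCRIPTION_CONSTRUCTOR(DoubleFree) expands to:
+//   ErrorDescription(ErrorDoubleFree const &e) : kind(kErrorKindDoubleFree) {
+//     internal_memcpy(&DoubleFree, &e, sizeof(DoubleFree));
+//   }
+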
+enum ErrorKind {
+ kErrorKindInvalid = 0,
+ ASAN_FOR_EACH_ERROR_KIND(ASAN_DEFINE_ERROR_KIND)
+};
+
+struct ErrorDescription {
+ ErrorKind kind;
+  // We're using a tagged union because it allows us to have a trivially
+  // copyable type and use the same structures as the public interface.
+  //
+  // We could add a wrapper around it to make it "more C++-like", but that
+  // would add a lot of code and the benefit wouldn't be that big.
+ union {
+ ErrorBase Base;
+ ASAN_FOR_EACH_ERROR_KIND(ASAN_ERROR_DESCRIPTION_MEMBER)
+ };
+
+ ErrorDescription() { internal_memset(this, 0, sizeof(*this)); }
+ explicit ErrorDescription(LinkerInitialized) {}
+ ASAN_FOR_EACH_ERROR_KIND(ASAN_ERROR_DESCRIPTION_CONSTRUCTOR)
+
+ bool IsValid() { return kind != kErrorKindInvalid; }
+ void Print() {
+ switch (kind) {
+ ASAN_FOR_EACH_ERROR_KIND(ASAN_ERROR_DESCRIPTION_PRINT)
+ case kErrorKindInvalid:
+ CHECK(0);
+ }
+ CHECK(0);
+ }
+};
+
+#undef ASAN_FOR_EACH_ERROR_KIND
+#undef ASAN_DEFINE_ERROR_KIND
+#undef ASAN_ERROR_DESCRIPTION_MEMBER
+#undef ASAN_ERROR_DESCRIPTION_CONSTRUCTOR
+#undef ASAN_ERROR_DESCRIPTION_PRINT
+
+} // namespace __asan
+
+#endif // ASAN_ERRORS_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_errors.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_fake_stack.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_fake_stack.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_fake_stack.cc (revision 351984)
@@ -0,0 +1,282 @@
+//===-- asan_fake_stack.cc ------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// FakeStack is used to detect use-after-return bugs.
+//===----------------------------------------------------------------------===//
+
+#include "asan_allocator.h"
+#include "asan_poisoning.h"
+#include "asan_thread.h"
+
+namespace __asan {
+
+static const u64 kMagic1 = kAsanStackAfterReturnMagic;
+static const u64 kMagic2 = (kMagic1 << 8) | kMagic1;
+static const u64 kMagic4 = (kMagic2 << 16) | kMagic2;
+static const u64 kMagic8 = (kMagic4 << 32) | kMagic4;
+
+static const u64 kAllocaRedzoneSize = 32UL;
+static const u64 kAllocaRedzoneMask = 31UL;
+
+// For small size classes, inline PoisonShadow for better performance.
+ALWAYS_INLINE void SetShadow(uptr ptr, uptr size, uptr class_id, u64 magic) {
+ u64 *shadow = reinterpret_cast<u64*>(MemToShadow(ptr));
+ if (SHADOW_SCALE == 3 && class_id <= 6) {
+ // This code expects SHADOW_SCALE=3.
+ for (uptr i = 0; i < (((uptr)1) << class_id); i++) {
+ shadow[i] = magic;
+ // Make sure this does not become memset.
+ SanitizerBreakOptimization(nullptr);
+ }
+ } else {
+    // The size class is too big; it's cheaper to poison only size bytes.
+ PoisonShadow(ptr, size, static_cast<u8>(magic));
+ }
+}
+
+FakeStack *FakeStack::Create(uptr stack_size_log) {
+ static uptr kMinStackSizeLog = 16;
+ static uptr kMaxStackSizeLog = FIRST_32_SECOND_64(24, 28);
+ if (stack_size_log < kMinStackSizeLog)
+ stack_size_log = kMinStackSizeLog;
+ if (stack_size_log > kMaxStackSizeLog)
+ stack_size_log = kMaxStackSizeLog;
+ uptr size = RequiredSize(stack_size_log);
+ FakeStack *res = reinterpret_cast<FakeStack *>(
+ flags()->uar_noreserve ? MmapNoReserveOrDie(size, "FakeStack")
+ : MmapOrDie(size, "FakeStack"));
+ res->stack_size_log_ = stack_size_log;
+ u8 *p = reinterpret_cast<u8 *>(res);
+ VReport(1, "T%d: FakeStack created: %p -- %p stack_size_log: %zd; "
+ "mmapped %zdK, noreserve=%d \n",
+ GetCurrentTidOrInvalid(), p,
+ p + FakeStack::RequiredSize(stack_size_log), stack_size_log,
+ size >> 10, flags()->uar_noreserve);
+ return res;
+}
+
+void FakeStack::Destroy(int tid) {
+ PoisonAll(0);
+ if (Verbosity() >= 2) {
+ InternalScopedString str(kNumberOfSizeClasses * 50);
+ for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++)
+ str.append("%zd: %zd/%zd; ", class_id, hint_position_[class_id],
+ NumberOfFrames(stack_size_log(), class_id));
+ Report("T%d: FakeStack destroyed: %s\n", tid, str.data());
+ }
+ uptr size = RequiredSize(stack_size_log_);
+ FlushUnneededASanShadowMemory(reinterpret_cast<uptr>(this), size);
+ UnmapOrDie(this, size);
+}
+
+void FakeStack::PoisonAll(u8 magic) {
+ PoisonShadow(reinterpret_cast<uptr>(this), RequiredSize(stack_size_log()),
+ magic);
+}
+
+#if !defined(_MSC_VER) || defined(__clang__)
+ALWAYS_INLINE USED
+#endif
+FakeFrame *FakeStack::Allocate(uptr stack_size_log, uptr class_id,
+ uptr real_stack) {
+ CHECK_LT(class_id, kNumberOfSizeClasses);
+ if (needs_gc_)
+ GC(real_stack);
+ uptr &hint_position = hint_position_[class_id];
+ const int num_iter = NumberOfFrames(stack_size_log, class_id);
+ u8 *flags = GetFlags(stack_size_log, class_id);
+ for (int i = 0; i < num_iter; i++) {
+ uptr pos = ModuloNumberOfFrames(stack_size_log, class_id, hint_position++);
+ // This part is tricky. On one hand, checking and setting flags[pos]
+ // should be atomic to ensure async-signal safety. But on the other hand,
+ // if the signal arrives between checking and setting flags[pos], the
+ // signal handler's fake stack will start from a different hint_position
+ // and so will not touch this particular byte. So, it is safe to do this
+ // with regular non-atomic load and store (at least I was not able to make
+ // this code crash).
+ if (flags[pos]) continue;
+ flags[pos] = 1;
+ FakeFrame *res = reinterpret_cast<FakeFrame *>(
+ GetFrame(stack_size_log, class_id, pos));
+ res->real_stack = real_stack;
+ *SavedFlagPtr(reinterpret_cast<uptr>(res), class_id) = &flags[pos];
+ return res;
+ }
+ return nullptr; // We are out of fake stack.
+}
+
+uptr FakeStack::AddrIsInFakeStack(uptr ptr, uptr *frame_beg, uptr *frame_end) {
+ uptr stack_size_log = this->stack_size_log();
+ uptr beg = reinterpret_cast<uptr>(GetFrame(stack_size_log, 0, 0));
+ uptr end = reinterpret_cast<uptr>(this) + RequiredSize(stack_size_log);
+ if (ptr < beg || ptr >= end) return 0;
+ uptr class_id = (ptr - beg) >> stack_size_log;
+ uptr base = beg + (class_id << stack_size_log);
+ CHECK_LE(base, ptr);
+ CHECK_LT(ptr, base + (((uptr)1) << stack_size_log));
+ uptr pos = (ptr - base) >> (kMinStackFrameSizeLog + class_id);
+ uptr res = base + pos * BytesInSizeClass(class_id);
+ *frame_end = res + BytesInSizeClass(class_id);
+ *frame_beg = res + sizeof(FakeFrame);
+ return res;
+}
+
+void FakeStack::HandleNoReturn() {
+ needs_gc_ = true;
+}
+
+// When a throw, longjmp or some such happens, we don't call OnFree() and as
+// a result may leak one or more fake frames; the good news is that we are
+// notified about all such events by HandleNoReturn().
+// If we recently had such a no-return event, we need to collect garbage
+// frames. We do that based on their 'real_stack' values -- everything that
+// is lower than the current real_stack is garbage.
+NOINLINE void FakeStack::GC(uptr real_stack) {
+ uptr collected = 0;
+ for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++) {
+ u8 *flags = GetFlags(stack_size_log(), class_id);
+ for (uptr i = 0, n = NumberOfFrames(stack_size_log(), class_id); i < n;
+ i++) {
+ if (flags[i] == 0) continue; // not allocated.
+ FakeFrame *ff = reinterpret_cast<FakeFrame *>(
+ GetFrame(stack_size_log(), class_id, i));
+ if (ff->real_stack < real_stack) {
+ flags[i] = 0;
+ collected++;
+ }
+ }
+ }
+ needs_gc_ = false;
+}
+
+void FakeStack::ForEachFakeFrame(RangeIteratorCallback callback, void *arg) {
+ for (uptr class_id = 0; class_id < kNumberOfSizeClasses; class_id++) {
+ u8 *flags = GetFlags(stack_size_log(), class_id);
+ for (uptr i = 0, n = NumberOfFrames(stack_size_log(), class_id); i < n;
+ i++) {
+ if (flags[i] == 0) continue; // not allocated.
+ FakeFrame *ff = reinterpret_cast<FakeFrame *>(
+ GetFrame(stack_size_log(), class_id, i));
+ uptr begin = reinterpret_cast<uptr>(ff);
+ callback(begin, begin + FakeStack::BytesInSizeClass(class_id), arg);
+ }
+ }
+}
+
+#if (SANITIZER_LINUX && !SANITIZER_ANDROID) || SANITIZER_FUCHSIA
+static THREADLOCAL FakeStack *fake_stack_tls;
+
+FakeStack *GetTLSFakeStack() {
+ return fake_stack_tls;
+}
+void SetTLSFakeStack(FakeStack *fs) {
+ fake_stack_tls = fs;
+}
+#else
+FakeStack *GetTLSFakeStack() { return 0; }
+void SetTLSFakeStack(FakeStack *fs) { }
+#endif // (SANITIZER_LINUX && !SANITIZER_ANDROID) || SANITIZER_FUCHSIA
+
+static FakeStack *GetFakeStack() {
+ AsanThread *t = GetCurrentThread();
+ if (!t) return nullptr;
+ return t->fake_stack();
+}
+
+static FakeStack *GetFakeStackFast() {
+ if (FakeStack *fs = GetTLSFakeStack())
+ return fs;
+ if (!__asan_option_detect_stack_use_after_return)
+ return nullptr;
+ return GetFakeStack();
+}
+
+ALWAYS_INLINE uptr OnMalloc(uptr class_id, uptr size) {
+ FakeStack *fs = GetFakeStackFast();
+ if (!fs) return 0;
+ uptr local_stack;
+ uptr real_stack = reinterpret_cast<uptr>(&local_stack);
+ FakeFrame *ff = fs->Allocate(fs->stack_size_log(), class_id, real_stack);
+ if (!ff) return 0; // Out of fake stack.
+ uptr ptr = reinterpret_cast<uptr>(ff);
+ SetShadow(ptr, size, class_id, 0);
+ return ptr;
+}
+
+ALWAYS_INLINE void OnFree(uptr ptr, uptr class_id, uptr size) {
+ FakeStack::Deallocate(ptr, class_id);
+ SetShadow(ptr, size, class_id, kMagic8);
+}
+
+} // namespace __asan
+
+// ---------------------- Interface ---------------- {{{1
+using namespace __asan;
+#define DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(class_id) \
+ extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr \
+ __asan_stack_malloc_##class_id(uptr size) { \
+ return OnMalloc(class_id, size); \
+ } \
+ extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __asan_stack_free_##class_id( \
+ uptr ptr, uptr size) { \
+ OnFree(ptr, class_id, size); \
+ }
+
+DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(0)
+DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(1)
+DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(2)
+DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(3)
+DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(4)
+DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(5)
+DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(6)
+DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(7)
+DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(8)
+DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(9)
+DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID(10)
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__asan_get_current_fake_stack() { return GetFakeStackFast(); }
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__asan_addr_is_in_fake_stack(void *fake_stack, void *addr, void **beg,
+ void **end) {
+ FakeStack *fs = reinterpret_cast<FakeStack*>(fake_stack);
+ if (!fs) return nullptr;
+ uptr frame_beg, frame_end;
+ FakeFrame *frame = reinterpret_cast<FakeFrame *>(fs->AddrIsInFakeStack(
+ reinterpret_cast<uptr>(addr), &frame_beg, &frame_end));
+ if (!frame) return nullptr;
+ if (frame->magic != kCurrentStackFrameMagic)
+ return nullptr;
+ if (beg) *beg = reinterpret_cast<void*>(frame_beg);
+ if (end) *end = reinterpret_cast<void*>(frame_end);
+ return reinterpret_cast<void*>(frame->real_stack);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __asan_alloca_poison(uptr addr, uptr size) {
+ uptr LeftRedzoneAddr = addr - kAllocaRedzoneSize;
+ uptr PartialRzAddr = addr + size;
+ uptr RightRzAddr = (PartialRzAddr + kAllocaRedzoneMask) & ~kAllocaRedzoneMask;
+ uptr PartialRzAligned = PartialRzAddr & ~(SHADOW_GRANULARITY - 1);
+ FastPoisonShadow(LeftRedzoneAddr, kAllocaRedzoneSize, kAsanAllocaLeftMagic);
+ FastPoisonShadowPartialRightRedzone(
+ PartialRzAligned, PartialRzAddr % SHADOW_GRANULARITY,
+ RightRzAddr - PartialRzAligned, kAsanAllocaRightMagic);
+ FastPoisonShadow(RightRzAddr, kAllocaRedzoneSize, kAsanAllocaRightMagic);
+}
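+
+// Worked example (illustrative, assuming SHADOW_GRANULARITY == 8): for
+// addr = 0x1000 and size = 50, PartialRzAddr = 0x1032, RightRzAddr rounds
+// up to 0x1040, and PartialRzAligned = 0x1030; the partially poisoned
+// region is [0x1030, 0x1040) (its first 2 bytes stay addressable) and the
+// full right redzone is [0x1040, 0x1060).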
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __asan_allocas_unpoison(uptr top, uptr bottom) {
+ if ((!top) || (top > bottom)) return;
+ REAL(memset)(reinterpret_cast<void*>(MemToShadow(top)), 0,
+ (bottom - top) / SHADOW_GRANULARITY);
+}
+} // extern "C"
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_fake_stack.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_fake_stack.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_fake_stack.h (revision 351984)
@@ -0,0 +1,175 @@
+//===-- asan_fake_stack.h ---------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// ASan-private header for asan_fake_stack.cc, implements FakeStack.
+//===----------------------------------------------------------------------===//
+
+#ifndef ASAN_FAKE_STACK_H
+#define ASAN_FAKE_STACK_H
+
+#include "sanitizer_common/sanitizer_common.h"
+
+namespace __asan {
+
+// A fake stack frame contains the local variables of one function.
+struct FakeFrame {
+ uptr magic; // Modified by the instrumented code.
+ uptr descr; // Modified by the instrumented code.
+ uptr pc; // Modified by the instrumented code.
+ uptr real_stack;
+};
+
+// For each thread we create a fake stack and place stack objects on this fake
+// stack instead of the real stack. The fake stack is not really a stack but
+// a fast malloc-like allocator, so when a function exits its fake frame is
+// not popped but remains there for quite some time until it gets reused.
+// So, we poison the objects on the fake stack when the function returns.
+// This helps us find use-after-return bugs.
+//
+// The FakeStack object is allocated by a single mmap call and has no other
+// pointers. The size of the fake stack depends on the actual thread stack
+// size and thus cannot be a constant.
+// stack_size is a power of two greater than or equal to the thread's stack
+// size; we store it as its logarithm (stack_size_log).
+// FakeStack has kNumberOfSizeClasses (11) size classes, each size class
+// is a power of two, starting from 64 bytes. Each size class occupies
+// stack_size bytes and thus can allocate
+// NumberOfFrames=(stack_size/BytesInSizeClass) fake frames (also a power of 2).
+// For each size class we have NumberOfFrames allocation flags,
+// each flag indicates whether the given frame is currently allocated.
+// All flags for size classes 0 .. 10 are stored in a single contiguous region
+// followed by another contiguous region which contains the actual memory for
+// size classes. The addresses are computed by GetFlags and GetFrame without
+// any memory accesses solely based on 'this' and stack_size_log.
+// Allocate() flips the appropriate allocation flag atomically, thus achieving
+// async-signal safety.
+// This allocator does not have a quarantine per se, but it tries to allocate
+// frames in round-robin fashion to maximize the delay between a deallocation
+// and the next allocation.
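+//
+// Sizing example (illustrative only): with stack_size_log = 20 (1M per size
+// class), SizeRequiredForFlags() is 1 << (20 + 1 - 6) = 32K and
+// SizeRequiredForFrames() is (1 << 20) * 11 = 11M, so RequiredSize() is
+// 4096 + 32K + 11M -- the flags region is tiny compared to the frames.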
+class FakeStack {
+ static const uptr kMinStackFrameSizeLog = 6; // Min frame is 64B.
+ static const uptr kMaxStackFrameSizeLog = 16; // Max stack frame is 64K.
+
+ public:
+ static const uptr kNumberOfSizeClasses =
+ kMaxStackFrameSizeLog - kMinStackFrameSizeLog + 1;
+
+ // CTOR: create the FakeStack as a single mmap-ed object.
+ static FakeStack *Create(uptr stack_size_log);
+
+ void Destroy(int tid);
+
+ // stack_size_log is at least 15 (stack_size >= 32K).
+ static uptr SizeRequiredForFlags(uptr stack_size_log) {
+ return ((uptr)1) << (stack_size_log + 1 - kMinStackFrameSizeLog);
+ }
+
+ // Each size class occupies stack_size bytes.
+ static uptr SizeRequiredForFrames(uptr stack_size_log) {
+ return (((uptr)1) << stack_size_log) * kNumberOfSizeClasses;
+ }
+
+  // Number of bytes required for the whole object.
+ static uptr RequiredSize(uptr stack_size_log) {
+ return kFlagsOffset + SizeRequiredForFlags(stack_size_log) +
+ SizeRequiredForFrames(stack_size_log);
+ }
+
+ // Offset of the given flag from the first flag.
+ // The flags for class 0 begin at offset 000000000
+ // The flags for class 1 begin at offset 100000000
+ // ....................2................ 110000000
+ // ....................3................ 111000000
+ // and so on.
+ static uptr FlagsOffset(uptr stack_size_log, uptr class_id) {
+ uptr t = kNumberOfSizeClasses - 1 - class_id;
+ const uptr all_ones = (((uptr)1) << (kNumberOfSizeClasses - 1)) - 1;
+ return ((all_ones >> t) << t) << (stack_size_log - 15);
+ }
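+  // E.g. (illustrative): with the minimal stack_size_log of 15,
+  // FlagsOffset(15, 1) = ((0x3ff >> 9) << 9) << 0 = 512, which equals
+  // NumberOfFrames(15, 0): class 1's flags begin right after class 0's.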
+
+ static uptr NumberOfFrames(uptr stack_size_log, uptr class_id) {
+ return ((uptr)1) << (stack_size_log - kMinStackFrameSizeLog - class_id);
+ }
+
+  // Reduce n modulo the number of frames in the size class.
+ static uptr ModuloNumberOfFrames(uptr stack_size_log, uptr class_id, uptr n) {
+ return n & (NumberOfFrames(stack_size_log, class_id) - 1);
+ }
+
+ // The pointer to the flags of the given class_id.
+ u8 *GetFlags(uptr stack_size_log, uptr class_id) {
+ return reinterpret_cast<u8 *>(this) + kFlagsOffset +
+ FlagsOffset(stack_size_log, class_id);
+ }
+
+ // Get frame by class_id and pos.
+ u8 *GetFrame(uptr stack_size_log, uptr class_id, uptr pos) {
+ return reinterpret_cast<u8 *>(this) + kFlagsOffset +
+ SizeRequiredForFlags(stack_size_log) +
+ (((uptr)1) << stack_size_log) * class_id +
+ BytesInSizeClass(class_id) * pos;
+ }
+
+ // Allocate the fake frame.
+ FakeFrame *Allocate(uptr stack_size_log, uptr class_id, uptr real_stack);
+
+ // Deallocate the fake frame: read the saved flag address and write 0 there.
+ static void Deallocate(uptr x, uptr class_id) {
+ **SavedFlagPtr(x, class_id) = 0;
+ }
+
+ // Poison the entire FakeStack's shadow with the magic value.
+ void PoisonAll(u8 magic);
+
+ // Return the beginning of the FakeFrame or 0 if the address is not ours.
+ uptr AddrIsInFakeStack(uptr addr, uptr *frame_beg, uptr *frame_end);
+ USED uptr AddrIsInFakeStack(uptr addr) {
+ uptr t1, t2;
+ return AddrIsInFakeStack(addr, &t1, &t2);
+ }
+
+ // Number of bytes in a fake frame of this size class.
+ static uptr BytesInSizeClass(uptr class_id) {
+ return ((uptr)1) << (class_id + kMinStackFrameSizeLog);
+ }
+
+ // The fake frame is guaranteed to have a right redzone.
+ // We use the last word of that redzone to store the address of the flag
+  // that corresponds to the current frame, which makes deallocation faster.
+ static u8 **SavedFlagPtr(uptr x, uptr class_id) {
+ return reinterpret_cast<u8 **>(x + BytesInSizeClass(class_id) - sizeof(x));
+ }
+
+ uptr stack_size_log() const { return stack_size_log_; }
+
+ void HandleNoReturn();
+ void GC(uptr real_stack);
+
+ void ForEachFakeFrame(RangeIteratorCallback callback, void *arg);
+
+ private:
+ FakeStack() { }
+  static const uptr kFlagsOffset = 4096; // This is where the flags begin.
+ // Must match the number of uses of DEFINE_STACK_MALLOC_FREE_WITH_CLASS_ID
+ COMPILER_CHECK(kNumberOfSizeClasses == 11);
+ static const uptr kMaxStackMallocSize = ((uptr)1) << kMaxStackFrameSizeLog;
+
+ uptr hint_position_[kNumberOfSizeClasses];
+ uptr stack_size_log_;
+  // Set when HandleNoReturn() fires; tells Allocate() to GC stale frames.
+ bool needs_gc_;
+};
+
+FakeStack *GetTLSFakeStack();
+void SetTLSFakeStack(FakeStack *fs);
+
+} // namespace __asan
+
+#endif // ASAN_FAKE_STACK_H
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_flags.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_flags.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_flags.cc (revision 351984)
@@ -0,0 +1,214 @@
+//===-- asan_flags.cc -------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// ASan flag parsing logic.
+//===----------------------------------------------------------------------===//
+
+#include "asan_activation.h"
+#include "asan_flags.h"
+#include "asan_interface_internal.h"
+#include "asan_stack.h"
+#include "lsan/lsan_common.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_flag_parser.h"
+#include "ubsan/ubsan_flags.h"
+#include "ubsan/ubsan_platform.h"
+
+namespace __asan {
+
+Flags asan_flags_dont_use_directly; // use via flags().
+
+static const char *MaybeCallAsanDefaultOptions() {
+ return (&__asan_default_options) ? __asan_default_options() : "";
+}
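+
+// Note: __asan_default_options is weak (see the bottom of this file); user
+// code may define it to change the defaults, e.g. (user code, not this
+// file):
+//   extern "C" const char *__asan_default_options() {
+//     return "verbosity=1:malloc_context_size=20";
+//   }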
+
+static const char *MaybeUseAsanDefaultOptionsCompileDefinition() {
+#ifdef ASAN_DEFAULT_OPTIONS
+ return SANITIZER_STRINGIFY(ASAN_DEFAULT_OPTIONS);
+#else
+ return "";
+#endif
+}
+
+void Flags::SetDefaults() {
+#define ASAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
+#include "asan_flags.inc"
+#undef ASAN_FLAG
+}
+
+static void RegisterAsanFlags(FlagParser *parser, Flags *f) {
+#define ASAN_FLAG(Type, Name, DefaultValue, Description) \
+ RegisterFlag(parser, #Name, Description, &f->Name);
+#include "asan_flags.inc"
+#undef ASAN_FLAG
+}
+
+void InitializeFlags() {
+ // Set the default values and prepare for parsing ASan and common flags.
+ SetCommonFlagsDefaults();
+ {
+ CommonFlags cf;
+ cf.CopyFrom(*common_flags());
+ cf.detect_leaks = cf.detect_leaks && CAN_SANITIZE_LEAKS;
+ cf.external_symbolizer_path = GetEnv("ASAN_SYMBOLIZER_PATH");
+ cf.malloc_context_size = kDefaultMallocContextSize;
+ cf.intercept_tls_get_addr = true;
+ cf.exitcode = 1;
+ OverrideCommonFlags(cf);
+ }
+ Flags *f = flags();
+ f->SetDefaults();
+
+ FlagParser asan_parser;
+ RegisterAsanFlags(&asan_parser, f);
+ RegisterCommonFlags(&asan_parser);
+
+ // Set the default values and prepare for parsing LSan and UBSan flags
+ // (which can also overwrite common flags).
+#if CAN_SANITIZE_LEAKS
+ __lsan::Flags *lf = __lsan::flags();
+ lf->SetDefaults();
+
+ FlagParser lsan_parser;
+ __lsan::RegisterLsanFlags(&lsan_parser, lf);
+ RegisterCommonFlags(&lsan_parser);
+#endif
+
+#if CAN_SANITIZE_UB
+ __ubsan::Flags *uf = __ubsan::flags();
+ uf->SetDefaults();
+
+ FlagParser ubsan_parser;
+ __ubsan::RegisterUbsanFlags(&ubsan_parser, uf);
+ RegisterCommonFlags(&ubsan_parser);
+#endif
+
+ if (SANITIZER_MAC) {
+ // Support macOS MallocScribble and MallocPreScribble:
+ // <https://developer.apple.com/library/content/documentation/Performance/
+ // Conceptual/ManagingMemory/Articles/MallocDebug.html>
+ if (GetEnv("MallocScribble")) {
+ f->max_free_fill_size = 0x1000;
+ }
+ if (GetEnv("MallocPreScribble")) {
+ f->malloc_fill_byte = 0xaa;
+ }
+ }
+
+ // Override from ASan compile definition.
+ const char *asan_compile_def = MaybeUseAsanDefaultOptionsCompileDefinition();
+ asan_parser.ParseString(asan_compile_def);
+
+ // Override from user-specified string.
+ const char *asan_default_options = MaybeCallAsanDefaultOptions();
+ asan_parser.ParseString(asan_default_options);
+#if CAN_SANITIZE_UB
+ const char *ubsan_default_options = __ubsan::MaybeCallUbsanDefaultOptions();
+ ubsan_parser.ParseString(ubsan_default_options);
+#endif
+#if CAN_SANITIZE_LEAKS
+ const char *lsan_default_options = __lsan::MaybeCallLsanDefaultOptions();
+ lsan_parser.ParseString(lsan_default_options);
+#endif
+
+ // Override from command line.
+ asan_parser.ParseStringFromEnv("ASAN_OPTIONS");
+#if CAN_SANITIZE_LEAKS
+ lsan_parser.ParseStringFromEnv("LSAN_OPTIONS");
+#endif
+#if CAN_SANITIZE_UB
+ ubsan_parser.ParseStringFromEnv("UBSAN_OPTIONS");
+#endif
+
+ InitializeCommonFlags();
+
+ // TODO(eugenis): dump all flags at verbosity>=2?
+ if (Verbosity()) ReportUnrecognizedFlags();
+
+ if (common_flags()->help) {
+ // TODO(samsonov): print all of the flags (ASan, LSan, common).
+ asan_parser.PrintFlagDescriptions();
+ }
+
+ // Flag validation:
+ if (!CAN_SANITIZE_LEAKS && common_flags()->detect_leaks) {
+ Report("%s: detect_leaks is not supported on this platform.\n",
+ SanitizerToolName);
+ Die();
+ }
+ // Ensure that redzone is at least SHADOW_GRANULARITY.
+ if (f->redzone < (int)SHADOW_GRANULARITY)
+ f->redzone = SHADOW_GRANULARITY;
+ // Make "strict_init_order" imply "check_initialization_order".
+ // TODO(samsonov): Use a single runtime flag for an init-order checker.
+ if (f->strict_init_order) {
+ f->check_initialization_order = true;
+ }
+ CHECK_LE((uptr)common_flags()->malloc_context_size, kStackTraceMax);
+ CHECK_LE(f->min_uar_stack_size_log, f->max_uar_stack_size_log);
+ CHECK_GE(f->redzone, 16);
+ CHECK_GE(f->max_redzone, f->redzone);
+ CHECK_LE(f->max_redzone, 2048);
+ CHECK(IsPowerOfTwo(f->redzone));
+ CHECK(IsPowerOfTwo(f->max_redzone));
+ if (SANITIZER_RTEMS) {
+ CHECK(!f->unmap_shadow_on_exit);
+ CHECK(!f->protect_shadow_gap);
+ }
+
+ // quarantine_size is deprecated but we still honor it.
+ // quarantine_size can not be used together with quarantine_size_mb.
+ if (f->quarantine_size >= 0 && f->quarantine_size_mb >= 0) {
+ Report("%s: please use either 'quarantine_size' (deprecated) or "
+ "quarantine_size_mb, but not both\n", SanitizerToolName);
+ Die();
+ }
+ if (f->quarantine_size >= 0)
+ f->quarantine_size_mb = f->quarantine_size >> 20;
+ if (f->quarantine_size_mb < 0) {
+ const int kDefaultQuarantineSizeMb =
+ (ASAN_LOW_MEMORY) ? 1UL << 4 : 1UL << 8;
+ f->quarantine_size_mb = kDefaultQuarantineSizeMb;
+ }
+ if (f->thread_local_quarantine_size_kb < 0) {
+ const u32 kDefaultThreadLocalQuarantineSizeKb =
+        // It is not advised to go lower than 64Kb, otherwise quarantine batches
+        // pushed from the thread local quarantine to the global one will create
+        // too much overhead. One quarantine batch size is 8Kb and it holds up to
+        // 1021 chunks, which amounts to 1/8 memory overhead per batch when the
+        // thread local quarantine is set to 64Kb.
+ (ASAN_LOW_MEMORY) ? 1 << 6 : FIRST_32_SECOND_64(1 << 8, 1 << 10);
+ f->thread_local_quarantine_size_kb = kDefaultThreadLocalQuarantineSizeKb;
+ }
+ if (f->thread_local_quarantine_size_kb == 0 && f->quarantine_size_mb > 0) {
+ Report("%s: thread_local_quarantine_size_kb can be set to 0 only when "
+ "quarantine_size_mb is set to 0\n", SanitizerToolName);
+ Die();
+ }
+ if (!f->replace_str && common_flags()->intercept_strlen) {
+ Report("WARNING: strlen interceptor is enabled even though replace_str=0. "
+ "Use intercept_strlen=0 to disable it.");
+ }
+ if (!f->replace_str && common_flags()->intercept_strchr) {
+ Report("WARNING: strchr* interceptors are enabled even though "
+ "replace_str=0. Use intercept_strchr=0 to disable them.");
+ }
+ if (!f->replace_str && common_flags()->intercept_strndup) {
+ Report("WARNING: strndup* interceptors are enabled even though "
+ "replace_str=0. Use intercept_strndup=0 to disable them.");
+ }
+}
+
+} // namespace __asan
+
+SANITIZER_INTERFACE_WEAK_DEF(const char*, __asan_default_options, void) {
+ return "";
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_flags.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
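For context: the weak __asan_default_options definition at the end of
asan_flags.cc above is the hook behind the "user-specified function" source
in the flag-precedence comment of asan_flags.h below. An instrumented
program can supply its own copy to bake in defaults; the string it returns
is parsed before ASAN_OPTIONS, so the environment still wins. A minimal
user-side sketch (the option values are illustrative):

    // my_asan_defaults.cc, linked into the instrumented binary; replaces
    // the weak definition above and is parsed before ASAN_OPTIONS is read.
    extern "C" const char *__asan_default_options() {
      return "verbosity=1:halt_on_error=0";  // illustrative option string
    }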
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_flags.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_flags.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_flags.h (revision 351984)
@@ -0,0 +1,48 @@
+//===-- asan_flags.h -------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// ASan runtime flags.
+//===----------------------------------------------------------------------===//
+
+#ifndef ASAN_FLAGS_H
+#define ASAN_FLAGS_H
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_flag_parser.h"
+
+// ASan flag values can be defined in five ways:
+// 1) initialized with default values at startup.
+// 2) overridden during compilation of ASan runtime by providing
+//    compile definition ASAN_DEFAULT_OPTIONS.
+// 3) overridden from string returned by user-specified function
+//    __asan_default_options().
+// 4) overridden from env variable ASAN_OPTIONS.
+// 5) overridden during ASan activation (for now used on Android only).
+
+namespace __asan {
+
+struct Flags {
+#define ASAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
+#include "asan_flags.inc"
+#undef ASAN_FLAG
+
+ void SetDefaults();
+};
+
+extern Flags asan_flags_dont_use_directly;
+inline Flags *flags() {
+ return &asan_flags_dont_use_directly;
+}
+
+void InitializeFlags();
+
+} // namespace __asan
+
+#endif // ASAN_FLAGS_H
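The ASAN_FLAG X-macro above expands every row of asan_flags.inc into a
struct field (and asan_flags.cc reuses the same rows to register parsers
and set defaults). A self-contained sketch of the pattern, with the .inc
rows folded into a MY_FLAGS list macro and hypothetical flag names:

    #include <cstdio>

    // Each X(...) row plays the role of one ASAN_FLAG(...) line.
    #define MY_FLAGS(X)                            \
      X(int, redzone, 16, "Minimal redzone size.") \
      X(bool, debug, false, "Enable debug output.")

    struct MyFlags {
    #define MY_FLAG(Type, Name, Default, Desc) Type Name;
      MY_FLAGS(MY_FLAG)   // expands to: int redzone; bool debug;
    #undef MY_FLAG

      void SetDefaults() {
    #define MY_FLAG(Type, Name, Default, Desc) Name = Default;
        MY_FLAGS(MY_FLAG)  // expands to: redzone = 16; debug = false;
    #undef MY_FLAG
      }
    };

    int main() {
      MyFlags f;
      f.SetDefaults();
      std::printf("redzone=%d debug=%d\n", f.redzone, f.debug);
    }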
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_flags.inc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_flags.inc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_flags.inc (revision 351984)
@@ -0,0 +1,162 @@
+//===-- asan_flags.inc ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// ASan runtime flags.
+//
+//===----------------------------------------------------------------------===//
+#ifndef ASAN_FLAG
+# error "Define ASAN_FLAG prior to including this file!"
+#endif
+
+// ASAN_FLAG(Type, Name, DefaultValue, Description)
+// See COMMON_FLAG in sanitizer_flags.inc for more details.
+
+ASAN_FLAG(int, quarantine_size, -1,
+ "Deprecated, please use quarantine_size_mb.")
+ASAN_FLAG(int, quarantine_size_mb, -1,
+          "Size (in Mb) of quarantine used to detect use-after-free "
+          "errors. Lower values may reduce memory usage but increase the "
+          "chance of false negatives.")
+ASAN_FLAG(int, thread_local_quarantine_size_kb, -1,
+          "Size (in Kb) of thread local quarantine used to detect "
+          "use-after-free errors. Lower values may reduce memory usage but "
+          "increase the chance of false negatives. It is not advised to go "
+          "lower than 64Kb, otherwise frequent transfers to the global "
+          "quarantine might affect performance.")
+ASAN_FLAG(int, redzone, 16,
+          "Minimal size (in bytes) of redzones around heap objects. "
+          "Requirement: redzone >= 16 and a power of two.")
+ASAN_FLAG(int, max_redzone, 2048,
+ "Maximal size (in bytes) of redzones around heap objects.")
+ASAN_FLAG(
+ bool, debug, false,
+ "If set, prints some debugging information and does additional checks.")
+ASAN_FLAG(
+ int, report_globals, 1,
+ "Controls the way to handle globals (0 - don't detect buffer overflow on "
+ "globals, 1 - detect buffer overflow, 2 - print data about registered "
+ "globals).")
+ASAN_FLAG(bool, check_initialization_order, false,
+ "If set, attempts to catch initialization order issues.")
+ASAN_FLAG(
+ bool, replace_str, true,
+ "If set, uses custom wrappers and replacements for libc string functions "
+ "to find more errors.")
+ASAN_FLAG(bool, replace_intrin, true,
+ "If set, uses custom wrappers for memset/memcpy/memmove intrinsics.")
+ASAN_FLAG(bool, detect_stack_use_after_return, false,
+ "Enables stack-use-after-return checking at run-time.")
+ASAN_FLAG(int, min_uar_stack_size_log, 16, // We can't do smaller anyway.
+ "Minimum fake stack size log.")
+ASAN_FLAG(int, max_uar_stack_size_log,
+ 20, // 1Mb per size class, i.e. ~11Mb per thread
+ "Maximum fake stack size log.")
+ASAN_FLAG(bool, uar_noreserve, false,
+ "Use mmap with 'noreserve' flag to allocate fake stack.")
+ASAN_FLAG(
+ int, max_malloc_fill_size, 0x1000, // By default, fill only the first 4K.
+ "ASan allocator flag. max_malloc_fill_size is the maximal amount of "
+ "bytes that will be filled with malloc_fill_byte on malloc.")
+ASAN_FLAG(
+ int, max_free_fill_size, 0,
+ "ASan allocator flag. max_free_fill_size is the maximal amount of "
+ "bytes that will be filled with free_fill_byte during free.")
+ASAN_FLAG(int, malloc_fill_byte, 0xbe,
+ "Value used to fill the newly allocated memory.")
+ASAN_FLAG(int, free_fill_byte, 0x55,
+ "Value used to fill deallocated memory.")
+ASAN_FLAG(bool, allow_user_poisoning, true,
+ "If set, user may manually mark memory regions as poisoned or "
+ "unpoisoned.")
+ASAN_FLAG(
+ int, sleep_before_dying, 0,
+ "Number of seconds to sleep between printing an error report and "
+ "terminating the program. Useful for debugging purposes (e.g. when one "
+ "needs to attach gdb).")
+ASAN_FLAG(
+ int, sleep_after_init, 0,
+ "Number of seconds to sleep after AddressSanitizer is initialized. "
+ "Useful for debugging purposes (e.g. when one needs to attach gdb).")
+ASAN_FLAG(bool, check_malloc_usable_size, true,
+          "Allows users to work around a bug in Nvidia drivers prior to "
+          "295.*.")
+ASAN_FLAG(bool, unmap_shadow_on_exit, false,
+ "If set, explicitly unmaps the (huge) shadow at exit.")
+ASAN_FLAG(bool, protect_shadow_gap, !SANITIZER_RTEMS,
+ "If set, mprotect the shadow gap")
+ASAN_FLAG(bool, print_stats, false,
+ "Print various statistics after printing an error message or if "
+ "atexit=1.")
+ASAN_FLAG(bool, print_legend, true, "Print the legend for the shadow bytes.")
+ASAN_FLAG(bool, print_scariness, false,
+ "Print the scariness score. Experimental.")
+ASAN_FLAG(bool, atexit, false,
+ "If set, prints ASan exit stats even after program terminates "
+ "successfully.")
+ASAN_FLAG(
+ bool, print_full_thread_history, true,
+ "If set, prints thread creation stacks for the threads involved in the "
+ "report and their ancestors up to the main thread.")
+ASAN_FLAG(
+    bool, poison_heap, true,
+    "Poison (or not) the heap memory on [de]allocation. Zero value is useful "
+    "for benchmarking the allocator or instrumentation.")
+ASAN_FLAG(bool, poison_partial, true,
+ "If true, poison partially addressable 8-byte aligned words "
+ "(default=true). This flag affects heap and global buffers, but not "
+ "stack buffers.")
+ASAN_FLAG(bool, poison_array_cookie, true,
+ "Poison (or not) the array cookie after operator new[].")
+
+// Turn off alloc/dealloc mismatch checker on Mac and Windows for now.
+// https://github.com/google/sanitizers/issues/131
+// https://github.com/google/sanitizers/issues/309
+// TODO(glider,timurrrr): Fix known issues and enable this back.
+ASAN_FLAG(bool, alloc_dealloc_mismatch,
+ !SANITIZER_MAC && !SANITIZER_WINDOWS && !SANITIZER_ANDROID,
+ "Report errors on malloc/delete, new/free, new/delete[], etc.")
+
+ASAN_FLAG(bool, new_delete_type_mismatch, true,
+ "Report errors on mismatch between size of new and delete.")
+ASAN_FLAG(
+ bool, strict_init_order, false,
+ "If true, assume that dynamic initializers can never access globals from "
+ "other modules, even if the latter are already initialized.")
+ASAN_FLAG(
+    bool, start_deactivated, false,
+    "If true, ASan tweaks a bunch of other flags (quarantine, redzone, heap "
+    "poisoning) to reduce memory consumption as much as possible, and "
+    "restores them to original values when the first instrumented module is "
+    "loaded into the process. This is mainly intended to be used on "
+    "Android.")
+ASAN_FLAG(
+ int, detect_invalid_pointer_pairs, 0,
+ "If >= 2, detect operations like <, <=, >, >= and - on invalid pointer "
+ "pairs (e.g. when pointers belong to different objects); "
+ "If == 1, detect invalid operations only when both pointers are non-null.")
+ASAN_FLAG(
+ bool, detect_container_overflow, true,
+ "If true, honor the container overflow annotations. See "
+ "https://github.com/google/sanitizers/wiki/AddressSanitizerContainerOverflow")
+ASAN_FLAG(int, detect_odr_violation, 2,
+          "If >=2, detect violation of the One-Definition-Rule (ODR); "
+          "If ==1, detect an ODR violation only if the two variables "
+          "have different sizes.")
+ASAN_FLAG(const char *, suppressions, "", "Suppressions file name.")
+ASAN_FLAG(bool, halt_on_error, true,
+ "Crash the program after printing the first error report "
+ "(WARNING: USE AT YOUR OWN RISK!)")
+ASAN_FLAG(bool, allocator_frees_and_returns_null_on_realloc_zero, true,
+          "realloc(p, 0) is equivalent to free(p) by default (same as the "
+          "POSIX standard). If set to false, realloc(p, 0) will return a "
+          "pointer to an allocated space which cannot be used.")
+ASAN_FLAG(bool, verify_asan_link_order, true,
+          "Check position of ASan runtime in library list (needs to be "
+          "disabled when another library has to be preloaded system-wide).")
+ASAN_FLAG(bool, windows_hook_rtl_allocators, false,
+ "(Windows only) enable hooking of Rtl(Allocate|Free|Size|ReAllocate)Heap.")
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_flags.inc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
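All of the flags above can be set at run time through the ASAN_OPTIONS
environment variable as colon-separated name=value pairs, for example
(values illustrative):

    ASAN_OPTIONS=redzone=32:quarantine_size_mb=16:detect_stack_use_after_return=1 ./a.out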
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_fuchsia.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_fuchsia.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_fuchsia.cc (revision 351984)
@@ -0,0 +1,224 @@
+//===-- asan_fuchsia.cc --------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===---------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Fuchsia-specific details.
+//===---------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_fuchsia.h"
+#if SANITIZER_FUCHSIA
+
+#include "asan_interceptors.h"
+#include "asan_internal.h"
+#include "asan_stack.h"
+#include "asan_thread.h"
+
+#include <limits.h>
+#include <zircon/sanitizer.h>
+#include <zircon/syscalls.h>
+#include <zircon/threads.h>
+
+namespace __asan {
+
+// The system already set up the shadow memory for us.
+// __sanitizer::GetMaxUserVirtualAddress has already been called by
+// AsanInitInternal->InitializeHighMemEnd (asan_rtl.cc).
+// Just do some additional sanity checks here.
+void InitializeShadowMemory() {
+ if (Verbosity()) PrintAddressSpaceLayout();
+
+ // Make sure SHADOW_OFFSET doesn't use __asan_shadow_memory_dynamic_address.
+ __asan_shadow_memory_dynamic_address = kDefaultShadowSentinel;
+ DCHECK(kLowShadowBeg != kDefaultShadowSentinel);
+ __asan_shadow_memory_dynamic_address = kLowShadowBeg;
+
+ CHECK_EQ(kShadowGapEnd, kHighShadowBeg - 1);
+ CHECK_EQ(kHighMemEnd, __sanitizer::ShadowBounds.memory_limit - 1);
+ CHECK_EQ(kHighMemBeg, __sanitizer::ShadowBounds.shadow_limit);
+ CHECK_EQ(kHighShadowBeg, __sanitizer::ShadowBounds.shadow_base);
+ CHECK_EQ(kShadowGapEnd, __sanitizer::ShadowBounds.shadow_base - 1);
+ CHECK_EQ(kLowShadowEnd, 0);
+ CHECK_EQ(kLowShadowBeg, 0);
+}
+
+void AsanApplyToGlobals(globals_op_fptr op, const void *needle) {
+ UNIMPLEMENTED();
+}
+
+void AsanCheckDynamicRTPrereqs() {}
+void AsanCheckIncompatibleRT() {}
+void InitializeAsanInterceptors() {}
+
+void *AsanDoesNotSupportStaticLinkage() { return nullptr; }
+
+void InitializePlatformExceptionHandlers() {}
+void AsanOnDeadlySignal(int signo, void *siginfo, void *context) {
+ UNIMPLEMENTED();
+}
+
+// We can use a plain thread_local variable for TSD.
+static thread_local void *per_thread;
+
+void *AsanTSDGet() { return per_thread; }
+
+void AsanTSDSet(void *tsd) { per_thread = tsd; }
+
+// There's no initialization needed, and the passed-in destructor
+// will never be called. Instead, our own thread destruction hook
+// (below) will call AsanThread::TSDDtor directly.
+void AsanTSDInit(void (*destructor)(void *tsd)) {
+ DCHECK(destructor == &PlatformTSDDtor);
+}
+
+void PlatformTSDDtor(void *tsd) { UNREACHABLE(__func__); }
+
+static inline size_t AsanThreadMmapSize() {
+ return RoundUpTo(sizeof(AsanThread), PAGE_SIZE);
+}
+
+struct AsanThread::InitOptions {
+ uptr stack_bottom, stack_size;
+};
+
+// Shared setup between thread creation and startup for the initial thread.
+static AsanThread *CreateAsanThread(StackTrace *stack, u32 parent_tid,
+ uptr user_id, bool detached,
+ const char *name, uptr stack_bottom,
+ uptr stack_size) {
+ // In lieu of AsanThread::Create.
+ AsanThread *thread = (AsanThread *)MmapOrDie(AsanThreadMmapSize(), __func__);
+
+ AsanThreadContext::CreateThreadContextArgs args = {thread, stack};
+ u32 tid =
+ asanThreadRegistry().CreateThread(user_id, detached, parent_tid, &args);
+ asanThreadRegistry().SetThreadName(tid, name);
+
+ // On other systems, AsanThread::Init() is called from the new
+ // thread itself. But on Fuchsia we already know the stack address
+ // range beforehand, so we can do most of the setup right now.
+ const AsanThread::InitOptions options = {stack_bottom, stack_size};
+ thread->Init(&options);
+
+ return thread;
+}
+
+// This gets the same arguments passed to Init by CreateAsanThread, above.
+// We're in the creator thread before the new thread is actually started,
+// but its stack address range is already known. We don't bother tracking
+// the static TLS address range because the system itself already uses an
+// ASan-aware allocator for that.
+void AsanThread::SetThreadStackAndTls(const AsanThread::InitOptions *options) {
+ DCHECK_NE(GetCurrentThread(), this);
+ DCHECK_NE(GetCurrentThread(), nullptr);
+ CHECK_NE(options->stack_bottom, 0);
+ CHECK_NE(options->stack_size, 0);
+ stack_bottom_ = options->stack_bottom;
+ stack_top_ = options->stack_bottom + options->stack_size;
+}
+
+// Called by __asan::AsanInitInternal (asan_rtl.cc).
+AsanThread *CreateMainThread() {
+ thrd_t self = thrd_current();
+ char name[ZX_MAX_NAME_LEN];
+ CHECK_NE(__sanitizer::MainThreadStackBase, 0);
+ CHECK_GT(__sanitizer::MainThreadStackSize, 0);
+ AsanThread *t = CreateAsanThread(
+ nullptr, 0, reinterpret_cast<uptr>(self), true,
+ _zx_object_get_property(thrd_get_zx_handle(self), ZX_PROP_NAME, name,
+ sizeof(name)) == ZX_OK
+ ? name
+ : nullptr,
+ __sanitizer::MainThreadStackBase, __sanitizer::MainThreadStackSize);
+ SetCurrentThread(t);
+ return t;
+}
+
+// This is called before each thread creation is attempted. So, in
+// its first call, the calling thread is the initial and sole thread.
+static void *BeforeThreadCreateHook(uptr user_id, bool detached,
+ const char *name, uptr stack_bottom,
+ uptr stack_size) {
+ EnsureMainThreadIDIsCorrect();
+ // Strict init-order checking is thread-hostile.
+ if (flags()->strict_init_order) StopInitOrderChecking();
+
+ GET_STACK_TRACE_THREAD;
+ u32 parent_tid = GetCurrentTidOrInvalid();
+
+ return CreateAsanThread(&stack, parent_tid, user_id, detached, name,
+ stack_bottom, stack_size);
+}
+
+// This is called after creating a new thread (in the creating thread),
+// with the pointer returned by BeforeThreadCreateHook (above).
+static void ThreadCreateHook(void *hook, bool aborted) {
+ AsanThread *thread = static_cast<AsanThread *>(hook);
+ if (!aborted) {
+ // The thread was created successfully.
+ // ThreadStartHook is already running in the new thread.
+ } else {
+ // The thread wasn't created after all.
+ // Clean up everything we set up in BeforeThreadCreateHook.
+ asanThreadRegistry().FinishThread(thread->tid());
+ UnmapOrDie(thread, AsanThreadMmapSize());
+ }
+}
+
+// This is called in the newly-created thread before it runs anything else,
+// with the pointer returned by BeforeThreadCreateHook (above).
+// cf. asan_interceptors.cc:asan_thread_start
+static void ThreadStartHook(void *hook, uptr os_id) {
+ AsanThread *thread = static_cast<AsanThread *>(hook);
+ SetCurrentThread(thread);
+
+ // In lieu of AsanThread::ThreadStart.
+ asanThreadRegistry().StartThread(thread->tid(), os_id, ThreadType::Regular,
+ nullptr);
+}
+
+// Each thread runs this just before it exits,
+// with the pointer returned by BeforeThreadCreateHook (above).
+// All per-thread destructors have already been called.
+static void ThreadExitHook(void *hook, uptr os_id) {
+ AsanThread::TSDDtor(per_thread);
+}
+
+bool HandleDlopenInit() {
+ // Not supported on this platform.
+ static_assert(!SANITIZER_SUPPORTS_INIT_FOR_DLOPEN,
+ "Expected SANITIZER_SUPPORTS_INIT_FOR_DLOPEN to be false");
+ return false;
+}
+
+} // namespace __asan
+
+// These are declared (in extern "C") by <zircon/sanitizer.h>.
+// The system runtime will call our definitions directly.
+
+void *__sanitizer_before_thread_create_hook(thrd_t thread, bool detached,
+ const char *name, void *stack_base,
+ size_t stack_size) {
+ return __asan::BeforeThreadCreateHook(
+ reinterpret_cast<uptr>(thread), detached, name,
+ reinterpret_cast<uptr>(stack_base), stack_size);
+}
+
+void __sanitizer_thread_create_hook(void *hook, thrd_t thread, int error) {
+ __asan::ThreadCreateHook(hook, error != thrd_success);
+}
+
+void __sanitizer_thread_start_hook(void *hook, thrd_t self) {
+ __asan::ThreadStartHook(hook, reinterpret_cast<uptr>(self));
+}
+
+void __sanitizer_thread_exit_hook(void *hook, thrd_t self) {
+ __asan::ThreadExitHook(hook, reinterpret_cast<uptr>(self));
+}
+
+#endif // SANITIZER_FUCHSIA
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_fuchsia.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
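Taken together, the four __sanitizer_*_hook definitions above cover the
whole thread lifecycle on Fuchsia. The sequence for one thread, with names
from this file and the driver lines schematic:

    // creator thread:
    //   hook = __sanitizer_before_thread_create_hook(...)  // registers AsanThread
    //   err  = thrd_create(...)
    //   __sanitizer_thread_create_hook(hook, thread, err)  // tears down on failure
    // new thread:
    //   __sanitizer_thread_start_hook(hook, self)          // SetCurrentThread
    //   ... user code ...
    //   __sanitizer_thread_exit_hook(hook, self)           // runs TSDDtor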
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_globals.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_globals.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_globals.cc (revision 351984)
@@ -0,0 +1,465 @@
+//===-- asan_globals.cc ---------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Handle globals.
+//===----------------------------------------------------------------------===//
+
+#include "asan_interceptors.h"
+#include "asan_internal.h"
+#include "asan_mapping.h"
+#include "asan_poisoning.h"
+#include "asan_report.h"
+#include "asan_stack.h"
+#include "asan_stats.h"
+#include "asan_suppressions.h"
+#include "asan_thread.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_mutex.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "sanitizer_common/sanitizer_symbolizer.h"
+
+namespace __asan {
+
+typedef __asan_global Global;
+
+struct ListOfGlobals {
+ const Global *g;
+ ListOfGlobals *next;
+};
+
+static BlockingMutex mu_for_globals(LINKER_INITIALIZED);
+static LowLevelAllocator allocator_for_globals;
+static ListOfGlobals *list_of_all_globals;
+
+static const int kDynamicInitGlobalsInitialCapacity = 512;
+struct DynInitGlobal {
+ Global g;
+ bool initialized;
+};
+typedef InternalMmapVector<DynInitGlobal> VectorOfGlobals;
+// Lazy-initialized and never deleted.
+static VectorOfGlobals *dynamic_init_globals;
+
+// We want to remember where a certain range of globals was registered.
+struct GlobalRegistrationSite {
+ u32 stack_id;
+ Global *g_first, *g_last;
+};
+typedef InternalMmapVector<GlobalRegistrationSite> GlobalRegistrationSiteVector;
+static GlobalRegistrationSiteVector *global_registration_site_vector;
+
+ALWAYS_INLINE void PoisonShadowForGlobal(const Global *g, u8 value) {
+ FastPoisonShadow(g->beg, g->size_with_redzone, value);
+}
+
+ALWAYS_INLINE void PoisonRedZones(const Global &g) {
+ uptr aligned_size = RoundUpTo(g.size, SHADOW_GRANULARITY);
+ FastPoisonShadow(g.beg + aligned_size, g.size_with_redzone - aligned_size,
+ kAsanGlobalRedzoneMagic);
+ if (g.size != aligned_size) {
+ FastPoisonShadowPartialRightRedzone(
+ g.beg + RoundDownTo(g.size, SHADOW_GRANULARITY),
+ g.size % SHADOW_GRANULARITY,
+ SHADOW_GRANULARITY,
+ kAsanGlobalRedzoneMagic);
+ }
+}
+
+const uptr kMinimalDistanceFromAnotherGlobal = 64;
+
+static bool IsAddressNearGlobal(uptr addr, const __asan_global &g) {
+ if (addr <= g.beg - kMinimalDistanceFromAnotherGlobal) return false;
+ if (addr >= g.beg + g.size_with_redzone) return false;
+ return true;
+}
+
+static void ReportGlobal(const Global &g, const char *prefix) {
+ Report(
+ "%s Global[%p]: beg=%p size=%zu/%zu name=%s module=%s dyn_init=%zu "
+ "odr_indicator=%p\n",
+ prefix, &g, (void *)g.beg, g.size, g.size_with_redzone, g.name,
+ g.module_name, g.has_dynamic_init, (void *)g.odr_indicator);
+ if (g.location) {
+ Report(" location (%p): name=%s[%p], %d %d\n", g.location,
+ g.location->filename, g.location->filename, g.location->line_no,
+ g.location->column_no);
+ }
+}
+
+static u32 FindRegistrationSite(const Global *g) {
+ mu_for_globals.CheckLocked();
+ CHECK(global_registration_site_vector);
+ for (uptr i = 0, n = global_registration_site_vector->size(); i < n; i++) {
+ GlobalRegistrationSite &grs = (*global_registration_site_vector)[i];
+ if (g >= grs.g_first && g <= grs.g_last)
+ return grs.stack_id;
+ }
+ return 0;
+}
+
+int GetGlobalsForAddress(uptr addr, Global *globals, u32 *reg_sites,
+ int max_globals) {
+ if (!flags()->report_globals) return 0;
+ BlockingMutexLock lock(&mu_for_globals);
+ int res = 0;
+ for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
+ const Global &g = *l->g;
+ if (flags()->report_globals >= 2)
+ ReportGlobal(g, "Search");
+ if (IsAddressNearGlobal(addr, g)) {
+ internal_memcpy(&globals[res], &g, sizeof(g));
+ if (reg_sites)
+ reg_sites[res] = FindRegistrationSite(&g);
+ res++;
+ if (res == max_globals)
+ break;
+ }
+ }
+ return res;
+}
+
+enum GlobalSymbolState {
+ UNREGISTERED = 0,
+ REGISTERED = 1
+};
+
+// Check ODR violation for given global G via special ODR indicator. We use
+// this method in case the compiler instruments global variables through
+// their local aliases.
+static void CheckODRViolationViaIndicator(const Global *g) {
+ // Instrumentation requests to skip ODR check.
+ if (g->odr_indicator == UINTPTR_MAX)
+ return;
+ u8 *odr_indicator = reinterpret_cast<u8 *>(g->odr_indicator);
+ if (*odr_indicator == UNREGISTERED) {
+ *odr_indicator = REGISTERED;
+ return;
+ }
+  // If *odr_indicator is REGISTERED, some module has already registered an
+  // externally visible symbol with the same name. This is an ODR violation.
+ for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
+ if (g->odr_indicator == l->g->odr_indicator &&
+ (flags()->detect_odr_violation >= 2 || g->size != l->g->size) &&
+ !IsODRViolationSuppressed(g->name))
+ ReportODRViolation(g, FindRegistrationSite(g),
+ l->g, FindRegistrationSite(l->g));
+ }
+}
+
+// Check ODR violation for given global G by checking if it's already poisoned.
+// We use this method in case the compiler doesn't use private aliases for
+// global variables.
+static void CheckODRViolationViaPoisoning(const Global *g) {
+ if (__asan_region_is_poisoned(g->beg, g->size_with_redzone)) {
+    // This check may not be enough: if the first global is much larger,
+    // the entire redzone of the second global may be within the first global.
+ for (ListOfGlobals *l = list_of_all_globals; l; l = l->next) {
+ if (g->beg == l->g->beg &&
+ (flags()->detect_odr_violation >= 2 || g->size != l->g->size) &&
+ !IsODRViolationSuppressed(g->name))
+ ReportODRViolation(g, FindRegistrationSite(g),
+ l->g, FindRegistrationSite(l->g));
+ }
+ }
+}
+
+// Clang provides two different ways to protect global variables: it can
+// poison either the global itself or its private alias. In the former
+// case we may poison the same symbol multiple times, which lets us
+// cheaply detect ODR violations: if we try to poison an already poisoned
+// global, we have an ODR violation error.
+// In the latter case, we poison each symbol exactly once, so we use a
+// special indicator symbol to perform a similar check.
+// In either case, the compiler provides a special odr_indicator field in
+// the Global structure, which can contain two kinds of values:
+//   1) A non-zero value. In this case, odr_indicator is the address of
+//      the corresponding indicator variable for the given global.
+//   2) Zero. This means that we don't use private aliases for global
+//      variables and can freely check ODR violations with the first method.
+//
+// This routine chooses between two different methods of ODR violation
+// detection.
+static inline bool UseODRIndicator(const Global *g) {
+ return g->odr_indicator > 0;
+}
+
+// Register a global variable.
+// This function may be called more than once for every global
+// so we store the globals in a list.
+static void RegisterGlobal(const Global *g) {
+ CHECK(asan_inited);
+ if (flags()->report_globals >= 2)
+ ReportGlobal(*g, "Added");
+ CHECK(flags()->report_globals);
+ CHECK(AddrIsInMem(g->beg));
+ if (!AddrIsAlignedByGranularity(g->beg)) {
+ Report("The following global variable is not properly aligned.\n");
+ Report("This may happen if another global with the same name\n");
+ Report("resides in another non-instrumented module.\n");
+ Report("Or the global comes from a C file built w/o -fno-common.\n");
+ Report("In either case this is likely an ODR violation bug,\n");
+ Report("but AddressSanitizer can not provide more details.\n");
+ ReportODRViolation(g, FindRegistrationSite(g), g, FindRegistrationSite(g));
+ CHECK(AddrIsAlignedByGranularity(g->beg));
+ }
+ CHECK(AddrIsAlignedByGranularity(g->size_with_redzone));
+ if (flags()->detect_odr_violation) {
+ // Try detecting ODR (One Definition Rule) violation, i.e. the situation
+ // where two globals with the same name are defined in different modules.
+ if (UseODRIndicator(g))
+ CheckODRViolationViaIndicator(g);
+ else
+ CheckODRViolationViaPoisoning(g);
+ }
+ if (CanPoisonMemory())
+ PoisonRedZones(*g);
+ ListOfGlobals *l = new(allocator_for_globals) ListOfGlobals;
+ l->g = g;
+ l->next = list_of_all_globals;
+ list_of_all_globals = l;
+ if (g->has_dynamic_init) {
+ if (!dynamic_init_globals) {
+ dynamic_init_globals =
+ new (allocator_for_globals) VectorOfGlobals; // NOLINT
+ dynamic_init_globals->reserve(kDynamicInitGlobalsInitialCapacity);
+ }
+ DynInitGlobal dyn_global = { *g, false };
+ dynamic_init_globals->push_back(dyn_global);
+ }
+}
+
+static void UnregisterGlobal(const Global *g) {
+ CHECK(asan_inited);
+ if (flags()->report_globals >= 2)
+ ReportGlobal(*g, "Removed");
+ CHECK(flags()->report_globals);
+ CHECK(AddrIsInMem(g->beg));
+ CHECK(AddrIsAlignedByGranularity(g->beg));
+ CHECK(AddrIsAlignedByGranularity(g->size_with_redzone));
+ if (CanPoisonMemory())
+ PoisonShadowForGlobal(g, 0);
+ // We unpoison the shadow memory for the global but we do not remove it from
+ // the list because that would require O(n^2) time with the current list
+ // implementation. It might not be worth doing anyway.
+
+ // Release ODR indicator.
+ if (UseODRIndicator(g) && g->odr_indicator != UINTPTR_MAX) {
+ u8 *odr_indicator = reinterpret_cast<u8 *>(g->odr_indicator);
+ *odr_indicator = UNREGISTERED;
+ }
+}
+
+void StopInitOrderChecking() {
+ BlockingMutexLock lock(&mu_for_globals);
+ if (!flags()->check_initialization_order || !dynamic_init_globals)
+ return;
+ flags()->check_initialization_order = false;
+ for (uptr i = 0, n = dynamic_init_globals->size(); i < n; ++i) {
+ DynInitGlobal &dyn_g = (*dynamic_init_globals)[i];
+ const Global *g = &dyn_g.g;
+ // Unpoison the whole global.
+ PoisonShadowForGlobal(g, 0);
+ // Poison redzones back.
+ PoisonRedZones(*g);
+ }
+}
+
+static bool IsASCII(unsigned char c) { return /*0x00 <= c &&*/ c <= 0x7F; }
+
+const char *MaybeDemangleGlobalName(const char *name) {
+  // We can spoil names of globals with C linkage, so use a heuristic
+  // approach to check if the name should be demangled.
+ bool should_demangle = false;
+ if (name[0] == '_' && name[1] == 'Z')
+ should_demangle = true;
+ else if (SANITIZER_WINDOWS && name[0] == '\01' && name[1] == '?')
+ should_demangle = true;
+
+ return should_demangle ? Symbolizer::GetOrInit()->Demangle(name) : name;
+}
+
+// Check if the global is a zero-terminated ASCII string. If so, print it.
+void PrintGlobalNameIfASCII(InternalScopedString *str, const __asan_global &g) {
+ for (uptr p = g.beg; p < g.beg + g.size - 1; p++) {
+ unsigned char c = *(unsigned char *)p;
+ if (c == '\0' || !IsASCII(c)) return;
+ }
+ if (*(char *)(g.beg + g.size - 1) != '\0') return;
+ str->append(" '%s' is ascii string '%s'\n", MaybeDemangleGlobalName(g.name),
+ (char *)g.beg);
+}
+
+static const char *GlobalFilename(const __asan_global &g) {
+ const char *res = g.module_name;
+  // Prefer the filename from the source location, if it is available.
+ if (g.location) res = g.location->filename;
+ CHECK(res);
+ return res;
+}
+
+void PrintGlobalLocation(InternalScopedString *str, const __asan_global &g) {
+ str->append("%s", GlobalFilename(g));
+ if (!g.location) return;
+ if (g.location->line_no) str->append(":%d", g.location->line_no);
+ if (g.location->column_no) str->append(":%d", g.location->column_no);
+}
+
+} // namespace __asan
+
+// ---------------------- Interface ---------------- {{{1
+using namespace __asan; // NOLINT
+
+// Apply __asan_register_globals to all globals found in the same loaded
+// executable or shared library as `flag'. The flag tracks whether globals have
+// already been registered or not for this image.
+void __asan_register_image_globals(uptr *flag) {
+ if (*flag)
+ return;
+ AsanApplyToGlobals(__asan_register_globals, flag);
+ *flag = 1;
+}
+
+// This mirrors __asan_register_image_globals.
+void __asan_unregister_image_globals(uptr *flag) {
+ if (!*flag)
+ return;
+ AsanApplyToGlobals(__asan_unregister_globals, flag);
+ *flag = 0;
+}
+
+void __asan_register_elf_globals(uptr *flag, void *start, void *stop) {
+ if (*flag) return;
+ if (!start) return;
+ CHECK_EQ(0, ((uptr)stop - (uptr)start) % sizeof(__asan_global));
+ __asan_global *globals_start = (__asan_global*)start;
+ __asan_global *globals_stop = (__asan_global*)stop;
+ __asan_register_globals(globals_start, globals_stop - globals_start);
+ *flag = 1;
+}
+
+void __asan_unregister_elf_globals(uptr *flag, void *start, void *stop) {
+ if (!*flag) return;
+ if (!start) return;
+ CHECK_EQ(0, ((uptr)stop - (uptr)start) % sizeof(__asan_global));
+ __asan_global *globals_start = (__asan_global*)start;
+ __asan_global *globals_stop = (__asan_global*)stop;
+ __asan_unregister_globals(globals_start, globals_stop - globals_start);
+ *flag = 0;
+}
+
+// Register an array of globals.
+void __asan_register_globals(__asan_global *globals, uptr n) {
+ if (!flags()->report_globals) return;
+ GET_STACK_TRACE_MALLOC;
+ u32 stack_id = StackDepotPut(stack);
+ BlockingMutexLock lock(&mu_for_globals);
+ if (!global_registration_site_vector) {
+ global_registration_site_vector =
+ new (allocator_for_globals) GlobalRegistrationSiteVector; // NOLINT
+ global_registration_site_vector->reserve(128);
+ }
+ GlobalRegistrationSite site = {stack_id, &globals[0], &globals[n - 1]};
+ global_registration_site_vector->push_back(site);
+ if (flags()->report_globals >= 2) {
+ PRINT_CURRENT_STACK();
+ Printf("=== ID %d; %p %p\n", stack_id, &globals[0], &globals[n - 1]);
+ }
+ for (uptr i = 0; i < n; i++) {
+ if (SANITIZER_WINDOWS && globals[i].beg == 0) {
+ // The MSVC incremental linker may pad globals out to 256 bytes. As long
+ // as __asan_global is less than 256 bytes large and its size is a power
+ // of two, we can skip over the padding.
+ static_assert(
+ sizeof(__asan_global) < 256 &&
+ (sizeof(__asan_global) & (sizeof(__asan_global) - 1)) == 0,
+ "sizeof(__asan_global) incompatible with incremental linker padding");
+ // If these are padding bytes, the rest of the global should be zero.
+ CHECK(globals[i].size == 0 && globals[i].size_with_redzone == 0 &&
+ globals[i].name == nullptr && globals[i].module_name == nullptr &&
+ globals[i].odr_indicator == 0);
+ continue;
+ }
+ RegisterGlobal(&globals[i]);
+ }
+
+ // Poison the metadata. It should not be accessible to user code.
+ PoisonShadow(reinterpret_cast<uptr>(globals), n * sizeof(__asan_global),
+ kAsanGlobalRedzoneMagic);
+}
+
+// Unregister an array of globals.
+// We must do this when a shared object gets dlclosed.
+void __asan_unregister_globals(__asan_global *globals, uptr n) {
+ if (!flags()->report_globals) return;
+ BlockingMutexLock lock(&mu_for_globals);
+ for (uptr i = 0; i < n; i++) {
+ if (SANITIZER_WINDOWS && globals[i].beg == 0) {
+ // Skip globals that look like padding from the MSVC incremental linker.
+ // See comment in __asan_register_globals.
+ continue;
+ }
+ UnregisterGlobal(&globals[i]);
+ }
+
+ // Unpoison the metadata.
+ PoisonShadow(reinterpret_cast<uptr>(globals), n * sizeof(__asan_global), 0);
+}
+
+// This method runs immediately prior to dynamic initialization in each TU,
+// when all dynamically initialized globals are unpoisoned. This method
+// poisons all global variables not defined in this TU, so that a dynamic
+// initializer can only touch global variables in the same TU.
+void __asan_before_dynamic_init(const char *module_name) {
+ if (!flags()->check_initialization_order ||
+ !CanPoisonMemory() ||
+ !dynamic_init_globals)
+ return;
+ bool strict_init_order = flags()->strict_init_order;
+ CHECK(module_name);
+ CHECK(asan_inited);
+ BlockingMutexLock lock(&mu_for_globals);
+ if (flags()->report_globals >= 3)
+ Printf("DynInitPoison module: %s\n", module_name);
+ for (uptr i = 0, n = dynamic_init_globals->size(); i < n; ++i) {
+ DynInitGlobal &dyn_g = (*dynamic_init_globals)[i];
+ const Global *g = &dyn_g.g;
+ if (dyn_g.initialized)
+ continue;
+ if (g->module_name != module_name)
+ PoisonShadowForGlobal(g, kAsanInitializationOrderMagic);
+ else if (!strict_init_order)
+ dyn_g.initialized = true;
+ }
+}
+
+// This method runs immediately after dynamic initialization in each TU, when
+// all dynamically initialized globals except for those defined in the current
+// TU are poisoned. It simply unpoisons all dynamically initialized globals.
+void __asan_after_dynamic_init() {
+ if (!flags()->check_initialization_order ||
+ !CanPoisonMemory() ||
+ !dynamic_init_globals)
+ return;
+ CHECK(asan_inited);
+ BlockingMutexLock lock(&mu_for_globals);
+ // FIXME: Optionally report that we're unpoisoning globals from a module.
+ for (uptr i = 0, n = dynamic_init_globals->size(); i < n; ++i) {
+ DynInitGlobal &dyn_g = (*dynamic_init_globals)[i];
+ const Global *g = &dyn_g.g;
+ if (!dyn_g.initialized) {
+ // Unpoison the whole global.
+ PoisonShadowForGlobal(g, 0);
+ // Poison redzones back.
+ PoisonRedZones(*g);
+ }
+ }
+}
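The two hooks above implement the check_initialization_order flag: before a
TU's dynamic initializers run, every dynamically initialized global from
other TUs is poisoned, so a cross-TU read during initialization is reported.
A minimal sketch of the bug class being caught (file and symbol names
hypothetical):

    // t1.cc, whose dynamic initializer reads a global owned by t2.cc.
    extern int a;
    int b = a + 1;      // init-order bug if t1.cc happens to initialize first

    // t2.cc
    static int compute() { return 42; }
    int a = compute();  // dynamically initialized; poisoned while any other
                        // TU's initializers are running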
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_globals_win.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_globals_win.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_globals_win.cc (revision 351984)
@@ -0,0 +1,61 @@
+//===-- asan_globals_win.cc -----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Global registration code that is linked into every Windows DLL and EXE.
+//
+//===----------------------------------------------------------------------===//
+
+#include "asan_interface_internal.h"
+#if SANITIZER_WINDOWS
+
+namespace __asan {
+
+#pragma section(".ASAN$GA", read, write) // NOLINT
+#pragma section(".ASAN$GZ", read, write) // NOLINT
+extern "C" __declspec(allocate(".ASAN$GA"))
+ ALIGNED(sizeof(__asan_global)) __asan_global __asan_globals_start = {};
+extern "C" __declspec(allocate(".ASAN$GZ"))
+ ALIGNED(sizeof(__asan_global)) __asan_global __asan_globals_end = {};
+#pragma comment(linker, "/merge:.ASAN=.data")
+
+static void call_on_globals(void (*hook)(__asan_global *, uptr)) {
+ __asan_global *start = &__asan_globals_start + 1;
+ __asan_global *end = &__asan_globals_end;
+ uptr bytediff = (uptr)end - (uptr)start;
+ if (bytediff % sizeof(__asan_global) != 0) {
+#if defined(SANITIZER_DLL_THUNK) || defined(SANITIZER_DYNAMIC_RUNTIME_THUNK)
+ __debugbreak();
+#else
+ CHECK("corrupt asan global array");
+#endif
+ }
+ // We know end >= start because the linker sorts the portion after the dollar
+ // sign alphabetically.
+ uptr n = end - start;
+ hook(start, n);
+}
+
+static void register_dso_globals() {
+ call_on_globals(&__asan_register_globals);
+}
+
+static void unregister_dso_globals() {
+ call_on_globals(&__asan_unregister_globals);
+}
+
+// Register globals
+#pragma section(".CRT$XCU", long, read) // NOLINT
+#pragma section(".CRT$XTX", long, read) // NOLINT
+extern "C" __declspec(allocate(".CRT$XCU"))
+void (*const __asan_dso_reg_hook)() = &register_dso_globals;
+extern "C" __declspec(allocate(".CRT$XTX"))
+void (*const __asan_dso_unreg_hook)() = &unregister_dso_globals;
+
+} // namespace __asan
+
+#endif // SANITIZER_WINDOWS
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_globals_win.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
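The .ASAN$GA / .ASAN$GZ bracketing above works because the MSVC linker
merges grouped sections and sorts them by the suffix after the dollar
sign. The same trick in isolation (section names here are hypothetical):

    // msvc_sections.cc (MSVC only)
    #pragma section(".MY$A", read, write)
    #pragma section(".MY$M", read, write)
    #pragma section(".MY$Z", read, write)
    __declspec(allocate(".MY$A")) int my_start = 0;
    __declspec(allocate(".MY$M")) int payload = 42;  // any TU can add entries
    __declspec(allocate(".MY$Z")) int my_end = 0;
    // The linker folds .MY$A/.MY$M/.MY$Z into one .MY section sorted by
    // suffix, so everything in .MY$M lands between &my_start and &my_end.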
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_init_version.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_init_version.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_init_version.h (revision 351984)
@@ -0,0 +1,44 @@
+//===-- asan_init_version.h -------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// This header defines a versioned __asan_init function to be called at the
+// startup of the instrumented program.
+//===----------------------------------------------------------------------===//
+#ifndef ASAN_INIT_VERSION_H
+#define ASAN_INIT_VERSION_H
+
+#include "sanitizer_common/sanitizer_platform.h"
+
+extern "C" {
+  // Every time the ASan ABI changes we also change the version number in the
+  // __asan_init function name. Objects built with incompatible ASan ABI
+  // versions will not link with the run-time.
+ //
+ // Changes between ABI versions:
+ // v1=>v2: added 'module_name' to __asan_global
+ // v2=>v3: stack frame description (created by the compiler)
+ // contains the function PC as the 3rd field (see
+ // DescribeAddressIfStack)
+ // v3=>v4: added '__asan_global_source_location' to __asan_global
+ // v4=>v5: changed the semantics and format of __asan_stack_malloc_ and
+ // __asan_stack_free_ functions
+ // v5=>v6: changed the name of the version check symbol
+ // v6=>v7: added 'odr_indicator' to __asan_global
+ // v7=>v8: added '__asan_(un)register_image_globals' functions for dead
+ // stripping support on Mach-O platforms
+#if SANITIZER_WORDSIZE == 32 && SANITIZER_ANDROID
+ // v8=>v9: 32-bit Android switched to dynamic shadow
+ #define __asan_version_mismatch_check __asan_version_mismatch_check_v9
+#else
+ #define __asan_version_mismatch_check __asan_version_mismatch_check_v8
+#endif
+}
+
+#endif // ASAN_INIT_VERSION_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_init_version.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_interceptors.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_interceptors.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_interceptors.cc (revision 351984)
@@ -0,0 +1,675 @@
+//===-- asan_interceptors.cc ----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Intercept various libc functions.
+//===----------------------------------------------------------------------===//
+
+#include "asan_interceptors.h"
+#include "asan_allocator.h"
+#include "asan_internal.h"
+#include "asan_mapping.h"
+#include "asan_poisoning.h"
+#include "asan_report.h"
+#include "asan_stack.h"
+#include "asan_stats.h"
+#include "asan_suppressions.h"
+#include "lsan/lsan_common.h"
+#include "sanitizer_common/sanitizer_libc.h"
+
+// There is no general interception at all on Fuchsia and RTEMS.
+// Only the functions in asan_interceptors_memintrinsics.cc are
+// really defined to replace libc functions.
+#if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
+
+#if SANITIZER_POSIX
+#include "sanitizer_common/sanitizer_posix.h"
+#endif
+
+#if ASAN_INTERCEPT__UNWIND_RAISEEXCEPTION || \
+ ASAN_INTERCEPT__SJLJ_UNWIND_RAISEEXCEPTION
+#include <unwind.h>
+#endif
+
+#if defined(__i386) && SANITIZER_LINUX
+#define ASAN_PTHREAD_CREATE_VERSION "GLIBC_2.1"
+#elif defined(__mips__) && SANITIZER_LINUX
+#define ASAN_PTHREAD_CREATE_VERSION "GLIBC_2.2"
+#endif
+
+namespace __asan {
+
+#define ASAN_READ_STRING_OF_LEN(ctx, s, len, n) \
+ ASAN_READ_RANGE((ctx), (s), \
+ common_flags()->strict_string_checks ? (len) + 1 : (n))
+
+#define ASAN_READ_STRING(ctx, s, n) \
+ ASAN_READ_STRING_OF_LEN((ctx), (s), REAL(strlen)(s), (n))
+
+static inline uptr MaybeRealStrnlen(const char *s, uptr maxlen) {
+#if SANITIZER_INTERCEPT_STRNLEN
+ if (REAL(strnlen)) {
+ return REAL(strnlen)(s, maxlen);
+ }
+#endif
+ return internal_strnlen(s, maxlen);
+}
+
+void SetThreadName(const char *name) {
+ AsanThread *t = GetCurrentThread();
+ if (t)
+ asanThreadRegistry().SetThreadName(t->tid(), name);
+}
+
+int OnExit() {
+ if (CAN_SANITIZE_LEAKS && common_flags()->detect_leaks &&
+ __lsan::HasReportedLeaks()) {
+ return common_flags()->exitcode;
+ }
+ // FIXME: ask frontend whether we need to return failure.
+ return 0;
+}
+
+} // namespace __asan
+
+// ---------------------- Wrappers ---------------- {{{1
+using namespace __asan; // NOLINT
+
+DECLARE_REAL_AND_INTERCEPTOR(void *, malloc, uptr)
+DECLARE_REAL_AND_INTERCEPTOR(void, free, void *)
+
+#define ASAN_INTERCEPTOR_ENTER(ctx, func) \
+ AsanInterceptorContext _ctx = {#func}; \
+ ctx = (void *)&_ctx; \
+ (void) ctx; \
+
+#define COMMON_INTERCEPT_FUNCTION(name) ASAN_INTERCEPT_FUNC(name)
+#define COMMON_INTERCEPT_FUNCTION_VER(name, ver) \
+ ASAN_INTERCEPT_FUNC_VER(name, ver)
+#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
+ ASAN_WRITE_RANGE(ctx, ptr, size)
+#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \
+ ASAN_READ_RANGE(ctx, ptr, size)
+#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
+ ASAN_INTERCEPTOR_ENTER(ctx, func); \
+ do { \
+ if (asan_init_is_running) \
+ return REAL(func)(__VA_ARGS__); \
+ if (SANITIZER_MAC && UNLIKELY(!asan_inited)) \
+ return REAL(func)(__VA_ARGS__); \
+ ENSURE_ASAN_INITED(); \
+ } while (false)
+#define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \
+ do { \
+ } while (false)
+#define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \
+ do { \
+ } while (false)
+#define COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd) \
+ do { \
+ } while (false)
+#define COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, newfd) \
+ do { \
+ } while (false)
+#define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) SetThreadName(name)
+// Should be asanThreadRegistry().SetThreadNameByUserId(thread, name)
+// But ASan does not remember UserIds for threads (pthread_t), and it
+// remembers all threads that ever existed, so a linear search by UserId
+// can be slow.
+#define COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name) \
+ do { \
+ } while (false)
+#define COMMON_INTERCEPTOR_BLOCK_REAL(name) REAL(name)
+// Strict init-order checking is dlopen-hostile:
+// https://github.com/google/sanitizers/issues/178
+#define COMMON_INTERCEPTOR_ON_DLOPEN(filename, flag) \
+ do { \
+ if (flags()->strict_init_order) \
+ StopInitOrderChecking(); \
+ CheckNoDeepBind(filename, flag); \
+ } while (false)
+#define COMMON_INTERCEPTOR_ON_EXIT(ctx) OnExit()
+#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle)
+#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED()
+#define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED (!asan_inited)
+#define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end) \
+ if (AsanThread *t = GetCurrentThread()) { \
+ *begin = t->tls_begin(); \
+ *end = t->tls_end(); \
+ } else { \
+ *begin = *end = 0; \
+ }
+
+#define COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size) \
+ do { \
+ ASAN_INTERCEPTOR_ENTER(ctx, memmove); \
+ ASAN_MEMMOVE_IMPL(ctx, to, from, size); \
+ } while (false)
+
+#define COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size) \
+ do { \
+ ASAN_INTERCEPTOR_ENTER(ctx, memcpy); \
+ ASAN_MEMCPY_IMPL(ctx, to, from, size); \
+ } while (false)
+
+#define COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size) \
+ do { \
+ ASAN_INTERCEPTOR_ENTER(ctx, memset); \
+ ASAN_MEMSET_IMPL(ctx, block, c, size); \
+ } while (false)
+
+#include "sanitizer_common/sanitizer_common_interceptors.inc"
+#include "sanitizer_common/sanitizer_signal_interceptors.inc"
+
+// Syscall interceptors don't have contexts, so we don't support suppressions
+// for them.
+#define COMMON_SYSCALL_PRE_READ_RANGE(p, s) ASAN_READ_RANGE(nullptr, p, s)
+#define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) ASAN_WRITE_RANGE(nullptr, p, s)
+#define COMMON_SYSCALL_POST_READ_RANGE(p, s) \
+ do { \
+ (void)(p); \
+ (void)(s); \
+ } while (false)
+#define COMMON_SYSCALL_POST_WRITE_RANGE(p, s) \
+ do { \
+ (void)(p); \
+ (void)(s); \
+ } while (false)
+#include "sanitizer_common/sanitizer_common_syscalls.inc"
+#include "sanitizer_common/sanitizer_syscalls_netbsd.inc"
+
+struct ThreadStartParam {
+ atomic_uintptr_t t;
+ atomic_uintptr_t is_registered;
+};
+
+#if ASAN_INTERCEPT_PTHREAD_CREATE
+static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) {
+ ThreadStartParam *param = reinterpret_cast<ThreadStartParam *>(arg);
+ AsanThread *t = nullptr;
+ while ((t = reinterpret_cast<AsanThread *>(
+ atomic_load(&param->t, memory_order_acquire))) == nullptr)
+ internal_sched_yield();
+ SetCurrentThread(t);
+ return t->ThreadStart(GetTid(), &param->is_registered);
+}
+
+INTERCEPTOR(int, pthread_create, void *thread,
+ void *attr, void *(*start_routine)(void*), void *arg) {
+ EnsureMainThreadIDIsCorrect();
+ // Strict init-order checking is thread-hostile.
+ if (flags()->strict_init_order)
+ StopInitOrderChecking();
+ GET_STACK_TRACE_THREAD;
+ int detached = 0;
+ if (attr)
+ REAL(pthread_attr_getdetachstate)(attr, &detached);
+ ThreadStartParam param;
+ atomic_store(&param.t, 0, memory_order_relaxed);
+ atomic_store(&param.is_registered, 0, memory_order_relaxed);
+ int result;
+ {
+    // Ignore all allocations made by pthread_create: thread stack/TLS may be
+    // stored by pthread for future reuse even after thread destruction, and
+    // the linked list it's stored in doesn't even hold valid pointers to the
+    // objects; the latter are calculated by obscure pointer arithmetic.
+#if CAN_SANITIZE_LEAKS
+ __lsan::ScopedInterceptorDisabler disabler;
+#endif
+ result = REAL(pthread_create)(thread, attr, asan_thread_start, &param);
+ }
+ if (result == 0) {
+ u32 current_tid = GetCurrentTidOrInvalid();
+ AsanThread *t =
+ AsanThread::Create(start_routine, arg, current_tid, &stack, detached);
+ atomic_store(&param.t, reinterpret_cast<uptr>(t), memory_order_release);
+ // Wait until the AsanThread object is initialized and the ThreadRegistry
+ // entry is in "started" state. One reason for this is that after this
+ // interceptor exits, the child thread's stack may be the only thing holding
+ // the |arg| pointer. This may cause LSan to report a leak if leak checking
+ // happens at a point when the interceptor has already exited, but the stack
+ // range for the child thread is not yet known.
+ while (atomic_load(&param.is_registered, memory_order_acquire) == 0)
+ internal_sched_yield();
+ }
+ return result;
+}
+
+INTERCEPTOR(int, pthread_join, void *t, void **arg) {
+ return real_pthread_join(t, arg);
+}
+
+DEFINE_REAL_PTHREAD_FUNCTIONS
+#endif // ASAN_INTERCEPT_PTHREAD_CREATE
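The param.t store/load pair above is a release/acquire rendezvous: the
creator publishes the AsanThread pointer only after it is fully set up,
and the child spins until the pointer appears. The same pattern in
portable C++ (std::thread stands in for the intercepted pthread_create;
a sketch, not the runtime's code):

    #include <atomic>
    #include <thread>

    struct Param { std::atomic<int *> t{nullptr}; };

    int main() {
      Param param;
      std::thread child([&param] {
        int *p;
        while (!(p = param.t.load(std::memory_order_acquire)))
          std::this_thread::yield();  // wait for the creator to publish
        (void)*p;                     // safe: ordered by the release store
      });
      static int obj = 42;
      param.t.store(&obj, std::memory_order_release);  // publish after setup
      child.join();
    }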
+
+#if ASAN_INTERCEPT_SWAPCONTEXT
+static void ClearShadowMemoryForContextStack(uptr stack, uptr ssize) {
+ // Align to page size.
+ uptr PageSize = GetPageSizeCached();
+ uptr bottom = stack & ~(PageSize - 1);
+ ssize += stack - bottom;
+ ssize = RoundUpTo(ssize, PageSize);
+ static const uptr kMaxSaneContextStackSize = 1 << 22; // 4 Mb
+ if (AddrIsInMem(bottom) && ssize && ssize <= kMaxSaneContextStackSize) {
+ PoisonShadow(bottom, ssize, 0);
+ }
+}
+
+INTERCEPTOR(int, swapcontext, struct ucontext_t *oucp,
+ struct ucontext_t *ucp) {
+ static bool reported_warning = false;
+ if (!reported_warning) {
+ Report("WARNING: ASan doesn't fully support makecontext/swapcontext "
+ "functions and may produce false positives in some cases!\n");
+ reported_warning = true;
+ }
+ // Clear shadow memory for new context (it may share stack
+ // with current context).
+ uptr stack, ssize;
+ ReadContextStack(ucp, &stack, &ssize);
+ ClearShadowMemoryForContextStack(stack, ssize);
+#if __has_attribute(__indirect_return__) && \
+ (defined(__x86_64__) || defined(__i386__))
+ int (*real_swapcontext)(struct ucontext_t *, struct ucontext_t *)
+ __attribute__((__indirect_return__))
+ = REAL(swapcontext);
+ int res = real_swapcontext(oucp, ucp);
+#else
+ int res = REAL(swapcontext)(oucp, ucp);
+#endif
+  // swapcontext technically does not return, but the program may swap context
+  // to "oucp" later, which would look as if swapcontext() returned 0.
+  // We need to clear the shadow for ucp once again, as it may be in an
+  // arbitrary state.
+ ClearShadowMemoryForContextStack(stack, ssize);
+ return res;
+}
+#endif // ASAN_INTERCEPT_SWAPCONTEXT
+
+#if SANITIZER_NETBSD
+#define longjmp __longjmp14
+#define siglongjmp __siglongjmp14
+#endif
+
+INTERCEPTOR(void, longjmp, void *env, int val) {
+ __asan_handle_no_return();
+ REAL(longjmp)(env, val);
+}
+
+#if ASAN_INTERCEPT__LONGJMP
+INTERCEPTOR(void, _longjmp, void *env, int val) {
+ __asan_handle_no_return();
+ REAL(_longjmp)(env, val);
+}
+#endif
+
+#if ASAN_INTERCEPT___LONGJMP_CHK
+INTERCEPTOR(void, __longjmp_chk, void *env, int val) {
+ __asan_handle_no_return();
+ REAL(__longjmp_chk)(env, val);
+}
+#endif
+
+#if ASAN_INTERCEPT_SIGLONGJMP
+INTERCEPTOR(void, siglongjmp, void *env, int val) {
+ __asan_handle_no_return();
+ REAL(siglongjmp)(env, val);
+}
+#endif
+
+#if ASAN_INTERCEPT___CXA_THROW
+INTERCEPTOR(void, __cxa_throw, void *a, void *b, void *c) {
+ CHECK(REAL(__cxa_throw));
+ __asan_handle_no_return();
+ REAL(__cxa_throw)(a, b, c);
+}
+#endif
+
+#if ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION
+INTERCEPTOR(void, __cxa_rethrow_primary_exception, void *a) {
+ CHECK(REAL(__cxa_rethrow_primary_exception));
+ __asan_handle_no_return();
+ REAL(__cxa_rethrow_primary_exception)(a);
+}
+#endif
+
+#if ASAN_INTERCEPT__UNWIND_RAISEEXCEPTION
+INTERCEPTOR(_Unwind_Reason_Code, _Unwind_RaiseException,
+ _Unwind_Exception *object) {
+ CHECK(REAL(_Unwind_RaiseException));
+ __asan_handle_no_return();
+ return REAL(_Unwind_RaiseException)(object);
+}
+#endif
+
+#if ASAN_INTERCEPT__SJLJ_UNWIND_RAISEEXCEPTION
+INTERCEPTOR(_Unwind_Reason_Code, _Unwind_SjLj_RaiseException,
+ _Unwind_Exception *object) {
+ CHECK(REAL(_Unwind_SjLj_RaiseException));
+ __asan_handle_no_return();
+ return REAL(_Unwind_SjLj_RaiseException)(object);
+}
+#endif
+
+#if ASAN_INTERCEPT_INDEX
+# if ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX
+INTERCEPTOR(char*, index, const char *string, int c)
+ ALIAS(WRAPPER_NAME(strchr));
+# else
+# if SANITIZER_MAC
+DECLARE_REAL(char*, index, const char *string, int c)
+OVERRIDE_FUNCTION(index, strchr);
+# else
+DEFINE_REAL(char*, index, const char *string, int c)
+# endif
+# endif
+#endif // ASAN_INTERCEPT_INDEX
+
+// For both strcat() and strncat() we need to check the validity of the |to|
+// argument irrespective of the |from| length.
+INTERCEPTOR(char*, strcat, char *to, const char *from) { // NOLINT
+ void *ctx;
+ ASAN_INTERCEPTOR_ENTER(ctx, strcat); // NOLINT
+ ENSURE_ASAN_INITED();
+ if (flags()->replace_str) {
+ uptr from_length = REAL(strlen)(from);
+ ASAN_READ_RANGE(ctx, from, from_length + 1);
+ uptr to_length = REAL(strlen)(to);
+ ASAN_READ_STRING_OF_LEN(ctx, to, to_length, to_length);
+ ASAN_WRITE_RANGE(ctx, to + to_length, from_length + 1);
+ // If the copying actually happens, the |from| string should not overlap
+ // with the resulting string starting at |to|, which has a length of
+ // to_length + from_length + 1.
+ if (from_length > 0) {
+ CHECK_RANGES_OVERLAP("strcat", to, from_length + to_length + 1,
+ from, from_length + 1);
+ }
+ }
+ return REAL(strcat)(to, from); // NOLINT
+}
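
A concrete (hypothetical) case of the overlap rule encoded above: appending a buffer to itself makes |from| overlap the grown destination range [to, to + to_length + from_length + 1), which CHECK_RANGES_OVERLAP reports:

    char buf[16] = "abc";
    strcat(buf, buf);   // source overlaps the resulting string; ASan reports
                        // a strcat-param-overlap error instead of letting the
                        // undefined behavior proceed silently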
+
+INTERCEPTOR(char*, strncat, char *to, const char *from, uptr size) {
+ void *ctx;
+ ASAN_INTERCEPTOR_ENTER(ctx, strncat);
+ ENSURE_ASAN_INITED();
+ if (flags()->replace_str) {
+ uptr from_length = MaybeRealStrnlen(from, size);
+ uptr copy_length = Min(size, from_length + 1);
+ ASAN_READ_RANGE(ctx, from, copy_length);
+ uptr to_length = REAL(strlen)(to);
+ ASAN_READ_STRING_OF_LEN(ctx, to, to_length, to_length);
+ ASAN_WRITE_RANGE(ctx, to + to_length, from_length + 1);
+ if (from_length > 0) {
+ CHECK_RANGES_OVERLAP("strncat", to, to_length + copy_length + 1,
+ from, copy_length);
+ }
+ }
+ return REAL(strncat)(to, from, size);
+}
+
+INTERCEPTOR(char*, strcpy, char *to, const char *from) { // NOLINT
+ void *ctx;
+ ASAN_INTERCEPTOR_ENTER(ctx, strcpy); // NOLINT
+#if SANITIZER_MAC
+ if (UNLIKELY(!asan_inited)) return REAL(strcpy)(to, from); // NOLINT
+#endif
+ // strcpy is called from malloc_default_purgeable_zone()
+ // in __asan::ReplaceSystemAlloc() on Mac.
+ if (asan_init_is_running) {
+ return REAL(strcpy)(to, from); // NOLINT
+ }
+ ENSURE_ASAN_INITED();
+ if (flags()->replace_str) {
+ uptr from_size = REAL(strlen)(from) + 1;
+ CHECK_RANGES_OVERLAP("strcpy", to, from_size, from, from_size);
+ ASAN_READ_RANGE(ctx, from, from_size);
+ ASAN_WRITE_RANGE(ctx, to, from_size);
+ }
+ return REAL(strcpy)(to, from); // NOLINT
+}
+
+INTERCEPTOR(char*, strdup, const char *s) {
+ void *ctx;
+ ASAN_INTERCEPTOR_ENTER(ctx, strdup);
+ if (UNLIKELY(!asan_inited)) return internal_strdup(s);
+ ENSURE_ASAN_INITED();
+ uptr length = REAL(strlen)(s);
+ if (flags()->replace_str) {
+ ASAN_READ_RANGE(ctx, s, length + 1);
+ }
+ GET_STACK_TRACE_MALLOC;
+ void *new_mem = asan_malloc(length + 1, &stack);
+ REAL(memcpy)(new_mem, s, length + 1);
+ return reinterpret_cast<char*>(new_mem);
+}
+
+#if ASAN_INTERCEPT___STRDUP
+INTERCEPTOR(char*, __strdup, const char *s) {
+ void *ctx;
+ ASAN_INTERCEPTOR_ENTER(ctx, strdup);
+ if (UNLIKELY(!asan_inited)) return internal_strdup(s);
+ ENSURE_ASAN_INITED();
+ uptr length = REAL(strlen)(s);
+ if (flags()->replace_str) {
+ ASAN_READ_RANGE(ctx, s, length + 1);
+ }
+ GET_STACK_TRACE_MALLOC;
+ void *new_mem = asan_malloc(length + 1, &stack);
+ REAL(memcpy)(new_mem, s, length + 1);
+ return reinterpret_cast<char*>(new_mem);
+}
+#endif // ASAN_INTERCEPT___STRDUP
+
+INTERCEPTOR(char*, strncpy, char *to, const char *from, uptr size) {
+ void *ctx;
+ ASAN_INTERCEPTOR_ENTER(ctx, strncpy);
+ ENSURE_ASAN_INITED();
+ if (flags()->replace_str) {
+ uptr from_size = Min(size, MaybeRealStrnlen(from, size) + 1);
+ CHECK_RANGES_OVERLAP("strncpy", to, from_size, from, from_size);
+ ASAN_READ_RANGE(ctx, from, from_size);
+ ASAN_WRITE_RANGE(ctx, to, size);
+ }
+ return REAL(strncpy)(to, from, size);
+}
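
Note the asymmetry above, which matches C's strncpy() semantics: at most min(size, strnlen(from, size) + 1) bytes are read from |from|, but all |size| bytes of |to| are written, because strncpy() zero-pads the destination. A small illustration with hypothetical values:

    char dst[8];
    strncpy(dst, "ab", sizeof(dst));  // reads 3 bytes from the source, but
                                      // writes all 8 bytes of dst (padding
                                      // with '\0'), hence ASAN_WRITE_RANGE
                                      // covers the full size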
+
+INTERCEPTOR(long, strtol, const char *nptr, // NOLINT
+ char **endptr, int base) {
+ void *ctx;
+ ASAN_INTERCEPTOR_ENTER(ctx, strtol);
+ ENSURE_ASAN_INITED();
+ if (!flags()->replace_str) {
+ return REAL(strtol)(nptr, endptr, base);
+ }
+ char *real_endptr;
+ long result = REAL(strtol)(nptr, &real_endptr, base); // NOLINT
+ StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base);
+ return result;
+}
+
+INTERCEPTOR(int, atoi, const char *nptr) {
+ void *ctx;
+ ASAN_INTERCEPTOR_ENTER(ctx, atoi);
+#if SANITIZER_MAC
+ if (UNLIKELY(!asan_inited)) return REAL(atoi)(nptr);
+#endif
+ ENSURE_ASAN_INITED();
+ if (!flags()->replace_str) {
+ return REAL(atoi)(nptr);
+ }
+ char *real_endptr;
+ // "man atoi" tells that behavior of atoi(nptr) is the same as
+ // strtol(nptr, 0, 10), i.e. it sets errno to ERANGE if the
+ // parsed integer can't be stored in *long* type (even if it's
+ // different from int). So, we just imitate this behavior.
+ int result = REAL(strtol)(nptr, &real_endptr, 10);
+ FixRealStrtolEndptr(nptr, &real_endptr);
+ ASAN_READ_STRING(ctx, nptr, (real_endptr - nptr) + 1);
+ return result;
+}
+
+INTERCEPTOR(long, atol, const char *nptr) { // NOLINT
+ void *ctx;
+ ASAN_INTERCEPTOR_ENTER(ctx, atol);
+#if SANITIZER_MAC
+ if (UNLIKELY(!asan_inited)) return REAL(atol)(nptr);
+#endif
+ ENSURE_ASAN_INITED();
+ if (!flags()->replace_str) {
+ return REAL(atol)(nptr);
+ }
+ char *real_endptr;
+ long result = REAL(strtol)(nptr, &real_endptr, 10); // NOLINT
+ FixRealStrtolEndptr(nptr, &real_endptr);
+ ASAN_READ_STRING(ctx, nptr, (real_endptr - nptr) + 1);
+ return result;
+}
+
+#if ASAN_INTERCEPT_ATOLL_AND_STRTOLL
+INTERCEPTOR(long long, strtoll, const char *nptr, // NOLINT
+ char **endptr, int base) {
+ void *ctx;
+ ASAN_INTERCEPTOR_ENTER(ctx, strtoll);
+ ENSURE_ASAN_INITED();
+ if (!flags()->replace_str) {
+ return REAL(strtoll)(nptr, endptr, base);
+ }
+ char *real_endptr;
+ long long result = REAL(strtoll)(nptr, &real_endptr, base); // NOLINT
+ StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base);
+ return result;
+}
+
+INTERCEPTOR(long long, atoll, const char *nptr) { // NOLINT
+ void *ctx;
+ ASAN_INTERCEPTOR_ENTER(ctx, atoll);
+ ENSURE_ASAN_INITED();
+ if (!flags()->replace_str) {
+ return REAL(atoll)(nptr);
+ }
+ char *real_endptr;
+ long long result = REAL(strtoll)(nptr, &real_endptr, 10); // NOLINT
+ FixRealStrtolEndptr(nptr, &real_endptr);
+ ASAN_READ_STRING(ctx, nptr, (real_endptr - nptr) + 1);
+ return result;
+}
+#endif // ASAN_INTERCEPT_ATOLL_AND_STRTOLL
+
+#if ASAN_INTERCEPT___CXA_ATEXIT
+static void AtCxaAtexit(void *unused) {
+ (void)unused;
+ StopInitOrderChecking();
+}
+
+INTERCEPTOR(int, __cxa_atexit, void (*func)(void *), void *arg,
+ void *dso_handle) {
+#if SANITIZER_MAC
+ if (UNLIKELY(!asan_inited)) return REAL(__cxa_atexit)(func, arg, dso_handle);
+#endif
+ ENSURE_ASAN_INITED();
+ int res = REAL(__cxa_atexit)(func, arg, dso_handle);
+ REAL(__cxa_atexit)(AtCxaAtexit, nullptr, nullptr);
+ return res;
+}
+#endif // ASAN_INTERCEPT___CXA_ATEXIT
+
+#if ASAN_INTERCEPT_VFORK
+DEFINE_REAL(int, vfork)
+DECLARE_EXTERN_INTERCEPTOR_AND_WRAPPER(int, vfork)
+#endif
+
+// ---------------------- InitializeAsanInterceptors ---------------- {{{1
+namespace __asan {
+void InitializeAsanInterceptors() {
+ static bool was_called_once;
+ CHECK(!was_called_once);
+ was_called_once = true;
+ InitializeCommonInterceptors();
+ InitializeSignalInterceptors();
+
+ // Intercept str* functions.
+ ASAN_INTERCEPT_FUNC(strcat); // NOLINT
+ ASAN_INTERCEPT_FUNC(strcpy); // NOLINT
+ ASAN_INTERCEPT_FUNC(strncat);
+ ASAN_INTERCEPT_FUNC(strncpy);
+ ASAN_INTERCEPT_FUNC(strdup);
+#if ASAN_INTERCEPT___STRDUP
+ ASAN_INTERCEPT_FUNC(__strdup);
+#endif
+#if ASAN_INTERCEPT_INDEX && ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX
+ ASAN_INTERCEPT_FUNC(index);
+#endif
+
+ ASAN_INTERCEPT_FUNC(atoi);
+ ASAN_INTERCEPT_FUNC(atol);
+ ASAN_INTERCEPT_FUNC(strtol);
+#if ASAN_INTERCEPT_ATOLL_AND_STRTOLL
+ ASAN_INTERCEPT_FUNC(atoll);
+ ASAN_INTERCEPT_FUNC(strtoll);
+#endif
+
+  // Intercept jump-related functions.
+ ASAN_INTERCEPT_FUNC(longjmp);
+
+#if ASAN_INTERCEPT_SWAPCONTEXT
+ ASAN_INTERCEPT_FUNC(swapcontext);
+#endif
+#if ASAN_INTERCEPT__LONGJMP
+ ASAN_INTERCEPT_FUNC(_longjmp);
+#endif
+#if ASAN_INTERCEPT___LONGJMP_CHK
+ ASAN_INTERCEPT_FUNC(__longjmp_chk);
+#endif
+#if ASAN_INTERCEPT_SIGLONGJMP
+ ASAN_INTERCEPT_FUNC(siglongjmp);
+#endif
+
+ // Intercept exception handling functions.
+#if ASAN_INTERCEPT___CXA_THROW
+ ASAN_INTERCEPT_FUNC(__cxa_throw);
+#endif
+#if ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION
+ ASAN_INTERCEPT_FUNC(__cxa_rethrow_primary_exception);
+#endif
+ // Indirectly intercept std::rethrow_exception.
+#if ASAN_INTERCEPT__UNWIND_RAISEEXCEPTION
+ INTERCEPT_FUNCTION(_Unwind_RaiseException);
+#endif
+ // Indirectly intercept std::rethrow_exception.
+#if ASAN_INTERCEPT__UNWIND_SJLJ_RAISEEXCEPTION
+ INTERCEPT_FUNCTION(_Unwind_SjLj_RaiseException);
+#endif
+
+ // Intercept threading-related functions
+#if ASAN_INTERCEPT_PTHREAD_CREATE
+#if defined(ASAN_PTHREAD_CREATE_VERSION)
+ ASAN_INTERCEPT_FUNC_VER(pthread_create, ASAN_PTHREAD_CREATE_VERSION);
+#else
+ ASAN_INTERCEPT_FUNC(pthread_create);
+#endif
+ ASAN_INTERCEPT_FUNC(pthread_join);
+#endif
+
+ // Intercept atexit function.
+#if ASAN_INTERCEPT___CXA_ATEXIT
+ ASAN_INTERCEPT_FUNC(__cxa_atexit);
+#endif
+
+#if ASAN_INTERCEPT_VFORK
+ ASAN_INTERCEPT_FUNC(vfork);
+#endif
+
+ InitializePlatformInterceptors();
+
+ VReport(1, "AddressSanitizer: libc interceptors initialized\n");
+}
+
+} // namespace __asan
+
+#endif // !SANITIZER_FUCHSIA
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_interceptors.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_interceptors.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_interceptors.h (revision 351984)
@@ -0,0 +1,141 @@
+//===-- asan_interceptors.h -------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// ASan-private header for asan_interceptors.cc
+//===----------------------------------------------------------------------===//
+#ifndef ASAN_INTERCEPTORS_H
+#define ASAN_INTERCEPTORS_H
+
+#include "asan_internal.h"
+#include "asan_interceptors_memintrinsics.h"
+#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_platform_interceptors.h"
+
+namespace __asan {
+
+void InitializeAsanInterceptors();
+void InitializePlatformInterceptors();
+
+#define ENSURE_ASAN_INITED() \
+ do { \
+ CHECK(!asan_init_is_running); \
+ if (UNLIKELY(!asan_inited)) { \
+ AsanInitFromRtl(); \
+ } \
+ } while (0)
+
+} // namespace __asan
+
+// There is no general interception at all on Fuchsia and RTEMS.
+// Only the functions in asan_interceptors_memintrinsics.h are
+// really defined to replace libc functions.
+#if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
+
+// Use a macro to describe whether a specific function should be
+// intercepted on a given platform.
+#if !SANITIZER_WINDOWS
+# define ASAN_INTERCEPT_ATOLL_AND_STRTOLL 1
+# define ASAN_INTERCEPT__LONGJMP 1
+# define ASAN_INTERCEPT_INDEX 1
+# define ASAN_INTERCEPT_PTHREAD_CREATE 1
+#else
+# define ASAN_INTERCEPT_ATOLL_AND_STRTOLL 0
+# define ASAN_INTERCEPT__LONGJMP 0
+# define ASAN_INTERCEPT_INDEX 0
+# define ASAN_INTERCEPT_PTHREAD_CREATE 0
+#endif
+
+#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
+ SANITIZER_SOLARIS
+# define ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX 1
+#else
+# define ASAN_USE_ALIAS_ATTRIBUTE_FOR_INDEX 0
+#endif
+
+#if (SANITIZER_LINUX && !SANITIZER_ANDROID) || SANITIZER_SOLARIS
+# define ASAN_INTERCEPT_SWAPCONTEXT 1
+#else
+# define ASAN_INTERCEPT_SWAPCONTEXT 0
+#endif
+
+#if !SANITIZER_WINDOWS
+# define ASAN_INTERCEPT_SIGLONGJMP 1
+#else
+# define ASAN_INTERCEPT_SIGLONGJMP 0
+#endif
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+# define ASAN_INTERCEPT___LONGJMP_CHK 1
+#else
+# define ASAN_INTERCEPT___LONGJMP_CHK 0
+#endif
+
+#if ASAN_HAS_EXCEPTIONS && !SANITIZER_WINDOWS && !SANITIZER_SOLARIS && \
+ !SANITIZER_NETBSD
+# define ASAN_INTERCEPT___CXA_THROW 1
+# define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 1
+# if defined(_GLIBCXX_SJLJ_EXCEPTIONS) || (SANITIZER_IOS && defined(__arm__))
+# define ASAN_INTERCEPT__UNWIND_SJLJ_RAISEEXCEPTION 1
+# else
+# define ASAN_INTERCEPT__UNWIND_RAISEEXCEPTION 1
+# endif
+#else
+# define ASAN_INTERCEPT___CXA_THROW 0
+# define ASAN_INTERCEPT___CXA_RETHROW_PRIMARY_EXCEPTION 0
+# define ASAN_INTERCEPT__UNWIND_RAISEEXCEPTION 0
+# define ASAN_INTERCEPT__UNWIND_SJLJ_RAISEEXCEPTION 0
+#endif
+
+#if !SANITIZER_WINDOWS
+# define ASAN_INTERCEPT___CXA_ATEXIT 1
+#else
+# define ASAN_INTERCEPT___CXA_ATEXIT 0
+#endif
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+# define ASAN_INTERCEPT___STRDUP 1
+#else
+# define ASAN_INTERCEPT___STRDUP 0
+#endif
+
+#if SANITIZER_LINUX && (defined(__arm__) || defined(__aarch64__) || \
+ defined(__i386__) || defined(__x86_64__))
+# define ASAN_INTERCEPT_VFORK 1
+#else
+# define ASAN_INTERCEPT_VFORK 0
+#endif
+
+DECLARE_REAL(int, memcmp, const void *a1, const void *a2, uptr size)
+DECLARE_REAL(char*, strchr, const char *str, int c)
+DECLARE_REAL(SIZE_T, strlen, const char *s)
+DECLARE_REAL(char*, strncpy, char *to, const char *from, uptr size)
+DECLARE_REAL(uptr, strnlen, const char *s, uptr maxlen)
+DECLARE_REAL(char*, strstr, const char *s1, const char *s2)
+
+#if !SANITIZER_MAC
+#define ASAN_INTERCEPT_FUNC(name) \
+ do { \
+ if (!INTERCEPT_FUNCTION(name)) \
+ VReport(1, "AddressSanitizer: failed to intercept '%s'\n'", #name); \
+ } while (0)
+#define ASAN_INTERCEPT_FUNC_VER(name, ver) \
+ do { \
+ if (!INTERCEPT_FUNCTION_VER(name, ver)) \
+ VReport(1, "AddressSanitizer: failed to intercept '%s@@%s'\n", #name, \
+ #ver); \
+ } while (0)
+#else
+// OS X interceptors don't need to be initialized with INTERCEPT_FUNCTION.
+#define ASAN_INTERCEPT_FUNC(name)
+#endif // SANITIZER_MAC
+
+#endif // !SANITIZER_FUCHSIA
+
+#endif // ASAN_INTERCEPTORS_H
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_interceptors_memintrinsics.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_interceptors_memintrinsics.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_interceptors_memintrinsics.cc (revision 351984)
@@ -0,0 +1,43 @@
+//===-- asan_interceptors_memintrinsics.cc --------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===---------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// ASan versions of memcpy, memmove, and memset.
+//===---------------------------------------------------------------------===//
+
+#include "asan_interceptors_memintrinsics.h"
+#include "asan_report.h"
+#include "asan_stack.h"
+#include "asan_suppressions.h"
+
+using namespace __asan; // NOLINT
+
+void *__asan_memcpy(void *to, const void *from, uptr size) {
+ ASAN_MEMCPY_IMPL(nullptr, to, from, size);
+}
+
+void *__asan_memset(void *block, int c, uptr size) {
+ ASAN_MEMSET_IMPL(nullptr, block, c, size);
+}
+
+void *__asan_memmove(void *to, const void *from, uptr size) {
+ ASAN_MEMMOVE_IMPL(nullptr, to, from, size);
+}
+
+#if SANITIZER_FUCHSIA || SANITIZER_RTEMS
+
+// Fuchsia and RTEMS don't use sanitizer_common_interceptors.inc, but the
+// only things they need from it are these three functions. Just define them
+// as aliases here rather than repeating the contents.
+
+extern "C" decltype(__asan_memcpy) memcpy[[gnu::alias("__asan_memcpy")]];
+extern "C" decltype(__asan_memmove) memmove[[gnu::alias("__asan_memmove")]];
+extern "C" decltype(__asan_memset) memset[[gnu::alias("__asan_memset")]];
+
+#endif // SANITIZER_FUCHSIA || SANITIZER_RTEMS
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_interceptors_memintrinsics.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_interceptors_memintrinsics.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_interceptors_memintrinsics.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_interceptors_memintrinsics.h (revision 351984)
@@ -0,0 +1,154 @@
+//===-- asan_interceptors_memintrinsics.h -----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===---------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// ASan-private header for asan_interceptors_memintrinsics.cc
+//===---------------------------------------------------------------------===//
+#ifndef ASAN_MEMINTRIN_H
+#define ASAN_MEMINTRIN_H
+
+#include "asan_interface_internal.h"
+#include "asan_internal.h"
+#include "asan_mapping.h"
+#include "interception/interception.h"
+
+DECLARE_REAL(void*, memcpy, void *to, const void *from, uptr size)
+DECLARE_REAL(void*, memset, void *block, int c, uptr size)
+
+namespace __asan {
+
+// Return true if we can quickly decide that the region is unpoisoned.
+// We assume that a redzone is at least 16 bytes.
+static inline bool QuickCheckForUnpoisonedRegion(uptr beg, uptr size) {
+ if (size == 0) return true;
+ if (size <= 32)
+ return !AddressIsPoisoned(beg) &&
+ !AddressIsPoisoned(beg + size - 1) &&
+ !AddressIsPoisoned(beg + size / 2);
+ if (size <= 64)
+ return !AddressIsPoisoned(beg) &&
+ !AddressIsPoisoned(beg + size / 4) &&
+ !AddressIsPoisoned(beg + size - 1) &&
+ !AddressIsPoisoned(beg + 3 * size / 4) &&
+ !AddressIsPoisoned(beg + size / 2);
+ return false;
+}
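
The sampling above is sound because redzones are at least 16 bytes. For a region of at most 32 bytes, adjacent sample points leave at most 15 interior bytes between them, so a poisoned run of 16 or more bytes that overlaps the region must cover at least one sample. A worked case with illustrative numbers:

    // beg = 0, size = 32: samples at 0, 16 (size / 2), and 31 (size - 1).
    // Any 16-byte poisoned run overlapping [0, 32) while avoiding all three
    // samples would have to fit entirely inside bytes 1..15 or 17..30, and
    // neither gap can hold 16 contiguous bytes.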
+
+struct AsanInterceptorContext {
+ const char *interceptor_name;
+};
+
+// We implement ACCESS_MEMORY_RANGE, ASAN_READ_RANGE,
+// and ASAN_WRITE_RANGE as macros instead of functions so
+// that no extra frames are created and the stack trace contains
+// only relevant information.
+// We check all shadow bytes.
+#define ACCESS_MEMORY_RANGE(ctx, offset, size, isWrite) do { \
+ uptr __offset = (uptr)(offset); \
+ uptr __size = (uptr)(size); \
+ uptr __bad = 0; \
+ if (__offset > __offset + __size) { \
+ GET_STACK_TRACE_FATAL_HERE; \
+ ReportStringFunctionSizeOverflow(__offset, __size, &stack); \
+ } \
+ if (!QuickCheckForUnpoisonedRegion(__offset, __size) && \
+ (__bad = __asan_region_is_poisoned(__offset, __size))) { \
+ AsanInterceptorContext *_ctx = (AsanInterceptorContext *)ctx; \
+ bool suppressed = false; \
+ if (_ctx) { \
+ suppressed = IsInterceptorSuppressed(_ctx->interceptor_name); \
+ if (!suppressed && HaveStackTraceBasedSuppressions()) { \
+ GET_STACK_TRACE_FATAL_HERE; \
+ suppressed = IsStackTraceSuppressed(&stack); \
+ } \
+ } \
+ if (!suppressed) { \
+ GET_CURRENT_PC_BP_SP; \
+ ReportGenericError(pc, bp, sp, __bad, isWrite, __size, 0, false);\
+ } \
+ } \
+ } while (0)
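
One subtlety worth noting: the "__offset > __offset + __size" comparison is an unsigned wraparound test. It catches size values so large that the end of the range overflows the address space, which is how corrupt length arguments to string functions get reported. A minimal sketch:

    uptr offset = (uptr)-16;                 // 16 bytes below the top of memory
    uptr size = 32;                          // offset + size wraps past zero
    bool overflow = offset > offset + size;  // true; the macro then calls
                                             // ReportStringFunctionSizeOverflow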
+
+// memcpy is called during __asan_init() from the internals of printf(...).
+// We do not treat memcpy with to==from as a bug.
+// See http://llvm.org/bugs/show_bug.cgi?id=11763.
+#define ASAN_MEMCPY_IMPL(ctx, to, from, size) \
+ do { \
+ if (UNLIKELY(!asan_inited)) return internal_memcpy(to, from, size); \
+ if (asan_init_is_running) { \
+ return REAL(memcpy)(to, from, size); \
+ } \
+ ENSURE_ASAN_INITED(); \
+ if (flags()->replace_intrin) { \
+ if (to != from) { \
+ CHECK_RANGES_OVERLAP("memcpy", to, size, from, size); \
+ } \
+ ASAN_READ_RANGE(ctx, from, size); \
+ ASAN_WRITE_RANGE(ctx, to, size); \
+ } \
+ return REAL(memcpy)(to, from, size); \
+ } while (0)
+
+// memset is called inside Printf.
+#define ASAN_MEMSET_IMPL(ctx, block, c, size) \
+ do { \
+ if (UNLIKELY(!asan_inited)) return internal_memset(block, c, size); \
+ if (asan_init_is_running) { \
+ return REAL(memset)(block, c, size); \
+ } \
+ ENSURE_ASAN_INITED(); \
+ if (flags()->replace_intrin) { \
+ ASAN_WRITE_RANGE(ctx, block, size); \
+ } \
+ return REAL(memset)(block, c, size); \
+ } while (0)
+
+#define ASAN_MEMMOVE_IMPL(ctx, to, from, size) \
+ do { \
+ if (UNLIKELY(!asan_inited)) return internal_memmove(to, from, size); \
+ ENSURE_ASAN_INITED(); \
+ if (flags()->replace_intrin) { \
+ ASAN_READ_RANGE(ctx, from, size); \
+ ASAN_WRITE_RANGE(ctx, to, size); \
+ } \
+ return internal_memmove(to, from, size); \
+ } while (0)
+
+#define ASAN_READ_RANGE(ctx, offset, size) \
+ ACCESS_MEMORY_RANGE(ctx, offset, size, false)
+#define ASAN_WRITE_RANGE(ctx, offset, size) \
+ ACCESS_MEMORY_RANGE(ctx, offset, size, true)
+
+// The behavior of functions like "memcpy" or "strcpy" is undefined
+// if the memory intervals overlap. We report an error in this case.
+// A macro is used to avoid creating new frames.
+static inline bool RangesOverlap(const char *offset1, uptr length1,
+ const char *offset2, uptr length2) {
+ return !((offset1 + length1 <= offset2) || (offset2 + length2 <= offset1));
+}
+#define CHECK_RANGES_OVERLAP(name, _offset1, length1, _offset2, length2) \
+ do { \
+ const char *offset1 = (const char *)_offset1; \
+ const char *offset2 = (const char *)_offset2; \
+ if (RangesOverlap(offset1, length1, offset2, length2)) { \
+ GET_STACK_TRACE_FATAL_HERE; \
+ bool suppressed = IsInterceptorSuppressed(name); \
+ if (!suppressed && HaveStackTraceBasedSuppressions()) { \
+ suppressed = IsStackTraceSuppressed(&stack); \
+ } \
+ if (!suppressed) { \
+ ReportStringFunctionMemoryRangesOverlap(name, offset1, length1, \
+ offset2, length2, &stack); \
+ } \
+ } \
+ } while (0)
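
RangesOverlap() is the standard half-open interval test: [offset1, offset1 + length1) and [offset2, offset2 + length2) are disjoint exactly when one ends at or before the start of the other. For instance, with illustrative values:

    RangesOverlap(p, 8, p + 8, 8);   // false: [p, p+8) and [p+8, p+16) touch
                                     // but share no byte
    RangesOverlap(p, 9, p + 8, 8);   // true: byte p+8 lies in both ranges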
+
+} // namespace __asan
+
+#endif // ASAN_MEMINTRIN_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_interceptors_memintrinsics.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_interceptors_vfork.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_interceptors_vfork.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_interceptors_vfork.S (revision 351984)
@@ -0,0 +1,12 @@
+#include "sanitizer_common/sanitizer_asm.h"
+
+#if defined(__linux__)
+#define COMMON_INTERCEPTOR_SPILL_AREA __asan_extra_spill_area
+#define COMMON_INTERCEPTOR_HANDLE_VFORK __asan_handle_vfork
+#include "sanitizer_common/sanitizer_common_interceptors_vfork_aarch64.inc.S"
+#include "sanitizer_common/sanitizer_common_interceptors_vfork_arm.inc.S"
+#include "sanitizer_common/sanitizer_common_interceptors_vfork_x86_64.inc.S"
+#include "sanitizer_common/sanitizer_common_interceptors_vfork_i386.inc.S"
+#endif
+
+NO_EXEC_STACK_DIRECTIVE
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_interface.inc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_interface.inc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_interface.inc (revision 351984)
@@ -0,0 +1,169 @@
+//===-- asan_interface.inc ------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// Asan interface list.
+//===----------------------------------------------------------------------===//
+INTERFACE_FUNCTION(__asan_addr_is_in_fake_stack)
+INTERFACE_FUNCTION(__asan_address_is_poisoned)
+INTERFACE_FUNCTION(__asan_after_dynamic_init)
+INTERFACE_FUNCTION(__asan_alloca_poison)
+INTERFACE_FUNCTION(__asan_allocas_unpoison)
+INTERFACE_FUNCTION(__asan_before_dynamic_init)
+INTERFACE_FUNCTION(__asan_describe_address)
+INTERFACE_FUNCTION(__asan_exp_load1)
+INTERFACE_FUNCTION(__asan_exp_load2)
+INTERFACE_FUNCTION(__asan_exp_load4)
+INTERFACE_FUNCTION(__asan_exp_load8)
+INTERFACE_FUNCTION(__asan_exp_load16)
+INTERFACE_FUNCTION(__asan_exp_loadN)
+INTERFACE_FUNCTION(__asan_exp_store1)
+INTERFACE_FUNCTION(__asan_exp_store2)
+INTERFACE_FUNCTION(__asan_exp_store4)
+INTERFACE_FUNCTION(__asan_exp_store8)
+INTERFACE_FUNCTION(__asan_exp_store16)
+INTERFACE_FUNCTION(__asan_exp_storeN)
+INTERFACE_FUNCTION(__asan_get_alloc_stack)
+INTERFACE_FUNCTION(__asan_get_current_fake_stack)
+INTERFACE_FUNCTION(__asan_get_free_stack)
+INTERFACE_FUNCTION(__asan_get_report_access_size)
+INTERFACE_FUNCTION(__asan_get_report_access_type)
+INTERFACE_FUNCTION(__asan_get_report_address)
+INTERFACE_FUNCTION(__asan_get_report_bp)
+INTERFACE_FUNCTION(__asan_get_report_description)
+INTERFACE_FUNCTION(__asan_get_report_pc)
+INTERFACE_FUNCTION(__asan_get_report_sp)
+INTERFACE_FUNCTION(__asan_get_shadow_mapping)
+INTERFACE_FUNCTION(__asan_handle_no_return)
+INTERFACE_FUNCTION(__asan_handle_vfork)
+INTERFACE_FUNCTION(__asan_init)
+INTERFACE_FUNCTION(__asan_load_cxx_array_cookie)
+INTERFACE_FUNCTION(__asan_load1)
+INTERFACE_FUNCTION(__asan_load2)
+INTERFACE_FUNCTION(__asan_load4)
+INTERFACE_FUNCTION(__asan_load8)
+INTERFACE_FUNCTION(__asan_load16)
+INTERFACE_FUNCTION(__asan_loadN)
+INTERFACE_FUNCTION(__asan_load1_noabort)
+INTERFACE_FUNCTION(__asan_load2_noabort)
+INTERFACE_FUNCTION(__asan_load4_noabort)
+INTERFACE_FUNCTION(__asan_load8_noabort)
+INTERFACE_FUNCTION(__asan_load16_noabort)
+INTERFACE_FUNCTION(__asan_loadN_noabort)
+INTERFACE_FUNCTION(__asan_locate_address)
+INTERFACE_FUNCTION(__asan_memcpy)
+INTERFACE_FUNCTION(__asan_memmove)
+INTERFACE_FUNCTION(__asan_memset)
+INTERFACE_FUNCTION(__asan_poison_cxx_array_cookie)
+INTERFACE_FUNCTION(__asan_poison_intra_object_redzone)
+INTERFACE_FUNCTION(__asan_poison_memory_region)
+INTERFACE_FUNCTION(__asan_poison_stack_memory)
+INTERFACE_FUNCTION(__asan_print_accumulated_stats)
+INTERFACE_FUNCTION(__asan_region_is_poisoned)
+INTERFACE_FUNCTION(__asan_register_globals)
+INTERFACE_FUNCTION(__asan_register_elf_globals)
+INTERFACE_FUNCTION(__asan_register_image_globals)
+INTERFACE_FUNCTION(__asan_report_error)
+INTERFACE_FUNCTION(__asan_report_exp_load1)
+INTERFACE_FUNCTION(__asan_report_exp_load2)
+INTERFACE_FUNCTION(__asan_report_exp_load4)
+INTERFACE_FUNCTION(__asan_report_exp_load8)
+INTERFACE_FUNCTION(__asan_report_exp_load16)
+INTERFACE_FUNCTION(__asan_report_exp_load_n)
+INTERFACE_FUNCTION(__asan_report_exp_store1)
+INTERFACE_FUNCTION(__asan_report_exp_store2)
+INTERFACE_FUNCTION(__asan_report_exp_store4)
+INTERFACE_FUNCTION(__asan_report_exp_store8)
+INTERFACE_FUNCTION(__asan_report_exp_store16)
+INTERFACE_FUNCTION(__asan_report_exp_store_n)
+INTERFACE_FUNCTION(__asan_report_load1)
+INTERFACE_FUNCTION(__asan_report_load2)
+INTERFACE_FUNCTION(__asan_report_load4)
+INTERFACE_FUNCTION(__asan_report_load8)
+INTERFACE_FUNCTION(__asan_report_load16)
+INTERFACE_FUNCTION(__asan_report_load_n)
+INTERFACE_FUNCTION(__asan_report_load1_noabort)
+INTERFACE_FUNCTION(__asan_report_load2_noabort)
+INTERFACE_FUNCTION(__asan_report_load4_noabort)
+INTERFACE_FUNCTION(__asan_report_load8_noabort)
+INTERFACE_FUNCTION(__asan_report_load16_noabort)
+INTERFACE_FUNCTION(__asan_report_load_n_noabort)
+INTERFACE_FUNCTION(__asan_report_present)
+INTERFACE_FUNCTION(__asan_report_store1)
+INTERFACE_FUNCTION(__asan_report_store2)
+INTERFACE_FUNCTION(__asan_report_store4)
+INTERFACE_FUNCTION(__asan_report_store8)
+INTERFACE_FUNCTION(__asan_report_store16)
+INTERFACE_FUNCTION(__asan_report_store_n)
+INTERFACE_FUNCTION(__asan_report_store1_noabort)
+INTERFACE_FUNCTION(__asan_report_store2_noabort)
+INTERFACE_FUNCTION(__asan_report_store4_noabort)
+INTERFACE_FUNCTION(__asan_report_store8_noabort)
+INTERFACE_FUNCTION(__asan_report_store16_noabort)
+INTERFACE_FUNCTION(__asan_report_store_n_noabort)
+INTERFACE_FUNCTION(__asan_set_death_callback)
+INTERFACE_FUNCTION(__asan_set_error_report_callback)
+INTERFACE_FUNCTION(__asan_set_shadow_00)
+INTERFACE_FUNCTION(__asan_set_shadow_f1)
+INTERFACE_FUNCTION(__asan_set_shadow_f2)
+INTERFACE_FUNCTION(__asan_set_shadow_f3)
+INTERFACE_FUNCTION(__asan_set_shadow_f5)
+INTERFACE_FUNCTION(__asan_set_shadow_f8)
+INTERFACE_FUNCTION(__asan_stack_free_0)
+INTERFACE_FUNCTION(__asan_stack_free_1)
+INTERFACE_FUNCTION(__asan_stack_free_2)
+INTERFACE_FUNCTION(__asan_stack_free_3)
+INTERFACE_FUNCTION(__asan_stack_free_4)
+INTERFACE_FUNCTION(__asan_stack_free_5)
+INTERFACE_FUNCTION(__asan_stack_free_6)
+INTERFACE_FUNCTION(__asan_stack_free_7)
+INTERFACE_FUNCTION(__asan_stack_free_8)
+INTERFACE_FUNCTION(__asan_stack_free_9)
+INTERFACE_FUNCTION(__asan_stack_free_10)
+INTERFACE_FUNCTION(__asan_stack_malloc_0)
+INTERFACE_FUNCTION(__asan_stack_malloc_1)
+INTERFACE_FUNCTION(__asan_stack_malloc_2)
+INTERFACE_FUNCTION(__asan_stack_malloc_3)
+INTERFACE_FUNCTION(__asan_stack_malloc_4)
+INTERFACE_FUNCTION(__asan_stack_malloc_5)
+INTERFACE_FUNCTION(__asan_stack_malloc_6)
+INTERFACE_FUNCTION(__asan_stack_malloc_7)
+INTERFACE_FUNCTION(__asan_stack_malloc_8)
+INTERFACE_FUNCTION(__asan_stack_malloc_9)
+INTERFACE_FUNCTION(__asan_stack_malloc_10)
+INTERFACE_FUNCTION(__asan_store1)
+INTERFACE_FUNCTION(__asan_store2)
+INTERFACE_FUNCTION(__asan_store4)
+INTERFACE_FUNCTION(__asan_store8)
+INTERFACE_FUNCTION(__asan_store16)
+INTERFACE_FUNCTION(__asan_storeN)
+INTERFACE_FUNCTION(__asan_store1_noabort)
+INTERFACE_FUNCTION(__asan_store2_noabort)
+INTERFACE_FUNCTION(__asan_store4_noabort)
+INTERFACE_FUNCTION(__asan_store8_noabort)
+INTERFACE_FUNCTION(__asan_store16_noabort)
+INTERFACE_FUNCTION(__asan_storeN_noabort)
+INTERFACE_FUNCTION(__asan_unpoison_intra_object_redzone)
+INTERFACE_FUNCTION(__asan_unpoison_memory_region)
+INTERFACE_FUNCTION(__asan_unpoison_stack_memory)
+INTERFACE_FUNCTION(__asan_unregister_globals)
+INTERFACE_FUNCTION(__asan_unregister_elf_globals)
+INTERFACE_FUNCTION(__asan_unregister_image_globals)
+INTERFACE_FUNCTION(__asan_version_mismatch_check_v8)
+INTERFACE_FUNCTION(__sanitizer_finish_switch_fiber)
+INTERFACE_FUNCTION(__sanitizer_print_stack_trace)
+INTERFACE_FUNCTION(__sanitizer_ptr_cmp)
+INTERFACE_FUNCTION(__sanitizer_ptr_sub)
+INTERFACE_FUNCTION(__sanitizer_start_switch_fiber)
+INTERFACE_FUNCTION(__sanitizer_unaligned_load16)
+INTERFACE_FUNCTION(__sanitizer_unaligned_load32)
+INTERFACE_FUNCTION(__sanitizer_unaligned_load64)
+INTERFACE_FUNCTION(__sanitizer_unaligned_store16)
+INTERFACE_FUNCTION(__sanitizer_unaligned_store32)
+INTERFACE_FUNCTION(__sanitizer_unaligned_store64)
+INTERFACE_WEAK_FUNCTION(__asan_default_options)
+INTERFACE_WEAK_FUNCTION(__asan_default_suppressions)
+INTERFACE_WEAK_FUNCTION(__asan_on_error)
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_interface.inc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_interface_internal.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_interface_internal.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_interface_internal.h (revision 351984)
@@ -0,0 +1,256 @@
+//===-- asan_interface_internal.h -------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// This header declares the AddressSanitizer runtime interface functions.
+// The runtime library has to define these functions so that the instrumented
+// program can call them.
+//
+// See also include/sanitizer/asan_interface.h
+//===----------------------------------------------------------------------===//
+#ifndef ASAN_INTERFACE_INTERNAL_H
+#define ASAN_INTERFACE_INTERNAL_H
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+
+#include "asan_init_version.h"
+
+using __sanitizer::uptr;
+using __sanitizer::u64;
+using __sanitizer::u32;
+
+extern "C" {
+ // This function should be called at the very beginning of the process,
+ // before any instrumented code is executed and before any call to malloc.
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_init();
+
+ // This function exists purely to get a linker/loader error when using
+ // incompatible versions of instrumentation and runtime library. Please note
+ // that __asan_version_mismatch_check is a macro that is replaced with
+ // __asan_version_mismatch_check_vXXX at compile-time.
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_version_mismatch_check();
+
+  // This structure is used to describe the source location of a place where
+  // a global was defined.
+ struct __asan_global_source_location {
+ const char *filename;
+ int line_no;
+ int column_no;
+ };
+
+ // This structure describes an instrumented global variable.
+ struct __asan_global {
+ uptr beg; // The address of the global.
+ uptr size; // The original size of the global.
+ uptr size_with_redzone; // The size with the redzone.
+ const char *name; // Name as a C string.
+ const char *module_name; // Module name as a C string. This pointer is a
+ // unique identifier of a module.
+ uptr has_dynamic_init; // Non-zero if the global has dynamic initializer.
+ __asan_global_source_location *location; // Source location of a global,
+ // or NULL if it is unknown.
+ uptr odr_indicator; // The address of the ODR indicator symbol.
+ };
+
+ // These functions can be called on some platforms to find globals in the same
+ // loaded image as `flag' and apply __asan_(un)register_globals to them,
+ // filtering out redundant calls.
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_register_image_globals(uptr *flag);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_unregister_image_globals(uptr *flag);
+
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_register_elf_globals(uptr *flag, void *start, void *stop);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_unregister_elf_globals(uptr *flag, void *start, void *stop);
+
+ // These two functions should be called by the instrumented code.
+ // 'globals' is an array of structures describing 'n' globals.
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_register_globals(__asan_global *globals, uptr n);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_unregister_globals(__asan_global *globals, uptr n);
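
These registration calls are normally emitted by compiler-generated module constructors, never written by hand. Purely as an illustration of how the metadata above fits together (all values below are made up):

    static char g_buf[100];  // pretend instrumentation laid out a redzone after it
    static __asan_global g_desc = {
        (uptr)g_buf,    // beg
        100,            // size
        100 + 60,       // size_with_redzone (hypothetical redzone size)
        "g_buf",        // name
        "example.cc",   // module_name, unique per module
        0,              // has_dynamic_init
        nullptr,        // location unknown
        0,              // odr_indicator
    };
    // The module constructor would then call:
    //   __asan_register_globals(&g_desc, 1);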
+
+ // These two functions should be called before and after dynamic initializers
+ // of a single module run, respectively.
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_before_dynamic_init(const char *module_name);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_after_dynamic_init();
+
+  // Sets the bytes of the given range of shadow memory to a specific value.
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_set_shadow_00(uptr addr, uptr size);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_set_shadow_f1(uptr addr, uptr size);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_set_shadow_f2(uptr addr, uptr size);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_set_shadow_f3(uptr addr, uptr size);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_set_shadow_f5(uptr addr, uptr size);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_set_shadow_f8(uptr addr, uptr size);
+
+ // These two functions are used by instrumented code in the
+ // use-after-scope mode. They mark memory for local variables as
+ // unaddressable when they leave scope and addressable before the
+ // function exits.
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_poison_stack_memory(uptr addr, uptr size);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_unpoison_stack_memory(uptr addr, uptr size);
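
A hedged sketch of what use-after-scope instrumentation conceptually emits around a scoped local (x_slot is a made-up name for the variable's stack slot, and this is not a literal transcription of compiler output):

    void f() {
      char *p;
      {
        char x[32];
        // shadow for x is addressable here
        p = x;
      }
      // compiler emits: __asan_poison_stack_memory((uptr)x_slot, 32);
      *p = 1;  // now reported as stack-use-after-scope
      // before f() returns: __asan_unpoison_stack_memory((uptr)x_slot, 32);
    }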
+
+  // Performs cleanup before a NoReturn function. Must be called before things
+  // like _exit and execl to avoid false positives on the stack.
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_handle_no_return();
+
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_poison_memory_region(void const volatile *addr, uptr size);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_unpoison_memory_region(void const volatile *addr, uptr size);
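
These two are also exposed to user code via the ASAN_POISON_MEMORY_REGION / ASAN_UNPOISON_MEMORY_REGION macros in the public sanitizer/asan_interface.h. A minimal sketch of manual poisoning in a hypothetical custom allocator:

    char arena[1024];

    void quarantine(char *p, uptr n) {
      __asan_poison_memory_region(p, n);    // later accesses to [p, p+n) are
                                            // reported as use-after-poison
    }

    void reuse(char *p, uptr n) {
      __asan_unpoison_memory_region(p, n);  // range is addressable again
    }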
+
+ SANITIZER_INTERFACE_ATTRIBUTE
+ int __asan_address_is_poisoned(void const volatile *addr);
+
+ SANITIZER_INTERFACE_ATTRIBUTE
+ uptr __asan_region_is_poisoned(uptr beg, uptr size);
+
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_describe_address(uptr addr);
+
+ SANITIZER_INTERFACE_ATTRIBUTE
+ int __asan_report_present();
+
+ SANITIZER_INTERFACE_ATTRIBUTE
+ uptr __asan_get_report_pc();
+ SANITIZER_INTERFACE_ATTRIBUTE
+ uptr __asan_get_report_bp();
+ SANITIZER_INTERFACE_ATTRIBUTE
+ uptr __asan_get_report_sp();
+ SANITIZER_INTERFACE_ATTRIBUTE
+ uptr __asan_get_report_address();
+ SANITIZER_INTERFACE_ATTRIBUTE
+ int __asan_get_report_access_type();
+ SANITIZER_INTERFACE_ATTRIBUTE
+ uptr __asan_get_report_access_size();
+ SANITIZER_INTERFACE_ATTRIBUTE
+ const char * __asan_get_report_description();
+
+ SANITIZER_INTERFACE_ATTRIBUTE
+ const char * __asan_locate_address(uptr addr, char *name, uptr name_size,
+ uptr *region_address, uptr *region_size);
+
+ SANITIZER_INTERFACE_ATTRIBUTE
+ uptr __asan_get_alloc_stack(uptr addr, uptr *trace, uptr size,
+ u32 *thread_id);
+
+ SANITIZER_INTERFACE_ATTRIBUTE
+ uptr __asan_get_free_stack(uptr addr, uptr *trace, uptr size,
+ u32 *thread_id);
+
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_get_shadow_mapping(uptr *shadow_scale, uptr *shadow_offset);
+
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_report_error(uptr pc, uptr bp, uptr sp,
+ uptr addr, int is_write, uptr access_size, u32 exp);
+
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_set_death_callback(void (*callback)(void));
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_set_error_report_callback(void (*callback)(const char*));
+
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __asan_on_error();
+
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_print_accumulated_stats();
+
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ const char* __asan_default_options();
+
+ SANITIZER_INTERFACE_ATTRIBUTE
+ extern uptr __asan_shadow_memory_dynamic_address;
+
+ // Global flag, copy of ASAN_OPTIONS=detect_stack_use_after_return
+ SANITIZER_INTERFACE_ATTRIBUTE
+ extern int __asan_option_detect_stack_use_after_return;
+
+ SANITIZER_INTERFACE_ATTRIBUTE
+ extern uptr *__asan_test_only_reported_buggy_pointer;
+
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_load1(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_load2(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_load4(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_load8(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_load16(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_store1(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_store2(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_store4(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_store8(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_store16(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_loadN(uptr p, uptr size);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_storeN(uptr p, uptr size);
+
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_load1_noabort(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_load2_noabort(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_load4_noabort(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_load8_noabort(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_load16_noabort(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_store1_noabort(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_store2_noabort(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_store4_noabort(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_store8_noabort(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_store16_noabort(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_loadN_noabort(uptr p, uptr size);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_storeN_noabort(uptr p, uptr size);
+
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_load1(uptr p, u32 exp);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_load2(uptr p, u32 exp);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_load4(uptr p, u32 exp);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_load8(uptr p, u32 exp);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_load16(uptr p, u32 exp);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_store1(uptr p, u32 exp);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_store2(uptr p, u32 exp);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_store4(uptr p, u32 exp);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_store8(uptr p, u32 exp);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_store16(uptr p, u32 exp);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_loadN(uptr p, uptr size,
+ u32 exp);
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_exp_storeN(uptr p, uptr size,
+ u32 exp);
+
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void* __asan_memcpy(void *dst, const void *src, uptr size);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void* __asan_memset(void *s, int c, uptr n);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void* __asan_memmove(void* dest, const void* src, uptr n);
+
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_poison_cxx_array_cookie(uptr p);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ uptr __asan_load_cxx_array_cookie(uptr *p);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_poison_intra_object_redzone(uptr p, uptr size);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_unpoison_intra_object_redzone(uptr p, uptr size);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_alloca_poison(uptr addr, uptr size);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __asan_allocas_unpoison(uptr top, uptr bottom);
+
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ const char* __asan_default_suppressions();
+
+ SANITIZER_INTERFACE_ATTRIBUTE void __asan_handle_vfork(void *sp);
+} // extern "C"
+
+#endif // ASAN_INTERFACE_INTERNAL_H
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_internal.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_internal.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_internal.h (revision 351984)
@@ -0,0 +1,163 @@
+//===-- asan_internal.h -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// ASan-private header which defines various general utilities.
+//===----------------------------------------------------------------------===//
+#ifndef ASAN_INTERNAL_H
+#define ASAN_INTERNAL_H
+
+#include "asan_flags.h"
+#include "asan_interface_internal.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "sanitizer_common/sanitizer_libc.h"
+
+#if __has_feature(address_sanitizer) || defined(__SANITIZE_ADDRESS__)
+# error "The AddressSanitizer run-time should not be"
+ " instrumented by AddressSanitizer"
+#endif
+
+// Build-time configuration options.
+
+// If set, asan will intercept C++ exception api call(s).
+#ifndef ASAN_HAS_EXCEPTIONS
+# define ASAN_HAS_EXCEPTIONS 1
+#endif
+
+// If set, values like allocator chunk size, as well as defaults for some flags
+// will be changed towards less memory overhead.
+#ifndef ASAN_LOW_MEMORY
+# if SANITIZER_IOS || SANITIZER_ANDROID || SANITIZER_RTEMS
+# define ASAN_LOW_MEMORY 1
+# else
+# define ASAN_LOW_MEMORY 0
+# endif
+#endif
+
+#ifndef ASAN_DYNAMIC
+# ifdef PIC
+# define ASAN_DYNAMIC 1
+# else
+# define ASAN_DYNAMIC 0
+# endif
+#endif
+
+// All internal functions in asan reside inside the __asan namespace
+// to avoid namespace collisions with the user programs.
+// Separate namespace also makes it simpler to distinguish the asan run-time
+// functions from the instrumented user code in a profile.
+namespace __asan {
+
+class AsanThread;
+using __sanitizer::StackTrace;
+
+void AsanInitFromRtl();
+
+// asan_win.cc
+void InitializePlatformExceptionHandlers();
+// Returns whether an address is a valid allocated system heap block.
+// 'addr' must point to the beginning of the block.
+bool IsSystemHeapAddress(uptr addr);
+
+// asan_rtl.cc
+void PrintAddressSpaceLayout();
+void NORETURN ShowStatsAndAbort();
+
+// asan_shadow_setup.cc
+void InitializeShadowMemory();
+
+// asan_malloc_linux.cc / asan_malloc_mac.cc
+void ReplaceSystemMalloc();
+
+// asan_linux.cc / asan_mac.cc / asan_rtems.cc / asan_win.cc
+uptr FindDynamicShadowStart();
+void *AsanDoesNotSupportStaticLinkage();
+void AsanCheckDynamicRTPrereqs();
+void AsanCheckIncompatibleRT();
+
+// asan_thread.cc
+AsanThread *CreateMainThread();
+
+// Support function for __asan_(un)register_image_globals. Searches for the
+// loaded image containing `needle' and then enumerates all global metadata
+// structures declared in that image, applying `op' (e.g.,
+// __asan_(un)register_globals) to them.
+typedef void (*globals_op_fptr)(__asan_global *, uptr);
+void AsanApplyToGlobals(globals_op_fptr op, const void *needle);
+
+void AsanOnDeadlySignal(int, void *siginfo, void *context);
+
+void ReadContextStack(void *context, uptr *stack, uptr *ssize);
+void StopInitOrderChecking();
+
+// Wrapper for TLS/TSD.
+void AsanTSDInit(void (*destructor)(void *tsd));
+void *AsanTSDGet();
+void AsanTSDSet(void *tsd);
+void PlatformTSDDtor(void *tsd);
+
+void AppendToErrorMessageBuffer(const char *buffer);
+
+void *AsanDlSymNext(const char *sym);
+
+void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name);
+
+// Returns `true` iff most of the ASan init process should be skipped due to
+// the ASan library being loaded via `dlopen()`. Platforms may perform any
+// `dlopen()` specific initialization inside this function.
+bool HandleDlopenInit();
+
+// Convenience macros for interface functions that may be represented as
+// weak hooks.
+#define ASAN_MALLOC_HOOK(ptr, size) \
+ do { \
+ if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(ptr, size); \
+ RunMallocHooks(ptr, size); \
+ } while (false)
+#define ASAN_FREE_HOOK(ptr) \
+ do { \
+ if (&__sanitizer_free_hook) __sanitizer_free_hook(ptr); \
+ RunFreeHooks(ptr); \
+ } while (false)
+#define ASAN_ON_ERROR() \
+ if (&__asan_on_error) __asan_on_error()
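
The address-of tests above work because the hooks are weak symbols: if the client does not define them, the address is null and the call is skipped. A hedged sketch of a client-defined hook pair (the public declarations live in sanitizer/allocator_interface.h):

    #include <sanitizer/allocator_interface.h>
    #include <stdio.h>

    extern "C" void __sanitizer_malloc_hook(const volatile void *ptr,
                                            size_t size) {
      fprintf(stderr, "alloc %p (%zu bytes)\n", (void *)ptr, size);
    }

    extern "C" void __sanitizer_free_hook(const volatile void *ptr) {
      fprintf(stderr, "free %p\n", (void *)ptr);
    }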
+
+extern int asan_inited;
+// Used to avoid infinite recursion in __asan_init().
+extern bool asan_init_is_running;
+extern void (*death_callback)(void);
+// These magic values are written to shadow for better error reporting.
+const int kAsanHeapLeftRedzoneMagic = 0xfa;
+const int kAsanHeapFreeMagic = 0xfd;
+const int kAsanStackLeftRedzoneMagic = 0xf1;
+const int kAsanStackMidRedzoneMagic = 0xf2;
+const int kAsanStackRightRedzoneMagic = 0xf3;
+const int kAsanStackAfterReturnMagic = 0xf5;
+const int kAsanInitializationOrderMagic = 0xf6;
+const int kAsanUserPoisonedMemoryMagic = 0xf7;
+const int kAsanContiguousContainerOOBMagic = 0xfc;
+const int kAsanStackUseAfterScopeMagic = 0xf8;
+const int kAsanGlobalRedzoneMagic = 0xf9;
+const int kAsanInternalHeapMagic = 0xfe;
+const int kAsanArrayCookieMagic = 0xac;
+const int kAsanIntraObjectRedzone = 0xbb;
+const int kAsanAllocaLeftMagic = 0xca;
+const int kAsanAllocaRightMagic = 0xcb;
+// Used to populate the shadow gap on systems that have no memory
+// protection there (e.g., Myriad).
+const int kAsanShadowGap = 0xcc;
+
+static const uptr kCurrentStackFrameMagic = 0x41B58AB3;
+static const uptr kRetiredStackFrameMagic = 0x45E0360E;
+
+} // namespace __asan
+
+#endif // ASAN_INTERNAL_H
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_linux.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_linux.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_linux.cc (revision 351984)
@@ -0,0 +1,260 @@
+//===-- asan_linux.cc -----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Linux-specific details.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
+ SANITIZER_SOLARIS
+
+#include "asan_interceptors.h"
+#include "asan_internal.h"
+#include "asan_premap_shadow.h"
+#include "asan_thread.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_freebsd.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_procmaps.h"
+
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <sys/mman.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <dlfcn.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <unwind.h>
+
+#if SANITIZER_FREEBSD
+#include <sys/link_elf.h>
+#endif
+
+#if SANITIZER_SOLARIS
+#include <link.h>
+#endif
+
+#if SANITIZER_ANDROID || SANITIZER_FREEBSD || SANITIZER_SOLARIS
+#include <ucontext.h>
+extern "C" void* _DYNAMIC;
+#elif SANITIZER_NETBSD
+#include <link_elf.h>
+#include <ucontext.h>
+extern Elf_Dyn _DYNAMIC;
+#else
+#include <sys/ucontext.h>
+#include <link.h>
+#endif
+
+// x86-64 FreeBSD 9.2 and older define 'ucontext_t' incorrectly in
+// 32-bit mode.
+#if SANITIZER_FREEBSD && (SANITIZER_WORDSIZE == 32) && \
+ __FreeBSD_version <= 902001 // v9.2
+#define ucontext_t xucontext_t
+#endif
+
+typedef enum {
+ ASAN_RT_VERSION_UNDEFINED = 0,
+ ASAN_RT_VERSION_DYNAMIC,
+ ASAN_RT_VERSION_STATIC,
+} asan_rt_version_t;
+
+// FIXME: perhaps also store abi version here?
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+asan_rt_version_t __asan_rt_version;
+}
+
+namespace __asan {
+
+void InitializePlatformInterceptors() {}
+void InitializePlatformExceptionHandlers() {}
+bool IsSystemHeapAddress (uptr addr) { return false; }
+
+void *AsanDoesNotSupportStaticLinkage() {
+ // This will fail to link with -static.
+ return &_DYNAMIC; // defined in link.h
+}
+
+static void UnmapFromTo(uptr from, uptr to) {
+ CHECK(to >= from);
+ if (to == from) return;
+ uptr res = internal_munmap(reinterpret_cast<void *>(from), to - from);
+ if (UNLIKELY(internal_iserror(res))) {
+ Report(
+ "ERROR: AddresSanitizer failed to unmap 0x%zx (%zd) bytes at address "
+ "%p\n",
+ to - from, to - from, from);
+ CHECK("unable to unmap" && 0);
+ }
+}
+
+#if ASAN_PREMAP_SHADOW
+uptr FindPremappedShadowStart() {
+ uptr granularity = GetMmapGranularity();
+ uptr shadow_start = reinterpret_cast<uptr>(&__asan_shadow);
+ uptr premap_shadow_size = PremapShadowSize();
+ uptr shadow_size = RoundUpTo(kHighShadowEnd, granularity);
+ // We may have mapped too much. Release extra memory.
+ UnmapFromTo(shadow_start + shadow_size, shadow_start + premap_shadow_size);
+ return shadow_start;
+}
+#endif
+
+uptr FindDynamicShadowStart() {
+#if ASAN_PREMAP_SHADOW
+ if (!PremapShadowFailed())
+ return FindPremappedShadowStart();
+#endif
+
+ uptr granularity = GetMmapGranularity();
+ uptr alignment = granularity * 8;
+ uptr left_padding = granularity;
+ uptr shadow_size = RoundUpTo(kHighShadowEnd, granularity);
+ uptr map_size = shadow_size + left_padding + alignment;
+
+ uptr map_start = (uptr)MmapNoAccess(map_size);
+ CHECK_NE(map_start, ~(uptr)0);
+
+ uptr shadow_start = RoundUpTo(map_start + left_padding, alignment);
+ UnmapFromTo(map_start, shadow_start - left_padding);
+ UnmapFromTo(shadow_start + shadow_size, map_start + map_size);
+
+ return shadow_start;
+}
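
The over-allocate-then-trim pattern above is how an aligned start is obtained without any alignment guarantee from mmap: mapping shadow_size + left_padding + alignment bytes always leaves room to round the start up. A worked example with made-up numbers (4 KiB granularity, so alignment = 32 KiB):

    // map_start    = 0x7f0000003000      (whatever MmapNoAccess returned)
    // shadow_start = RoundUpTo(0x7f0000003000 + 0x1000, 0x8000)
    //              = 0x7f0000008000      (aligned, with >= 1 page of padding)
    // UnmapFromTo(0x7f0000003000, 0x7f0000007000);       // trim excess below
    // UnmapFromTo(0x7f0000008000 + shadow_size,
    //             0x7f0000003000 + map_size);            // trim excess above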
+
+void AsanApplyToGlobals(globals_op_fptr op, const void *needle) {
+ UNIMPLEMENTED();
+}
+
+#if SANITIZER_ANDROID
+// FIXME: should we do anything for Android?
+void AsanCheckDynamicRTPrereqs() {}
+void AsanCheckIncompatibleRT() {}
+#else
+static int FindFirstDSOCallback(struct dl_phdr_info *info, size_t size,
+ void *data) {
+ VReport(2, "info->dlpi_name = %s\tinfo->dlpi_addr = %p\n",
+ info->dlpi_name, info->dlpi_addr);
+
+ // Continue until the first dynamic library is found
+ if (!info->dlpi_name || info->dlpi_name[0] == 0)
+ return 0;
+
+ // Ignore vDSO
+ if (internal_strncmp(info->dlpi_name, "linux-", sizeof("linux-") - 1) == 0)
+ return 0;
+
+#if SANITIZER_FREEBSD || SANITIZER_NETBSD
+ // Ignore first entry (the main program)
+ char **p = (char **)data;
+ if (!(*p)) {
+ *p = (char *)-1;
+ return 0;
+ }
+#endif
+
+#if SANITIZER_SOLARIS
+ // Ignore executable on Solaris
+ if (info->dlpi_addr == 0)
+ return 0;
+#endif
+
+ *(const char **)data = info->dlpi_name;
+ return 1;
+}
+
+static bool IsDynamicRTName(const char *libname) {
+ return internal_strstr(libname, "libclang_rt.asan") ||
+ internal_strstr(libname, "libasan.so");
+}
+
+static void ReportIncompatibleRT() {
+ Report("Your application is linked against incompatible ASan runtimes.\n");
+ Die();
+}
+
+void AsanCheckDynamicRTPrereqs() {
+ if (!ASAN_DYNAMIC || !flags()->verify_asan_link_order)
+ return;
+
+ // Ensure that dynamic RT is the first DSO in the list
+ const char *first_dso_name = nullptr;
+ dl_iterate_phdr(FindFirstDSOCallback, &first_dso_name);
+ if (first_dso_name && !IsDynamicRTName(first_dso_name)) {
+ Report("ASan runtime does not come first in initial library list; "
+ "you should either link runtime to your application or "
+ "manually preload it with LD_PRELOAD.\n");
+ Die();
+ }
+}
+
+void AsanCheckIncompatibleRT() {
+ if (ASAN_DYNAMIC) {
+ if (__asan_rt_version == ASAN_RT_VERSION_UNDEFINED) {
+ __asan_rt_version = ASAN_RT_VERSION_DYNAMIC;
+ } else if (__asan_rt_version != ASAN_RT_VERSION_DYNAMIC) {
+ ReportIncompatibleRT();
+ }
+ } else {
+ if (__asan_rt_version == ASAN_RT_VERSION_UNDEFINED) {
+ // Ensure that dynamic runtime is not present. We should detect it
+ // as early as possible, otherwise ASan interceptors could bind to
+ // the functions in dynamic ASan runtime instead of the functions in
+ // system libraries, causing crashes later in ASan initialization.
+ MemoryMappingLayout proc_maps(/*cache_enabled*/true);
+ char filename[PATH_MAX];
+ MemoryMappedSegment segment(filename, sizeof(filename));
+ while (proc_maps.Next(&segment)) {
+ if (IsDynamicRTName(segment.filename)) {
+ Report("Your application is linked against "
+ "incompatible ASan runtimes.\n");
+ Die();
+ }
+ }
+ __asan_rt_version = ASAN_RT_VERSION_STATIC;
+ } else if (__asan_rt_version != ASAN_RT_VERSION_STATIC) {
+ ReportIncompatibleRT();
+ }
+ }
+}
+#endif // SANITIZER_ANDROID
+
+#if !SANITIZER_ANDROID
+void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
+ ucontext_t *ucp = (ucontext_t*)context;
+ *stack = (uptr)ucp->uc_stack.ss_sp;
+ *ssize = ucp->uc_stack.ss_size;
+}
+#else
+void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
+ UNIMPLEMENTED();
+}
+#endif
+
+void *AsanDlSymNext(const char *sym) {
+ return dlsym(RTLD_NEXT, sym);
+}
+
+bool HandleDlopenInit() {
+ // Not supported on this platform.
+ static_assert(!SANITIZER_SUPPORTS_INIT_FOR_DLOPEN,
+ "Expected SANITIZER_SUPPORTS_INIT_FOR_DLOPEN to be false");
+ return false;
+}
+
+} // namespace __asan
+
+#endif // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD ||
+ // SANITIZER_SOLARIS
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_mac.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_mac.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_mac.cc (revision 351984)
@@ -0,0 +1,331 @@
+//===-- asan_mac.cc -------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Mac-specific details.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#if SANITIZER_MAC
+
+#include "asan_interceptors.h"
+#include "asan_internal.h"
+#include "asan_mapping.h"
+#include "asan_stack.h"
+#include "asan_thread.h"
+#include "sanitizer_common/sanitizer_atomic.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_mac.h"
+
+#include <dlfcn.h>
+#include <fcntl.h>
+#include <libkern/OSAtomic.h>
+#include <mach-o/dyld.h>
+#include <mach-o/getsect.h>
+#include <mach-o/loader.h>
+#include <pthread.h>
+#include <stdlib.h> // for free()
+#include <sys/mman.h>
+#include <sys/resource.h>
+#include <sys/sysctl.h>
+#include <sys/ucontext.h>
+#include <unistd.h>
+
+// from <crt_externs.h>, but we don't have that file on iOS
+extern "C" {
+ extern char ***_NSGetArgv(void);
+ extern char ***_NSGetEnviron(void);
+}
+
+namespace __asan {
+
+void InitializePlatformInterceptors() {}
+void InitializePlatformExceptionHandlers() {}
+bool IsSystemHeapAddress(uptr addr) { return false; }
+
+// No-op. Mac does not support static linkage anyway.
+void *AsanDoesNotSupportStaticLinkage() {
+ return 0;
+}
+
+uptr FindDynamicShadowStart() {
+ uptr granularity = GetMmapGranularity();
+ uptr alignment = 8 * granularity;
+ uptr left_padding = granularity;
+ uptr space_size = kHighShadowEnd + left_padding;
+
+ uptr largest_gap_found = 0;
+ uptr max_occupied_addr = 0;
+ VReport(2, "FindDynamicShadowStart, space_size = %p\n", space_size);
+ uptr shadow_start =
+ FindAvailableMemoryRange(space_size, alignment, granularity,
+ &largest_gap_found, &max_occupied_addr);
+ // If the shadow doesn't fit, restrict the address space to make it fit.
+ if (shadow_start == 0) {
+ VReport(
+ 2,
+ "Shadow doesn't fit, largest_gap_found = %p, max_occupied_addr = %p\n",
+ largest_gap_found, max_occupied_addr);
+ uptr new_max_vm = RoundDownTo(largest_gap_found << SHADOW_SCALE, alignment);
+ if (new_max_vm < max_occupied_addr) {
+ Report("Unable to find a memory range for dynamic shadow.\n");
+ Report(
+ "space_size = %p, largest_gap_found = %p, max_occupied_addr = %p, "
+ "new_max_vm = %p\n",
+ space_size, largest_gap_found, max_occupied_addr, new_max_vm);
+ CHECK(0 && "cannot place shadow");
+ }
+ RestrictMemoryToMaxAddress(new_max_vm);
+ kHighMemEnd = new_max_vm - 1;
+ space_size = kHighShadowEnd + left_padding;
+ VReport(2, "FindDynamicShadowStart, space_size = %p\n", space_size);
+ shadow_start = FindAvailableMemoryRange(space_size, alignment, granularity,
+ nullptr, nullptr);
+ if (shadow_start == 0) {
+ Report("Unable to find a memory range after restricting VM.\n");
+ CHECK(0 && "cannot place shadow after restricting vm");
+ }
+ }
+ CHECK_NE((uptr)0, shadow_start);
+ CHECK(IsAligned(shadow_start, alignment));
+ return shadow_start;
+}
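+
+// Sizing intuition for the fallback above (illustrative, assuming the default
+// SHADOW_SCALE of 3): the shadow occupies 1/8th of the address range it
+// covers, so a free gap of G bytes can shadow at most G << 3 bytes of
+// application memory. Capping the VM at
+// RoundDownTo(largest_gap_found << SHADOW_SCALE, alignment) therefore leaves
+// the largest gap big enough to hold the shadow of the shrunken address
+// space.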
+
+// No-op. Mac does not support static linkage anyway.
+void AsanCheckDynamicRTPrereqs() {}
+
+// No-op. Mac does not support static linkage anyway.
+void AsanCheckIncompatibleRT() {}
+
+void AsanApplyToGlobals(globals_op_fptr op, const void *needle) {
+ // Find the Mach-O header for the image containing the needle
+ Dl_info info;
+ int err = dladdr(needle, &info);
+ if (err == 0) return;
+
+#if __LP64__
+ const struct mach_header_64 *mh = (struct mach_header_64 *)info.dli_fbase;
+#else
+ const struct mach_header *mh = (struct mach_header *)info.dli_fbase;
+#endif
+
+ // Look up the __asan_globals section in that image and register its globals
+ unsigned long size = 0;
+ __asan_global *globals = (__asan_global *)getsectiondata(
+ mh,
+ "__DATA", "__asan_globals",
+ &size);
+
+ if (!globals) return;
+ if (size % sizeof(__asan_global) != 0) return;
+ op(globals, size / sizeof(__asan_global));
+}
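+
+// Illustrative only (kept out of the build): the shape of callback that
+// AsanApplyToGlobals hands the section contents to. A 'globals_op_fptr'
+// receives the image's __asan_global array and its element count; the name
+// below is hypothetical.
+#if 0
+static void ExampleGlobalsOp(__asan_global *globals, uptr n) {
+ for (uptr i = 0; i < n; i++)
+ VReport(2, "global %p, size %zd\n", (void *)globals[i].beg,
+ globals[i].size);
+}
+#endif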
+
+void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
+ UNIMPLEMENTED();
+}
+
+// Support for the following functions from libdispatch on Mac OS:
+// dispatch_async_f()
+// dispatch_async()
+// dispatch_sync_f()
+// dispatch_sync()
+// dispatch_after_f()
+// dispatch_after()
+// dispatch_group_async_f()
+// dispatch_group_async()
+// TODO(glider): libdispatch API contains other functions that we don't support
+// yet.
+//
+// dispatch_sync() and dispatch_sync_f() are synchronous, although chances are
+// they can cause jobs to run on a thread different from the current one.
+// TODO(glider): if so, we need a test for this (otherwise we should remove
+// them).
+//
+// The following functions use dispatch_barrier_async_f() (which isn't a library
+// function but is exported) and are thus supported:
+// dispatch_source_set_cancel_handler_f()
+// dispatch_source_set_cancel_handler()
+// dispatch_source_set_event_handler_f()
+// dispatch_source_set_event_handler()
+//
+// The reference manual for Grand Central Dispatch is available at
+// http://developer.apple.com/library/mac/#documentation/Performance/Reference/GCD_libdispatch_Ref/Reference/reference.html
+// The implementation details are at
+// http://libdispatch.macosforge.org/trac/browser/trunk/src/queue.c
+
+typedef void* dispatch_group_t;
+typedef void* dispatch_queue_t;
+typedef void* dispatch_source_t;
+typedef u64 dispatch_time_t;
+typedef void (*dispatch_function_t)(void *block);
+typedef void* (*worker_t)(void *block);
+
+// A wrapper for the ObjC blocks used to support libdispatch.
+typedef struct {
+ void *block;
+ dispatch_function_t func;
+ u32 parent_tid;
+} asan_block_context_t;
+
+ALWAYS_INLINE
+void asan_register_worker_thread(int parent_tid, StackTrace *stack) {
+ AsanThread *t = GetCurrentThread();
+ if (!t) {
+ t = AsanThread::Create(/* start_routine */ nullptr, /* arg */ nullptr,
+ parent_tid, stack, /* detached */ true);
+ t->Init();
+ asanThreadRegistry().StartThread(t->tid(), GetTid(), ThreadType::Worker,
+ nullptr);
+ SetCurrentThread(t);
+ }
+}
+
+// For use only by those functions that allocated the context via
+// alloc_asan_context().
+extern "C"
+void asan_dispatch_call_block_and_release(void *block) {
+ GET_STACK_TRACE_THREAD;
+ asan_block_context_t *context = (asan_block_context_t*)block;
+ VReport(2,
+ "asan_dispatch_call_block_and_release(): "
+ "context: %p, pthread_self: %p\n",
+ block, pthread_self());
+ asan_register_worker_thread(context->parent_tid, &stack);
+ // Call the original dispatcher for the block.
+ context->func(context->block);
+ asan_free(context, &stack, FROM_MALLOC);
+}
+
+} // namespace __asan
+
+using namespace __asan; // NOLINT
+
+// Wrap |ctxt| and |func| into an asan_block_context_t.
+// The caller retains control of the allocated context.
+extern "C"
+asan_block_context_t *alloc_asan_context(void *ctxt, dispatch_function_t func,
+ BufferedStackTrace *stack) {
+ asan_block_context_t *asan_ctxt =
+ (asan_block_context_t*) asan_malloc(sizeof(asan_block_context_t), stack);
+ asan_ctxt->block = ctxt;
+ asan_ctxt->func = func;
+ asan_ctxt->parent_tid = GetCurrentTidOrInvalid();
+ return asan_ctxt;
+}
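+
+// Lifecycle sketch (illustrative): the interceptors below allocate the
+// context, hand it to libdispatch, and asan_dispatch_call_block_and_release
+// (defined above) runs the user's function and frees the context on the
+// worker thread:
+//
+// dispatch_async_f(q, ctxt, func)
+// -> REAL(dispatch_async_f)(q, alloc_asan_context(ctxt, func, &stack),
+// asan_dispatch_call_block_and_release)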
+
+// Define interceptor for dispatch_*_f function with the three most common
+// parameters: dispatch_queue_t, context, dispatch_function_t.
+#define INTERCEPT_DISPATCH_X_F_3(dispatch_x_f) \
+ INTERCEPTOR(void, dispatch_x_f, dispatch_queue_t dq, void *ctxt, \
+ dispatch_function_t func) { \
+ GET_STACK_TRACE_THREAD; \
+ asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack); \
+ if (Verbosity() >= 2) { \
+ Report(#dispatch_x_f "(): context: %p, pthread_self: %p\n", \
+ asan_ctxt, pthread_self()); \
+ PRINT_CURRENT_STACK(); \
+ } \
+ return REAL(dispatch_x_f)(dq, (void*)asan_ctxt, \
+ asan_dispatch_call_block_and_release); \
+ }
+
+INTERCEPT_DISPATCH_X_F_3(dispatch_async_f)
+INTERCEPT_DISPATCH_X_F_3(dispatch_sync_f)
+INTERCEPT_DISPATCH_X_F_3(dispatch_barrier_async_f)
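+
+// For reference (illustrative, minus the verbosity logging), the line
+// INTERCEPT_DISPATCH_X_F_3(dispatch_async_f) above expands to roughly:
+//
+// INTERCEPTOR(void, dispatch_async_f, dispatch_queue_t dq, void *ctxt,
+// dispatch_function_t func) {
+// GET_STACK_TRACE_THREAD;
+// asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
+// return REAL(dispatch_async_f)(dq, (void *)asan_ctxt,
+// asan_dispatch_call_block_and_release);
+// }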
+
+INTERCEPTOR(void, dispatch_after_f, dispatch_time_t when,
+ dispatch_queue_t dq, void *ctxt,
+ dispatch_function_t func) {
+ GET_STACK_TRACE_THREAD;
+ asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
+ if (Verbosity() >= 2) {
+ Report("dispatch_after_f: %p\n", asan_ctxt);
+ PRINT_CURRENT_STACK();
+ }
+ return REAL(dispatch_after_f)(when, dq, (void*)asan_ctxt,
+ asan_dispatch_call_block_and_release);
+}
+
+INTERCEPTOR(void, dispatch_group_async_f, dispatch_group_t group,
+ dispatch_queue_t dq, void *ctxt,
+ dispatch_function_t func) {
+ GET_STACK_TRACE_THREAD;
+ asan_block_context_t *asan_ctxt = alloc_asan_context(ctxt, func, &stack);
+ if (Verbosity() >= 2) {
+ Report("dispatch_group_async_f(): context: %p, pthread_self: %p\n",
+ asan_ctxt, pthread_self());
+ PRINT_CURRENT_STACK();
+ }
+ REAL(dispatch_group_async_f)(group, dq, (void*)asan_ctxt,
+ asan_dispatch_call_block_and_release);
+}
+
+#if !defined(MISSING_BLOCKS_SUPPORT)
+extern "C" {
+void dispatch_async(dispatch_queue_t dq, void(^work)(void));
+void dispatch_group_async(dispatch_group_t dg, dispatch_queue_t dq,
+ void(^work)(void));
+void dispatch_after(dispatch_time_t when, dispatch_queue_t queue,
+ void(^work)(void));
+void dispatch_source_set_cancel_handler(dispatch_source_t ds,
+ void(^work)(void));
+void dispatch_source_set_event_handler(dispatch_source_t ds, void(^work)(void));
+}
+
+#define GET_ASAN_BLOCK(work) \
+ void (^asan_block)(void); \
+ int parent_tid = GetCurrentTidOrInvalid(); \
+ asan_block = ^(void) { \
+ GET_STACK_TRACE_THREAD; \
+ asan_register_worker_thread(parent_tid, &stack); \
+ work(); \
+ }
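+
+// Note on the macro above: 'asan_block' captures 'parent_tid' by value on the
+// submitting thread, while GET_STACK_TRACE_THREAD runs later on the worker
+// thread, so the worker gets registered with the submitter recorded as its
+// parent.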
+
+INTERCEPTOR(void, dispatch_async,
+ dispatch_queue_t dq, void(^work)(void)) {
+ ENABLE_FRAME_POINTER;
+ GET_ASAN_BLOCK(work);
+ REAL(dispatch_async)(dq, asan_block);
+}
+
+INTERCEPTOR(void, dispatch_group_async,
+ dispatch_group_t dg, dispatch_queue_t dq, void(^work)(void)) {
+ ENABLE_FRAME_POINTER;
+ GET_ASAN_BLOCK(work);
+ REAL(dispatch_group_async)(dg, dq, asan_block);
+}
+
+INTERCEPTOR(void, dispatch_after,
+ dispatch_time_t when, dispatch_queue_t queue, void(^work)(void)) {
+ ENABLE_FRAME_POINTER;
+ GET_ASAN_BLOCK(work);
+ REAL(dispatch_after)(when, queue, asan_block);
+}
+
+INTERCEPTOR(void, dispatch_source_set_cancel_handler,
+ dispatch_source_t ds, void(^work)(void)) {
+ if (!work) {
+ REAL(dispatch_source_set_cancel_handler)(ds, work);
+ return;
+ }
+ ENABLE_FRAME_POINTER;
+ GET_ASAN_BLOCK(work);
+ REAL(dispatch_source_set_cancel_handler)(ds, asan_block);
+}
+
+INTERCEPTOR(void, dispatch_source_set_event_handler,
+ dispatch_source_t ds, void(^work)(void)) {
+ ENABLE_FRAME_POINTER;
+ GET_ASAN_BLOCK(work);
+ REAL(dispatch_source_set_event_handler)(ds, asan_block);
+}
+#endif
+
+#endif // SANITIZER_MAC
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_malloc_linux.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_malloc_linux.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_malloc_linux.cc (revision 351984)
@@ -0,0 +1,307 @@
+//===-- asan_malloc_linux.cc ----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Linux-specific malloc interception.
+// We simply define functions like malloc, free, realloc, etc.
+// They will replace the corresponding libc functions automagically.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#if SANITIZER_FREEBSD || SANITIZER_FUCHSIA || SANITIZER_LINUX || \
+ SANITIZER_NETBSD || SANITIZER_RTEMS || SANITIZER_SOLARIS
+
+#include "sanitizer_common/sanitizer_allocator_checks.h"
+#include "sanitizer_common/sanitizer_errno.h"
+#include "sanitizer_common/sanitizer_tls_get_addr.h"
+#include "asan_allocator.h"
+#include "asan_interceptors.h"
+#include "asan_internal.h"
+#include "asan_malloc_local.h"
+#include "asan_stack.h"
+
+// ---------------------- Replacement functions ---------------- {{{1
+using namespace __asan; // NOLINT
+
+static uptr allocated_for_dlsym;
+static uptr last_dlsym_alloc_size_in_words;
+static const uptr kDlsymAllocPoolSize = SANITIZER_RTEMS ? 4096 : 1024;
+static uptr alloc_memory_for_dlsym[kDlsymAllocPoolSize];
+
+static INLINE bool IsInDlsymAllocPool(const void *ptr) {
+ uptr off = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
+ return off < allocated_for_dlsym * sizeof(alloc_memory_for_dlsym[0]);
+}
+
+static void *AllocateFromLocalPool(uptr size_in_bytes) {
+ uptr size_in_words = RoundUpTo(size_in_bytes, kWordSize) / kWordSize;
+ void *mem = (void*)&alloc_memory_for_dlsym[allocated_for_dlsym];
+ last_dlsym_alloc_size_in_words = size_in_words;
+ allocated_for_dlsym += size_in_words;
+ CHECK_LT(allocated_for_dlsym, kDlsymAllocPoolSize);
+ return mem;
+}
+
+static void DeallocateFromLocalPool(const void *ptr) {
+ // Hack: since glibc 2.27 dlsym no longer uses stack-allocated memory to store
+ // error messages and instead uses malloc followed by free. To avoid pool
+ // exhaustion due to long object filenames, handle that special case here.
+ uptr prev_offset = allocated_for_dlsym - last_dlsym_alloc_size_in_words;
+ void *prev_mem = (void*)&alloc_memory_for_dlsym[prev_offset];
+ if (prev_mem == ptr) {
+ REAL(memset)(prev_mem, 0, last_dlsym_alloc_size_in_words * kWordSize);
+ allocated_for_dlsym = prev_offset;
+ last_dlsym_alloc_size_in_words = 0;
+ }
+}
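+
+// Worked example of the LIFO special case above (illustrative sizes,
+// assuming a 64-bit word size): a dlsym error path calling malloc(96) takes
+// 12 pool words and records 12 in last_dlsym_alloc_size_in_words; a matching
+// free() of that same pointer rewinds allocated_for_dlsym by 12, so repeated
+// malloc/free pairs cannot exhaust the pool. Frees of any other pool pointer
+// are deliberately ignored.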
+
+static int PosixMemalignFromLocalPool(void **memptr, uptr alignment,
+ uptr size_in_bytes) {
+ if (UNLIKELY(!CheckPosixMemalignAlignment(alignment)))
+ return errno_EINVAL;
+
+ CHECK(alignment >= kWordSize);
+
+ uptr addr = (uptr)&alloc_memory_for_dlsym[allocated_for_dlsym];
+ uptr aligned_addr = RoundUpTo(addr, alignment);
+ uptr aligned_size = RoundUpTo(size_in_bytes, kWordSize);
+
+ uptr *end_mem = (uptr*)(aligned_addr + aligned_size);
+ uptr allocated = end_mem - alloc_memory_for_dlsym;
+ if (allocated >= kDlsymAllocPoolSize)
+ return errno_ENOMEM;
+
+ allocated_for_dlsym = allocated;
+ *memptr = (void*)aligned_addr;
+ return 0;
+}
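+
+// Alignment math above, with illustrative numbers: for addr == 0x1008 and
+// alignment == 0x40, RoundUpTo yields aligned_addr == 0x1040; the words
+// skipped between addr and aligned_addr simply stay allocated as padding,
+// which is acceptable for this small bootstrap pool.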
+
+#if SANITIZER_RTEMS
+void* MemalignFromLocalPool(uptr alignment, uptr size) {
+ void *ptr = nullptr;
+ alignment = Max(alignment, kWordSize);
+ PosixMemalignFromLocalPool(&ptr, alignment, size);
+ return ptr;
+}
+
+bool IsFromLocalPool(const void *ptr) {
+ return IsInDlsymAllocPool(ptr);
+}
+#endif
+
+static INLINE bool MaybeInDlsym() {
+ // Fuchsia doesn't use dlsym-based interceptors.
+ return !SANITIZER_FUCHSIA && asan_init_is_running;
+}
+
+static INLINE bool UseLocalPool() {
+ return EarlyMalloc() || MaybeInDlsym();
+}
+
+static void *ReallocFromLocalPool(void *ptr, uptr size) {
+ const uptr offset = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
+ const uptr copy_size = Min(size, sizeof(alloc_memory_for_dlsym) - offset);
+ void *new_ptr;
+ if (UNLIKELY(UseLocalPool())) {
+ new_ptr = AllocateFromLocalPool(size);
+ } else {
+ ENSURE_ASAN_INITED();
+ GET_STACK_TRACE_MALLOC;
+ new_ptr = asan_malloc(size, &stack);
+ }
+ internal_memcpy(new_ptr, ptr, copy_size);
+ return new_ptr;
+}
+
+INTERCEPTOR(void, free, void *ptr) {
+ GET_STACK_TRACE_FREE;
+ if (UNLIKELY(IsInDlsymAllocPool(ptr))) {
+ DeallocateFromLocalPool(ptr);
+ return;
+ }
+ asan_free(ptr, &stack, FROM_MALLOC);
+}
+
+#if SANITIZER_INTERCEPT_CFREE
+INTERCEPTOR(void, cfree, void *ptr) {
+ GET_STACK_TRACE_FREE;
+ if (UNLIKELY(IsInDlsymAllocPool(ptr)))
+ return;
+ asan_free(ptr, &stack, FROM_MALLOC);
+}
+#endif // SANITIZER_INTERCEPT_CFREE
+
+INTERCEPTOR(void*, malloc, uptr size) {
+ if (UNLIKELY(UseLocalPool()))
+ // Hack: dlsym calls malloc before REAL(malloc) is retrieved from dlsym.
+ return AllocateFromLocalPool(size);
+ ENSURE_ASAN_INITED();
+ GET_STACK_TRACE_MALLOC;
+ return asan_malloc(size, &stack);
+}
+
+INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) {
+ if (UNLIKELY(UseLocalPool()))
+ // Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
+ return AllocateFromLocalPool(nmemb * size);
+ ENSURE_ASAN_INITED();
+ GET_STACK_TRACE_MALLOC;
+ return asan_calloc(nmemb, size, &stack);
+}
+
+INTERCEPTOR(void*, realloc, void *ptr, uptr size) {
+ if (UNLIKELY(IsInDlsymAllocPool(ptr)))
+ return ReallocFromLocalPool(ptr, size);
+ if (UNLIKELY(UseLocalPool()))
+ return AllocateFromLocalPool(size);
+ ENSURE_ASAN_INITED();
+ GET_STACK_TRACE_MALLOC;
+ return asan_realloc(ptr, size, &stack);
+}
+
+#if SANITIZER_INTERCEPT_REALLOCARRAY
+INTERCEPTOR(void*, reallocarray, void *ptr, uptr nmemb, uptr size) {
+ ENSURE_ASAN_INITED();
+ GET_STACK_TRACE_MALLOC;
+ return asan_reallocarray(ptr, nmemb, size, &stack);
+}
+#endif // SANITIZER_INTERCEPT_REALLOCARRAY
+
+#if SANITIZER_INTERCEPT_MEMALIGN
+INTERCEPTOR(void*, memalign, uptr boundary, uptr size) {
+ GET_STACK_TRACE_MALLOC;
+ return asan_memalign(boundary, size, &stack, FROM_MALLOC);
+}
+
+INTERCEPTOR(void*, __libc_memalign, uptr boundary, uptr size) {
+ GET_STACK_TRACE_MALLOC;
+ void *res = asan_memalign(boundary, size, &stack, FROM_MALLOC);
+ DTLS_on_libc_memalign(res, size);
+ return res;
+}
+#endif // SANITIZER_INTERCEPT_MEMALIGN
+
+#if SANITIZER_INTERCEPT_ALIGNED_ALLOC
+INTERCEPTOR(void*, aligned_alloc, uptr boundary, uptr size) {
+ GET_STACK_TRACE_MALLOC;
+ return asan_aligned_alloc(boundary, size, &stack);
+}
+#endif // SANITIZER_INTERCEPT_ALIGNED_ALLOC
+
+INTERCEPTOR(uptr, malloc_usable_size, void *ptr) {
+ GET_CURRENT_PC_BP_SP;
+ (void)sp;
+ return asan_malloc_usable_size(ptr, pc, bp);
+}
+
+#if SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO
+// We avoid including malloc.h for portability reasons.
+// man mallinfo says the fields are "long", but the implementation uses int.
+// It doesn't matter much -- we just need to make sure that the libc's mallinfo
+// is not called.
+struct fake_mallinfo {
+ int x[10];
+};
+
+INTERCEPTOR(struct fake_mallinfo, mallinfo, void) {
+ struct fake_mallinfo res;
+ REAL(memset)(&res, 0, sizeof(res));
+ return res;
+}
+
+INTERCEPTOR(int, mallopt, int cmd, int value) {
+ return 0;
+}
+#endif // SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO
+
+INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) {
+ if (UNLIKELY(UseLocalPool()))
+ return PosixMemalignFromLocalPool(memptr, alignment, size);
+ GET_STACK_TRACE_MALLOC;
+ return asan_posix_memalign(memptr, alignment, size, &stack);
+}
+
+INTERCEPTOR(void*, valloc, uptr size) {
+ GET_STACK_TRACE_MALLOC;
+ return asan_valloc(size, &stack);
+}
+
+#if SANITIZER_INTERCEPT_PVALLOC
+INTERCEPTOR(void*, pvalloc, uptr size) {
+ GET_STACK_TRACE_MALLOC;
+ return asan_pvalloc(size, &stack);
+}
+#endif // SANITIZER_INTERCEPT_PVALLOC
+
+INTERCEPTOR(void, malloc_stats, void) {
+ __asan_print_accumulated_stats();
+}
+
+#if SANITIZER_ANDROID
+// Format of __libc_malloc_dispatch has changed in Android L.
+// While we are moving towards a solution that does not depend on bionic
+// internals, here is something to support both K* and L releases.
+struct MallocDebugK {
+ void *(*malloc)(uptr bytes);
+ void (*free)(void *mem);
+ void *(*calloc)(uptr n_elements, uptr elem_size);
+ void *(*realloc)(void *oldMem, uptr bytes);
+ void *(*memalign)(uptr alignment, uptr bytes);
+ uptr (*malloc_usable_size)(void *mem);
+};
+
+struct MallocDebugL {
+ void *(*calloc)(uptr n_elements, uptr elem_size);
+ void (*free)(void *mem);
+ fake_mallinfo (*mallinfo)(void);
+ void *(*malloc)(uptr bytes);
+ uptr (*malloc_usable_size)(void *mem);
+ void *(*memalign)(uptr alignment, uptr bytes);
+ int (*posix_memalign)(void **memptr, uptr alignment, uptr size);
+ void* (*pvalloc)(uptr size);
+ void *(*realloc)(void *oldMem, uptr bytes);
+ void* (*valloc)(uptr size);
+};
+
+ALIGNED(32) const MallocDebugK asan_malloc_dispatch_k = {
+ WRAP(malloc), WRAP(free), WRAP(calloc),
+ WRAP(realloc), WRAP(memalign), WRAP(malloc_usable_size)};
+
+ALIGNED(32) const MallocDebugL asan_malloc_dispatch_l = {
+ WRAP(calloc), WRAP(free), WRAP(mallinfo),
+ WRAP(malloc), WRAP(malloc_usable_size), WRAP(memalign),
+ WRAP(posix_memalign), WRAP(pvalloc), WRAP(realloc),
+ WRAP(valloc)};
+
+namespace __asan {
+void ReplaceSystemMalloc() {
+ void **__libc_malloc_dispatch_p =
+ (void **)AsanDlSymNext("__libc_malloc_dispatch");
+ if (__libc_malloc_dispatch_p) {
+ // Decide on K vs L dispatch format by the presence of
+ // __libc_malloc_default_dispatch export in libc.
+ void *default_dispatch_p = AsanDlSymNext("__libc_malloc_default_dispatch");
+ if (default_dispatch_p)
+ *__libc_malloc_dispatch_p = (void *)&asan_malloc_dispatch_k;
+ else
+ *__libc_malloc_dispatch_p = (void *)&asan_malloc_dispatch_l;
+ }
+}
+} // namespace __asan
+
+#else // SANITIZER_ANDROID
+
+namespace __asan {
+void ReplaceSystemMalloc() {
+}
+} // namespace __asan
+#endif // SANITIZER_ANDROID
+
+#endif // SANITIZER_FREEBSD || SANITIZER_FUCHSIA || SANITIZER_LINUX ||
+ // SANITIZER_NETBSD || SANITIZER_RTEMS || SANITIZER_SOLARIS
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_malloc_local.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_malloc_local.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_malloc_local.h (revision 351984)
@@ -0,0 +1,52 @@
+//===-- asan_malloc_local.h -------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Provide interfaces to check for and handle local pool memory allocation.
+//===----------------------------------------------------------------------===//
+
+#ifndef ASAN_MALLOC_LOCAL_H
+#define ASAN_MALLOC_LOCAL_H
+
+#include "sanitizer_common/sanitizer_platform.h"
+#include "asan_internal.h"
+
+static INLINE bool EarlyMalloc() {
+ return SANITIZER_RTEMS &&
+ (!__asan::asan_inited || __asan::asan_init_is_running);
+}
+
+#if SANITIZER_RTEMS
+
+bool IsFromLocalPool(const void *ptr);
+void *MemalignFromLocalPool(uptr alignment, uptr size);
+
+// On RTEMS, we use the local pool to handle memory allocation when the ASan
+// run-time is not up. This macro is expanded in the context of the operator new
+// implementation.
+#define MAYBE_ALLOCATE_FROM_LOCAL_POOL(nothrow) \
+ do { \
+ if (UNLIKELY(EarlyMalloc())) { \
+ void *res = MemalignFromLocalPool(SHADOW_GRANULARITY, size); \
+ if (!nothrow) \
+ CHECK(res); \
+ return res; \
+ } \
+ } while (0)
+
+#define IS_FROM_LOCAL_POOL(ptr) UNLIKELY(IsFromLocalPool(ptr))
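+
+// Sketch of the intended expansion context (hypothetical, mirroring the
+// operator new implementation that includes this header):
+//
+// void *operator new(size_t size) {
+// MAYBE_ALLOCATE_FROM_LOCAL_POOL(/*nothrow*/ false);
+// GET_STACK_TRACE_MALLOC;
+// return asan_memalign(0, size, &stack, FROM_NEW);
+// }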
+
+#else // SANITIZER_RTEMS
+
+#define MAYBE_ALLOCATE_FROM_LOCAL_POOL(nothrow)
+#define IS_FROM_LOCAL_POOL(ptr) 0
+
+#endif // SANITIZER_RTEMS
+
+#endif // ASAN_MALLOC_LOCAL_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_malloc_local.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_malloc_mac.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_malloc_mac.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_malloc_mac.cc (revision 351984)
@@ -0,0 +1,102 @@
+//===-- asan_malloc_mac.cc ------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Mac-specific malloc interception.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#if SANITIZER_MAC
+
+#include "asan_interceptors.h"
+#include "asan_report.h"
+#include "asan_stack.h"
+#include "asan_stats.h"
+#include "lsan/lsan_common.h"
+
+using namespace __asan;
+#define COMMON_MALLOC_ZONE_NAME "asan"
+#define COMMON_MALLOC_ENTER() ENSURE_ASAN_INITED()
+#define COMMON_MALLOC_SANITIZER_INITIALIZED asan_inited
+#define COMMON_MALLOC_FORCE_LOCK() asan_mz_force_lock()
+#define COMMON_MALLOC_FORCE_UNLOCK() asan_mz_force_unlock()
+#define COMMON_MALLOC_MEMALIGN(alignment, size) \
+ GET_STACK_TRACE_MALLOC; \
+ void *p = asan_memalign(alignment, size, &stack, FROM_MALLOC)
+#define COMMON_MALLOC_MALLOC(size) \
+ GET_STACK_TRACE_MALLOC; \
+ void *p = asan_malloc(size, &stack)
+#define COMMON_MALLOC_REALLOC(ptr, size) \
+ GET_STACK_TRACE_MALLOC; \
+ void *p = asan_realloc(ptr, size, &stack);
+#define COMMON_MALLOC_CALLOC(count, size) \
+ GET_STACK_TRACE_MALLOC; \
+ void *p = asan_calloc(count, size, &stack);
+#define COMMON_MALLOC_POSIX_MEMALIGN(memptr, alignment, size) \
+ GET_STACK_TRACE_MALLOC; \
+ int res = asan_posix_memalign(memptr, alignment, size, &stack);
+#define COMMON_MALLOC_VALLOC(size) \
+ GET_STACK_TRACE_MALLOC; \
+ void *p = asan_memalign(GetPageSizeCached(), size, &stack, FROM_MALLOC);
+#define COMMON_MALLOC_FREE(ptr) \
+ GET_STACK_TRACE_FREE; \
+ asan_free(ptr, &stack, FROM_MALLOC);
+#define COMMON_MALLOC_SIZE(ptr) \
+ uptr size = asan_mz_size(ptr);
+#define COMMON_MALLOC_FILL_STATS(zone, stats) \
+ AsanMallocStats malloc_stats; \
+ FillMallocStatistics(&malloc_stats); \
+ CHECK(sizeof(malloc_statistics_t) == sizeof(AsanMallocStats)); \
+ internal_memcpy(stats, &malloc_stats, sizeof(malloc_statistics_t));
+#define COMMON_MALLOC_REPORT_UNKNOWN_REALLOC(ptr, zone_ptr, zone_name) \
+ GET_STACK_TRACE_FREE; \
+ ReportMacMzReallocUnknown((uptr)ptr, (uptr)zone_ptr, zone_name, &stack);
+#define COMMON_MALLOC_NAMESPACE __asan
+#define COMMON_MALLOC_HAS_ZONE_ENUMERATOR 0
+#define COMMON_MALLOC_HAS_EXTRA_INTROSPECTION_INIT 1
+
+#include "sanitizer_common/sanitizer_malloc_mac.inc"
+
+namespace COMMON_MALLOC_NAMESPACE {
+
+bool HandleDlopenInit() {
+ static_assert(SANITIZER_SUPPORTS_INIT_FOR_DLOPEN,
+ "Expected SANITIZER_SUPPORTS_INIT_FOR_DLOPEN to be true");
+ // We have no reliable way of knowing how we are being loaded, so on Apple
+ // platforms we make it a requirement to set this environment variable to
+ // indicate that we want to perform initialization via dlopen().
+ auto init_str = GetEnv("APPLE_ASAN_INIT_FOR_DLOPEN");
+ if (!init_str)
+ return false;
+ if (internal_strncmp(init_str, "1", 1) != 0)
+ return false;
+ // When we are loaded via `dlopen()` path we still initialize the malloc zone
+ // so Symbolication clients (e.g. `leaks`) that load the ASan allocator can
+ // find an initialized malloc zone.
+ InitMallocZoneFields();
+ return true;
+}
+} // namespace COMMON_MALLOC_NAMESPACE
+
+namespace {
+
+void mi_extra_init(sanitizer_malloc_introspection_t *mi) {
+ uptr last_byte_plus_one = 0;
+ mi->allocator_ptr = 0;
+ // Range is [begin_ptr, end_ptr)
+ __lsan::GetAllocatorGlobalRange(&(mi->allocator_ptr), &last_byte_plus_one);
+ CHECK_NE(mi->allocator_ptr, 0);
+ CHECK_GT(last_byte_plus_one, mi->allocator_ptr);
+ mi->allocator_size = last_byte_plus_one - (mi->allocator_ptr);
+ CHECK_GT(mi->allocator_size, 0);
+}
+} // namespace
+
+#endif // SANITIZER_MAC
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_malloc_win.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_malloc_win.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_malloc_win.cc (revision 351984)
@@ -0,0 +1,553 @@
+//===-- asan_malloc_win.cc ------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Windows-specific malloc interception.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_allocator_interface.h"
+#include "sanitizer_common/sanitizer_platform.h"
+#if SANITIZER_WINDOWS
+#include "asan_allocator.h"
+#include "asan_interceptors.h"
+#include "asan_internal.h"
+#include "asan_stack.h"
+#include "interception/interception.h"
+#include <stddef.h>
+
+// Intentionally not including windows.h here, to avoid the risk of
+// pulling in conflicting declarations of these functions. (With mingw-w64,
+// there's a risk of windows.h pulling in stdint.h.)
+typedef int BOOL;
+typedef void *HANDLE;
+typedef const void *LPCVOID;
+typedef void *LPVOID;
+
+typedef unsigned long DWORD;
+constexpr unsigned long HEAP_ZERO_MEMORY = 0x00000008;
+constexpr unsigned long HEAP_REALLOC_IN_PLACE_ONLY = 0x00000010;
+constexpr unsigned long HEAP_ALLOCATE_SUPPORTED_FLAGS = (HEAP_ZERO_MEMORY);
+constexpr unsigned long HEAP_ALLOCATE_UNSUPPORTED_FLAGS =
+ (~HEAP_ALLOCATE_SUPPORTED_FLAGS);
+constexpr unsigned long HEAP_FREE_SUPPORTED_FLAGS = (0);
+constexpr unsigned long HEAP_FREE_UNSUPPORTED_FLAGS =
+ (~HEAP_FREE_SUPPORTED_FLAGS);
+constexpr unsigned long HEAP_REALLOC_SUPPORTED_FLAGS =
+ (HEAP_REALLOC_IN_PLACE_ONLY | HEAP_ZERO_MEMORY);
+constexpr unsigned long HEAP_REALLOC_UNSUPPORTED_FLAGS =
+ (~HEAP_REALLOC_SUPPORTED_FLAGS);
+
+extern "C" {
+LPVOID WINAPI HeapAlloc(HANDLE hHeap, DWORD dwFlags, size_t dwBytes);
+LPVOID WINAPI HeapReAlloc(HANDLE hHeap, DWORD dwFlags, LPVOID lpMem,
+ size_t dwBytes);
+BOOL WINAPI HeapFree(HANDLE hHeap, DWORD dwFlags, LPVOID lpMem);
+size_t WINAPI HeapSize(HANDLE hHeap, DWORD dwFlags, LPCVOID lpMem);
+
+BOOL WINAPI HeapValidate(HANDLE hHeap, DWORD dwFlags, LPCVOID lpMem);
+}
+
+using namespace __asan; // NOLINT
+
+// /MT (static CRT): simply defining functions with the same signature in
+// *.obj files overrides the standard functions in the CRT.
+// /MD (dynamic CRT): memory allocation functions are defined in the CRT .dll,
+// so we have to intercept them before they are called for the first time.
+
+#if ASAN_DYNAMIC
+# define ALLOCATION_FUNCTION_ATTRIBUTE
+#else
+# define ALLOCATION_FUNCTION_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
+#endif
+
+extern "C" {
+ALLOCATION_FUNCTION_ATTRIBUTE
+size_t _msize(void *ptr) {
+ GET_CURRENT_PC_BP_SP;
+ (void)sp;
+ return asan_malloc_usable_size(ptr, pc, bp);
+}
+
+ALLOCATION_FUNCTION_ATTRIBUTE
+size_t _msize_base(void *ptr) {
+ return _msize(ptr);
+}
+
+ALLOCATION_FUNCTION_ATTRIBUTE
+void free(void *ptr) {
+ GET_STACK_TRACE_FREE;
+ return asan_free(ptr, &stack, FROM_MALLOC);
+}
+
+ALLOCATION_FUNCTION_ATTRIBUTE
+void _free_dbg(void *ptr, int) {
+ free(ptr);
+}
+
+ALLOCATION_FUNCTION_ATTRIBUTE
+void _free_base(void *ptr) {
+ free(ptr);
+}
+
+ALLOCATION_FUNCTION_ATTRIBUTE
+void *malloc(size_t size) {
+ GET_STACK_TRACE_MALLOC;
+ return asan_malloc(size, &stack);
+}
+
+ALLOCATION_FUNCTION_ATTRIBUTE
+void *_malloc_base(size_t size) {
+ return malloc(size);
+}
+
+ALLOCATION_FUNCTION_ATTRIBUTE
+void *_malloc_dbg(size_t size, int, const char *, int) {
+ return malloc(size);
+}
+
+ALLOCATION_FUNCTION_ATTRIBUTE
+void *calloc(size_t nmemb, size_t size) {
+ GET_STACK_TRACE_MALLOC;
+ return asan_calloc(nmemb, size, &stack);
+}
+
+ALLOCATION_FUNCTION_ATTRIBUTE
+void *_calloc_base(size_t nmemb, size_t size) {
+ return calloc(nmemb, size);
+}
+
+ALLOCATION_FUNCTION_ATTRIBUTE
+void *_calloc_dbg(size_t nmemb, size_t size, int, const char *, int) {
+ return calloc(nmemb, size);
+}
+
+ALLOCATION_FUNCTION_ATTRIBUTE
+void *_calloc_impl(size_t nmemb, size_t size, int *errno_tmp) {
+ return calloc(nmemb, size);
+}
+
+ALLOCATION_FUNCTION_ATTRIBUTE
+void *realloc(void *ptr, size_t size) {
+ GET_STACK_TRACE_MALLOC;
+ return asan_realloc(ptr, size, &stack);
+}
+
+ALLOCATION_FUNCTION_ATTRIBUTE
+void *_realloc_dbg(void *ptr, size_t size, int) {
+ UNREACHABLE("_realloc_dbg should not exist!");
+ return 0;
+}
+
+ALLOCATION_FUNCTION_ATTRIBUTE
+void *_realloc_base(void *ptr, size_t size) {
+ return realloc(ptr, size);
+}
+
+ALLOCATION_FUNCTION_ATTRIBUTE
+void *_recalloc(void *p, size_t n, size_t elem_size) {
+ if (!p)
+ return calloc(n, elem_size);
+ const size_t size = n * elem_size;
+ if (elem_size != 0 && size / elem_size != n)
+ return 0;
+
+ size_t old_size = _msize(p);
+ void *new_alloc = malloc(size);
+ if (new_alloc) {
+ REAL(memcpy)(new_alloc, p, Min<size_t>(size, old_size));
+ if (old_size < size)
+ REAL(memset)(((u8 *)new_alloc) + old_size, 0, size - old_size);
+ free(p);
+ }
+ return new_alloc;
+}
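+
+// The guard above rejects multiplication overflow: e.g. on a 32-bit target,
+// _recalloc(p, 0x20000000, 8) computes size == 0, and size / elem_size != n,
+// so the call returns 0 instead of an undersized block.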
+
+ALLOCATION_FUNCTION_ATTRIBUTE
+void *_recalloc_base(void *p, size_t n, size_t elem_size) {
+ return _recalloc(p, n, elem_size);
+}
+
+ALLOCATION_FUNCTION_ATTRIBUTE
+void *_expand(void *memblock, size_t size) {
+ // _expand is used in realloc-like functions to resize the buffer in place
+ // if possible. We never resize in place: moving the allocation lets ASan
+ // poison the old region, so always return 0 (failure).
+ return 0;
+}
+
+ALLOCATION_FUNCTION_ATTRIBUTE
+void *_expand_dbg(void *memblock, size_t size) {
+ return _expand(memblock, size);
+}
+
+// TODO(timurrrr): Might want to add support for _aligned_* allocation
+// functions to detect a bit more bugs. Those functions seem to wrap malloc().
+
+int _CrtDbgReport(int, const char*, int,
+ const char*, const char*, ...) {
+ ShowStatsAndAbort();
+}
+
+int _CrtDbgReportW(int reportType, const wchar_t*, int,
+ const wchar_t*, const wchar_t*, ...) {
+ ShowStatsAndAbort();
+}
+
+int _CrtSetReportMode(int, int) {
+ return 0;
+}
+} // extern "C"
+
+#define OWNED_BY_RTL(heap, memory) \
+ (!__sanitizer_get_ownership(memory) && HeapValidate(heap, 0, memory))
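+// OWNED_BY_RTL(heap, memory) is true only when ASan does not recognize the
+// pointer *and* the native heap validates it, i.e. the block predates our
+// hooks. Decision table (illustrative):
+// get_ownership | HeapValidate | OWNED_BY_RTL
+// true | (not queried) | false (ASan-owned)
+// false | true | true (RTL-owned)
+// false | false | false (neither)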
+
+INTERCEPTOR_WINAPI(size_t, HeapSize, HANDLE hHeap, DWORD dwFlags,
+ LPCVOID lpMem) {
+ // If the RTL allocators are hooked we need to check whether the ASAN
+ // allocator owns the pointer we're about to use. Allocations occur before
+ // interception takes place, so if it is not owned by the RTL heap we can
+ // pass it to the ASAN heap for inspection.
+ if (flags()->windows_hook_rtl_allocators) {
+ if (!asan_inited || OWNED_BY_RTL(hHeap, lpMem))
+ return REAL(HeapSize)(hHeap, dwFlags, lpMem);
+ } else {
+ CHECK(dwFlags == 0 && "unsupported heap flags");
+ }
+ GET_CURRENT_PC_BP_SP;
+ (void)sp;
+ return asan_malloc_usable_size(lpMem, pc, bp);
+}
+
+INTERCEPTOR_WINAPI(LPVOID, HeapAlloc, HANDLE hHeap, DWORD dwFlags,
+ size_t dwBytes) {
+ // If the ASAN runtime is not initialized, or we encounter an unsupported
+ // flag, fall back to the original allocator.
+ if (flags()->windows_hook_rtl_allocators) {
+ if (UNLIKELY(!asan_inited ||
+ (dwFlags & HEAP_ALLOCATE_UNSUPPORTED_FLAGS) != 0)) {
+ return REAL(HeapAlloc)(hHeap, dwFlags, dwBytes);
+ }
+ } else {
+ // In the case that we don't hook the rtl allocators,
+ // this becomes an assert since there is no failover to the original
+ // allocator.
+ CHECK((HEAP_ALLOCATE_UNSUPPORTED_FLAGS & dwFlags) == 0 &&
+ "unsupported flags");
+ }
+ GET_STACK_TRACE_MALLOC;
+ void *p = asan_malloc(dwBytes, &stack);
+ // Reading MSDN suggests that the *entire* usable allocation is zeroed out.
+ // Otherwise it is difficult to HeapReAlloc with HEAP_ZERO_MEMORY.
+ // https://blogs.msdn.microsoft.com/oldnewthing/20120316-00/?p=8083
+ if (p && (dwFlags & HEAP_ZERO_MEMORY)) {
+ GET_CURRENT_PC_BP_SP;
+ (void)sp;
+ auto usable_size = asan_malloc_usable_size(p, pc, bp);
+ internal_memset(p, 0, usable_size);
+ }
+ return p;
+}
+
+INTERCEPTOR_WINAPI(BOOL, HeapFree, HANDLE hHeap, DWORD dwFlags, LPVOID lpMem) {
+ // Heap allocations happen before this function is hooked, so we must fall
+ // back to the original function if the pointer is not from the ASAN heap,
+ // or unsupported flags are provided.
+ if (flags()->windows_hook_rtl_allocators) {
+ if (OWNED_BY_RTL(hHeap, lpMem))
+ return REAL(HeapFree)(hHeap, dwFlags, lpMem);
+ } else {
+ CHECK((HEAP_FREE_UNSUPPORTED_FLAGS & dwFlags) == 0 && "unsupported flags");
+ }
+ GET_STACK_TRACE_FREE;
+ asan_free(lpMem, &stack, FROM_MALLOC);
+ return true;
+}
+
+namespace __asan {
+using AllocFunction = LPVOID(WINAPI *)(HANDLE, DWORD, size_t);
+using ReAllocFunction = LPVOID(WINAPI *)(HANDLE, DWORD, LPVOID, size_t);
+using SizeFunction = size_t(WINAPI *)(HANDLE, DWORD, LPVOID);
+using FreeFunction = BOOL(WINAPI *)(HANDLE, DWORD, LPVOID);
+
+void *SharedReAlloc(ReAllocFunction reallocFunc, SizeFunction heapSizeFunc,
+ FreeFunction freeFunc, AllocFunction allocFunc,
+ HANDLE hHeap, DWORD dwFlags, LPVOID lpMem, size_t dwBytes) {
+ CHECK(reallocFunc && heapSizeFunc && freeFunc && allocFunc);
+ GET_STACK_TRACE_MALLOC;
+ GET_CURRENT_PC_BP_SP;
+ (void)sp;
+ if (flags()->windows_hook_rtl_allocators) {
+ enum AllocationOwnership { NEITHER = 0, ASAN = 1, RTL = 2 };
+ AllocationOwnership ownershipState;
+ bool owned_rtlalloc = false;
+ bool owned_asan = __sanitizer_get_ownership(lpMem);
+
+ if (!owned_asan)
+ owned_rtlalloc = HeapValidate(hHeap, 0, lpMem);
+
+ if (owned_asan && !owned_rtlalloc)
+ ownershipState = ASAN;
+ else if (!owned_asan && owned_rtlalloc)
+ ownershipState = RTL;
+ else if (!owned_asan && !owned_rtlalloc)
+ ownershipState = NEITHER;
+
+ // If the ASAN runtime is not initialized yet, defer to the original
+ // realloc implementation.
+ if (UNLIKELY(!asan_inited)) {
+ return reallocFunc(hHeap, dwFlags, lpMem, dwBytes);
+ }
+ bool only_asan_supported_flags =
+ (HEAP_REALLOC_UNSUPPORTED_FLAGS & dwFlags) == 0;
+
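+ // Routing summary for the branches below (illustrative):
+ // owner | ASan-supported flags? | action
+ // RTL | yes | copy into a fresh ASan block, free old via RTL
+ // RTL | no | defer to the original reallocFunc
+ // NEITHER | no | defer to the original reallocFunc
+ // ASAN | no | copy into a fresh RTL block, asan_free the old one
+ // ASAN | yes | fall through to asan_realloc below
+ // NEITHER | yes | fall through (catches null/UAF reallocs)
+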
+ if (ownershipState == RTL ||
+ (ownershipState == NEITHER && !only_asan_supported_flags)) {
+ if (only_asan_supported_flags) {
+ // If this is a conversion to ASAN supported flags, transfer this
+ // allocation to the ASAN allocator.
+ void *replacement_alloc;
+ if (dwFlags & HEAP_ZERO_MEMORY)
+ replacement_alloc = asan_calloc(1, dwBytes, &stack);
+ else
+ replacement_alloc = asan_malloc(dwBytes, &stack);
+ if (replacement_alloc) {
+ size_t old_size = heapSizeFunc(hHeap, dwFlags, lpMem);
+ if (old_size == ((size_t)0) - 1) {
+ asan_free(replacement_alloc, &stack, FROM_MALLOC);
+ return nullptr;
+ }
+ REAL(memcpy)(replacement_alloc, lpMem, old_size);
+ freeFunc(hHeap, dwFlags, lpMem);
+ }
+ return replacement_alloc;
+ } else {
+ // Owned by RTL, or by neither with unsupported ASAN flags:
+ // just pass back to the original allocator.
+ CHECK(ownershipState == RTL || ownershipState == NEITHER);
+ CHECK(!only_asan_supported_flags);
+ return reallocFunc(hHeap, dwFlags, lpMem, dwBytes);
+ }
+ }
+
+ if (ownershipState == ASAN && !only_asan_supported_flags) {
+ // Conversion to unsupported flags allocation,
+ // transfer this allocation back to the original allocator.
+ void *replacement_alloc = allocFunc(hHeap, dwFlags, dwBytes);
+ size_t old_usable_size = 0;
+ if (replacement_alloc) {
+ old_usable_size = asan_malloc_usable_size(lpMem, pc, bp);
+ REAL(memcpy)(replacement_alloc, lpMem,
+ Min<size_t>(dwBytes, old_usable_size));
+ asan_free(lpMem, &stack, FROM_MALLOC);
+ }
+ return replacement_alloc;
+ }
+
+ CHECK((ownershipState == ASAN || ownershipState == NEITHER) &&
+ only_asan_supported_flags);
+ // At this point we are either ASAN owned with ASAN supported flags,
+ // or owned by neither with ASAN supported flags.
+ // Pass through even when it's owned by neither, since this could be a
+ // null realloc or a UAF that ASAN needs to catch.
+ } else {
+ CHECK((HEAP_REALLOC_UNSUPPORTED_FLAGS & dwFlags) == 0 &&
+ "unsupported flags");
+ }
+ // asan_realloc will never reallocate in place, so for now this flag is
+ // unsupported until we figure out a way to fake this.
+ if (dwFlags & HEAP_REALLOC_IN_PLACE_ONLY)
+ return nullptr;
+
+ // HeapReAlloc and HeapAlloc both happily accept 0 sized allocations, but
+ // passing a 0 size into asan_realloc would free the allocation. To avoid
+ // this and keep behavior consistent, fudge the size up to 1 if it is 0
+ // (asan_malloc already does this).
+ if (dwBytes == 0)
+ dwBytes = 1;
+
+ size_t old_size;
+ if (dwFlags & HEAP_ZERO_MEMORY)
+ old_size = asan_malloc_usable_size(lpMem, pc, bp);
+
+ void *ptr = asan_realloc(lpMem, dwBytes, &stack);
+ if (ptr == nullptr)
+ return nullptr;
+
+ if (dwFlags & HEAP_ZERO_MEMORY) {
+ size_t new_size = asan_malloc_usable_size(ptr, pc, bp);
+ if (old_size < new_size)
+ REAL(memset)(((u8 *)ptr) + old_size, 0, new_size - old_size);
+ }
+
+ return ptr;
+}
+} // namespace __asan
+
+INTERCEPTOR_WINAPI(LPVOID, HeapReAlloc, HANDLE hHeap, DWORD dwFlags,
+ LPVOID lpMem, size_t dwBytes) {
+ return SharedReAlloc(REAL(HeapReAlloc), (SizeFunction)REAL(HeapSize),
+ REAL(HeapFree), REAL(HeapAlloc), hHeap, dwFlags, lpMem,
+ dwBytes);
+}
+
+// The following functions are undocumented and subject to change.
+// However, hooking them is necessary to hook Windows heap
+// allocations with detours and, in practice, their definitions are unlikely
+// to change. Comments in /minkernel/ntos/rtl/heappublic.c indicate that
+// these functions are part of the heap's public interface.
+typedef unsigned long LOGICAL;
+
+// This function is documented as part of the Driver Development Kit but *not*
+// the Windows Development Kit.
+LOGICAL RtlFreeHeap(void* HeapHandle, DWORD Flags,
+ void* BaseAddress);
+
+// This function is documented as part of the Driver Development Kit but *not*
+// the Windows Development Kit.
+void* RtlAllocateHeap(void* HeapHandle, DWORD Flags, size_t Size);
+
+// This function is completely undocumented.
+void*
+RtlReAllocateHeap(void* HeapHandle, DWORD Flags, void* BaseAddress,
+ size_t Size);
+
+// This function is completely undocumented.
+size_t RtlSizeHeap(void* HeapHandle, DWORD Flags, void* BaseAddress);
+
+INTERCEPTOR_WINAPI(size_t, RtlSizeHeap, HANDLE HeapHandle, DWORD Flags,
+ void* BaseAddress) {
+ if (!flags()->windows_hook_rtl_allocators ||
+ UNLIKELY(!asan_inited || OWNED_BY_RTL(HeapHandle, BaseAddress))) {
+ return REAL(RtlSizeHeap)(HeapHandle, Flags, BaseAddress);
+ }
+ GET_CURRENT_PC_BP_SP;
+ (void)sp;
+ return asan_malloc_usable_size(BaseAddress, pc, bp);
+}
+
+INTERCEPTOR_WINAPI(BOOL, RtlFreeHeap, HANDLE HeapHandle, DWORD Flags,
+ void* BaseAddress) {
+ // Heap allocations happen before this function is hooked, so we must fall
+ // back to the original function if the pointer is not from the ASAN heap, or
+ // unsupported flags are provided.
+ if (!flags()->windows_hook_rtl_allocators ||
+ UNLIKELY((HEAP_FREE_UNSUPPORTED_FLAGS & Flags) != 0 ||
+ OWNED_BY_RTL(HeapHandle, BaseAddress))) {
+ return REAL(RtlFreeHeap)(HeapHandle, Flags, BaseAddress);
+ }
+ GET_STACK_TRACE_FREE;
+ asan_free(BaseAddress, &stack, FROM_MALLOC);
+ return true;
+}
+
+INTERCEPTOR_WINAPI(void*, RtlAllocateHeap, HANDLE HeapHandle, DWORD Flags,
+ size_t Size) {
+ // If the ASAN runtime is not initialized, or we encounter an unsupported
+ // flag, fall back to the original allocator.
+ if (!flags()->windows_hook_rtl_allocators ||
+ UNLIKELY(!asan_inited ||
+ (Flags & HEAP_ALLOCATE_UNSUPPORTED_FLAGS) != 0)) {
+ return REAL(RtlAllocateHeap)(HeapHandle, Flags, Size);
+ }
+ GET_STACK_TRACE_MALLOC;
+ void *p;
+ // Reading MSDN suggests that the *entire* usable allocation is zeroed out.
+ // Otherwise it is difficult to HeapReAlloc with HEAP_ZERO_MEMORY.
+ // https://blogs.msdn.microsoft.com/oldnewthing/20120316-00/?p=8083
+ if (Flags & HEAP_ZERO_MEMORY) {
+ p = asan_calloc(Size, 1, &stack);
+ } else {
+ p = asan_malloc(Size, &stack);
+ }
+ return p;
+}
+
+INTERCEPTOR_WINAPI(void*, RtlReAllocateHeap, HANDLE HeapHandle, DWORD Flags,
+ void* BaseAddress, size_t Size) {
+ // If the RTL allocators are not being hooked, just pass through to the
+ // original RtlReAllocateHeap.
+ if (!flags()->windows_hook_rtl_allocators)
+ return REAL(RtlReAllocateHeap)(HeapHandle, Flags, BaseAddress, Size);
+
+ return SharedReAlloc(REAL(RtlReAllocateHeap), REAL(RtlSizeHeap),
+ REAL(RtlFreeHeap), REAL(RtlAllocateHeap), HeapHandle,
+ Flags, BaseAddress, Size);
+}
+
+namespace __asan {
+
+static void TryToOverrideFunction(const char *fname, uptr new_func) {
+ // Failure here is not fatal. The CRT may not be present, and different CRT
+ // versions use different symbols.
+ if (!__interception::OverrideFunction(fname, new_func))
+ VPrintf(2, "Failed to override function %s\n", fname);
+}
+
+void ReplaceSystemMalloc() {
+#if defined(ASAN_DYNAMIC)
+ TryToOverrideFunction("free", (uptr)free);
+ TryToOverrideFunction("_free_base", (uptr)free);
+ TryToOverrideFunction("malloc", (uptr)malloc);
+ TryToOverrideFunction("_malloc_base", (uptr)malloc);
+ TryToOverrideFunction("_malloc_crt", (uptr)malloc);
+ TryToOverrideFunction("calloc", (uptr)calloc);
+ TryToOverrideFunction("_calloc_base", (uptr)calloc);
+ TryToOverrideFunction("_calloc_crt", (uptr)calloc);
+ TryToOverrideFunction("realloc", (uptr)realloc);
+ TryToOverrideFunction("_realloc_base", (uptr)realloc);
+ TryToOverrideFunction("_realloc_crt", (uptr)realloc);
+ TryToOverrideFunction("_recalloc", (uptr)_recalloc);
+ TryToOverrideFunction("_recalloc_base", (uptr)_recalloc);
+ TryToOverrideFunction("_recalloc_crt", (uptr)_recalloc);
+ TryToOverrideFunction("_msize", (uptr)_msize);
+ TryToOverrideFunction("_msize_base", (uptr)_msize);
+ TryToOverrideFunction("_expand", (uptr)_expand);
+ TryToOverrideFunction("_expand_base", (uptr)_expand);
+
+ if (flags()->windows_hook_rtl_allocators) {
+ INTERCEPT_FUNCTION(HeapSize);
+ INTERCEPT_FUNCTION(HeapFree);
+ INTERCEPT_FUNCTION(HeapReAlloc);
+ INTERCEPT_FUNCTION(HeapAlloc);
+
+ // Undocumented functions must be intercepted by name, not by symbol.
+ __interception::OverrideFunction("RtlSizeHeap", (uptr)WRAP(RtlSizeHeap),
+ (uptr *)&REAL(RtlSizeHeap));
+ __interception::OverrideFunction("RtlFreeHeap", (uptr)WRAP(RtlFreeHeap),
+ (uptr *)&REAL(RtlFreeHeap));
+ __interception::OverrideFunction("RtlReAllocateHeap",
+ (uptr)WRAP(RtlReAllocateHeap),
+ (uptr *)&REAL(RtlReAllocateHeap));
+ __interception::OverrideFunction("RtlAllocateHeap",
+ (uptr)WRAP(RtlAllocateHeap),
+ (uptr *)&REAL(RtlAllocateHeap));
+ } else {
+#define INTERCEPT_UCRT_FUNCTION(func) \
+ if (!INTERCEPT_FUNCTION_DLLIMPORT("ucrtbase.dll", \
+ "api-ms-win-core-heap-l1-1-0.dll", func)) \
+ VPrintf(2, "Failed to intercept ucrtbase.dll import %s\n", #func);
+ INTERCEPT_UCRT_FUNCTION(HeapAlloc);
+ INTERCEPT_UCRT_FUNCTION(HeapFree);
+ INTERCEPT_UCRT_FUNCTION(HeapReAlloc);
+ INTERCEPT_UCRT_FUNCTION(HeapSize);
+#undef INTERCEPT_UCRT_FUNCTION
+ }
+ // Recent versions of ucrtbase.dll appear to be built with PGO and LTCG,
+ // which enable cross-module inlining. This means our _malloc_base hook
+ // won't catch all CRT allocations. This code patches the import table of
+ // ucrtbase.dll so that all attempts to use the lower-level win32 heap
+ // allocation API will be directed to ASan's heap. We don't currently
+ // intercept all calls to HeapAlloc. If we did, we would have to check on
+ // HeapFree whether the pointer came from ASan or from the system.
+
+#endif // defined(ASAN_DYNAMIC)
+}
+} // namespace __asan
+
+#endif // SANITIZER_WINDOWS
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_mapping.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_mapping.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_mapping.h (revision 351984)
@@ -0,0 +1,400 @@
+//===-- asan_mapping.h ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Defines ASan memory mapping.
+//===----------------------------------------------------------------------===//
+#ifndef ASAN_MAPPING_H
+#define ASAN_MAPPING_H
+
+#include "asan_internal.h"
+
+// The full explanation of the memory mapping could be found here:
+// https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm
+//
+// Typical shadow mapping on Linux/x86_64 with SHADOW_OFFSET == 0x00007fff8000:
+// || `[0x10007fff8000, 0x7fffffffffff]` || HighMem ||
+// || `[0x02008fff7000, 0x10007fff7fff]` || HighShadow ||
+// || `[0x00008fff7000, 0x02008fff6fff]` || ShadowGap ||
+// || `[0x00007fff8000, 0x00008fff6fff]` || LowShadow ||
+// || `[0x000000000000, 0x00007fff7fff]` || LowMem ||
+//
+// When SHADOW_OFFSET is zero (-pie):
+// || `[0x100000000000, 0x7fffffffffff]` || HighMem ||
+// || `[0x020000000000, 0x0fffffffffff]` || HighShadow ||
+// || `[0x000000040000, 0x01ffffffffff]` || ShadowGap ||
+//
+// Special case when something is already mapped between
+// 0x003000000000 and 0x005000000000 (e.g. when prelink is installed):
+// || `[0x10007fff8000, 0x7fffffffffff]` || HighMem ||
+// || `[0x02008fff7000, 0x10007fff7fff]` || HighShadow ||
+// || `[0x005000000000, 0x02008fff6fff]` || ShadowGap3 ||
+// || `[0x003000000000, 0x004fffffffff]` || MidMem ||
+// || `[0x000a7fff8000, 0x002fffffffff]` || ShadowGap2 ||
+// || `[0x00067fff8000, 0x000a7fff7fff]` || MidShadow ||
+// || `[0x00008fff7000, 0x00067fff7fff]` || ShadowGap ||
+// || `[0x00007fff8000, 0x00008fff6fff]` || LowShadow ||
+// || `[0x000000000000, 0x00007fff7fff]` || LowMem ||
+//
+// Default Linux/i386 mapping on x86_64 machine:
+// || `[0x40000000, 0xffffffff]` || HighMem ||
+// || `[0x28000000, 0x3fffffff]` || HighShadow ||
+// || `[0x24000000, 0x27ffffff]` || ShadowGap ||
+// || `[0x20000000, 0x23ffffff]` || LowShadow ||
+// || `[0x00000000, 0x1fffffff]` || LowMem ||
+//
+// Default Linux/i386 mapping on i386 machine
+// (addresses starting with 0xc0000000 are reserved
+// for kernel and thus not sanitized):
+// || `[0x38000000, 0xbfffffff]` || HighMem ||
+// || `[0x27000000, 0x37ffffff]` || HighShadow ||
+// || `[0x24000000, 0x26ffffff]` || ShadowGap ||
+// || `[0x20000000, 0x23ffffff]` || LowShadow ||
+// || `[0x00000000, 0x1fffffff]` || LowMem ||
+//
+// Default Linux/MIPS32 mapping:
+// || `[0x2aaa0000, 0xffffffff]` || HighMem ||
+// || `[0x0fff4000, 0x2aa9ffff]` || HighShadow ||
+// || `[0x0bff4000, 0x0fff3fff]` || ShadowGap ||
+// || `[0x0aaa0000, 0x0bff3fff]` || LowShadow ||
+// || `[0x00000000, 0x0aa9ffff]` || LowMem ||
+//
+// Default Linux/MIPS64 mapping:
+// || `[0x4000000000, 0xffffffffff]` || HighMem ||
+// || `[0x2800000000, 0x3fffffffff]` || HighShadow ||
+// || `[0x2400000000, 0x27ffffffff]` || ShadowGap ||
+// || `[0x2000000000, 0x23ffffffff]` || LowShadow ||
+// || `[0x0000000000, 0x1fffffffff]` || LowMem ||
+//
+// Default Linux/AArch64 (39-bit VMA) mapping:
+// || `[0x2000000000, 0x7fffffffff]` || highmem ||
+// || `[0x1400000000, 0x1fffffffff]` || highshadow ||
+// || `[0x1200000000, 0x13ffffffff]` || shadowgap ||
+// || `[0x1000000000, 0x11ffffffff]` || lowshadow ||
+// || `[0x0000000000, 0x0fffffffff]` || lowmem ||
+//
+// Default Linux/AArch64 (42-bit VMA) mapping:
+// || `[0x10000000000, 0x3ffffffffff]` || highmem ||
+// || `[0x0a000000000, 0x0ffffffffff]` || highshadow ||
+// || `[0x09000000000, 0x09fffffffff]` || shadowgap ||
+// || `[0x08000000000, 0x08fffffffff]` || lowshadow ||
+// || `[0x00000000000, 0x07fffffffff]` || lowmem ||
+//
+// Default Linux/S390 mapping:
+// || `[0x30000000, 0x7fffffff]` || HighMem ||
+// || `[0x26000000, 0x2fffffff]` || HighShadow ||
+// || `[0x24000000, 0x25ffffff]` || ShadowGap ||
+// || `[0x20000000, 0x23ffffff]` || LowShadow ||
+// || `[0x00000000, 0x1fffffff]` || LowMem ||
+//
+// Default Linux/SystemZ mapping:
+// || `[0x14000000000000, 0x1fffffffffffff]` || HighMem ||
+// || `[0x12800000000000, 0x13ffffffffffff]` || HighShadow ||
+// || `[0x12000000000000, 0x127fffffffffff]` || ShadowGap ||
+// || `[0x10000000000000, 0x11ffffffffffff]` || LowShadow ||
+// || `[0x00000000000000, 0x0fffffffffffff]` || LowMem ||
+//
+// Default Linux/SPARC64 (52-bit VMA) mapping:
+// || `[0x8000000000000, 0xfffffffffffff]` || HighMem ||
+// || `[0x1080000000000, 0x207ffffffffff]` || HighShadow ||
+// || `[0x0090000000000, 0x107ffffffffff]` || ShadowGap ||
+// || `[0x0080000000000, 0x008ffffffffff]` || LowShadow ||
+// || `[0x0000000000000, 0x007ffffffffff]` || LowMem ||
+//
+// Shadow mapping on FreeBSD/x86-64 with SHADOW_OFFSET == 0x400000000000:
+// || `[0x500000000000, 0x7fffffffffff]` || HighMem ||
+// || `[0x4a0000000000, 0x4fffffffffff]` || HighShadow ||
+// || `[0x480000000000, 0x49ffffffffff]` || ShadowGap ||
+// || `[0x400000000000, 0x47ffffffffff]` || LowShadow ||
+// || `[0x000000000000, 0x3fffffffffff]` || LowMem ||
+//
+// Shadow mapping on FreeBSD/i386 with SHADOW_OFFSET == 0x40000000:
+// || `[0x60000000, 0xffffffff]` || HighMem ||
+// || `[0x4c000000, 0x5fffffff]` || HighShadow ||
+// || `[0x48000000, 0x4bffffff]` || ShadowGap ||
+// || `[0x40000000, 0x47ffffff]` || LowShadow ||
+// || `[0x00000000, 0x3fffffff]` || LowMem ||
+//
+// Shadow mapping on NetBSD/x86-64 with SHADOW_OFFSET == 0x400000000000:
+// || `[0x4feffffffe01, 0x7f7ffffff000]` || HighMem ||
+// || `[0x49fdffffffc0, 0x4feffffffe00]` || HighShadow ||
+// || `[0x480000000000, 0x49fdffffffbf]` || ShadowGap ||
+// || `[0x400000000000, 0x47ffffffffff]` || LowShadow ||
+// || `[0x000000000000, 0x3fffffffffff]` || LowMem ||
+//
+// Shadow mapping on NetBSD/i386 with SHADOW_OFFSET == 0x40000000:
+// || `[0x60000000, 0xfffff000]` || HighMem ||
+// || `[0x4c000000, 0x5fffffff]` || HighShadow ||
+// || `[0x48000000, 0x4bffffff]` || ShadowGap ||
+// || `[0x40000000, 0x47ffffff]` || LowShadow ||
+// || `[0x00000000, 0x3fffffff]` || LowMem ||
+//
+// Default Windows/i386 mapping:
+// (the exact location of HighShadow/HighMem may vary depending
+// on WoW64, /LARGEADDRESSAWARE, etc).
+// || `[0x50000000, 0xffffffff]` || HighMem ||
+// || `[0x3a000000, 0x4fffffff]` || HighShadow ||
+// || `[0x36000000, 0x39ffffff]` || ShadowGap ||
+// || `[0x30000000, 0x35ffffff]` || LowShadow ||
+// || `[0x00000000, 0x2fffffff]` || LowMem ||
+//
+// Shadow mapping on Myriad2 (for shadow scale 5):
+// || `[0x9ff80000, 0x9fffffff]` || ShadowGap ||
+// || `[0x9f000000, 0x9ff7ffff]` || LowShadow ||
+// || `[0x80000000, 0x9effffff]` || LowMem ||
+// || `[0x00000000, 0x7fffffff]` || Ignored ||
+
+#if defined(ASAN_SHADOW_SCALE)
+static const u64 kDefaultShadowScale = ASAN_SHADOW_SCALE;
+#else
+static const u64 kDefaultShadowScale = SANITIZER_MYRIAD2 ? 5 : 3;
+#endif
+static const u64 kDefaultShadowSentinel = ~(uptr)0;
+static const u64 kDefaultShadowOffset32 = 1ULL << 29; // 0x20000000
+static const u64 kDefaultShadowOffset64 = 1ULL << 44;
+static const u64 kDefaultShort64bitShadowOffset =
+ 0x7FFFFFFF & (~0xFFFULL << kDefaultShadowScale); // < 2G.
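+// With the default SHADOW_SCALE of 3 this evaluates to
+// 0x7FFFFFFF & (~0xFFFULL << 3) == 0x7fff8000, the SHADOW_OFFSET shown in
+// the Linux/x86_64 table above.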
+static const u64 kAArch64_ShadowOffset64 = 1ULL << 36;
+static const u64 kMIPS32_ShadowOffset32 = 0x0aaa0000;
+static const u64 kMIPS64_ShadowOffset64 = 1ULL << 37;
+static const u64 kPPC64_ShadowOffset64 = 1ULL << 44;
+static const u64 kSystemZ_ShadowOffset64 = 1ULL << 52;
+static const u64 kSPARC64_ShadowOffset64 = 1ULL << 43; // 0x80000000000
+static const u64 kFreeBSD_ShadowOffset32 = 1ULL << 30; // 0x40000000
+static const u64 kFreeBSD_ShadowOffset64 = 1ULL << 46; // 0x400000000000
+static const u64 kNetBSD_ShadowOffset32 = 1ULL << 30; // 0x40000000
+static const u64 kNetBSD_ShadowOffset64 = 1ULL << 46; // 0x400000000000
+static const u64 kWindowsShadowOffset32 = 3ULL << 28; // 0x30000000
+
+static const u64 kMyriadMemoryOffset32 = 0x80000000ULL;
+static const u64 kMyriadMemorySize32 = 0x20000000ULL;
+static const u64 kMyriadMemoryEnd32 =
+ kMyriadMemoryOffset32 + kMyriadMemorySize32 - 1;
+static const u64 kMyriadShadowOffset32 =
+ (kMyriadMemoryOffset32 + kMyriadMemorySize32 -
+ (kMyriadMemorySize32 >> kDefaultShadowScale));
+static const u64 kMyriadCacheBitMask32 = 0x40000000ULL;
+
+#define SHADOW_SCALE kDefaultShadowScale
+
+#if SANITIZER_FUCHSIA
+# define SHADOW_OFFSET (0)
+#elif SANITIZER_WORDSIZE == 32
+# if SANITIZER_ANDROID
+# define SHADOW_OFFSET __asan_shadow_memory_dynamic_address
+# elif defined(__mips__)
+# define SHADOW_OFFSET kMIPS32_ShadowOffset32
+# elif SANITIZER_FREEBSD
+# define SHADOW_OFFSET kFreeBSD_ShadowOffset32
+# elif SANITIZER_NETBSD
+# define SHADOW_OFFSET kNetBSD_ShadowOffset32
+# elif SANITIZER_WINDOWS
+# define SHADOW_OFFSET kWindowsShadowOffset32
+# elif SANITIZER_IOS
+# define SHADOW_OFFSET __asan_shadow_memory_dynamic_address
+# elif SANITIZER_MYRIAD2
+# define SHADOW_OFFSET kMyriadShadowOffset32
+# else
+# define SHADOW_OFFSET kDefaultShadowOffset32
+# endif
+#else
+# if SANITIZER_IOS
+# define SHADOW_OFFSET __asan_shadow_memory_dynamic_address
+# elif defined(__aarch64__)
+# define SHADOW_OFFSET kAArch64_ShadowOffset64
+# elif defined(__powerpc64__)
+# define SHADOW_OFFSET kPPC64_ShadowOffset64
+# elif defined(__s390x__)
+# define SHADOW_OFFSET kSystemZ_ShadowOffset64
+# elif SANITIZER_FREEBSD
+# define SHADOW_OFFSET kFreeBSD_ShadowOffset64
+# elif SANITIZER_NETBSD
+# define SHADOW_OFFSET kNetBSD_ShadowOffset64
+# elif SANITIZER_MAC
+# define SHADOW_OFFSET kDefaultShadowOffset64
+# elif defined(__mips64)
+# define SHADOW_OFFSET kMIPS64_ShadowOffset64
+# elif defined(__sparc__)
+#  define SHADOW_OFFSET kSPARC64_ShadowOffset64
+# elif SANITIZER_WINDOWS64
+# define SHADOW_OFFSET __asan_shadow_memory_dynamic_address
+# else
+# define SHADOW_OFFSET kDefaultShort64bitShadowOffset
+# endif
+#endif
+
+#if SANITIZER_ANDROID && defined(__arm__)
+# define ASAN_PREMAP_SHADOW 1
+#else
+# define ASAN_PREMAP_SHADOW 0
+#endif
+
+#define SHADOW_GRANULARITY (1ULL << SHADOW_SCALE)
+
+#define DO_ASAN_MAPPING_PROFILE 0 // Set to 1 to profile the functions below.
+
+#if DO_ASAN_MAPPING_PROFILE
+# define PROFILE_ASAN_MAPPING() AsanMappingProfile[__LINE__]++;
+#else
+# define PROFILE_ASAN_MAPPING()
+#endif
+
+// If 1, all shadow boundaries are constants.
+// Don't set this to 1 except for testing.
+#define ASAN_FIXED_MAPPING 0
+
+namespace __asan {
+
+extern uptr AsanMappingProfile[];
+
+#if ASAN_FIXED_MAPPING
+// Fixed mapping for 64-bit Linux. Mostly used for performance comparison
+// with non-fixed mapping. As of r175253 (Feb 2013) the performance
+// difference between fixed and non-fixed mapping is below the noise level.
+static uptr kHighMemEnd = 0x7fffffffffffULL;
+static uptr kMidMemBeg = 0x3000000000ULL;
+static uptr kMidMemEnd = 0x4fffffffffULL;
+#else
+extern uptr kHighMemEnd, kMidMemBeg, kMidMemEnd; // Initialized in __asan_init.
+#endif
+
+} // namespace __asan
+
+#if SANITIZER_MYRIAD2
+#include "asan_mapping_myriad.h"
+#elif defined(__sparc__) && SANITIZER_WORDSIZE == 64
+#include "asan_mapping_sparc64.h"
+#else
+#define MEM_TO_SHADOW(mem) (((mem) >> SHADOW_SCALE) + (SHADOW_OFFSET))
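+// Editor's illustration: with SHADOW_SCALE == 3 and the NetBSD/i386 offset
+// 0x40000000 from the table above,
+//   MEM_TO_SHADOW(0x60000000) == (0x60000000 >> 3) + 0x40000000 == 0x4c000000
+// i.e. the first HighMem byte maps to the first HighShadow byte.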
+
+#define kLowMemBeg 0
+#define kLowMemEnd (SHADOW_OFFSET ? SHADOW_OFFSET - 1 : 0)
+
+#define kLowShadowBeg SHADOW_OFFSET
+#define kLowShadowEnd MEM_TO_SHADOW(kLowMemEnd)
+
+#define kHighMemBeg (MEM_TO_SHADOW(kHighMemEnd) + 1)
+
+#define kHighShadowBeg MEM_TO_SHADOW(kHighMemBeg)
+#define kHighShadowEnd MEM_TO_SHADOW(kHighMemEnd)
+
+#define kMidShadowBeg MEM_TO_SHADOW(kMidMemBeg)
+#define kMidShadowEnd MEM_TO_SHADOW(kMidMemEnd)
+
+// With the zero shadow base we cannot actually map pages starting from 0.
+// The kZeroBaseMaxShadowStart constant is somewhat arbitrary.
+#define kZeroBaseShadowStart 0
+#define kZeroBaseMaxShadowStart (1 << 18)
+
+#define kShadowGapBeg (kLowShadowEnd ? kLowShadowEnd + 1 \
+ : kZeroBaseShadowStart)
+#define kShadowGapEnd ((kMidMemBeg ? kMidShadowBeg : kHighShadowBeg) - 1)
+
+#define kShadowGap2Beg (kMidMemBeg ? kMidShadowEnd + 1 : 0)
+#define kShadowGap2End (kMidMemBeg ? kMidMemBeg - 1 : 0)
+
+#define kShadowGap3Beg (kMidMemBeg ? kMidMemEnd + 1 : 0)
+#define kShadowGap3End (kMidMemBeg ? kHighShadowBeg - 1 : 0)
+
+namespace __asan {
+
+static inline bool AddrIsInLowMem(uptr a) {
+ PROFILE_ASAN_MAPPING();
+ return a <= kLowMemEnd;
+}
+
+static inline bool AddrIsInLowShadow(uptr a) {
+ PROFILE_ASAN_MAPPING();
+ return a >= kLowShadowBeg && a <= kLowShadowEnd;
+}
+
+static inline bool AddrIsInMidMem(uptr a) {
+ PROFILE_ASAN_MAPPING();
+ return kMidMemBeg && a >= kMidMemBeg && a <= kMidMemEnd;
+}
+
+static inline bool AddrIsInMidShadow(uptr a) {
+ PROFILE_ASAN_MAPPING();
+ return kMidMemBeg && a >= kMidShadowBeg && a <= kMidShadowEnd;
+}
+
+static inline bool AddrIsInHighMem(uptr a) {
+ PROFILE_ASAN_MAPPING();
+ return kHighMemBeg && a >= kHighMemBeg && a <= kHighMemEnd;
+}
+
+static inline bool AddrIsInHighShadow(uptr a) {
+ PROFILE_ASAN_MAPPING();
+ return kHighMemBeg && a >= kHighShadowBeg && a <= kHighShadowEnd;
+}
+
+static inline bool AddrIsInShadowGap(uptr a) {
+ PROFILE_ASAN_MAPPING();
+ if (kMidMemBeg) {
+ if (a <= kShadowGapEnd)
+ return SHADOW_OFFSET == 0 || a >= kShadowGapBeg;
+ return (a >= kShadowGap2Beg && a <= kShadowGap2End) ||
+ (a >= kShadowGap3Beg && a <= kShadowGap3End);
+ }
+  // In zero-based shadow mode we treat addresses near zero as addresses
+  // in the shadow gap as well.
+ if (SHADOW_OFFSET == 0)
+ return a <= kShadowGapEnd;
+ return a >= kShadowGapBeg && a <= kShadowGapEnd;
+}
+
+} // namespace __asan
+
+#endif // SANITIZER_MYRIAD2
+
+namespace __asan {
+
+static inline bool AddrIsInMem(uptr a) {
+ PROFILE_ASAN_MAPPING();
+ return AddrIsInLowMem(a) || AddrIsInMidMem(a) || AddrIsInHighMem(a) ||
+ (flags()->protect_shadow_gap == 0 && AddrIsInShadowGap(a));
+}
+
+static inline uptr MemToShadow(uptr p) {
+ PROFILE_ASAN_MAPPING();
+ CHECK(AddrIsInMem(p));
+ return MEM_TO_SHADOW(p);
+}
+
+static inline bool AddrIsInShadow(uptr a) {
+ PROFILE_ASAN_MAPPING();
+ return AddrIsInLowShadow(a) || AddrIsInMidShadow(a) || AddrIsInHighShadow(a);
+}
+
+static inline bool AddrIsAlignedByGranularity(uptr a) {
+ PROFILE_ASAN_MAPPING();
+ return (a & (SHADOW_GRANULARITY - 1)) == 0;
+}
+
+static inline bool AddressIsPoisoned(uptr a) {
+ PROFILE_ASAN_MAPPING();
+ if (SANITIZER_MYRIAD2 && !AddrIsInMem(a) && !AddrIsInShadow(a))
+ return false;
+ const uptr kAccessSize = 1;
+ u8 *shadow_address = (u8*)MEM_TO_SHADOW(a);
+ s8 shadow_value = *shadow_address;
+ if (shadow_value) {
+ u8 last_accessed_byte = (a & (SHADOW_GRANULARITY - 1))
+ + kAccessSize - 1;
+ return (last_accessed_byte >= shadow_value);
+ }
+ return false;
+}
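+// Editor's illustration: with SHADOW_GRANULARITY == 8, a shadow value of 5
+// means only the first 5 bytes of the 8-byte granule are addressable. For an
+// address with a % 8 == 5, last_accessed_byte == 5 >= 5, so a 1-byte access
+// is reported as poisoned; a shadow value of 0 (whole granule addressable)
+// returns false immediately, and negative magic values always compare as
+// poisoned.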
+
+// Must be after all calls to PROFILE_ASAN_MAPPING().
+static const uptr kAsanMappingProfileSize = __LINE__;
+
+} // namespace __asan
+
+#endif // ASAN_MAPPING_H
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_mapping_myriad.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_mapping_myriad.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_mapping_myriad.h (revision 351984)
@@ -0,0 +1,85 @@
+//===-- asan_mapping_myriad.h -----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Myriad-specific definitions for ASan memory mapping.
+//===----------------------------------------------------------------------===//
+#ifndef ASAN_MAPPING_MYRIAD_H
+#define ASAN_MAPPING_MYRIAD_H
+
+#define RAW_ADDR(mem) ((mem) & ~kMyriadCacheBitMask32)
+#define MEM_TO_SHADOW(mem) \
+ (((RAW_ADDR(mem) - kLowMemBeg) >> SHADOW_SCALE) + (SHADOW_OFFSET))
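+// Editor's illustration: RAW_ADDR folds away the bit named by
+// kMyriadCacheBitMask32 (presumably a cache-alias bit), so both views of the
+// same DRAM byte share one shadow byte:
+//   RAW_ADDR(0xc0001000)      == 0x80001000
+//   MEM_TO_SHADOW(0xc0001000) == ((0x80001000 - 0x80000000) >> 5) + 0x9f000000
+//                             == 0x9f000080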
+
+#define kLowMemBeg kMyriadMemoryOffset32
+#define kLowMemEnd (SHADOW_OFFSET - 1)
+
+#define kLowShadowBeg SHADOW_OFFSET
+#define kLowShadowEnd MEM_TO_SHADOW(kLowMemEnd)
+
+#define kHighMemBeg 0
+
+#define kHighShadowBeg 0
+#define kHighShadowEnd 0
+
+#define kMidShadowBeg 0
+#define kMidShadowEnd 0
+
+#define kShadowGapBeg (kLowShadowEnd + 1)
+#define kShadowGapEnd kMyriadMemoryEnd32
+
+#define kShadowGap2Beg 0
+#define kShadowGap2End 0
+
+#define kShadowGap3Beg 0
+#define kShadowGap3End 0
+
+namespace __asan {
+
+static inline bool AddrIsInLowMem(uptr a) {
+ PROFILE_ASAN_MAPPING();
+ a = RAW_ADDR(a);
+ return a >= kLowMemBeg && a <= kLowMemEnd;
+}
+
+static inline bool AddrIsInLowShadow(uptr a) {
+ PROFILE_ASAN_MAPPING();
+ a = RAW_ADDR(a);
+ return a >= kLowShadowBeg && a <= kLowShadowEnd;
+}
+
+static inline bool AddrIsInMidMem(uptr a) {
+ PROFILE_ASAN_MAPPING();
+ return false;
+}
+
+static inline bool AddrIsInMidShadow(uptr a) {
+ PROFILE_ASAN_MAPPING();
+ return false;
+}
+
+static inline bool AddrIsInHighMem(uptr a) {
+ PROFILE_ASAN_MAPPING();
+ return false;
+}
+
+static inline bool AddrIsInHighShadow(uptr a) {
+ PROFILE_ASAN_MAPPING();
+ return false;
+}
+
+static inline bool AddrIsInShadowGap(uptr a) {
+ PROFILE_ASAN_MAPPING();
+ a = RAW_ADDR(a);
+ return a >= kShadowGapBeg && a <= kShadowGapEnd;
+}
+
+} // namespace __asan
+
+#endif // ASAN_MAPPING_MYRIAD_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_mapping_myriad.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_mapping_sparc64.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_mapping_sparc64.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_mapping_sparc64.h (revision 351984)
@@ -0,0 +1,101 @@
+//===-- asan_mapping_sparc64.h ----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// SPARC64-specific definitions for ASan memory mapping.
+//===----------------------------------------------------------------------===//
+#ifndef ASAN_MAPPING_SPARC64_H
+#define ASAN_MAPPING_SPARC64_H
+
+// This is tailored to the 52-bit VM layout on SPARC-T4 and later.
+// The VM space is split into two 51-bit halves, one at each end of the
+// address space: the low half has the top 13 bits cleared, while the high
+// half has them set.
+//   0x0000000000000000 - 0x0007ffffffffffff (low half)
+//   0xfff8000000000000 - 0xffffffffffffffff (high half)
+
+#define VMA_BITS 52
+#define HIGH_BITS (64 - VMA_BITS)
+
+// The idea is to chop the high bits before doing the scaling, so the two
+// parts become contiguous again and the usual scheme can be applied.
+
+#define MEM_TO_SHADOW(mem) \
+ ((((mem) << HIGH_BITS) >> (HIGH_BITS + (SHADOW_SCALE))) + (SHADOW_OFFSET))
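+// Editor's illustration (SHADOW_SCALE == 3, HIGH_BITS == 12): for the last
+// low-half address and the first high-half address,
+//   MEM_TO_SHADOW(0x0007ffffffffffff) == (1ULL << 48) - 1 + SHADOW_OFFSET
+//   MEM_TO_SHADOW(0xfff8000000000000) == (1ULL << 48)     + SHADOW_OFFSET
+// so after chopping the top 12 bits the two halves shadow contiguously.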
+
+#define kLowMemBeg 0
+#define kLowMemEnd (SHADOW_OFFSET - 1)
+
+#define kLowShadowBeg SHADOW_OFFSET
+#define kLowShadowEnd MEM_TO_SHADOW(kLowMemEnd)
+
+// Note the huge hole between the high shadow region, which lies in the low
+// half, and the beginning of the high half.
+
+#define kHighMemBeg (-(1ULL << (VMA_BITS - 1)))
+
+#define kHighShadowBeg MEM_TO_SHADOW(kHighMemBeg)
+#define kHighShadowEnd MEM_TO_SHADOW(kHighMemEnd)
+
+#define kMidShadowBeg 0
+#define kMidShadowEnd 0
+
+// With the zero shadow base we cannot actually map pages starting from 0.
+// The kZeroBaseMaxShadowStart constant is somewhat arbitrary.
+#define kZeroBaseShadowStart 0
+#define kZeroBaseMaxShadowStart (1 << 18)
+
+#define kShadowGapBeg (kLowShadowEnd + 1)
+#define kShadowGapEnd (kHighShadowBeg - 1)
+
+#define kShadowGap2Beg 0
+#define kShadowGap2End 0
+
+#define kShadowGap3Beg 0
+#define kShadowGap3End 0
+
+namespace __asan {
+
+static inline bool AddrIsInLowMem(uptr a) {
+ PROFILE_ASAN_MAPPING();
+ return a <= kLowMemEnd;
+}
+
+static inline bool AddrIsInLowShadow(uptr a) {
+ PROFILE_ASAN_MAPPING();
+ return a >= kLowShadowBeg && a <= kLowShadowEnd;
+}
+
+static inline bool AddrIsInMidMem(uptr a) {
+ PROFILE_ASAN_MAPPING();
+ return false;
+}
+
+static inline bool AddrIsInMidShadow(uptr a) {
+ PROFILE_ASAN_MAPPING();
+ return false;
+}
+
+static inline bool AddrIsInHighMem(uptr a) {
+ PROFILE_ASAN_MAPPING();
+ return kHighMemBeg && a >= kHighMemBeg && a <= kHighMemEnd;
+}
+
+static inline bool AddrIsInHighShadow(uptr a) {
+ PROFILE_ASAN_MAPPING();
+ return kHighMemBeg && a >= kHighShadowBeg && a <= kHighShadowEnd;
+}
+
+static inline bool AddrIsInShadowGap(uptr a) {
+ PROFILE_ASAN_MAPPING();
+ return a >= kShadowGapBeg && a <= kShadowGapEnd;
+}
+
+} // namespace __asan
+
+#endif // ASAN_MAPPING_SPARC64_H
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_memory_profile.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_memory_profile.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_memory_profile.cc (revision 351984)
@@ -0,0 +1,129 @@
+//===-- asan_memory_profile.cc --------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// This file implements __sanitizer_print_memory_profile.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "sanitizer_common/sanitizer_stoptheworld.h"
+#include "lsan/lsan_common.h"
+#include "asan/asan_allocator.h"
+
+#if CAN_SANITIZE_LEAKS
+
+namespace __asan {
+
+struct AllocationSite {
+ u32 id;
+ uptr total_size;
+ uptr count;
+};
+
+class HeapProfile {
+ public:
+ HeapProfile() { allocations_.reserve(1024); }
+
+ void ProcessChunk(const AsanChunkView &cv) {
+ if (cv.IsAllocated()) {
+ total_allocated_user_size_ += cv.UsedSize();
+ total_allocated_count_++;
+ u32 id = cv.GetAllocStackId();
+ if (id)
+ Insert(id, cv.UsedSize());
+ } else if (cv.IsQuarantined()) {
+ total_quarantined_user_size_ += cv.UsedSize();
+ total_quarantined_count_++;
+ } else {
+ total_other_count_++;
+ }
+ }
+
+ void Print(uptr top_percent, uptr max_number_of_contexts) {
+ Sort(allocations_.data(), allocations_.size(),
+ [](const AllocationSite &a, const AllocationSite &b) {
+ return a.total_size > b.total_size;
+ });
+ CHECK(total_allocated_user_size_);
+ uptr total_shown = 0;
+ Printf("Live Heap Allocations: %zd bytes in %zd chunks; quarantined: "
+ "%zd bytes in %zd chunks; %zd other chunks; total chunks: %zd; "
+ "showing top %zd%% (at most %zd unique contexts)\n",
+ total_allocated_user_size_, total_allocated_count_,
+ total_quarantined_user_size_, total_quarantined_count_,
+ total_other_count_, total_allocated_count_ +
+ total_quarantined_count_ + total_other_count_, top_percent,
+ max_number_of_contexts);
+ for (uptr i = 0; i < Min(allocations_.size(), max_number_of_contexts);
+ i++) {
+ auto &a = allocations_[i];
+ Printf("%zd byte(s) (%zd%%) in %zd allocation(s)\n", a.total_size,
+ a.total_size * 100 / total_allocated_user_size_, a.count);
+ StackDepotGet(a.id).Print();
+ total_shown += a.total_size;
+ if (total_shown * 100 / total_allocated_user_size_ > top_percent)
+ break;
+ }
+ }
+
+ private:
+ uptr total_allocated_user_size_ = 0;
+ uptr total_allocated_count_ = 0;
+ uptr total_quarantined_user_size_ = 0;
+ uptr total_quarantined_count_ = 0;
+ uptr total_other_count_ = 0;
+ InternalMmapVector<AllocationSite> allocations_;
+
+ void Insert(u32 id, uptr size) {
+ // Linear lookup will be good enough for most cases (although not all).
+ for (uptr i = 0; i < allocations_.size(); i++) {
+ if (allocations_[i].id == id) {
+ allocations_[i].total_size += size;
+ allocations_[i].count++;
+ return;
+ }
+ }
+ allocations_.push_back({id, size, 1});
+ }
+};
+
+static void ChunkCallback(uptr chunk, void *arg) {
+ reinterpret_cast<HeapProfile*>(arg)->ProcessChunk(
+ FindHeapChunkByAllocBeg(chunk));
+}
+
+static void MemoryProfileCB(const SuspendedThreadsList &suspended_threads_list,
+ void *argument) {
+ HeapProfile hp;
+ __lsan::ForEachChunk(ChunkCallback, &hp);
+ uptr *Arg = reinterpret_cast<uptr*>(argument);
+ hp.Print(Arg[0], Arg[1]);
+
+ if (Verbosity())
+ __asan_print_accumulated_stats();
+}
+
+} // namespace __asan
+
+#endif // CAN_SANITIZE_LEAKS
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_print_memory_profile(uptr top_percent,
+ uptr max_number_of_contexts) {
+#if CAN_SANITIZE_LEAKS
+ uptr Arg[2];
+ Arg[0] = top_percent;
+ Arg[1] = max_number_of_contexts;
+ __sanitizer::StopTheWorld(__asan::MemoryProfileCB, Arg);
+#endif // CAN_SANITIZE_LEAKS
+}
+} // extern "C"
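+// Editor's usage sketch (illustrative only; assumes the public declaration in
+// <sanitizer/common_interface_defs.h>):
+//
+//   #include <sanitizer/common_interface_defs.h>
+//   int main() {
+//     char *p = new char[1 << 20];               // live heap to profile
+//     __sanitizer_print_memory_profile(90, 20);  // top 90%, <= 20 contexts
+//     delete[] p;
+//   }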
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_memory_profile.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_new_delete.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_new_delete.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_new_delete.cc (revision 351984)
@@ -0,0 +1,204 @@
+//===-- asan_new_delete.cc ------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Interceptors for operators new and delete.
+//===----------------------------------------------------------------------===//
+
+#include "asan_allocator.h"
+#include "asan_internal.h"
+#include "asan_malloc_local.h"
+#include "asan_report.h"
+#include "asan_stack.h"
+
+#include "interception/interception.h"
+
+#include <stddef.h>
+
+// C++ operators can't have dllexport attributes on Windows. We export them
+// anyway by passing extra -export flags to the linker, which is exactly what
+// dllexport would normally do. We need to export them in order to make the
+// VS2015 dynamic CRT (MD) work.
+#if SANITIZER_WINDOWS && defined(_MSC_VER)
+#define CXX_OPERATOR_ATTRIBUTE
+#define COMMENT_EXPORT(sym) __pragma(comment(linker, "/export:" sym))
+#ifdef _WIN64
+COMMENT_EXPORT("??2@YAPEAX_K@Z") // operator new
+COMMENT_EXPORT("??2@YAPEAX_KAEBUnothrow_t@std@@@Z") // operator new nothrow
+COMMENT_EXPORT("??3@YAXPEAX@Z") // operator delete
+COMMENT_EXPORT("??3@YAXPEAX_K@Z") // sized operator delete
+COMMENT_EXPORT("??_U@YAPEAX_K@Z") // operator new[]
+COMMENT_EXPORT("??_V@YAXPEAX@Z") // operator delete[]
+#else
+COMMENT_EXPORT("??2@YAPAXI@Z") // operator new
+COMMENT_EXPORT("??2@YAPAXIABUnothrow_t@std@@@Z") // operator new nothrow
+COMMENT_EXPORT("??3@YAXPAX@Z") // operator delete
+COMMENT_EXPORT("??3@YAXPAXI@Z") // sized operator delete
+COMMENT_EXPORT("??_U@YAPAXI@Z") // operator new[]
+COMMENT_EXPORT("??_V@YAXPAX@Z") // operator delete[]
+#endif
+#undef COMMENT_EXPORT
+#else
+#define CXX_OPERATOR_ATTRIBUTE INTERCEPTOR_ATTRIBUTE
+#endif
+
+using namespace __asan; // NOLINT
+
+// FreeBSD releases prior to 9.2 have a wrong definition of 'size_t'.
+// http://svnweb.freebsd.org/base?view=revision&revision=232261
+#if SANITIZER_FREEBSD && SANITIZER_WORDSIZE == 32
+#include <sys/param.h>
+#if __FreeBSD_version <= 902001 // v9.2
+#define size_t unsigned
+#endif // __FreeBSD_version
+#endif // SANITIZER_FREEBSD && SANITIZER_WORDSIZE == 32
+
+// This code has issues on OSX.
+// See https://github.com/google/sanitizers/issues/131.
+
+// Fake std::nothrow_t and std::align_val_t to avoid including <new>.
+namespace std {
+struct nothrow_t {};
+enum class align_val_t: size_t {};
+} // namespace std
+
+// TODO(alekseyshl): throw std::bad_alloc instead of dying on OOM.
+// For local pool allocation, align to SHADOW_GRANULARITY to match asan
+// allocator behavior.
+#define OPERATOR_NEW_BODY(type, nothrow) \
+ MAYBE_ALLOCATE_FROM_LOCAL_POOL(nothrow); \
+ GET_STACK_TRACE_MALLOC; \
+ void *res = asan_memalign(0, size, &stack, type); \
+ if (!nothrow && UNLIKELY(!res)) \
+ ReportOutOfMemory(size, &stack); \
+ return res;
+#define OPERATOR_NEW_BODY_ALIGN(type, nothrow) \
+ MAYBE_ALLOCATE_FROM_LOCAL_POOL(nothrow); \
+ GET_STACK_TRACE_MALLOC; \
+ void *res = asan_memalign((uptr)align, size, &stack, type); \
+ if (!nothrow && UNLIKELY(!res)) \
+ ReportOutOfMemory(size, &stack); \
+ return res;
+
+// On OS X it's not enough to just provide our own 'operator new' and
+// 'operator delete' implementations, because they're going to be in the
+// runtime dylib, and the main executable will depend on both the runtime
+// dylib and libstdc++, each of which has its own implementation of operator
+// new and operator delete.
+// To make sure that C++ allocation/deallocation operators are overridden on
+// OS X we need to intercept them using their mangled names.
+#if !SANITIZER_MAC
+CXX_OPERATOR_ATTRIBUTE
+void *operator new(size_t size)
+{ OPERATOR_NEW_BODY(FROM_NEW, false /*nothrow*/); }
+CXX_OPERATOR_ATTRIBUTE
+void *operator new[](size_t size)
+{ OPERATOR_NEW_BODY(FROM_NEW_BR, false /*nothrow*/); }
+CXX_OPERATOR_ATTRIBUTE
+void *operator new(size_t size, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY(FROM_NEW, true /*nothrow*/); }
+CXX_OPERATOR_ATTRIBUTE
+void *operator new[](size_t size, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY(FROM_NEW_BR, true /*nothrow*/); }
+CXX_OPERATOR_ATTRIBUTE
+void *operator new(size_t size, std::align_val_t align)
+{ OPERATOR_NEW_BODY_ALIGN(FROM_NEW, false /*nothrow*/); }
+CXX_OPERATOR_ATTRIBUTE
+void *operator new[](size_t size, std::align_val_t align)
+{ OPERATOR_NEW_BODY_ALIGN(FROM_NEW_BR, false /*nothrow*/); }
+CXX_OPERATOR_ATTRIBUTE
+void *operator new(size_t size, std::align_val_t align, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY_ALIGN(FROM_NEW, true /*nothrow*/); }
+CXX_OPERATOR_ATTRIBUTE
+void *operator new[](size_t size, std::align_val_t align, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY_ALIGN(FROM_NEW_BR, true /*nothrow*/); }
+
+#else // SANITIZER_MAC
+INTERCEPTOR(void *, _Znwm, size_t size) {
+ OPERATOR_NEW_BODY(FROM_NEW, false /*nothrow*/);
+}
+INTERCEPTOR(void *, _Znam, size_t size) {
+ OPERATOR_NEW_BODY(FROM_NEW_BR, false /*nothrow*/);
+}
+INTERCEPTOR(void *, _ZnwmRKSt9nothrow_t, size_t size, std::nothrow_t const&) {
+ OPERATOR_NEW_BODY(FROM_NEW, true /*nothrow*/);
+}
+INTERCEPTOR(void *, _ZnamRKSt9nothrow_t, size_t size, std::nothrow_t const&) {
+ OPERATOR_NEW_BODY(FROM_NEW_BR, true /*nothrow*/);
+}
+#endif // !SANITIZER_MAC
+
+#define OPERATOR_DELETE_BODY(type) \
+ if (IS_FROM_LOCAL_POOL(ptr)) return;\
+ GET_STACK_TRACE_FREE;\
+ asan_delete(ptr, 0, 0, &stack, type);
+
+#define OPERATOR_DELETE_BODY_SIZE(type) \
+ if (IS_FROM_LOCAL_POOL(ptr)) return;\
+ GET_STACK_TRACE_FREE;\
+ asan_delete(ptr, size, 0, &stack, type);
+
+#define OPERATOR_DELETE_BODY_ALIGN(type) \
+ if (IS_FROM_LOCAL_POOL(ptr)) return;\
+ GET_STACK_TRACE_FREE;\
+ asan_delete(ptr, 0, static_cast<uptr>(align), &stack, type);
+
+#define OPERATOR_DELETE_BODY_SIZE_ALIGN(type) \
+ if (IS_FROM_LOCAL_POOL(ptr)) return;\
+ GET_STACK_TRACE_FREE;\
+ asan_delete(ptr, size, static_cast<uptr>(align), &stack, type);
+
+#if !SANITIZER_MAC
+CXX_OPERATOR_ATTRIBUTE
+void operator delete(void *ptr) NOEXCEPT
+{ OPERATOR_DELETE_BODY(FROM_NEW); }
+CXX_OPERATOR_ATTRIBUTE
+void operator delete[](void *ptr) NOEXCEPT
+{ OPERATOR_DELETE_BODY(FROM_NEW_BR); }
+CXX_OPERATOR_ATTRIBUTE
+void operator delete(void *ptr, std::nothrow_t const&)
+{ OPERATOR_DELETE_BODY(FROM_NEW); }
+CXX_OPERATOR_ATTRIBUTE
+void operator delete[](void *ptr, std::nothrow_t const&)
+{ OPERATOR_DELETE_BODY(FROM_NEW_BR); }
+CXX_OPERATOR_ATTRIBUTE
+void operator delete(void *ptr, size_t size) NOEXCEPT
+{ OPERATOR_DELETE_BODY_SIZE(FROM_NEW); }
+CXX_OPERATOR_ATTRIBUTE
+void operator delete[](void *ptr, size_t size) NOEXCEPT
+{ OPERATOR_DELETE_BODY_SIZE(FROM_NEW_BR); }
+CXX_OPERATOR_ATTRIBUTE
+void operator delete(void *ptr, std::align_val_t align) NOEXCEPT
+{ OPERATOR_DELETE_BODY_ALIGN(FROM_NEW); }
+CXX_OPERATOR_ATTRIBUTE
+void operator delete[](void *ptr, std::align_val_t align) NOEXCEPT
+{ OPERATOR_DELETE_BODY_ALIGN(FROM_NEW_BR); }
+CXX_OPERATOR_ATTRIBUTE
+void operator delete(void *ptr, std::align_val_t align, std::nothrow_t const&)
+{ OPERATOR_DELETE_BODY_ALIGN(FROM_NEW); }
+CXX_OPERATOR_ATTRIBUTE
+void operator delete[](void *ptr, std::align_val_t align, std::nothrow_t const&)
+{ OPERATOR_DELETE_BODY_ALIGN(FROM_NEW_BR); }
+CXX_OPERATOR_ATTRIBUTE
+void operator delete(void *ptr, size_t size, std::align_val_t align) NOEXCEPT
+{ OPERATOR_DELETE_BODY_SIZE_ALIGN(FROM_NEW); }
+CXX_OPERATOR_ATTRIBUTE
+void operator delete[](void *ptr, size_t size, std::align_val_t align) NOEXCEPT
+{ OPERATOR_DELETE_BODY_SIZE_ALIGN(FROM_NEW_BR); }
+
+#else // SANITIZER_MAC
+INTERCEPTOR(void, _ZdlPv, void *ptr)
+{ OPERATOR_DELETE_BODY(FROM_NEW); }
+INTERCEPTOR(void, _ZdaPv, void *ptr)
+{ OPERATOR_DELETE_BODY(FROM_NEW_BR); }
+INTERCEPTOR(void, _ZdlPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&)
+{ OPERATOR_DELETE_BODY(FROM_NEW); }
+INTERCEPTOR(void, _ZdaPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&)
+{ OPERATOR_DELETE_BODY(FROM_NEW_BR); }
+#endif // !SANITIZER_MAC
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_poisoning.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_poisoning.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_poisoning.cc (revision 351984)
@@ -0,0 +1,460 @@
+//===-- asan_poisoning.cc -------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Shadow memory poisoning by ASan RTL and by user application.
+//===----------------------------------------------------------------------===//
+
+#include "asan_poisoning.h"
+#include "asan_report.h"
+#include "asan_stack.h"
+#include "sanitizer_common/sanitizer_atomic.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_flags.h"
+
+namespace __asan {
+
+static atomic_uint8_t can_poison_memory;
+
+void SetCanPoisonMemory(bool value) {
+ atomic_store(&can_poison_memory, value, memory_order_release);
+}
+
+bool CanPoisonMemory() {
+ return atomic_load(&can_poison_memory, memory_order_acquire);
+}
+
+void PoisonShadow(uptr addr, uptr size, u8 value) {
+ if (value && !CanPoisonMemory()) return;
+ CHECK(AddrIsAlignedByGranularity(addr));
+ CHECK(AddrIsInMem(addr));
+ CHECK(AddrIsAlignedByGranularity(addr + size));
+ CHECK(AddrIsInMem(addr + size - SHADOW_GRANULARITY));
+ CHECK(REAL(memset));
+ FastPoisonShadow(addr, size, value);
+}
+
+void PoisonShadowPartialRightRedzone(uptr addr,
+ uptr size,
+ uptr redzone_size,
+ u8 value) {
+ if (!CanPoisonMemory()) return;
+ CHECK(AddrIsAlignedByGranularity(addr));
+ CHECK(AddrIsInMem(addr));
+ FastPoisonShadowPartialRightRedzone(addr, size, redzone_size, value);
+}
+
+struct ShadowSegmentEndpoint {
+ u8 *chunk;
+ s8 offset; // in [0, SHADOW_GRANULARITY)
+ s8 value; // = *chunk;
+
+ explicit ShadowSegmentEndpoint(uptr address) {
+ chunk = (u8*)MemToShadow(address);
+ offset = address & (SHADOW_GRANULARITY - 1);
+ value = *chunk;
+ }
+};
+
+void FlushUnneededASanShadowMemory(uptr p, uptr size) {
+  // Since asan's mapping is compacting, the shadow chunk may not be
+  // page-aligned, so we only flush the page-aligned portion.
+ ReleaseMemoryPagesToOS(MemToShadow(p), MemToShadow(p + size));
+}
+
+void AsanPoisonOrUnpoisonIntraObjectRedzone(uptr ptr, uptr size, bool poison) {
+ uptr end = ptr + size;
+ if (Verbosity()) {
+ Printf("__asan_%spoison_intra_object_redzone [%p,%p) %zd\n",
+ poison ? "" : "un", ptr, end, size);
+ if (Verbosity() >= 2)
+ PRINT_CURRENT_STACK();
+ }
+ CHECK(size);
+ CHECK_LE(size, 4096);
+ CHECK(IsAligned(end, SHADOW_GRANULARITY));
+ if (!IsAligned(ptr, SHADOW_GRANULARITY)) {
+ *(u8 *)MemToShadow(ptr) =
+ poison ? static_cast<u8>(ptr % SHADOW_GRANULARITY) : 0;
+ ptr |= SHADOW_GRANULARITY - 1;
+ ptr++;
+ }
+ for (; ptr < end; ptr += SHADOW_GRANULARITY)
+ *(u8*)MemToShadow(ptr) = poison ? kAsanIntraObjectRedzone : 0;
+}
+
+} // namespace __asan
+
+// ---------------------- Interface ---------------- {{{1
+using namespace __asan; // NOLINT
+
+// The current implementation of __asan_(un)poison_memory_region doesn't
+// check that the user program (un)poisons the memory it owns. It poisons
+// memory conservatively, and unpoisons progressively to make sure the asan
+// shadow mapping invariant is preserved (see the detailed mapping description
+// here: https://github.com/google/sanitizers/wiki/AddressSanitizerAlgorithm).
+//
+// * if the user asks to poison the region [left, right), the program poisons
+//   at least [left, AlignDown(right)).
+// * if the user asks to unpoison the region [left, right), the program
+//   unpoisons at most [AlignDown(left), right).
+void __asan_poison_memory_region(void const volatile *addr, uptr size) {
+ if (!flags()->allow_user_poisoning || size == 0) return;
+ uptr beg_addr = (uptr)addr;
+ uptr end_addr = beg_addr + size;
+ VPrintf(3, "Trying to poison memory region [%p, %p)\n", (void *)beg_addr,
+ (void *)end_addr);
+ ShadowSegmentEndpoint beg(beg_addr);
+ ShadowSegmentEndpoint end(end_addr);
+ if (beg.chunk == end.chunk) {
+ CHECK_LT(beg.offset, end.offset);
+ s8 value = beg.value;
+ CHECK_EQ(value, end.value);
+    // We can only poison memory if the byte at end.offset is unaddressable.
+    // No need to re-poison memory if it is already poisoned.
+ if (value > 0 && value <= end.offset) {
+ if (beg.offset > 0) {
+ *beg.chunk = Min(value, beg.offset);
+ } else {
+ *beg.chunk = kAsanUserPoisonedMemoryMagic;
+ }
+ }
+ return;
+ }
+ CHECK_LT(beg.chunk, end.chunk);
+ if (beg.offset > 0) {
+ // Mark bytes from beg.offset as unaddressable.
+ if (beg.value == 0) {
+ *beg.chunk = beg.offset;
+ } else {
+ *beg.chunk = Min(beg.value, beg.offset);
+ }
+ beg.chunk++;
+ }
+ REAL(memset)(beg.chunk, kAsanUserPoisonedMemoryMagic, end.chunk - beg.chunk);
+  // Poison if the byte at end.offset is unaddressable.
+ if (end.value > 0 && end.value <= end.offset) {
+ *end.chunk = kAsanUserPoisonedMemoryMagic;
+ }
+}
+
+void __asan_unpoison_memory_region(void const volatile *addr, uptr size) {
+ if (!flags()->allow_user_poisoning || size == 0) return;
+ uptr beg_addr = (uptr)addr;
+ uptr end_addr = beg_addr + size;
+ VPrintf(3, "Trying to unpoison memory region [%p, %p)\n", (void *)beg_addr,
+ (void *)end_addr);
+ ShadowSegmentEndpoint beg(beg_addr);
+ ShadowSegmentEndpoint end(end_addr);
+ if (beg.chunk == end.chunk) {
+ CHECK_LT(beg.offset, end.offset);
+ s8 value = beg.value;
+ CHECK_EQ(value, end.value);
+    // We unpoison the bytes up to end.offset if they are not already
+    // unpoisoned.
+ if (value != 0) {
+ *beg.chunk = Max(value, end.offset);
+ }
+ return;
+ }
+ CHECK_LT(beg.chunk, end.chunk);
+ if (beg.offset > 0) {
+ *beg.chunk = 0;
+ beg.chunk++;
+ }
+ REAL(memset)(beg.chunk, 0, end.chunk - beg.chunk);
+ if (end.offset > 0 && end.value != 0) {
+ *end.chunk = Max(end.value, end.offset);
+ }
+}
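+// Editor's usage sketch (illustrative only): user code normally reaches these
+// through the wrapper macros in <sanitizer/asan_interface.h>, e.g. a pool
+// allocator that quarantines free slots:
+//
+//   #include <sanitizer/asan_interface.h>
+//   static char pool[1024];
+//   void InitPool() { ASAN_POISON_MEMORY_REGION(pool, sizeof(pool)); }
+//   void *TakeAll() { ASAN_UNPOISON_MEMORY_REGION(pool, sizeof(pool));
+//                     return pool; }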
+
+int __asan_address_is_poisoned(void const volatile *addr) {
+ return __asan::AddressIsPoisoned((uptr)addr);
+}
+
+uptr __asan_region_is_poisoned(uptr beg, uptr size) {
+ if (!size) return 0;
+ uptr end = beg + size;
+ if (SANITIZER_MYRIAD2) {
+    // On Myriad, addresses not in the DRAM range need to be treated as
+    // unpoisoned.
+ if (!AddrIsInMem(beg) && !AddrIsInShadow(beg)) return 0;
+ if (!AddrIsInMem(end) && !AddrIsInShadow(end)) return 0;
+ } else {
+ if (!AddrIsInMem(beg)) return beg;
+ if (!AddrIsInMem(end)) return end;
+ }
+ CHECK_LT(beg, end);
+ uptr aligned_b = RoundUpTo(beg, SHADOW_GRANULARITY);
+ uptr aligned_e = RoundDownTo(end, SHADOW_GRANULARITY);
+ uptr shadow_beg = MemToShadow(aligned_b);
+ uptr shadow_end = MemToShadow(aligned_e);
+ // First check the first and the last application bytes,
+ // then check the SHADOW_GRANULARITY-aligned region by calling
+ // mem_is_zero on the corresponding shadow.
+ if (!__asan::AddressIsPoisoned(beg) &&
+ !__asan::AddressIsPoisoned(end - 1) &&
+ (shadow_end <= shadow_beg ||
+ __sanitizer::mem_is_zero((const char *)shadow_beg,
+ shadow_end - shadow_beg)))
+ return 0;
+ // The fast check failed, so we have a poisoned byte somewhere.
+ // Find it slowly.
+ for (; beg < end; beg++)
+ if (__asan::AddressIsPoisoned(beg))
+ return beg;
+ UNREACHABLE("mem_is_zero returned false, but poisoned byte was not found");
+ return 0;
+}
+
+#define CHECK_SMALL_REGION(p, size, isWrite) \
+ do { \
+ uptr __p = reinterpret_cast<uptr>(p); \
+ uptr __size = size; \
+ if (UNLIKELY(__asan::AddressIsPoisoned(__p) || \
+ __asan::AddressIsPoisoned(__p + __size - 1))) { \
+ GET_CURRENT_PC_BP_SP; \
+ uptr __bad = __asan_region_is_poisoned(__p, __size); \
+ __asan_report_error(pc, bp, sp, __bad, isWrite, __size, 0);\
+ } \
+ } while (false)
+
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+u16 __sanitizer_unaligned_load16(const uu16 *p) {
+ CHECK_SMALL_REGION(p, sizeof(*p), false);
+ return *p;
+}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+u32 __sanitizer_unaligned_load32(const uu32 *p) {
+ CHECK_SMALL_REGION(p, sizeof(*p), false);
+ return *p;
+}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+u64 __sanitizer_unaligned_load64(const uu64 *p) {
+ CHECK_SMALL_REGION(p, sizeof(*p), false);
+ return *p;
+}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_unaligned_store16(uu16 *p, u16 x) {
+ CHECK_SMALL_REGION(p, sizeof(*p), true);
+ *p = x;
+}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_unaligned_store32(uu32 *p, u32 x) {
+ CHECK_SMALL_REGION(p, sizeof(*p), true);
+ *p = x;
+}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_unaligned_store64(uu64 *p, u64 x) {
+ CHECK_SMALL_REGION(p, sizeof(*p), true);
+ *p = x;
+}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+void __asan_poison_cxx_array_cookie(uptr p) {
+ if (SANITIZER_WORDSIZE != 64) return;
+ if (!flags()->poison_array_cookie) return;
+ uptr s = MEM_TO_SHADOW(p);
+ *reinterpret_cast<u8*>(s) = kAsanArrayCookieMagic;
+}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+uptr __asan_load_cxx_array_cookie(uptr *p) {
+ if (SANITIZER_WORDSIZE != 64) return *p;
+ if (!flags()->poison_array_cookie) return *p;
+ uptr s = MEM_TO_SHADOW(reinterpret_cast<uptr>(p));
+ u8 sval = *reinterpret_cast<u8*>(s);
+ if (sval == kAsanArrayCookieMagic) return *p;
+  // If sval is not kAsanArrayCookieMagic it can only be freed memory,
+  // which means that we are going to get a double-free. So, return 0 to avoid
+  // an infinite loop of destructors. We don't want to report the double-free
+  // here though, so print a warning just in case.
+ // CHECK_EQ(sval, kAsanHeapFreeMagic);
+ if (sval == kAsanHeapFreeMagic) {
+    Report("AddressSanitizer: loaded array cookie from freed memory; "
+ "expect a double-free report\n");
+ return 0;
+ }
+ // The cookie may remain unpoisoned if e.g. it comes from a custom
+ // operator new defined inside a class.
+ return *p;
+}
+
+// This is a simplified version of __asan_(un)poison_memory_region, which
+// assumes that the left border of the region to be poisoned is properly
+// aligned.
+static void PoisonAlignedStackMemory(uptr addr, uptr size, bool do_poison) {
+ if (size == 0) return;
+ uptr aligned_size = size & ~(SHADOW_GRANULARITY - 1);
+ PoisonShadow(addr, aligned_size,
+ do_poison ? kAsanStackUseAfterScopeMagic : 0);
+ if (size == aligned_size)
+ return;
+ s8 end_offset = (s8)(size - aligned_size);
+ s8* shadow_end = (s8*)MemToShadow(addr + aligned_size);
+ s8 end_value = *shadow_end;
+ if (do_poison) {
+    // If possible, mark all the bytes mapping to the last shadow byte as
+    // unaddressable.
+ if (end_value > 0 && end_value <= end_offset)
+ *shadow_end = (s8)kAsanStackUseAfterScopeMagic;
+ } else {
+    // If necessary, mark the first few bytes mapping to the last shadow byte
+    // as addressable.
+ if (end_value != 0)
+ *shadow_end = Max(end_value, end_offset);
+ }
+}
+
+void __asan_set_shadow_00(uptr addr, uptr size) {
+ REAL(memset)((void *)addr, 0, size);
+}
+
+void __asan_set_shadow_f1(uptr addr, uptr size) {
+ REAL(memset)((void *)addr, 0xf1, size);
+}
+
+void __asan_set_shadow_f2(uptr addr, uptr size) {
+ REAL(memset)((void *)addr, 0xf2, size);
+}
+
+void __asan_set_shadow_f3(uptr addr, uptr size) {
+ REAL(memset)((void *)addr, 0xf3, size);
+}
+
+void __asan_set_shadow_f5(uptr addr, uptr size) {
+ REAL(memset)((void *)addr, 0xf5, size);
+}
+
+void __asan_set_shadow_f8(uptr addr, uptr size) {
+ REAL(memset)((void *)addr, 0xf8, size);
+}
+
+void __asan_poison_stack_memory(uptr addr, uptr size) {
+ VReport(1, "poisoning: %p %zx\n", (void *)addr, size);
+ PoisonAlignedStackMemory(addr, size, true);
+}
+
+void __asan_unpoison_stack_memory(uptr addr, uptr size) {
+ VReport(1, "unpoisoning: %p %zx\n", (void *)addr, size);
+ PoisonAlignedStackMemory(addr, size, false);
+}
+
+void __sanitizer_annotate_contiguous_container(const void *beg_p,
+ const void *end_p,
+ const void *old_mid_p,
+ const void *new_mid_p) {
+ if (!flags()->detect_container_overflow) return;
+ VPrintf(2, "contiguous_container: %p %p %p %p\n", beg_p, end_p, old_mid_p,
+ new_mid_p);
+ uptr beg = reinterpret_cast<uptr>(beg_p);
+ uptr end = reinterpret_cast<uptr>(end_p);
+ uptr old_mid = reinterpret_cast<uptr>(old_mid_p);
+ uptr new_mid = reinterpret_cast<uptr>(new_mid_p);
+ uptr granularity = SHADOW_GRANULARITY;
+ if (!(beg <= old_mid && beg <= new_mid && old_mid <= end && new_mid <= end &&
+ IsAligned(beg, granularity))) {
+ GET_STACK_TRACE_FATAL_HERE;
+ ReportBadParamsToAnnotateContiguousContainer(beg, end, old_mid, new_mid,
+ &stack);
+ }
+ CHECK_LE(end - beg,
+ FIRST_32_SECOND_64(1UL << 30, 1ULL << 34)); // Sanity check.
+
+ uptr a = RoundDownTo(Min(old_mid, new_mid), granularity);
+ uptr c = RoundUpTo(Max(old_mid, new_mid), granularity);
+ uptr d1 = RoundDownTo(old_mid, granularity);
+ // uptr d2 = RoundUpTo(old_mid, granularity);
+ // Currently we should be in this state:
+ // [a, d1) is good, [d2, c) is bad, [d1, d2) is partially good.
+ // Make a quick sanity check that we are indeed in this state.
+ //
+ // FIXME: Two of these three checks are disabled until we fix
+ // https://github.com/google/sanitizers/issues/258.
+ // if (d1 != d2)
+ // CHECK_EQ(*(u8*)MemToShadow(d1), old_mid - d1);
+ if (a + granularity <= d1)
+ CHECK_EQ(*(u8*)MemToShadow(a), 0);
+ // if (d2 + granularity <= c && c <= end)
+ // CHECK_EQ(*(u8 *)MemToShadow(c - granularity),
+ // kAsanContiguousContainerOOBMagic);
+
+ uptr b1 = RoundDownTo(new_mid, granularity);
+ uptr b2 = RoundUpTo(new_mid, granularity);
+ // New state:
+ // [a, b1) is good, [b2, c) is bad, [b1, b2) is partially good.
+ PoisonShadow(a, b1 - a, 0);
+ PoisonShadow(b2, c - b2, kAsanContiguousContainerOOBMagic);
+ if (b1 != b2) {
+ CHECK_EQ(b2 - b1, granularity);
+ *(u8*)MemToShadow(b1) = static_cast<u8>(new_mid - b1);
+ }
+}
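+// Editor's usage sketch (illustrative only): a vector-like container with
+// storage [data, data + capacity) whose size changes from old_size to
+// new_size would call
+//
+//   __sanitizer_annotate_contiguous_container(
+//       data, data + capacity, data + old_size, data + new_size);
+//
+// after which accesses to [data + new_size, data + capacity) are reported.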
+
+const void *__sanitizer_contiguous_container_find_bad_address(
+ const void *beg_p, const void *mid_p, const void *end_p) {
+ if (!flags()->detect_container_overflow)
+ return nullptr;
+ uptr beg = reinterpret_cast<uptr>(beg_p);
+ uptr end = reinterpret_cast<uptr>(end_p);
+ uptr mid = reinterpret_cast<uptr>(mid_p);
+ CHECK_LE(beg, mid);
+ CHECK_LE(mid, end);
+ // Check some bytes starting from beg, some bytes around mid, and some bytes
+ // ending with end.
+ uptr kMaxRangeToCheck = 32;
+ uptr r1_beg = beg;
+ uptr r1_end = Min(beg + kMaxRangeToCheck, mid);
+ uptr r2_beg = Max(beg, mid - kMaxRangeToCheck);
+ uptr r2_end = Min(end, mid + kMaxRangeToCheck);
+ uptr r3_beg = Max(end - kMaxRangeToCheck, mid);
+ uptr r3_end = end;
+ for (uptr i = r1_beg; i < r1_end; i++)
+ if (AddressIsPoisoned(i))
+ return reinterpret_cast<const void *>(i);
+ for (uptr i = r2_beg; i < mid; i++)
+ if (AddressIsPoisoned(i))
+ return reinterpret_cast<const void *>(i);
+ for (uptr i = mid; i < r2_end; i++)
+ if (!AddressIsPoisoned(i))
+ return reinterpret_cast<const void *>(i);
+ for (uptr i = r3_beg; i < r3_end; i++)
+ if (!AddressIsPoisoned(i))
+ return reinterpret_cast<const void *>(i);
+ return nullptr;
+}
+
+int __sanitizer_verify_contiguous_container(const void *beg_p,
+ const void *mid_p,
+ const void *end_p) {
+ return __sanitizer_contiguous_container_find_bad_address(beg_p, mid_p,
+ end_p) == nullptr;
+}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+void __asan_poison_intra_object_redzone(uptr ptr, uptr size) {
+ AsanPoisonOrUnpoisonIntraObjectRedzone(ptr, size, true);
+}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+void __asan_unpoison_intra_object_redzone(uptr ptr, uptr size) {
+ AsanPoisonOrUnpoisonIntraObjectRedzone(ptr, size, false);
+}
+
+// --- Implementation of LSan-specific functions --- {{{1
+namespace __lsan {
+bool WordIsPoisoned(uptr addr) {
+ return (__asan_region_is_poisoned(addr, sizeof(uptr)) != 0);
+}
+}
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_poisoning.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_poisoning.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_poisoning.h (revision 351984)
@@ -0,0 +1,100 @@
+//===-- asan_poisoning.h ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Shadow memory poisoning by ASan RTL and by user application.
+//===----------------------------------------------------------------------===//
+
+#include "asan_interceptors.h"
+#include "asan_internal.h"
+#include "asan_mapping.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_platform.h"
+
+namespace __asan {
+
+// Enable/disable memory poisoning.
+void SetCanPoisonMemory(bool value);
+bool CanPoisonMemory();
+
+// Poisons the shadow memory for "size" bytes starting from "addr".
+void PoisonShadow(uptr addr, uptr size, u8 value);
+
+// Poisons the shadow memory for "redzone_size" bytes starting from
+// "addr + size".
+void PoisonShadowPartialRightRedzone(uptr addr,
+ uptr size,
+ uptr redzone_size,
+ u8 value);
+
+// Fast versions of PoisonShadow and PoisonShadowPartialRightRedzone that
+// assume that memory addresses are properly aligned. Use in
+// performance-critical code with care.
+ALWAYS_INLINE void FastPoisonShadow(uptr aligned_beg, uptr aligned_size,
+ u8 value) {
+ DCHECK(!value || CanPoisonMemory());
+#if SANITIZER_FUCHSIA
+ __sanitizer_fill_shadow(aligned_beg, aligned_size, value,
+ common_flags()->clear_shadow_mmap_threshold);
+#else
+ uptr shadow_beg = MEM_TO_SHADOW(aligned_beg);
+ uptr shadow_end = MEM_TO_SHADOW(
+ aligned_beg + aligned_size - SHADOW_GRANULARITY) + 1;
+  // FIXME: Page states are different on Windows, so using the same interface
+  // for mapping shadow and zeroing out pages doesn't "just work", so we
+  // should probably provide a higher-level interface for these operations.
+  // For now, just memset on Windows.
+ if (value || SANITIZER_WINDOWS == 1 ||
+      // RTEMS doesn't have pages, let alone a fast way to zero them, so
+      // default to memset.
+ SANITIZER_RTEMS == 1 ||
+ shadow_end - shadow_beg < common_flags()->clear_shadow_mmap_threshold) {
+ REAL(memset)((void*)shadow_beg, value, shadow_end - shadow_beg);
+ } else {
+ uptr page_size = GetPageSizeCached();
+ uptr page_beg = RoundUpTo(shadow_beg, page_size);
+ uptr page_end = RoundDownTo(shadow_end, page_size);
+
+ if (page_beg >= page_end) {
+ REAL(memset)((void *)shadow_beg, 0, shadow_end - shadow_beg);
+ } else {
+ if (page_beg != shadow_beg) {
+ REAL(memset)((void *)shadow_beg, 0, page_beg - shadow_beg);
+ }
+ if (page_end != shadow_end) {
+ REAL(memset)((void *)page_end, 0, shadow_end - page_end);
+ }
+ ReserveShadowMemoryRange(page_beg, page_end - 1, nullptr);
+ }
+ }
+#endif // SANITIZER_FUCHSIA
+}
+
+ALWAYS_INLINE void FastPoisonShadowPartialRightRedzone(
+ uptr aligned_addr, uptr size, uptr redzone_size, u8 value) {
+ DCHECK(CanPoisonMemory());
+ bool poison_partial = flags()->poison_partial;
+ u8 *shadow = (u8*)MEM_TO_SHADOW(aligned_addr);
+ for (uptr i = 0; i < redzone_size; i += SHADOW_GRANULARITY, shadow++) {
+ if (i + SHADOW_GRANULARITY <= size) {
+ *shadow = 0; // fully addressable
+ } else if (i >= size) {
+ *shadow = (SHADOW_GRANULARITY == 128) ? 0xff : value; // unaddressable
+ } else {
+ // first size-i bytes are addressable
+ *shadow = poison_partial ? static_cast<u8>(size - i) : 0;
+ }
+ }
+}
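+// Editor's illustration: with SHADOW_GRANULARITY == 8, size == 13 and
+// redzone_size == 24 the loop writes three shadow bytes: 0 (bytes 0-7 fully
+// addressable), 5 (only the first 5 of bytes 8-15 addressable, assuming
+// poison_partial), and `value` (bytes 16-23 unaddressable).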
+
+// Calls __sanitizer::ReleaseMemoryPagesToOS() on
+// [MemToShadow(p), MemToShadow(p+size)].
+void FlushUnneededASanShadowMemory(uptr p, uptr size);
+
+} // namespace __asan
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_preinit.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_preinit.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_preinit.cc (revision 351984)
@@ -0,0 +1,24 @@
+//===-- asan_preinit.cc ---------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Call __asan_init at the very early stage of process startup.
+//===----------------------------------------------------------------------===//
+#include "asan_internal.h"
+
+using namespace __asan;
+
+#if SANITIZER_CAN_USE_PREINIT_ARRAY
+  // The symbol is called __local_asan_preinit because it's not intended to
+  // be exported.
+  // This code is linked into the main executable when -fsanitize=address is
+  // in the link flags. It can only use exported interface functions.
+ __attribute__((section(".preinit_array"), used))
+ void (*__local_asan_preinit)(void) = __asan_init;
+#endif
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_premap_shadow.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_premap_shadow.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_premap_shadow.cc (revision 351984)
@@ -0,0 +1,78 @@
+//===-- asan_premap_shadow.cc ---------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Reserve shadow memory with an ifunc resolver.
+//===----------------------------------------------------------------------===//
+
+#include "asan_mapping.h"
+
+#if ASAN_PREMAP_SHADOW
+
+#include "asan_premap_shadow.h"
+#include "sanitizer_common/sanitizer_posix.h"
+
+namespace __asan {
+
+// The code in this file needs to run in an unrelocated binary. It may not
+// access any external symbol, including its own non-hidden globals.
+
+// Conservative upper limit.
+uptr PremapShadowSize() {
+ uptr granularity = GetMmapGranularity();
+ return RoundUpTo(GetMaxVirtualAddress() >> SHADOW_SCALE, granularity);
+}
+
+// Returns an address aligned to 8 pages, such that one page on the left and
+// PremapShadowSize() bytes on the right of it stay mapped with no access.
+uptr PremapShadow() {
+ uptr granularity = GetMmapGranularity();
+ uptr alignment = granularity * 8;
+ uptr left_padding = granularity;
+ uptr shadow_size = PremapShadowSize();
+ uptr map_size = shadow_size + left_padding + alignment;
+
+ uptr map_start = (uptr)MmapNoAccess(map_size);
+ CHECK_NE(map_start, ~(uptr)0);
+
+ uptr shadow_start = RoundUpTo(map_start + left_padding, alignment);
+ uptr shadow_end = shadow_start + shadow_size;
+ internal_munmap(reinterpret_cast<void *>(map_start),
+ shadow_start - left_padding - map_start);
+ internal_munmap(reinterpret_cast<void *>(shadow_end),
+ map_start + map_size - shadow_end);
+ return shadow_start;
+}
+
+bool PremapShadowFailed() {
+ uptr shadow = reinterpret_cast<uptr>(&__asan_shadow);
+ uptr resolver = reinterpret_cast<uptr>(&__asan_premap_shadow);
+  // shadow == resolver is how Android KitKat and older handle ifunc.
+  // shadow == 0 is checked just in case.
+ if (shadow == 0 || shadow == resolver)
+ return true;
+ return false;
+}
+} // namespace __asan
+
+extern "C" {
+decltype(__asan_shadow)* __asan_premap_shadow() {
+ // The resolver may be called multiple times. Map the shadow just once.
+ static uptr premapped_shadow = 0;
+ if (!premapped_shadow) premapped_shadow = __asan::PremapShadow();
+ return reinterpret_cast<decltype(__asan_shadow)*>(premapped_shadow);
+}
+
+// __asan_shadow is a "function" that has the same address as the first byte of
+// the shadow mapping.
+INTERFACE_ATTRIBUTE __attribute__((ifunc("__asan_premap_shadow"))) void
+__asan_shadow();
+}
+
+#endif // ASAN_PREMAP_SHADOW
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_premap_shadow.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_premap_shadow.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_premap_shadow.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_premap_shadow.h (revision 351984)
@@ -0,0 +1,29 @@
+//===-- asan_premap_shadow.h ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Premap shadow range with an ifunc resolver.
+//===----------------------------------------------------------------------===//
+
+
+#ifndef ASAN_PREMAP_SHADOW_H
+#define ASAN_PREMAP_SHADOW_H
+
+#if ASAN_PREMAP_SHADOW
+namespace __asan {
+// Conservative upper limit.
+uptr PremapShadowSize();
+bool PremapShadowFailed();
+}
+#endif
+
+extern "C" INTERFACE_ATTRIBUTE void __asan_shadow();
+extern "C" decltype(__asan_shadow)* __asan_premap_shadow();
+
+#endif // ASAN_PREMAP_SHADOW_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_premap_shadow.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_report.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_report.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_report.cc (revision 351984)
@@ -0,0 +1,558 @@
+//===-- asan_report.cc ----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// This file contains error reporting code.
+//===----------------------------------------------------------------------===//
+
+#include "asan_errors.h"
+#include "asan_flags.h"
+#include "asan_descriptions.h"
+#include "asan_internal.h"
+#include "asan_mapping.h"
+#include "asan_report.h"
+#include "asan_scariness_score.h"
+#include "asan_stack.h"
+#include "asan_thread.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_report_decorator.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "sanitizer_common/sanitizer_symbolizer.h"
+
+namespace __asan {
+
+// -------------------- User-specified callbacks ----------------- {{{1
+static void (*error_report_callback)(const char*);
+static char *error_message_buffer = nullptr;
+static uptr error_message_buffer_pos = 0;
+static BlockingMutex error_message_buf_mutex(LINKER_INITIALIZED);
+static const unsigned kAsanBuggyPcPoolSize = 25;
+static __sanitizer::atomic_uintptr_t AsanBuggyPcPool[kAsanBuggyPcPoolSize];
+
+void AppendToErrorMessageBuffer(const char *buffer) {
+ BlockingMutexLock l(&error_message_buf_mutex);
+ if (!error_message_buffer) {
+ error_message_buffer =
+ (char*)MmapOrDieQuietly(kErrorMessageBufferSize, __func__);
+ error_message_buffer_pos = 0;
+ }
+ uptr length = internal_strlen(buffer);
+ RAW_CHECK(kErrorMessageBufferSize >= error_message_buffer_pos);
+ uptr remaining = kErrorMessageBufferSize - error_message_buffer_pos;
+ internal_strncpy(error_message_buffer + error_message_buffer_pos,
+ buffer, remaining);
+ error_message_buffer[kErrorMessageBufferSize - 1] = '\0';
+ // FIXME: reallocate the buffer instead of truncating the message.
+ error_message_buffer_pos += Min(remaining, length);
+}
+
+// ---------------------- Helper functions ----------------------- {{{1
+
+void PrintMemoryByte(InternalScopedString *str, const char *before, u8 byte,
+ bool in_shadow, const char *after) {
+ Decorator d;
+ str->append("%s%s%x%x%s%s", before,
+ in_shadow ? d.ShadowByte(byte) : d.MemoryByte(), byte >> 4,
+ byte & 15, d.Default(), after);
+}
+
+static void PrintZoneForPointer(uptr ptr, uptr zone_ptr,
+ const char *zone_name) {
+ if (zone_ptr) {
+ if (zone_name) {
+ Printf("malloc_zone_from_ptr(%p) = %p, which is %s\n",
+ ptr, zone_ptr, zone_name);
+ } else {
+ Printf("malloc_zone_from_ptr(%p) = %p, which doesn't have a name\n",
+ ptr, zone_ptr);
+ }
+ } else {
+ Printf("malloc_zone_from_ptr(%p) = 0\n", ptr);
+ }
+}
+
+// ---------------------- Address Descriptions ------------------- {{{1
+
+bool ParseFrameDescription(const char *frame_descr,
+ InternalMmapVector<StackVarDescr> *vars) {
+ CHECK(frame_descr);
+ const char *p;
+ // This string is created by the compiler and has the following form:
+ // "n alloc_1 alloc_2 ... alloc_n"
+ // where alloc_i looks like "offset size len ObjectName"
+ // or "offset size len ObjectName:line".
+ uptr n_objects = (uptr)internal_simple_strtoll(frame_descr, &p, 10);
+ if (n_objects == 0)
+ return false;
+
+ for (uptr i = 0; i < n_objects; i++) {
+ uptr beg = (uptr)internal_simple_strtoll(p, &p, 10);
+ uptr size = (uptr)internal_simple_strtoll(p, &p, 10);
+ uptr len = (uptr)internal_simple_strtoll(p, &p, 10);
+ if (beg == 0 || size == 0 || *p != ' ') {
+ return false;
+ }
+ p++;
+ char *colon_pos = internal_strchr(p, ':');
+ uptr line = 0;
+ uptr name_len = len;
+ if (colon_pos != nullptr && colon_pos < p + len) {
+ name_len = colon_pos - p;
+ line = (uptr)internal_simple_strtoll(colon_pos + 1, nullptr, 10);
+ }
+ StackVarDescr var = {beg, size, p, name_len, line};
+ vars->push_back(var);
+ p += len;
+ }
+
+ return true;
+}
+
+// -------------------- Different kinds of reports ----------------- {{{1
+
+// Use ScopedInErrorReport to run common actions just before and
+// immediately after printing an error report.
+class ScopedInErrorReport {
+ public:
+ explicit ScopedInErrorReport(bool fatal = false)
+ : halt_on_error_(fatal || flags()->halt_on_error) {
+ // Make sure the registry and sanitizer report mutexes are locked while
+ // we're printing an error report.
+ // We can lock them only here to avoid self-deadlock in case of
+ // recursive reports.
+ asanThreadRegistry().Lock();
+ Printf(
+ "=================================================================\n");
+ }
+
+ ~ScopedInErrorReport() {
+ if (halt_on_error_ && !__sanitizer_acquire_crash_state()) {
+ asanThreadRegistry().Unlock();
+ return;
+ }
+ ASAN_ON_ERROR();
+ if (current_error_.IsValid()) current_error_.Print();
+
+ // Make sure the current thread is announced.
+ DescribeThread(GetCurrentThread());
+ // We may want to grab this lock again when printing stats.
+ asanThreadRegistry().Unlock();
+ // Print memory stats.
+ if (flags()->print_stats)
+ __asan_print_accumulated_stats();
+
+ if (common_flags()->print_cmdline)
+ PrintCmdline();
+
+ if (common_flags()->print_module_map == 2) PrintModuleMap();
+
+    // Copy the message buffer so that we can start logging without holding a
+    // lock that gets acquired during printing.
+ InternalMmapVector<char> buffer_copy(kErrorMessageBufferSize);
+ {
+ BlockingMutexLock l(&error_message_buf_mutex);
+ internal_memcpy(buffer_copy.data(),
+ error_message_buffer, kErrorMessageBufferSize);
+ }
+
+ LogFullErrorReport(buffer_copy.data());
+
+ if (error_report_callback) {
+ error_report_callback(buffer_copy.data());
+ }
+
+ if (halt_on_error_ && common_flags()->abort_on_error) {
+ // On Android the message is truncated to 512 characters.
+ // FIXME: implement "compact" error format, possibly without, or with
+ // highly compressed stack traces?
+ // FIXME: or just use the summary line as abort message?
+ SetAbortMessage(buffer_copy.data());
+ }
+
+ // In halt_on_error = false mode, reset the current error object (before
+ // unlocking).
+ if (!halt_on_error_)
+ internal_memset(&current_error_, 0, sizeof(current_error_));
+
+ if (halt_on_error_) {
+ Report("ABORTING\n");
+ Die();
+ }
+ }
+
+ void ReportError(const ErrorDescription &description) {
+ // Can only report one error per ScopedInErrorReport.
+ CHECK_EQ(current_error_.kind, kErrorKindInvalid);
+ internal_memcpy(&current_error_, &description, sizeof(current_error_));
+ }
+
+ static ErrorDescription &CurrentError() {
+ return current_error_;
+ }
+
+ private:
+ ScopedErrorReportLock error_report_lock_;
+ // Error currently being reported. This enables the destructor to interact
+ // with the debugger and point it to an error description.
+ static ErrorDescription current_error_;
+ bool halt_on_error_;
+};
+
+ErrorDescription ScopedInErrorReport::current_error_(LINKER_INITIALIZED);
+
+void ReportDeadlySignal(const SignalContext &sig) {
+ ScopedInErrorReport in_report(/*fatal*/ true);
+ ErrorDeadlySignal error(GetCurrentTidOrInvalid(), sig);
+ in_report.ReportError(error);
+}
+
+void ReportDoubleFree(uptr addr, BufferedStackTrace *free_stack) {
+ ScopedInErrorReport in_report;
+ ErrorDoubleFree error(GetCurrentTidOrInvalid(), free_stack, addr);
+ in_report.ReportError(error);
+}
+
+void ReportNewDeleteTypeMismatch(uptr addr, uptr delete_size,
+ uptr delete_alignment,
+ BufferedStackTrace *free_stack) {
+ ScopedInErrorReport in_report;
+ ErrorNewDeleteTypeMismatch error(GetCurrentTidOrInvalid(), free_stack, addr,
+ delete_size, delete_alignment);
+ in_report.ReportError(error);
+}
+
+void ReportFreeNotMalloced(uptr addr, BufferedStackTrace *free_stack) {
+ ScopedInErrorReport in_report;
+ ErrorFreeNotMalloced error(GetCurrentTidOrInvalid(), free_stack, addr);
+ in_report.ReportError(error);
+}
+
+void ReportAllocTypeMismatch(uptr addr, BufferedStackTrace *free_stack,
+ AllocType alloc_type,
+ AllocType dealloc_type) {
+ ScopedInErrorReport in_report;
+ ErrorAllocTypeMismatch error(GetCurrentTidOrInvalid(), free_stack, addr,
+ alloc_type, dealloc_type);
+ in_report.ReportError(error);
+}
+
+void ReportMallocUsableSizeNotOwned(uptr addr, BufferedStackTrace *stack) {
+ ScopedInErrorReport in_report;
+ ErrorMallocUsableSizeNotOwned error(GetCurrentTidOrInvalid(), stack, addr);
+ in_report.ReportError(error);
+}
+
+void ReportSanitizerGetAllocatedSizeNotOwned(uptr addr,
+ BufferedStackTrace *stack) {
+ ScopedInErrorReport in_report;
+ ErrorSanitizerGetAllocatedSizeNotOwned error(GetCurrentTidOrInvalid(), stack,
+ addr);
+ in_report.ReportError(error);
+}
+
+void ReportCallocOverflow(uptr count, uptr size, BufferedStackTrace *stack) {
+ ScopedInErrorReport in_report(/*fatal*/ true);
+ ErrorCallocOverflow error(GetCurrentTidOrInvalid(), stack, count, size);
+ in_report.ReportError(error);
+}
+
+void ReportReallocArrayOverflow(uptr count, uptr size,
+ BufferedStackTrace *stack) {
+ ScopedInErrorReport in_report(/*fatal*/ true);
+ ErrorReallocArrayOverflow error(GetCurrentTidOrInvalid(), stack, count, size);
+ in_report.ReportError(error);
+}
+
+void ReportPvallocOverflow(uptr size, BufferedStackTrace *stack) {
+ ScopedInErrorReport in_report(/*fatal*/ true);
+ ErrorPvallocOverflow error(GetCurrentTidOrInvalid(), stack, size);
+ in_report.ReportError(error);
+}
+
+void ReportInvalidAllocationAlignment(uptr alignment,
+ BufferedStackTrace *stack) {
+ ScopedInErrorReport in_report(/*fatal*/ true);
+ ErrorInvalidAllocationAlignment error(GetCurrentTidOrInvalid(), stack,
+ alignment);
+ in_report.ReportError(error);
+}
+
+void ReportInvalidAlignedAllocAlignment(uptr size, uptr alignment,
+ BufferedStackTrace *stack) {
+ ScopedInErrorReport in_report(/*fatal*/ true);
+ ErrorInvalidAlignedAllocAlignment error(GetCurrentTidOrInvalid(), stack,
+ size, alignment);
+ in_report.ReportError(error);
+}
+
+void ReportInvalidPosixMemalignAlignment(uptr alignment,
+ BufferedStackTrace *stack) {
+ ScopedInErrorReport in_report(/*fatal*/ true);
+ ErrorInvalidPosixMemalignAlignment error(GetCurrentTidOrInvalid(), stack,
+ alignment);
+ in_report.ReportError(error);
+}
+
+void ReportAllocationSizeTooBig(uptr user_size, uptr total_size, uptr max_size,
+ BufferedStackTrace *stack) {
+ ScopedInErrorReport in_report(/*fatal*/ true);
+ ErrorAllocationSizeTooBig error(GetCurrentTidOrInvalid(), stack, user_size,
+ total_size, max_size);
+ in_report.ReportError(error);
+}
+
+void ReportRssLimitExceeded(BufferedStackTrace *stack) {
+ ScopedInErrorReport in_report(/*fatal*/ true);
+ ErrorRssLimitExceeded error(GetCurrentTidOrInvalid(), stack);
+ in_report.ReportError(error);
+}
+
+void ReportOutOfMemory(uptr requested_size, BufferedStackTrace *stack) {
+ ScopedInErrorReport in_report(/*fatal*/ true);
+ ErrorOutOfMemory error(GetCurrentTidOrInvalid(), stack, requested_size);
+ in_report.ReportError(error);
+}
+
+void ReportStringFunctionMemoryRangesOverlap(const char *function,
+ const char *offset1, uptr length1,
+ const char *offset2, uptr length2,
+ BufferedStackTrace *stack) {
+ ScopedInErrorReport in_report;
+ ErrorStringFunctionMemoryRangesOverlap error(
+ GetCurrentTidOrInvalid(), stack, (uptr)offset1, length1, (uptr)offset2,
+ length2, function);
+ in_report.ReportError(error);
+}
+
+void ReportStringFunctionSizeOverflow(uptr offset, uptr size,
+ BufferedStackTrace *stack) {
+ ScopedInErrorReport in_report;
+ ErrorStringFunctionSizeOverflow error(GetCurrentTidOrInvalid(), stack, offset,
+ size);
+ in_report.ReportError(error);
+}
+
+void ReportBadParamsToAnnotateContiguousContainer(uptr beg, uptr end,
+ uptr old_mid, uptr new_mid,
+ BufferedStackTrace *stack) {
+ ScopedInErrorReport in_report;
+ ErrorBadParamsToAnnotateContiguousContainer error(
+ GetCurrentTidOrInvalid(), stack, beg, end, old_mid, new_mid);
+ in_report.ReportError(error);
+}
+
+void ReportODRViolation(const __asan_global *g1, u32 stack_id1,
+ const __asan_global *g2, u32 stack_id2) {
+ ScopedInErrorReport in_report;
+ ErrorODRViolation error(GetCurrentTidOrInvalid(), g1, stack_id1, g2,
+ stack_id2);
+ in_report.ReportError(error);
+}
+
+// ----------------------- CheckForInvalidPointerPair ----------- {{{1
+static NOINLINE void ReportInvalidPointerPair(uptr pc, uptr bp, uptr sp,
+ uptr a1, uptr a2) {
+ ScopedInErrorReport in_report;
+ ErrorInvalidPointerPair error(GetCurrentTidOrInvalid(), pc, bp, sp, a1, a2);
+ in_report.ReportError(error);
+}
+
+static bool IsInvalidPointerPair(uptr a1, uptr a2) {
+ if (a1 == a2)
+ return false;
+
+  // 2048 application bytes map to 256 bytes of shadow memory, which can be
+  // iterated quite fast.
+ static const uptr kMaxOffset = 2048;
+
+ uptr left = a1 < a2 ? a1 : a2;
+ uptr right = a1 < a2 ? a2 : a1;
+ uptr offset = right - left;
+ if (offset <= kMaxOffset)
+ return __asan_region_is_poisoned(left, offset);
+
+ AsanThread *t = GetCurrentThread();
+
+ // check whether left is a stack memory pointer
+ if (uptr shadow_offset1 = t->GetStackVariableShadowStart(left)) {
+ uptr shadow_offset2 = t->GetStackVariableShadowStart(right);
+ return shadow_offset2 == 0 || shadow_offset1 != shadow_offset2;
+ }
+
+ // check whether left is a heap memory address
+ HeapAddressDescription hdesc1, hdesc2;
+ if (GetHeapAddressInformation(left, 0, &hdesc1) &&
+ hdesc1.chunk_access.access_type == kAccessTypeInside)
+ return !GetHeapAddressInformation(right, 0, &hdesc2) ||
+ hdesc2.chunk_access.access_type != kAccessTypeInside ||
+ hdesc1.chunk_access.chunk_begin != hdesc2.chunk_access.chunk_begin;
+
+ // check whether left is an address of a global variable
+ GlobalAddressDescription gdesc1, gdesc2;
+ if (GetGlobalAddressInformation(left, 0, &gdesc1))
+ return !GetGlobalAddressInformation(right - 1, 0, &gdesc2) ||
+ !gdesc1.PointsInsideTheSameVariable(gdesc2);
+
+ if (t->GetStackVariableShadowStart(right) ||
+ GetHeapAddressInformation(right, 0, &hdesc2) ||
+ GetGlobalAddressInformation(right - 1, 0, &gdesc2))
+ return true;
+
+  // At this point we know nothing about either a1 or a2.
+ return false;
+}
+
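+// flags()->detect_invalid_pointer_pairs selects the checking mode (see the
+// switch below): 0 disables the check, 1 skips pairs where either pointer
+// is null, and 2 or higher checks all pairs.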
+static INLINE void CheckForInvalidPointerPair(void *p1, void *p2) {
+ switch (flags()->detect_invalid_pointer_pairs) {
+ case 0 : return;
+ case 1 : if (p1 == nullptr || p2 == nullptr) return; break;
+ }
+
+ uptr a1 = reinterpret_cast<uptr>(p1);
+ uptr a2 = reinterpret_cast<uptr>(p2);
+
+ if (IsInvalidPointerPair(a1, a2)) {
+ GET_CALLER_PC_BP_SP;
+ ReportInvalidPointerPair(pc, bp, sp, a1, a2);
+ }
+}
+// ----------------------- Mac-specific reports ----------------- {{{1
+
+void ReportMacMzReallocUnknown(uptr addr, uptr zone_ptr, const char *zone_name,
+ BufferedStackTrace *stack) {
+ ScopedInErrorReport in_report;
+ Printf("mz_realloc(%p) -- attempting to realloc unallocated memory.\n"
+ "This is an unrecoverable problem, exiting now.\n",
+ addr);
+ PrintZoneForPointer(addr, zone_ptr, zone_name);
+ stack->Print();
+ DescribeAddressIfHeap(addr);
+}
+
+// -------------- SuppressErrorReport -------------- {{{1
+// Avoid duplicate error reports when running in ASan recovery mode.
+static bool SuppressErrorReport(uptr pc) {
+ if (!common_flags()->suppress_equal_pcs) return false;
+ for (unsigned i = 0; i < kAsanBuggyPcPoolSize; i++) {
+ uptr cmp = atomic_load_relaxed(&AsanBuggyPcPool[i]);
+ if (cmp == 0 && atomic_compare_exchange_strong(&AsanBuggyPcPool[i], &cmp,
+ pc, memory_order_relaxed))
+ return false;
+ if (cmp == pc) return true;
+ }
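+  // The pool of distinct buggy PCs is exhausted and this PC is not in it;
+  // give up rather than report without deduplication.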
+ Die();
+}
+
+void ReportGenericError(uptr pc, uptr bp, uptr sp, uptr addr, bool is_write,
+ uptr access_size, u32 exp, bool fatal) {
+ if (!fatal && SuppressErrorReport(pc)) return;
+ ENABLE_FRAME_POINTER;
+
+ // Optimization experiments.
+ // The experiments can be used to evaluate potential optimizations that remove
+ // instrumentation (assess false negatives). Instead of completely removing
+ // some instrumentation, compiler can emit special calls into runtime
+ // (e.g. __asan_report_exp_load1 instead of __asan_report_load1) and pass
+ // mask of experiments (exp).
+ // The reaction to a non-zero value of exp is to be defined.
+ (void)exp;
+
+ ScopedInErrorReport in_report(fatal);
+ ErrorGeneric error(GetCurrentTidOrInvalid(), pc, bp, sp, addr, is_write,
+ access_size);
+ in_report.ReportError(error);
+}
+
+} // namespace __asan
+
+// --------------------------- Interface --------------------- {{{1
+using namespace __asan; // NOLINT
+
+void __asan_report_error(uptr pc, uptr bp, uptr sp, uptr addr, int is_write,
+ uptr access_size, u32 exp) {
+ ENABLE_FRAME_POINTER;
+ bool fatal = flags()->halt_on_error;
+ ReportGenericError(pc, bp, sp, addr, is_write, access_size, exp, fatal);
+}
+
+void NOINLINE __asan_set_error_report_callback(void (*callback)(const char*)) {
+ BlockingMutexLock l(&error_message_buf_mutex);
+ error_report_callback = callback;
+}
+
+void __asan_describe_address(uptr addr) {
+ // Thread registry must be locked while we're describing an address.
+ asanThreadRegistry().Lock();
+ PrintAddressDescription(addr, 1, "");
+ asanThreadRegistry().Unlock();
+}
+
+int __asan_report_present() {
+ return ScopedInErrorReport::CurrentError().kind != kErrorKindInvalid;
+}
+
+uptr __asan_get_report_pc() {
+ if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
+ return ScopedInErrorReport::CurrentError().Generic.pc;
+ return 0;
+}
+
+uptr __asan_get_report_bp() {
+ if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
+ return ScopedInErrorReport::CurrentError().Generic.bp;
+ return 0;
+}
+
+uptr __asan_get_report_sp() {
+ if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
+ return ScopedInErrorReport::CurrentError().Generic.sp;
+ return 0;
+}
+
+uptr __asan_get_report_address() {
+ ErrorDescription &err = ScopedInErrorReport::CurrentError();
+ if (err.kind == kErrorKindGeneric)
+ return err.Generic.addr_description.Address();
+ else if (err.kind == kErrorKindDoubleFree)
+ return err.DoubleFree.addr_description.addr;
+ return 0;
+}
+
+int __asan_get_report_access_type() {
+ if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
+ return ScopedInErrorReport::CurrentError().Generic.is_write;
+ return 0;
+}
+
+uptr __asan_get_report_access_size() {
+ if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
+ return ScopedInErrorReport::CurrentError().Generic.access_size;
+ return 0;
+}
+
+const char *__asan_get_report_description() {
+ if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
+ return ScopedInErrorReport::CurrentError().Generic.bug_descr;
+ return ScopedInErrorReport::CurrentError().Base.scariness.GetDescription();
+}
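+
+// Illustrative debugger usage (not part of the runtime): once
+// __asan_report_present() returns nonzero, the error can be inspected, e.g.
+//   (gdb) print __asan_get_report_description()
+//   (gdb) print/x __asan_get_report_address()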
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_ptr_sub(void *a, void *b) {
+ CheckForInvalidPointerPair(a, b);
+}
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_ptr_cmp(void *a, void *b) {
+ CheckForInvalidPointerPair(a, b);
+}
+} // extern "C"
+
+// Provide a default implementation of __asan_on_error that does nothing
+// and may be overridden by the user.
+SANITIZER_INTERFACE_WEAK_DEF(void, __asan_on_error, void) {}
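+
+// Example user override (illustrative):
+//   extern "C" void __asan_on_error() { /* e.g. flush application logs */ }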
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_report.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_report.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_report.h (revision 351984)
@@ -0,0 +1,99 @@
+//===-- asan_report.h -------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// ASan-private header for error reporting functions.
+//===----------------------------------------------------------------------===//
+
+#ifndef ASAN_REPORT_H
+#define ASAN_REPORT_H
+
+#include "asan_allocator.h"
+#include "asan_internal.h"
+#include "asan_thread.h"
+
+namespace __asan {
+
+struct StackVarDescr {
+ uptr beg;
+ uptr size;
+ const char *name_pos;
+ uptr name_len;
+ uptr line;
+};
+
+// Returns the number of globals close to the provided address and copies
+// them to the "globals" array.
+int GetGlobalsForAddress(uptr addr, __asan_global *globals, u32 *reg_sites,
+ int max_globals);
+
+const char *MaybeDemangleGlobalName(const char *name);
+void PrintGlobalNameIfASCII(InternalScopedString *str, const __asan_global &g);
+void PrintGlobalLocation(InternalScopedString *str, const __asan_global &g);
+
+void PrintMemoryByte(InternalScopedString *str, const char *before, u8 byte,
+ bool in_shadow, const char *after = "\n");
+
+// The following functions print address descriptions depending
+// on the memory type (shadow/heap/stack/global).
+bool ParseFrameDescription(const char *frame_descr,
+ InternalMmapVector<StackVarDescr> *vars);
+
+// Different kinds of error reports.
+void ReportGenericError(uptr pc, uptr bp, uptr sp, uptr addr, bool is_write,
+ uptr access_size, u32 exp, bool fatal);
+void ReportDeadlySignal(const SignalContext &sig);
+void ReportNewDeleteTypeMismatch(uptr addr, uptr delete_size,
+ uptr delete_alignment,
+ BufferedStackTrace *free_stack);
+void ReportDoubleFree(uptr addr, BufferedStackTrace *free_stack);
+void ReportFreeNotMalloced(uptr addr, BufferedStackTrace *free_stack);
+void ReportAllocTypeMismatch(uptr addr, BufferedStackTrace *free_stack,
+ AllocType alloc_type,
+ AllocType dealloc_type);
+void ReportMallocUsableSizeNotOwned(uptr addr, BufferedStackTrace *stack);
+void ReportSanitizerGetAllocatedSizeNotOwned(uptr addr,
+ BufferedStackTrace *stack);
+void ReportCallocOverflow(uptr count, uptr size, BufferedStackTrace *stack);
+void ReportReallocArrayOverflow(uptr count, uptr size,
+ BufferedStackTrace *stack);
+void ReportPvallocOverflow(uptr size, BufferedStackTrace *stack);
+void ReportInvalidAllocationAlignment(uptr alignment,
+ BufferedStackTrace *stack);
+void ReportInvalidAlignedAllocAlignment(uptr size, uptr alignment,
+ BufferedStackTrace *stack);
+void ReportInvalidPosixMemalignAlignment(uptr alignment,
+ BufferedStackTrace *stack);
+void ReportAllocationSizeTooBig(uptr user_size, uptr total_size, uptr max_size,
+ BufferedStackTrace *stack);
+void ReportRssLimitExceeded(BufferedStackTrace *stack);
+void ReportOutOfMemory(uptr requested_size, BufferedStackTrace *stack);
+void ReportStringFunctionMemoryRangesOverlap(const char *function,
+ const char *offset1, uptr length1,
+ const char *offset2, uptr length2,
+ BufferedStackTrace *stack);
+void ReportStringFunctionSizeOverflow(uptr offset, uptr size,
+ BufferedStackTrace *stack);
+void ReportBadParamsToAnnotateContiguousContainer(uptr beg, uptr end,
+ uptr old_mid, uptr new_mid,
+ BufferedStackTrace *stack);
+
+void ReportODRViolation(const __asan_global *g1, u32 stack_id1,
+ const __asan_global *g2, u32 stack_id2);
+
+// Mac-specific errors and warnings.
+void ReportMacMzReallocUnknown(uptr addr, uptr zone_ptr,
+ const char *zone_name,
+ BufferedStackTrace *stack);
+void ReportMacCfReallocUnknown(uptr addr, uptr zone_ptr,
+ const char *zone_name,
+ BufferedStackTrace *stack);
+
+} // namespace __asan
+#endif // ASAN_REPORT_H
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_rtems.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_rtems.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_rtems.cc (revision 351984)
@@ -0,0 +1,258 @@
+//===-- asan_rtems.cc -----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// RTEMS-specific details.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_rtems.h"
+#if SANITIZER_RTEMS
+
+#include "asan_internal.h"
+#include "asan_interceptors.h"
+#include "asan_mapping.h"
+#include "asan_poisoning.h"
+#include "asan_report.h"
+#include "asan_stack.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_libc.h"
+
+#include <pthread.h>
+#include <stdlib.h>
+
+namespace __asan {
+
+static void ResetShadowMemory() {
+ uptr shadow_start = SHADOW_OFFSET;
+ uptr shadow_end = MEM_TO_SHADOW(kMyriadMemoryEnd32);
+ uptr gap_start = MEM_TO_SHADOW(shadow_start);
+ uptr gap_end = MEM_TO_SHADOW(shadow_end);
+
+ REAL(memset)((void *)shadow_start, 0, shadow_end - shadow_start);
+ REAL(memset)((void *)gap_start, kAsanShadowGap, gap_end - gap_start);
+}
+
+void InitializeShadowMemory() {
+ kHighMemEnd = 0;
+ kMidMemBeg = 0;
+ kMidMemEnd = 0;
+
+ ResetShadowMemory();
+}
+
+void AsanApplyToGlobals(globals_op_fptr op, const void *needle) {
+ UNIMPLEMENTED();
+}
+
+void AsanCheckDynamicRTPrereqs() {}
+void AsanCheckIncompatibleRT() {}
+void InitializeAsanInterceptors() {}
+void InitializePlatformInterceptors() {}
+void InitializePlatformExceptionHandlers() {}
+
+// RTEMS only supports static linking; it suffices to return with no
+// error.
+void *AsanDoesNotSupportStaticLinkage() { return nullptr; }
+
+void AsanOnDeadlySignal(int signo, void *siginfo, void *context) {
+ UNIMPLEMENTED();
+}
+
+void EarlyInit() {
+ // Provide early initialization of shadow memory so that
+  // instrumented code running before full initialization will not
+ // report spurious errors.
+ ResetShadowMemory();
+}
+
+// We can use a plain thread_local variable for TSD.
+static thread_local void *per_thread;
+
+void *AsanTSDGet() { return per_thread; }
+
+void AsanTSDSet(void *tsd) { per_thread = tsd; }
+
+// There's no initialization needed, and the passed-in destructor
+// will never be called. Instead, our own thread destruction hook
+// (below) will call AsanThread::TSDDtor directly.
+void AsanTSDInit(void (*destructor)(void *tsd)) {
+ DCHECK(destructor == &PlatformTSDDtor);
+}
+
+void PlatformTSDDtor(void *tsd) { UNREACHABLE(__func__); }
+
+//
+// Thread registration. We provide an API similar to the Fuchsia port.
+//
+
+struct AsanThread::InitOptions {
+ uptr stack_bottom, stack_size, tls_bottom, tls_size;
+};
+
+// Shared setup between thread creation and startup for the initial thread.
+static AsanThread *CreateAsanThread(StackTrace *stack, u32 parent_tid,
+ uptr user_id, bool detached,
+ uptr stack_bottom, uptr stack_size,
+ uptr tls_bottom, uptr tls_size) {
+ // In lieu of AsanThread::Create.
+ AsanThread *thread = (AsanThread *)MmapOrDie(sizeof(AsanThread), __func__);
+ AsanThreadContext::CreateThreadContextArgs args = {thread, stack};
+ asanThreadRegistry().CreateThread(user_id, detached, parent_tid, &args);
+
+ // On other systems, AsanThread::Init() is called from the new
+ // thread itself. But on RTEMS we already know the stack address
+ // range beforehand, so we can do most of the setup right now.
+ const AsanThread::InitOptions options = {stack_bottom, stack_size,
+ tls_bottom, tls_size};
+ thread->Init(&options);
+ return thread;
+}
+
+// This gets the same arguments passed to Init by CreateAsanThread, above.
+// We're in the creator thread before the new thread is actually started, but
+// its stack and tls address range are already known.
+void AsanThread::SetThreadStackAndTls(const AsanThread::InitOptions *options) {
+ DCHECK_NE(GetCurrentThread(), this);
+ DCHECK_NE(GetCurrentThread(), nullptr);
+ CHECK_NE(options->stack_bottom, 0);
+ CHECK_NE(options->stack_size, 0);
+ stack_bottom_ = options->stack_bottom;
+ stack_top_ = options->stack_bottom + options->stack_size;
+ tls_begin_ = options->tls_bottom;
+ tls_end_ = options->tls_bottom + options->tls_size;
+}
+
+// Called by __asan::AsanInitInternal (asan_rtl.cc). Unlike other ports, the
+// main thread on RTEMS does not require special treatment; its AsanThread is
+// already created by the provided hooks. This function simply looks up and
+// returns the created thread.
+AsanThread *CreateMainThread() {
+ return GetThreadContextByTidLocked(0)->thread;
+}
+
+// This is called before each thread creation is attempted. So, in
+// its first call, the calling thread is the initial and sole thread.
+static void *BeforeThreadCreateHook(uptr user_id, bool detached,
+ uptr stack_bottom, uptr stack_size,
+ uptr tls_bottom, uptr tls_size) {
+ EnsureMainThreadIDIsCorrect();
+ // Strict init-order checking is thread-hostile.
+ if (flags()->strict_init_order) StopInitOrderChecking();
+
+ GET_STACK_TRACE_THREAD;
+ u32 parent_tid = GetCurrentTidOrInvalid();
+
+ return CreateAsanThread(&stack, parent_tid, user_id, detached,
+ stack_bottom, stack_size, tls_bottom, tls_size);
+}
+
+// This is called after creating a new thread (in the creating thread),
+// with the pointer returned by BeforeThreadCreateHook (above).
+static void ThreadCreateHook(void *hook, bool aborted) {
+ AsanThread *thread = static_cast<AsanThread *>(hook);
+ if (!aborted) {
+ // The thread was created successfully.
+ // ThreadStartHook is already running in the new thread.
+ } else {
+ // The thread wasn't created after all.
+ // Clean up everything we set up in BeforeThreadCreateHook.
+ asanThreadRegistry().FinishThread(thread->tid());
+ UnmapOrDie(thread, sizeof(AsanThread));
+ }
+}
+
+// This is called (1) in the newly created thread, before it runs anything
+// else, with the pointer returned by BeforeThreadCreateHook (above), and
+// (2) before a thread restart.
+static void ThreadStartHook(void *hook, uptr os_id) {
+ if (!hook)
+ return;
+
+ AsanThread *thread = static_cast<AsanThread *>(hook);
+ SetCurrentThread(thread);
+
+ ThreadStatus status =
+ asanThreadRegistry().GetThreadLocked(thread->tid())->status;
+ DCHECK(status == ThreadStatusCreated || status == ThreadStatusRunning);
+ // Determine whether we are starting or restarting the thread.
+ if (status == ThreadStatusCreated)
+ // In lieu of AsanThread::ThreadStart.
+ asanThreadRegistry().StartThread(thread->tid(), os_id, ThreadType::Regular,
+ nullptr);
+ else {
+ // In a thread restart, a thread may resume execution at an
+ // arbitrary function entry point, with its stack and TLS state
+ // reset. We unpoison the stack in that case.
+ PoisonShadow(thread->stack_bottom(), thread->stack_size(), 0);
+ }
+}
+
+// Each thread runs this just before it exits,
+// with the pointer returned by BeforeThreadCreateHook (above).
+// All per-thread destructors have already been called.
+static void ThreadExitHook(void *hook, uptr os_id) {
+ AsanThread *thread = static_cast<AsanThread *>(hook);
+ if (thread)
+ AsanThread::TSDDtor(thread->context());
+}
+
+static void HandleExit() {
+ // Disable ASan by setting it to uninitialized. Also reset the
+ // shadow memory to avoid reporting errors after the run-time has
+  // been destroyed.
+ if (asan_inited) {
+ asan_inited = false;
+ ResetShadowMemory();
+ }
+}
+
+bool HandleDlopenInit() {
+ // Not supported on this platform.
+ static_assert(!SANITIZER_SUPPORTS_INIT_FOR_DLOPEN,
+ "Expected SANITIZER_SUPPORTS_INIT_FOR_DLOPEN to be false");
+ return false;
+}
+} // namespace __asan
+
+// These are declared (in extern "C") by <some_path/sanitizer.h>.
+// The system runtime will call our definitions directly.
+
+extern "C" {
+void __sanitizer_early_init() {
+ __asan::EarlyInit();
+}
+
+void *__sanitizer_before_thread_create_hook(uptr thread, bool detached,
+ const char *name,
+ void *stack_base, size_t stack_size,
+ void *tls_base, size_t tls_size) {
+ return __asan::BeforeThreadCreateHook(
+ thread, detached,
+ reinterpret_cast<uptr>(stack_base), stack_size,
+ reinterpret_cast<uptr>(tls_base), tls_size);
+}
+
+void __sanitizer_thread_create_hook(void *handle, uptr thread, int status) {
+ __asan::ThreadCreateHook(handle, status != 0);
+}
+
+void __sanitizer_thread_start_hook(void *handle, uptr self) {
+ __asan::ThreadStartHook(handle, self);
+}
+
+void __sanitizer_thread_exit_hook(void *handle, uptr self) {
+ __asan::ThreadExitHook(handle, self);
+}
+
+void __sanitizer_exit() {
+ __asan::HandleExit();
+}
+} // "C"
+
+#endif // SANITIZER_RTEMS
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_rtems.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_scariness_score.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_scariness_score.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_scariness_score.h (revision 351984)
@@ -0,0 +1,73 @@
+//===-- asan_scariness_score.h ----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Compute the level of scariness of the error message.
+// Don't expect any deep science here, just a set of heuristics that suggest
+// that e.g. 1-byte-read-global-buffer-overflow is less scary than
+// 8-byte-write-stack-use-after-return.
+//
+// Every error report has one or more features, such as memory access size,
+// type (read or write), type of accessed memory (e.g. free-d heap, or a global
+// redzone), etc. Every such feature has an int score and a string description.
+// The overall score is the sum of all feature scores and the description
+// is a concatenation of feature descriptions.
+// Examples:
+// 17 (4-byte-read-heap-buffer-overflow)
+// 65 (multi-byte-write-stack-use-after-return)
+// 10 (null-deref)
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef ASAN_SCARINESS_SCORE_H
+#define ASAN_SCARINESS_SCORE_H
+
+#include "asan_flags.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_libc.h"
+
+namespace __asan {
+struct ScarinessScoreBase {
+ void Clear() {
+ descr[0] = 0;
+ score = 0;
+ }
+ void Scare(int add_to_score, const char *reason) {
+ if (descr[0])
+ internal_strlcat(descr, "-", sizeof(descr));
+ internal_strlcat(descr, reason, sizeof(descr));
+ score += add_to_score;
+ };
+ int GetScore() const { return score; }
+ const char *GetDescription() const { return descr; }
+ void Print() const {
+ if (score && flags()->print_scariness)
+ Printf("SCARINESS: %d (%s)\n", score, descr);
+ }
+ static void PrintSimple(int score, const char *descr) {
+ ScarinessScoreBase SSB;
+ SSB.Clear();
+ SSB.Scare(score, descr);
+ SSB.Print();
+ }
+
+ private:
+ int score;
+ char descr[1024];
+};
+
+struct ScarinessScore : ScarinessScoreBase {
+ ScarinessScore() {
+ Clear();
+ }
+};
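+
+// Usage sketch (illustrative; assumes print_scariness=1 so that Print()
+// emits output):
+//   ScarinessScore ss;
+//   ss.Scare(4, "4-byte");
+//   ss.Scare(1, "read");
+//   ss.Scare(12, "heap-buffer-overflow");
+//   ss.Print();  // SCARINESS: 17 (4-byte-read-heap-buffer-overflow)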
+
+} // namespace __asan
+
+#endif // ASAN_SCARINESS_SCORE_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_scariness_score.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_shadow_setup.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_shadow_setup.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_shadow_setup.cc (revision 351984)
@@ -0,0 +1,164 @@
+//===-- asan_shadow_setup.cc ----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Set up the shadow memory.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+
+// asan_fuchsia.cc and asan_rtems.cc have their own
+// InitializeShadowMemory implementation.
+#if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
+
+#include "asan_internal.h"
+#include "asan_mapping.h"
+
+namespace __asan {
+
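+// Recall the canonical ASan mapping (defined in asan_mapping.h):
+//   Shadow = (Mem >> SHADOW_SCALE) + SHADOW_OFFSET
+// With the default scale of 3, each shadow byte describes 8 bytes of
+// application memory.
+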
+// ---------------------- mmap -------------------- {{{1
+// Reserve memory range [beg, end].
+// We need to use inclusive range because end+1 may not be representable.
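+// (For instance, on 32-bit targets a range can end at 0xffffffff, where
+// end + 1 overflows to 0.)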
+void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name) {
+ CHECK_EQ((beg % GetMmapGranularity()), 0);
+ CHECK_EQ(((end + 1) % GetMmapGranularity()), 0);
+ uptr size = end - beg + 1;
+ DecreaseTotalMmap(size); // Don't count the shadow against mmap_limit_mb.
+ if (!MmapFixedNoReserve(beg, size, name)) {
+ Report(
+ "ReserveShadowMemoryRange failed while trying to map 0x%zx bytes. "
+ "Perhaps you're using ulimit -v\n",
+ size);
+ Abort();
+ }
+ if (common_flags()->no_huge_pages_for_shadow) NoHugePagesInRegion(beg, size);
+ if (common_flags()->use_madv_dontdump) DontDumpShadowMemory(beg, size);
+}
+
+static void ProtectGap(uptr addr, uptr size) {
+ if (!flags()->protect_shadow_gap) {
+ // The shadow gap is unprotected, so there is a chance that someone
+ // is actually using this memory. Which means it needs a shadow...
+ uptr GapShadowBeg = RoundDownTo(MEM_TO_SHADOW(addr), GetPageSizeCached());
+ uptr GapShadowEnd =
+ RoundUpTo(MEM_TO_SHADOW(addr + size), GetPageSizeCached()) - 1;
+ if (Verbosity())
+ Printf(
+ "protect_shadow_gap=0:"
+ " not protecting shadow gap, allocating gap's shadow\n"
+ "|| `[%p, %p]` || ShadowGap's shadow ||\n",
+ GapShadowBeg, GapShadowEnd);
+ ReserveShadowMemoryRange(GapShadowBeg, GapShadowEnd,
+ "unprotected gap shadow");
+ return;
+ }
+ void *res = MmapFixedNoAccess(addr, size, "shadow gap");
+ if (addr == (uptr)res) return;
+  // A few pages at the start of the address space cannot be protected.
+ // But we really want to protect as much as possible, to prevent this memory
+ // being returned as a result of a non-FIXED mmap().
+ if (addr == kZeroBaseShadowStart) {
+ uptr step = GetMmapGranularity();
+ while (size > step && addr < kZeroBaseMaxShadowStart) {
+ addr += step;
+ size -= step;
+ void *res = MmapFixedNoAccess(addr, size, "shadow gap");
+ if (addr == (uptr)res) return;
+ }
+ }
+
+ Report(
+ "ERROR: Failed to protect the shadow gap. "
+ "ASan cannot proceed correctly. ABORTING.\n");
+ DumpProcessMap();
+ Die();
+}
+
+static void MaybeReportLinuxPIEBug() {
+#if SANITIZER_LINUX && (defined(__x86_64__) || defined(__aarch64__))
+ Report("This might be related to ELF_ET_DYN_BASE change in Linux 4.12.\n");
+ Report(
+ "See https://github.com/google/sanitizers/issues/856 for possible "
+ "workarounds.\n");
+#endif
+}
+
+void InitializeShadowMemory() {
+ // Set the shadow memory address to uninitialized.
+ __asan_shadow_memory_dynamic_address = kDefaultShadowSentinel;
+
+ uptr shadow_start = kLowShadowBeg;
+  // Detect whether a dynamic shadow address must be used and find an
+  // available location when necessary. When a dynamic address is used, the
+  // macro |kLowShadowBeg|
+ // expands to |__asan_shadow_memory_dynamic_address| which is
+ // |kDefaultShadowSentinel|.
+ bool full_shadow_is_available = false;
+ if (shadow_start == kDefaultShadowSentinel) {
+ __asan_shadow_memory_dynamic_address = 0;
+ CHECK_EQ(0, kLowShadowBeg);
+ shadow_start = FindDynamicShadowStart();
+ if (SANITIZER_LINUX) full_shadow_is_available = true;
+ }
+ // Update the shadow memory address (potentially) used by instrumentation.
+ __asan_shadow_memory_dynamic_address = shadow_start;
+
+ if (kLowShadowBeg) shadow_start -= GetMmapGranularity();
+
+ if (!full_shadow_is_available)
+ full_shadow_is_available =
+ MemoryRangeIsAvailable(shadow_start, kHighShadowEnd);
+
+#if SANITIZER_LINUX && defined(__x86_64__) && defined(_LP64) && \
+ !ASAN_FIXED_MAPPING
+ if (!full_shadow_is_available) {
+ kMidMemBeg = kLowMemEnd < 0x3000000000ULL ? 0x3000000000ULL : 0;
+ kMidMemEnd = kLowMemEnd < 0x3000000000ULL ? 0x4fffffffffULL : 0;
+ }
+#endif
+
+ if (Verbosity()) PrintAddressSpaceLayout();
+
+ if (full_shadow_is_available) {
+ // mmap the low shadow plus at least one page at the left.
+ if (kLowShadowBeg)
+ ReserveShadowMemoryRange(shadow_start, kLowShadowEnd, "low shadow");
+ // mmap the high shadow.
+ ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd, "high shadow");
+ // protect the gap.
+ ProtectGap(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1);
+ CHECK_EQ(kShadowGapEnd, kHighShadowBeg - 1);
+ } else if (kMidMemBeg &&
+ MemoryRangeIsAvailable(shadow_start, kMidMemBeg - 1) &&
+ MemoryRangeIsAvailable(kMidMemEnd + 1, kHighShadowEnd)) {
+ CHECK(kLowShadowBeg != kLowShadowEnd);
+ // mmap the low shadow plus at least one page at the left.
+ ReserveShadowMemoryRange(shadow_start, kLowShadowEnd, "low shadow");
+ // mmap the mid shadow.
+ ReserveShadowMemoryRange(kMidShadowBeg, kMidShadowEnd, "mid shadow");
+ // mmap the high shadow.
+ ReserveShadowMemoryRange(kHighShadowBeg, kHighShadowEnd, "high shadow");
+ // protect the gaps.
+ ProtectGap(kShadowGapBeg, kShadowGapEnd - kShadowGapBeg + 1);
+ ProtectGap(kShadowGap2Beg, kShadowGap2End - kShadowGap2Beg + 1);
+ ProtectGap(kShadowGap3Beg, kShadowGap3End - kShadowGap3Beg + 1);
+ } else {
+ Report(
+ "Shadow memory range interleaves with an existing memory mapping. "
+ "ASan cannot proceed correctly. ABORTING.\n");
+ Report("ASan shadow was supposed to be located in the [%p-%p] range.\n",
+ shadow_start, kHighShadowEnd);
+ MaybeReportLinuxPIEBug();
+ DumpProcessMap();
+ Die();
+ }
+}
+
+} // namespace __asan
+
+#endif // !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_shadow_setup.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_stack.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_stack.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_stack.cc (revision 351984)
@@ -0,0 +1,88 @@
+//===-- asan_stack.cc -----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Code for ASan stack trace.
+//===----------------------------------------------------------------------===//
+#include "asan_internal.h"
+#include "asan_stack.h"
+#include "sanitizer_common/sanitizer_atomic.h"
+
+namespace __asan {
+
+static atomic_uint32_t malloc_context_size;
+
+void SetMallocContextSize(u32 size) {
+ atomic_store(&malloc_context_size, size, memory_order_release);
+}
+
+u32 GetMallocContextSize() {
+ return atomic_load(&malloc_context_size, memory_order_acquire);
+}
+
+namespace {
+
+// ScopedUnwinding sets a thread's "unwinding" flag for the duration of a
+// stack unwind so that recursive unwinding attempts can be detected and
+// skipped.
+class ScopedUnwinding {
+ public:
+ explicit ScopedUnwinding(AsanThread *t) : thread(t) {
+ if (thread) {
+ can_unwind = !thread->isUnwinding();
+ thread->setUnwinding(true);
+ }
+ }
+ ~ScopedUnwinding() {
+ if (thread)
+ thread->setUnwinding(false);
+ }
+
+ bool CanUnwind() const { return can_unwind; }
+
+ private:
+ AsanThread *thread = nullptr;
+ bool can_unwind = true;
+};
+
+} // namespace
+
+} // namespace __asan
+
+void __sanitizer::BufferedStackTrace::UnwindImpl(
+ uptr pc, uptr bp, void *context, bool request_fast, u32 max_depth) {
+ using namespace __asan;
+ size = 0;
+ if (UNLIKELY(!asan_inited))
+ return;
+ request_fast = StackTrace::WillUseFastUnwind(request_fast);
+ AsanThread *t = GetCurrentThread();
+ ScopedUnwinding unwind_scope(t);
+ if (!unwind_scope.CanUnwind())
+ return;
+ if (request_fast) {
+ if (t) {
+ Unwind(max_depth, pc, bp, nullptr, t->stack_top(), t->stack_bottom(),
+ true);
+ }
+ return;
+ }
+ if (SANITIZER_MIPS && t &&
+ !IsValidFrame(bp, t->stack_top(), t->stack_bottom()))
+ return;
+ Unwind(max_depth, pc, bp, context, 0, 0, false);
+}
+
+// ------------------ Interface -------------- {{{1
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_print_stack_trace() {
+ using namespace __asan;
+ PRINT_CURRENT_STACK();
+}
+} // extern "C"
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_stack.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_stack.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_stack.h (revision 351984)
@@ -0,0 +1,85 @@
+//===-- asan_stack.h --------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// ASan-private header for asan_stack.cc.
+//===----------------------------------------------------------------------===//
+
+#ifndef ASAN_STACK_H
+#define ASAN_STACK_H
+
+#include "asan_flags.h"
+#include "asan_thread.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+
+namespace __asan {
+
+static const u32 kDefaultMallocContextSize = 30;
+
+void SetMallocContextSize(u32 size);
+u32 GetMallocContextSize();
+
+} // namespace __asan
+
+// NOTE: A rule of thumb is to retrieve the stack trace in interceptors
+// as early as possible (in functions exposed to the user), as we generally
+// don't want the stack trace to contain functions from ASan internals.
+
+#define GET_STACK_TRACE(max_size, fast) \
+ BufferedStackTrace stack; \
+ if (max_size <= 2) { \
+ stack.size = max_size; \
+ if (max_size > 0) { \
+ stack.top_frame_bp = GET_CURRENT_FRAME(); \
+ stack.trace_buffer[0] = StackTrace::GetCurrentPc(); \
+ if (max_size > 1) stack.trace_buffer[1] = GET_CALLER_PC(); \
+ } \
+ } else { \
+ stack.Unwind(StackTrace::GetCurrentPc(), \
+ GET_CURRENT_FRAME(), nullptr, fast, max_size); \
+ }
+
+#define GET_STACK_TRACE_FATAL(pc, bp) \
+ BufferedStackTrace stack; \
+ stack.Unwind(pc, bp, nullptr, \
+ common_flags()->fast_unwind_on_fatal)
+
+#define GET_STACK_TRACE_SIGNAL(sig) \
+ BufferedStackTrace stack; \
+ stack.Unwind((sig).pc, (sig).bp, (sig).context, \
+ common_flags()->fast_unwind_on_fatal)
+
+#define GET_STACK_TRACE_FATAL_HERE \
+ GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_fatal)
+
+#define GET_STACK_TRACE_CHECK_HERE \
+ GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_check)
+
+#define GET_STACK_TRACE_THREAD \
+ GET_STACK_TRACE(kStackTraceMax, true)
+
+#define GET_STACK_TRACE_MALLOC \
+ GET_STACK_TRACE(GetMallocContextSize(), common_flags()->fast_unwind_on_malloc)
+
+#define GET_STACK_TRACE_FREE GET_STACK_TRACE_MALLOC
+
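+// Usage sketch (illustrative): an allocation interceptor grabs the stack
+// first and passes it down to the allocator, e.g.
+//   GET_STACK_TRACE_MALLOC;
+//   return asan_malloc(size, &stack);
+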
+#define PRINT_CURRENT_STACK() \
+ { \
+ GET_STACK_TRACE_FATAL_HERE; \
+ stack.Print(); \
+ }
+
+#define PRINT_CURRENT_STACK_CHECK() \
+ { \
+ GET_STACK_TRACE_CHECK_HERE; \
+ stack.Print(); \
+ }
+
+#endif // ASAN_STACK_H
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_stats.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_stats.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_stats.cc (revision 351984)
@@ -0,0 +1,173 @@
+//===-- asan_stats.cc -----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Code related to statistics collected by AddressSanitizer.
+//===----------------------------------------------------------------------===//
+#include "asan_interceptors.h"
+#include "asan_internal.h"
+#include "asan_stats.h"
+#include "asan_thread.h"
+#include "sanitizer_common/sanitizer_allocator_interface.h"
+#include "sanitizer_common/sanitizer_mutex.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+
+namespace __asan {
+
+AsanStats::AsanStats() {
+ Clear();
+}
+
+void AsanStats::Clear() {
+ CHECK(REAL(memset));
+ REAL(memset)(this, 0, sizeof(AsanStats));
+}
+
+static void PrintMallocStatsArray(const char *prefix,
+ uptr (&array)[kNumberOfSizeClasses]) {
+ Printf("%s", prefix);
+ for (uptr i = 0; i < kNumberOfSizeClasses; i++) {
+ if (!array[i]) continue;
+ Printf("%zu:%zu; ", i, array[i]);
+ }
+ Printf("\n");
+}
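+// (Illustrative output: " mallocs by size class: 7:320; 8:14;" -- one
+// "class:count" pair per non-empty size class.)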
+
+void AsanStats::Print() {
+ Printf("Stats: %zuM malloced (%zuM for red zones) by %zu calls\n",
+ malloced>>20, malloced_redzones>>20, mallocs);
+ Printf("Stats: %zuM realloced by %zu calls\n", realloced>>20, reallocs);
+ Printf("Stats: %zuM freed by %zu calls\n", freed>>20, frees);
+ Printf("Stats: %zuM really freed by %zu calls\n",
+ really_freed>>20, real_frees);
+ Printf("Stats: %zuM (%zuM-%zuM) mmaped; %zu maps, %zu unmaps\n",
+ (mmaped-munmaped)>>20, mmaped>>20, munmaped>>20,
+ mmaps, munmaps);
+
+ PrintMallocStatsArray(" mallocs by size class: ", malloced_by_size);
+ Printf("Stats: malloc large: %zu\n", malloc_large);
+}
+
+void AsanStats::MergeFrom(const AsanStats *stats) {
+ uptr *dst_ptr = reinterpret_cast<uptr*>(this);
+ const uptr *src_ptr = reinterpret_cast<const uptr*>(stats);
+ uptr num_fields = sizeof(*this) / sizeof(uptr);
+ for (uptr i = 0; i < num_fields; i++)
+ dst_ptr[i] += src_ptr[i];
+}
+
+static BlockingMutex print_lock(LINKER_INITIALIZED);
+
+static AsanStats unknown_thread_stats(LINKER_INITIALIZED);
+static AsanStats dead_threads_stats(LINKER_INITIALIZED);
+static BlockingMutex dead_threads_stats_lock(LINKER_INITIALIZED);
+// Required for malloc_zone_statistics() on OS X. This can't be stored in
+// per-thread AsanStats.
+static uptr max_malloced_memory;
+
+static void MergeThreadStats(ThreadContextBase *tctx_base, void *arg) {
+ AsanStats *accumulated_stats = reinterpret_cast<AsanStats*>(arg);
+ AsanThreadContext *tctx = static_cast<AsanThreadContext*>(tctx_base);
+ if (AsanThread *t = tctx->thread)
+ accumulated_stats->MergeFrom(&t->stats());
+}
+
+static void GetAccumulatedStats(AsanStats *stats) {
+ stats->Clear();
+ {
+ ThreadRegistryLock l(&asanThreadRegistry());
+ asanThreadRegistry()
+ .RunCallbackForEachThreadLocked(MergeThreadStats, stats);
+ }
+ stats->MergeFrom(&unknown_thread_stats);
+ {
+ BlockingMutexLock lock(&dead_threads_stats_lock);
+ stats->MergeFrom(&dead_threads_stats);
+ }
+  // This is not very accurate: we may miss allocation peaks that happen
+  // between two updates of accumulated_stats_. For more accurate bookkeeping
+  // the maximum would have to be updated on every malloc(), which is
+  // unacceptably expensive.
+ if (max_malloced_memory < stats->malloced) {
+ max_malloced_memory = stats->malloced;
+ }
+}
+
+void FlushToDeadThreadStats(AsanStats *stats) {
+ BlockingMutexLock lock(&dead_threads_stats_lock);
+ dead_threads_stats.MergeFrom(stats);
+ stats->Clear();
+}
+
+void FillMallocStatistics(AsanMallocStats *malloc_stats) {
+ AsanStats stats;
+ GetAccumulatedStats(&stats);
+ malloc_stats->blocks_in_use = stats.mallocs;
+ malloc_stats->size_in_use = stats.malloced;
+ malloc_stats->max_size_in_use = max_malloced_memory;
+ malloc_stats->size_allocated = stats.mmaped;
+}
+
+AsanStats &GetCurrentThreadStats() {
+ AsanThread *t = GetCurrentThread();
+ return (t) ? t->stats() : unknown_thread_stats;
+}
+
+static void PrintAccumulatedStats() {
+ AsanStats stats;
+ GetAccumulatedStats(&stats);
+  // Use a lock to keep reports from interleaving.
+ BlockingMutexLock lock(&print_lock);
+ stats.Print();
+ StackDepotStats *stack_depot_stats = StackDepotGetStats();
+ Printf("Stats: StackDepot: %zd ids; %zdM allocated\n",
+ stack_depot_stats->n_uniq_ids, stack_depot_stats->allocated >> 20);
+ PrintInternalAllocatorStats();
+}
+
+} // namespace __asan
+
+// ---------------------- Interface ---------------- {{{1
+using namespace __asan; // NOLINT
+
+uptr __sanitizer_get_current_allocated_bytes() {
+ AsanStats stats;
+ GetAccumulatedStats(&stats);
+ uptr malloced = stats.malloced;
+ uptr freed = stats.freed;
+  // Return a sane value if malloced < freed due to the racy way we update
+  // accumulated stats.
+ return (malloced > freed) ? malloced - freed : 1;
+}
+
+uptr __sanitizer_get_heap_size() {
+ AsanStats stats;
+ GetAccumulatedStats(&stats);
+ return stats.mmaped - stats.munmaped;
+}
+
+uptr __sanitizer_get_free_bytes() {
+ AsanStats stats;
+ GetAccumulatedStats(&stats);
+ uptr total_free = stats.mmaped
+ - stats.munmaped
+ + stats.really_freed;
+ uptr total_used = stats.malloced
+ + stats.malloced_redzones;
+  // Return a sane value if total_free < total_used due to the racy way we
+  // update accumulated stats.
+ return (total_free > total_used) ? total_free - total_used : 1;
+}
+
+uptr __sanitizer_get_unmapped_bytes() {
+ return 0;
+}
+
+void __asan_print_accumulated_stats() {
+ PrintAccumulatedStats();
+}
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_stats.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_stats.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_stats.h (revision 351984)
@@ -0,0 +1,71 @@
+//===-- asan_stats.h --------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// ASan-private header for statistics.
+//===----------------------------------------------------------------------===//
+#ifndef ASAN_STATS_H
+#define ASAN_STATS_H
+
+#include "asan_allocator.h"
+#include "asan_internal.h"
+
+namespace __asan {
+
+// AsanStats struct is NOT thread-safe.
+// Each AsanThread has its own AsanStats, which are sometimes flushed
+// to the accumulated AsanStats.
+struct AsanStats {
+ // AsanStats must be a struct consisting of uptr fields only.
+ // When merging two AsanStats structs, we treat them as arrays of uptr.
+ uptr mallocs;
+ uptr malloced;
+ uptr malloced_redzones;
+ uptr frees;
+ uptr freed;
+ uptr real_frees;
+ uptr really_freed;
+ uptr reallocs;
+ uptr realloced;
+ uptr mmaps;
+ uptr mmaped;
+ uptr munmaps;
+ uptr munmaped;
+ uptr malloc_large;
+ uptr malloced_by_size[kNumberOfSizeClasses];
+
+ // Ctor for global AsanStats (accumulated stats for dead threads).
+ explicit AsanStats(LinkerInitialized) { }
+ // Creates empty stats.
+ AsanStats();
+
+ void Print(); // Prints formatted stats to stderr.
+ void Clear();
+ void MergeFrom(const AsanStats *stats);
+};
+
+// Returns stats for GetCurrentThread(), or stats for fake "unknown thread"
+// if GetCurrentThread() returns 0.
+AsanStats &GetCurrentThreadStats();
+// Flushes the given stats into the accumulated stats of dead threads.
+void FlushToDeadThreadStats(AsanStats *stats);
+
+// A cross-platform equivalent of malloc_statistics_t on Mac OS.
+struct AsanMallocStats {
+ uptr blocks_in_use;
+ uptr size_in_use;
+ uptr max_size_in_use;
+ uptr size_allocated;
+};
+
+void FillMallocStatistics(AsanMallocStats *malloc_stats);
+
+} // namespace __asan
+
+#endif // ASAN_STATS_H
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_suppressions.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_suppressions.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_suppressions.cc (revision 351984)
@@ -0,0 +1,104 @@
+//===-- asan_suppressions.cc ----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Issue suppression and suppression-related functions.
+//===----------------------------------------------------------------------===//
+
+#include "asan_suppressions.h"
+
+#include "asan_stack.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_suppressions.h"
+#include "sanitizer_common/sanitizer_symbolizer.h"
+
+namespace __asan {
+
+ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)];
+static SuppressionContext *suppression_ctx = nullptr;
+static const char kInterceptorName[] = "interceptor_name";
+static const char kInterceptorViaFunction[] = "interceptor_via_fun";
+static const char kInterceptorViaLibrary[] = "interceptor_via_lib";
+static const char kODRViolation[] = "odr_violation";
+static const char *kSuppressionTypes[] = {
+ kInterceptorName, kInterceptorViaFunction, kInterceptorViaLibrary,
+ kODRViolation};
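+
+// An illustrative suppressions file (loaded via flags()->suppressions):
+//   interceptor_via_fun:TrustedFunction
+//   interceptor_via_lib:libtrusted.so
+//   odr_violation:some_global_name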
+
+SANITIZER_INTERFACE_WEAK_DEF(const char *, __asan_default_suppressions, void) {
+ return "";
+}
+
+void InitializeSuppressions() {
+ CHECK_EQ(nullptr, suppression_ctx);
+ suppression_ctx = new (suppression_placeholder) // NOLINT
+ SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
+ suppression_ctx->ParseFromFile(flags()->suppressions);
+ if (&__asan_default_suppressions)
+ suppression_ctx->Parse(__asan_default_suppressions());
+}
+
+bool IsInterceptorSuppressed(const char *interceptor_name) {
+ CHECK(suppression_ctx);
+ Suppression *s;
+ // Match "interceptor_name" suppressions.
+ return suppression_ctx->Match(interceptor_name, kInterceptorName, &s);
+}
+
+bool HaveStackTraceBasedSuppressions() {
+ CHECK(suppression_ctx);
+ return suppression_ctx->HasSuppressionType(kInterceptorViaFunction) ||
+ suppression_ctx->HasSuppressionType(kInterceptorViaLibrary);
+}
+
+bool IsODRViolationSuppressed(const char *global_var_name) {
+ CHECK(suppression_ctx);
+ Suppression *s;
+ // Match "odr_violation" suppressions.
+ return suppression_ctx->Match(global_var_name, kODRViolation, &s);
+}
+
+bool IsStackTraceSuppressed(const StackTrace *stack) {
+ if (!HaveStackTraceBasedSuppressions())
+ return false;
+
+ CHECK(suppression_ctx);
+ Symbolizer *symbolizer = Symbolizer::GetOrInit();
+ Suppression *s;
+ for (uptr i = 0; i < stack->size && stack->trace[i]; i++) {
+ uptr addr = stack->trace[i];
+
+ if (suppression_ctx->HasSuppressionType(kInterceptorViaLibrary)) {
+ // Match "interceptor_via_lib" suppressions.
+ if (const char *module_name = symbolizer->GetModuleNameForPc(addr))
+ if (suppression_ctx->Match(module_name, kInterceptorViaLibrary, &s))
+ return true;
+ }
+
+ if (suppression_ctx->HasSuppressionType(kInterceptorViaFunction)) {
+ SymbolizedStack *frames = symbolizer->SymbolizePC(addr);
+ CHECK(frames);
+ for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
+ const char *function_name = cur->info.function;
+ if (!function_name) {
+ continue;
+ }
+ // Match "interceptor_via_fun" suppressions.
+ if (suppression_ctx->Match(function_name, kInterceptorViaFunction,
+ &s)) {
+ frames->ClearAll();
+ return true;
+ }
+ }
+ frames->ClearAll();
+ }
+ }
+ return false;
+}
+
+} // namespace __asan
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_suppressions.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_suppressions.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_suppressions.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_suppressions.h (revision 351984)
@@ -0,0 +1,29 @@
+//===-- asan_suppressions.h -------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// ASan-private header for asan_suppressions.cc.
+//===----------------------------------------------------------------------===//
+#ifndef ASAN_SUPPRESSIONS_H
+#define ASAN_SUPPRESSIONS_H
+
+#include "asan_internal.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+
+namespace __asan {
+
+void InitializeSuppressions();
+bool IsInterceptorSuppressed(const char *interceptor_name);
+bool HaveStackTraceBasedSuppressions();
+bool IsStackTraceSuppressed(const StackTrace *stack);
+bool IsODRViolationSuppressed(const char *global_var_name);
+
+} // namespace __asan
+
+#endif // ASAN_SUPPRESSIONS_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_suppressions.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_thread.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_thread.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_thread.cc (revision 351984)
@@ -0,0 +1,535 @@
+//===-- asan_thread.cc ----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Thread-related code.
+//===----------------------------------------------------------------------===//
+#include "asan_allocator.h"
+#include "asan_interceptors.h"
+#include "asan_poisoning.h"
+#include "asan_stack.h"
+#include "asan_thread.h"
+#include "asan_mapping.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "sanitizer_common/sanitizer_tls_get_addr.h"
+#include "lsan/lsan_common.h"
+
+namespace __asan {
+
+// AsanThreadContext implementation.
+
+void AsanThreadContext::OnCreated(void *arg) {
+ CreateThreadContextArgs *args = static_cast<CreateThreadContextArgs*>(arg);
+ if (args->stack)
+ stack_id = StackDepotPut(*args->stack);
+ thread = args->thread;
+ thread->set_context(this);
+}
+
+void AsanThreadContext::OnFinished() {
+ // Drop the link to the AsanThread object.
+ thread = nullptr;
+}
+
+// MIPS requires an aligned address.
+static ALIGNED(16) char thread_registry_placeholder[sizeof(ThreadRegistry)];
+static ThreadRegistry *asan_thread_registry;
+
+static BlockingMutex mu_for_thread_context(LINKER_INITIALIZED);
+static LowLevelAllocator allocator_for_thread_context;
+
+static ThreadContextBase *GetAsanThreadContext(u32 tid) {
+ BlockingMutexLock lock(&mu_for_thread_context);
+ return new(allocator_for_thread_context) AsanThreadContext(tid);
+}
+
+ThreadRegistry &asanThreadRegistry() {
+ static bool initialized;
+ // Don't worry about thread safety - this should be called when there is
+ // only a single thread.
+ if (!initialized) {
+ // Never reuse ASan threads: we store a pointer to the AsanThreadContext
+ // in TSD and can't reliably tell when no more TSD destructors will
+ // be called. It would be wrong to reuse an AsanThreadContext for another
+ // thread before all TSD destructors have been called for it.
+ asan_thread_registry = new(thread_registry_placeholder) ThreadRegistry(
+ GetAsanThreadContext, kMaxNumberOfThreads, kMaxNumberOfThreads);
+ initialized = true;
+ }
+ return *asan_thread_registry;
+}
+
+AsanThreadContext *GetThreadContextByTidLocked(u32 tid) {
+ return static_cast<AsanThreadContext *>(
+ asanThreadRegistry().GetThreadLocked(tid));
+}
+
+// AsanThread implementation.
+
+AsanThread *AsanThread::Create(thread_callback_t start_routine, void *arg,
+ u32 parent_tid, StackTrace *stack,
+ bool detached) {
+ uptr PageSize = GetPageSizeCached();
+ uptr size = RoundUpTo(sizeof(AsanThread), PageSize);
+ AsanThread *thread = (AsanThread*)MmapOrDie(size, __func__);
+ thread->start_routine_ = start_routine;
+ thread->arg_ = arg;
+ AsanThreadContext::CreateThreadContextArgs args = {thread, stack};
+ asanThreadRegistry().CreateThread(*reinterpret_cast<uptr *>(thread), detached,
+ parent_tid, &args);
+
+ return thread;
+}
+
+void AsanThread::TSDDtor(void *tsd) {
+ AsanThreadContext *context = (AsanThreadContext*)tsd;
+ VReport(1, "T%d TSDDtor\n", context->tid);
+ if (context->thread)
+ context->thread->Destroy();
+}
+
+void AsanThread::Destroy() {
+ int tid = this->tid();
+ VReport(1, "T%d exited\n", tid);
+
+ malloc_storage().CommitBack();
+ if (common_flags()->use_sigaltstack) UnsetAlternateSignalStack();
+ asanThreadRegistry().FinishThread(tid);
+ FlushToDeadThreadStats(&stats_);
+ // We also clear the shadow on thread destruction because
+ // some code may still be executing in later TSD destructors
+ // and we don't want it to have any poisoned stack.
+ ClearShadowForThreadStackAndTLS();
+ DeleteFakeStack(tid);
+ uptr size = RoundUpTo(sizeof(AsanThread), GetPageSizeCached());
+ UnmapOrDie(this, size);
+ DTLS_Destroy();
+}
+
+void AsanThread::StartSwitchFiber(FakeStack **fake_stack_save, uptr bottom,
+ uptr size) {
+ if (atomic_load(&stack_switching_, memory_order_relaxed)) {
+ Report("ERROR: starting fiber switch while in fiber switch\n");
+ Die();
+ }
+
+ next_stack_bottom_ = bottom;
+ next_stack_top_ = bottom + size;
+ atomic_store(&stack_switching_, 1, memory_order_release);
+
+ FakeStack *current_fake_stack = fake_stack_;
+ if (fake_stack_save)
+ *fake_stack_save = fake_stack_;
+ fake_stack_ = nullptr;
+ SetTLSFakeStack(nullptr);
+ // If fake_stack_save is null, the fiber is going to die; delete its fake stack.
+ if (!fake_stack_save && current_fake_stack)
+ current_fake_stack->Destroy(this->tid());
+}
+
+void AsanThread::FinishSwitchFiber(FakeStack *fake_stack_save,
+ uptr *bottom_old,
+ uptr *size_old) {
+ if (!atomic_load(&stack_switching_, memory_order_relaxed)) {
+ Report("ERROR: finishing a fiber switch that has not started\n");
+ Die();
+ }
+
+ if (fake_stack_save) {
+ SetTLSFakeStack(fake_stack_save);
+ fake_stack_ = fake_stack_save;
+ }
+
+ if (bottom_old)
+ *bottom_old = stack_bottom_;
+ if (size_old)
+ *size_old = stack_top_ - stack_bottom_;
+ stack_bottom_ = next_stack_bottom_;
+ stack_top_ = next_stack_top_;
+ atomic_store(&stack_switching_, 0, memory_order_release);
+ next_stack_top_ = 0;
+ next_stack_bottom_ = 0;
+}
+
+inline AsanThread::StackBounds AsanThread::GetStackBounds() const {
+ if (!atomic_load(&stack_switching_, memory_order_acquire)) {
+ // Make sure the stack bounds are fully initialized.
+ if (stack_bottom_ >= stack_top_) return {0, 0};
+ return {stack_bottom_, stack_top_};
+ }
+ char local;
+ const uptr cur_stack = (uptr)&local;
+ // Note: we need to check the next stack first, because FinishSwitchFiber
+ // may be in the process of overwriting stack_top_/stack_bottom_. But in
+ // that case we are already on the next stack.
+ if (cur_stack >= next_stack_bottom_ && cur_stack < next_stack_top_)
+ return {next_stack_bottom_, next_stack_top_};
+ return {stack_bottom_, stack_top_};
+}
+
+uptr AsanThread::stack_top() {
+ return GetStackBounds().top;
+}
+
+uptr AsanThread::stack_bottom() {
+ return GetStackBounds().bottom;
+}
+
+uptr AsanThread::stack_size() {
+ const auto bounds = GetStackBounds();
+ return bounds.top - bounds.bottom;
+}
+
+// We want to create the FakeStack lazily on first use, but not earlier than
+// the stack size is known; the procedure also has to be async-signal safe.
+FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() {
+ uptr stack_size = this->stack_size();
+ if (stack_size == 0) // stack_size is not yet available, don't use FakeStack.
+ return nullptr;
+ uptr old_val = 0;
+ // fake_stack_ has 3 states:
+ // 0 -- not initialized
+ // 1 -- being initialized
+ // ptr -- initialized
+ // This CAS checks if the state was 0 and if so changes it to state 1,
+ // if that was successful, it initializes the pointer.
+ if (atomic_compare_exchange_strong(
+ reinterpret_cast<atomic_uintptr_t *>(&fake_stack_), &old_val, 1UL,
+ memory_order_relaxed)) {
+ uptr stack_size_log = Log2(RoundUpToPowerOfTwo(stack_size));
+ CHECK_LE(flags()->min_uar_stack_size_log, flags()->max_uar_stack_size_log);
+ stack_size_log =
+ Min(stack_size_log, static_cast<uptr>(flags()->max_uar_stack_size_log));
+ stack_size_log =
+ Max(stack_size_log, static_cast<uptr>(flags()->min_uar_stack_size_log));
+ fake_stack_ = FakeStack::Create(stack_size_log);
+ SetTLSFakeStack(fake_stack_);
+ return fake_stack_;
+ }
+ return nullptr;
+}
+
+void AsanThread::Init(const InitOptions *options) {
+ next_stack_top_ = next_stack_bottom_ = 0;
+ atomic_store(&stack_switching_, false, memory_order_release);
+ CHECK_EQ(this->stack_size(), 0U);
+ SetThreadStackAndTls(options);
+ if (stack_top_ != stack_bottom_) {
+ CHECK_GT(this->stack_size(), 0U);
+ CHECK(AddrIsInMem(stack_bottom_));
+ CHECK(AddrIsInMem(stack_top_ - 1));
+ }
+ ClearShadowForThreadStackAndTLS();
+ fake_stack_ = nullptr;
+ if (__asan_option_detect_stack_use_after_return)
+ AsyncSignalSafeLazyInitFakeStack();
+ int local = 0;
+ VReport(1, "T%d: stack [%p,%p) size 0x%zx; local=%p\n", tid(),
+ (void *)stack_bottom_, (void *)stack_top_, stack_top_ - stack_bottom_,
+ &local);
+}
+
+// Fuchsia and RTEMS don't use ThreadStart.
+// asan_fuchsia.c/asan_rtems.c define CreateMainThread and
+// SetThreadStackAndTls.
+#if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
+
+thread_return_t AsanThread::ThreadStart(
+ tid_t os_id, atomic_uintptr_t *signal_thread_is_registered) {
+ Init();
+ asanThreadRegistry().StartThread(tid(), os_id, ThreadType::Regular, nullptr);
+ if (signal_thread_is_registered)
+ atomic_store(signal_thread_is_registered, 1, memory_order_release);
+
+ if (common_flags()->use_sigaltstack) SetAlternateSignalStack();
+
+ if (!start_routine_) {
+ // start_routine_ == 0 if we're on the main thread or on one of the
+ // OS X libdispatch worker threads. But nobody is supposed to call
+ // ThreadStart() for the worker threads.
+ CHECK_EQ(tid(), 0);
+ return 0;
+ }
+
+ thread_return_t res = start_routine_(arg_);
+
+ // On POSIX systems we defer this to the TSD destructor. LSan will consider
+ // the thread's memory as non-live from the moment we call Destroy(), even
+ // though that memory might contain pointers to heap objects which will be
+ // cleaned up by a user-defined TSD destructor. Thus, calling Destroy() before
+ // the TSD destructors have run might cause false positives in LSan.
+ if (!SANITIZER_POSIX)
+ this->Destroy();
+
+ return res;
+}
+
+AsanThread *CreateMainThread() {
+ AsanThread *main_thread = AsanThread::Create(
+ /* start_routine */ nullptr, /* arg */ nullptr, /* parent_tid */ 0,
+ /* stack */ nullptr, /* detached */ true);
+ SetCurrentThread(main_thread);
+ main_thread->ThreadStart(internal_getpid(),
+ /* signal_thread_is_registered */ nullptr);
+ return main_thread;
+}
+
+// This implementation doesn't use the argument, which is just passed down
+// from the caller of Init (see above). It's only there to support
+// OS-specific implementations that need more information passed through.
+void AsanThread::SetThreadStackAndTls(const InitOptions *options) {
+ DCHECK_EQ(options, nullptr);
+ uptr tls_size = 0;
+ uptr stack_size = 0;
+ GetThreadStackAndTls(tid() == 0, &stack_bottom_, &stack_size, &tls_begin_,
+ &tls_size);
+ stack_top_ = stack_bottom_ + stack_size;
+ tls_end_ = tls_begin_ + tls_size;
+ dtls_ = DTLS_Get();
+
+ if (stack_top_ != stack_bottom_) {
+ int local;
+ CHECK(AddrIsInStack((uptr)&local));
+ }
+}
+
+#endif // !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
+
+void AsanThread::ClearShadowForThreadStackAndTLS() {
+ if (stack_top_ != stack_bottom_)
+ PoisonShadow(stack_bottom_, stack_top_ - stack_bottom_, 0);
+ if (tls_begin_ != tls_end_) {
+ uptr tls_begin_aligned = RoundDownTo(tls_begin_, SHADOW_GRANULARITY);
+ uptr tls_end_aligned = RoundUpTo(tls_end_, SHADOW_GRANULARITY);
+ FastPoisonShadowPartialRightRedzone(tls_begin_aligned,
+ tls_end_ - tls_begin_aligned,
+ tls_end_aligned - tls_end_, 0);
+ }
+}
+
+bool AsanThread::GetStackFrameAccessByAddr(uptr addr,
+ StackFrameAccess *access) {
+ if (stack_top_ == stack_bottom_)
+ return false;
+
+ uptr bottom = 0;
+ if (AddrIsInStack(addr)) {
+ bottom = stack_bottom();
+ } else if (has_fake_stack()) {
+ bottom = fake_stack()->AddrIsInFakeStack(addr);
+ CHECK(bottom);
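+ // Both real and fake frames begin with a header: word [0] is the frame
+ // magic, word [1] the frame description string, word [2] the frame PC
+ // (cf. the kCurrentStackFrameMagic check for real frames below).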
+ access->offset = addr - bottom;
+ access->frame_pc = ((uptr*)bottom)[2];
+ access->frame_descr = (const char *)((uptr*)bottom)[1];
+ return true;
+ }
+ uptr aligned_addr = RoundDownTo(addr, SANITIZER_WORDSIZE / 8); // align addr.
+ uptr mem_ptr = RoundDownTo(aligned_addr, SHADOW_GRANULARITY);
+ u8 *shadow_ptr = (u8*)MemToShadow(aligned_addr);
+ u8 *shadow_bottom = (u8*)MemToShadow(bottom);
+
+ while (shadow_ptr >= shadow_bottom &&
+ *shadow_ptr != kAsanStackLeftRedzoneMagic) {
+ shadow_ptr--;
+ mem_ptr -= SHADOW_GRANULARITY;
+ }
+
+ while (shadow_ptr >= shadow_bottom &&
+ *shadow_ptr == kAsanStackLeftRedzoneMagic) {
+ shadow_ptr--;
+ mem_ptr -= SHADOW_GRANULARITY;
+ }
+
+ if (shadow_ptr < shadow_bottom) {
+ return false;
+ }
+
+ uptr* ptr = (uptr*)(mem_ptr + SHADOW_GRANULARITY);
+ CHECK(ptr[0] == kCurrentStackFrameMagic);
+ access->offset = addr - (uptr)ptr;
+ access->frame_pc = ptr[2];
+ access->frame_descr = (const char*)ptr[1];
+ return true;
+}
+
+uptr AsanThread::GetStackVariableShadowStart(uptr addr) {
+ uptr bottom = 0;
+ if (AddrIsInStack(addr)) {
+ bottom = stack_bottom();
+ } else if (has_fake_stack()) {
+ bottom = fake_stack()->AddrIsInFakeStack(addr);
+ CHECK(bottom);
+ } else
+ return 0;
+
+ uptr aligned_addr = RoundDownTo(addr, SANITIZER_WORDSIZE / 8); // align addr.
+ u8 *shadow_ptr = (u8*)MemToShadow(aligned_addr);
+ u8 *shadow_bottom = (u8*)MemToShadow(bottom);
+
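+ // Walk the shadow backwards until we hit a redzone byte; the variable's
+ // shadow begins one byte past the last redzone byte found.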
+ while (shadow_ptr >= shadow_bottom &&
+ (*shadow_ptr != kAsanStackLeftRedzoneMagic &&
+ *shadow_ptr != kAsanStackMidRedzoneMagic &&
+ *shadow_ptr != kAsanStackRightRedzoneMagic))
+ shadow_ptr--;
+
+ return (uptr)shadow_ptr + 1;
+}
+
+bool AsanThread::AddrIsInStack(uptr addr) {
+ const auto bounds = GetStackBounds();
+ return addr >= bounds.bottom && addr < bounds.top;
+}
+
+static bool ThreadStackContainsAddress(ThreadContextBase *tctx_base,
+ void *addr) {
+ AsanThreadContext *tctx = static_cast<AsanThreadContext*>(tctx_base);
+ AsanThread *t = tctx->thread;
+ if (!t) return false;
+ if (t->AddrIsInStack((uptr)addr)) return true;
+ if (t->has_fake_stack() && t->fake_stack()->AddrIsInFakeStack((uptr)addr))
+ return true;
+ return false;
+}
+
+AsanThread *GetCurrentThread() {
+ if (SANITIZER_RTEMS && !asan_inited)
+ return nullptr;
+
+ AsanThreadContext *context =
+ reinterpret_cast<AsanThreadContext *>(AsanTSDGet());
+ if (!context) {
+ if (SANITIZER_ANDROID) {
+ // On Android, the libc constructor is called _after_ asan_init and cleans up
+ // TSD. Try to figure out if this is still the main thread by the stack
+ // address. We are not entirely sure that we have correct main thread
+ // limits, so only do this magic on Android, and only if the found thread
+ // is the main thread.
+ AsanThreadContext *tctx = GetThreadContextByTidLocked(0);
+ if (tctx && ThreadStackContainsAddress(tctx, &context)) {
+ SetCurrentThread(tctx->thread);
+ return tctx->thread;
+ }
+ }
+ return nullptr;
+ }
+ return context->thread;
+}
+
+void SetCurrentThread(AsanThread *t) {
+ CHECK(t->context());
+ VReport(2, "SetCurrentThread: %p for thread %p\n", t->context(),
+ (void *)GetThreadSelf());
+ // Make sure we do not reset the current AsanThread.
+ CHECK_EQ(0, AsanTSDGet());
+ AsanTSDSet(t->context());
+ CHECK_EQ(t->context(), AsanTSDGet());
+}
+
+u32 GetCurrentTidOrInvalid() {
+ AsanThread *t = GetCurrentThread();
+ return t ? t->tid() : kInvalidTid;
+}
+
+AsanThread *FindThreadByStackAddress(uptr addr) {
+ asanThreadRegistry().CheckLocked();
+ AsanThreadContext *tctx = static_cast<AsanThreadContext *>(
+ asanThreadRegistry().FindThreadContextLocked(ThreadStackContainsAddress,
+ (void *)addr));
+ return tctx ? tctx->thread : nullptr;
+}
+
+void EnsureMainThreadIDIsCorrect() {
+ AsanThreadContext *context =
+ reinterpret_cast<AsanThreadContext *>(AsanTSDGet());
+ if (context && (context->tid == 0))
+ context->os_id = GetTid();
+}
+
+__asan::AsanThread *GetAsanThreadByOsIDLocked(tid_t os_id) {
+ __asan::AsanThreadContext *context = static_cast<__asan::AsanThreadContext *>(
+ __asan::asanThreadRegistry().FindThreadContextByOsIDLocked(os_id));
+ if (!context) return nullptr;
+ return context->thread;
+}
+} // namespace __asan
+
+// --- Implementation of LSan-specific functions --- {{{1
+namespace __lsan {
+bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
+ uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
+ uptr *cache_end, DTLS **dtls) {
+ __asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id);
+ if (!t) return false;
+ *stack_begin = t->stack_bottom();
+ *stack_end = t->stack_top();
+ *tls_begin = t->tls_begin();
+ *tls_end = t->tls_end();
+ // ASan doesn't keep allocator caches in TLS, so these are unused.
+ *cache_begin = 0;
+ *cache_end = 0;
+ *dtls = t->dtls();
+ return true;
+}
+
+void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback,
+ void *arg) {
+ __asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id);
+ if (t && t->has_fake_stack())
+ t->fake_stack()->ForEachFakeFrame(callback, arg);
+}
+
+void LockThreadRegistry() {
+ __asan::asanThreadRegistry().Lock();
+}
+
+void UnlockThreadRegistry() {
+ __asan::asanThreadRegistry().Unlock();
+}
+
+ThreadRegistry *GetThreadRegistryLocked() {
+ __asan::asanThreadRegistry().CheckLocked();
+ return &__asan::asanThreadRegistry();
+}
+
+void EnsureMainThreadIDIsCorrect() {
+ __asan::EnsureMainThreadIDIsCorrect();
+}
+} // namespace __lsan
+
+// ---------------------- Interface ---------------- {{{1
+using namespace __asan; // NOLINT
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_start_switch_fiber(void **fakestacksave, const void *bottom,
+ uptr size) {
+ AsanThread *t = GetCurrentThread();
+ if (!t) {
+ VReport(1, "__asan_start_switch_fiber called from unknown thread\n");
+ return;
+ }
+ t->StartSwitchFiber((FakeStack**)fakestacksave, (uptr)bottom, size);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_finish_switch_fiber(void* fakestack,
+ const void **bottom_old,
+ uptr *size_old) {
+ AsanThread *t = GetCurrentThread();
+ if (!t) {
+ VReport(1, "__asan_finish_switch_fiber called from unknown thread\n");
+ return;
+ }
+ t->FinishSwitchFiber((FakeStack*)fakestack,
+ (uptr*)bottom_old,
+ (uptr*)size_old);
+}
+}
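+
+// A minimal usage sketch for the fiber API above (hypothetical caller code,
+// not part of this file), assuming a ucontext-based fiber implementation:
+//
+//   void *fake_stack_save = nullptr;
+//   // Announce the fiber's stack before switching to it.
+//   __sanitizer_start_switch_fiber(&fake_stack_save,
+//                                  fiber_stack_bottom, fiber_stack_size);
+//   swapcontext(&main_ctx, &fiber_ctx);
+//   // Back on the original stack: restore the saved fake stack and bounds.
+//   // (The fiber side makes the mirrored start/finish calls when it
+//   // switches back.)
+//   __sanitizer_finish_switch_fiber(fake_stack_save, nullptr, nullptr);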
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_thread.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_thread.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_thread.h (revision 351984)
@@ -0,0 +1,189 @@
+//===-- asan_thread.h -------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// ASan-private header for asan_thread.cc.
+//===----------------------------------------------------------------------===//
+
+#ifndef ASAN_THREAD_H
+#define ASAN_THREAD_H
+
+#include "asan_allocator.h"
+#include "asan_internal.h"
+#include "asan_fake_stack.h"
+#include "asan_stats.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_thread_registry.h"
+
+namespace __sanitizer {
+struct DTLS;
+} // namespace __sanitizer
+
+namespace __asan {
+
+const u32 kInvalidTid = 0xffffff; // Must fit into 24 bits.
+const u32 kMaxNumberOfThreads = (1 << 22); // 4M
+
+class AsanThread;
+
+// These objects are created for every thread and are never deleted,
+// so we can find them by tid even if the thread is long dead.
+class AsanThreadContext : public ThreadContextBase {
+ public:
+ explicit AsanThreadContext(int tid)
+ : ThreadContextBase(tid), announced(false),
+ destructor_iterations(GetPthreadDestructorIterations()), stack_id(0),
+ thread(nullptr) {}
+ bool announced;
+ u8 destructor_iterations;
+ u32 stack_id;
+ AsanThread *thread;
+
+ void OnCreated(void *arg) override;
+ void OnFinished() override;
+
+ struct CreateThreadContextArgs {
+ AsanThread *thread;
+ StackTrace *stack;
+ };
+};
+
+// AsanThreadContext objects are never freed, so we need many of them.
+COMPILER_CHECK(sizeof(AsanThreadContext) <= 256);
+
+// AsanThread objects are stored in TSD and destroyed when the thread dies.
+class AsanThread {
+ public:
+ static AsanThread *Create(thread_callback_t start_routine, void *arg,
+ u32 parent_tid, StackTrace *stack, bool detached);
+ static void TSDDtor(void *tsd);
+ void Destroy();
+
+ struct InitOptions;
+ void Init(const InitOptions *options = nullptr);
+
+ thread_return_t ThreadStart(tid_t os_id,
+ atomic_uintptr_t *signal_thread_is_registered);
+
+ uptr stack_top();
+ uptr stack_bottom();
+ uptr stack_size();
+ uptr tls_begin() { return tls_begin_; }
+ uptr tls_end() { return tls_end_; }
+ DTLS *dtls() { return dtls_; }
+ u32 tid() { return context_->tid; }
+ AsanThreadContext *context() { return context_; }
+ void set_context(AsanThreadContext *context) { context_ = context; }
+
+ struct StackFrameAccess {
+ uptr offset;
+ uptr frame_pc;
+ const char *frame_descr;
+ };
+ bool GetStackFrameAccessByAddr(uptr addr, StackFrameAccess *access);
+
+ // Returns a pointer to the start of the stack variable's shadow memory.
+ uptr GetStackVariableShadowStart(uptr addr);
+
+ bool AddrIsInStack(uptr addr);
+
+ void DeleteFakeStack(int tid) {
+ if (!fake_stack_) return;
+ FakeStack *t = fake_stack_;
+ fake_stack_ = nullptr;
+ SetTLSFakeStack(nullptr);
+ t->Destroy(tid);
+ }
+
+ void StartSwitchFiber(FakeStack **fake_stack_save, uptr bottom, uptr size);
+ void FinishSwitchFiber(FakeStack *fake_stack_save, uptr *bottom_old,
+ uptr *size_old);
+
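+ // fake_stack_ has three states (see AsyncSignalSafeLazyInitFakeStack in
+ // asan_thread.cc): 0 = not initialized, 1 = being initialized, any other
+ // value = a real FakeStack pointer; hence the "> 1" check below.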
+ bool has_fake_stack() {
+ return !atomic_load(&stack_switching_, memory_order_relaxed) &&
+ (reinterpret_cast<uptr>(fake_stack_) > 1);
+ }
+
+ FakeStack *fake_stack() {
+ if (!__asan_option_detect_stack_use_after_return)
+ return nullptr;
+ if (atomic_load(&stack_switching_, memory_order_relaxed))
+ return nullptr;
+ if (!has_fake_stack())
+ return AsyncSignalSafeLazyInitFakeStack();
+ return fake_stack_;
+ }
+
+ // True if this thread is currently unwinding the stack (i.e. collecting a
+ // stack trace). Used to prevent deadlocks on platforms where the libc
+ // unwinder calls malloc internally. See PR17116 for more details.
+ bool isUnwinding() const { return unwinding_; }
+ void setUnwinding(bool b) { unwinding_ = b; }
+
+ AsanThreadLocalMallocStorage &malloc_storage() { return malloc_storage_; }
+ AsanStats &stats() { return stats_; }
+
+ void *extra_spill_area() { return &extra_spill_area_; }
+
+ private:
+ // NOTE: There is no AsanThread constructor. It is allocated
+ // via mmap() and *must* be valid in zero-initialized state.
+
+ void SetThreadStackAndTls(const InitOptions *options);
+
+ void ClearShadowForThreadStackAndTLS();
+ FakeStack *AsyncSignalSafeLazyInitFakeStack();
+
+ struct StackBounds {
+ uptr bottom;
+ uptr top;
+ };
+ StackBounds GetStackBounds() const;
+
+ AsanThreadContext *context_;
+ thread_callback_t start_routine_;
+ void *arg_;
+
+ uptr stack_top_;
+ uptr stack_bottom_;
+ // These variables are used when the thread is about to switch stacks.
+ uptr next_stack_top_;
+ uptr next_stack_bottom_;
+ // True if a stack switch is in progress.
+ atomic_uint8_t stack_switching_;
+
+ uptr tls_begin_;
+ uptr tls_end_;
+ DTLS *dtls_;
+
+ FakeStack *fake_stack_;
+ AsanThreadLocalMallocStorage malloc_storage_;
+ AsanStats stats_;
+ bool unwinding_;
+ uptr extra_spill_area_;
+};
+
+// Returns the single instance of the registry.
+ThreadRegistry &asanThreadRegistry();
+
+// Must be called under ThreadRegistryLock.
+AsanThreadContext *GetThreadContextByTidLocked(u32 tid);
+
+// Get the current thread. May return nullptr.
+AsanThread *GetCurrentThread();
+void SetCurrentThread(AsanThread *t);
+u32 GetCurrentTidOrInvalid();
+AsanThread *FindThreadByStackAddress(uptr addr);
+
+// Used to handle fork().
+void EnsureMainThreadIDIsCorrect();
+} // namespace __asan
+
+#endif // ASAN_THREAD_H
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_win.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_win.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_win.cc (revision 351984)
@@ -0,0 +1,401 @@
+//===-- asan_win.cc -------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Windows-specific details.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#if SANITIZER_WINDOWS
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+
+#include <stdlib.h>
+
+#include "asan_interceptors.h"
+#include "asan_internal.h"
+#include "asan_mapping.h"
+#include "asan_report.h"
+#include "asan_stack.h"
+#include "asan_thread.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_mutex.h"
+#include "sanitizer_common/sanitizer_win.h"
+#include "sanitizer_common/sanitizer_win_defs.h"
+
+using namespace __asan; // NOLINT
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+int __asan_should_detect_stack_use_after_return() {
+ __asan_init();
+ return __asan_option_detect_stack_use_after_return;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __asan_get_shadow_memory_dynamic_address() {
+ __asan_init();
+ return __asan_shadow_memory_dynamic_address;
+}
+} // extern "C"
+
+// ---------------------- Windows-specific interceptors ---------------- {{{
+static LPTOP_LEVEL_EXCEPTION_FILTER default_seh_handler;
+static LPTOP_LEVEL_EXCEPTION_FILTER user_seh_handler;
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+long __asan_unhandled_exception_filter(EXCEPTION_POINTERS *info) {
+ EXCEPTION_RECORD *exception_record = info->ExceptionRecord;
+ CONTEXT *context = info->ContextRecord;
+
+ // FIXME: Handle EXCEPTION_STACK_OVERFLOW here.
+
+ SignalContext sig(exception_record, context);
+ ReportDeadlySignal(sig);
+ UNREACHABLE("returned from reporting deadly signal");
+}
+
+// Wrapper SEH handler. If the exception should be handled by ASan, we call
+// __asan_unhandled_exception_filter; otherwise, we execute the user-provided
+// exception handler or the default one.
+static long WINAPI SEHHandler(EXCEPTION_POINTERS *info) {
+ DWORD exception_code = info->ExceptionRecord->ExceptionCode;
+ if (__sanitizer::IsHandledDeadlyException(exception_code))
+ return __asan_unhandled_exception_filter(info);
+ if (user_seh_handler)
+ return user_seh_handler(info);
+ // Bubble out to the default exception filter.
+ if (default_seh_handler)
+ return default_seh_handler(info);
+ return EXCEPTION_CONTINUE_SEARCH;
+}
+
+INTERCEPTOR_WINAPI(LPTOP_LEVEL_EXCEPTION_FILTER, SetUnhandledExceptionFilter,
+ LPTOP_LEVEL_EXCEPTION_FILTER ExceptionFilter) {
+ CHECK(REAL(SetUnhandledExceptionFilter));
+ if (ExceptionFilter == &SEHHandler)
+ return REAL(SetUnhandledExceptionFilter)(ExceptionFilter);
+ // We record the user-provided exception handler so it can be called for all
+ // exceptions not handled by ASan.
+ Swap(ExceptionFilter, user_seh_handler);
+ return ExceptionFilter;
+}
+
+INTERCEPTOR_WINAPI(void, RtlRaiseException, EXCEPTION_RECORD *ExceptionRecord) {
+ CHECK(REAL(RtlRaiseException));
+ // This is a noreturn function, unless it's one of the exceptions raised to
+ // communicate with the debugger, such as the one from OutputDebugString.
+ if (ExceptionRecord->ExceptionCode != DBG_PRINTEXCEPTION_C)
+ __asan_handle_no_return();
+ REAL(RtlRaiseException)(ExceptionRecord);
+}
+
+INTERCEPTOR_WINAPI(void, RaiseException, void *a, void *b, void *c, void *d) {
+ CHECK(REAL(RaiseException));
+ __asan_handle_no_return();
+ REAL(RaiseException)(a, b, c, d);
+}
+
+#ifdef _WIN64
+
+INTERCEPTOR_WINAPI(EXCEPTION_DISPOSITION, __C_specific_handler,
+ _EXCEPTION_RECORD *a, void *b, _CONTEXT *c,
+ _DISPATCHER_CONTEXT *d) { // NOLINT
+ CHECK(REAL(__C_specific_handler));
+ __asan_handle_no_return();
+ return REAL(__C_specific_handler)(a, b, c, d);
+}
+
+#else
+
+INTERCEPTOR(int, _except_handler3, void *a, void *b, void *c, void *d) {
+ CHECK(REAL(_except_handler3));
+ __asan_handle_no_return();
+ return REAL(_except_handler3)(a, b, c, d);
+}
+
+#if ASAN_DYNAMIC
+// This handler is named differently in -MT and -MD CRTs.
+#define _except_handler4 _except_handler4_common
+#endif
+INTERCEPTOR(int, _except_handler4, void *a, void *b, void *c, void *d) {
+ CHECK(REAL(_except_handler4));
+ __asan_handle_no_return();
+ return REAL(_except_handler4)(a, b, c, d);
+}
+#endif
+
+static thread_return_t THREAD_CALLING_CONV asan_thread_start(void *arg) {
+ AsanThread *t = (AsanThread *)arg;
+ SetCurrentThread(t);
+ return t->ThreadStart(GetTid(), /* signal_thread_is_registered */ nullptr);
+}
+
+INTERCEPTOR_WINAPI(HANDLE, CreateThread, LPSECURITY_ATTRIBUTES security,
+ SIZE_T stack_size, LPTHREAD_START_ROUTINE start_routine,
+ void *arg, DWORD thr_flags, DWORD *tid) {
+ // Strict init-order checking is thread-hostile.
+ if (flags()->strict_init_order)
+ StopInitOrderChecking();
+ GET_STACK_TRACE_THREAD;
+ // FIXME: The CreateThread interceptor is not the same as a pthread_create
+ // one. This is a bandaid fix for PR22025.
+ bool detached = false; // FIXME: how can we determine it on Windows?
+ u32 current_tid = GetCurrentTidOrInvalid();
+ AsanThread *t =
+ AsanThread::Create(start_routine, arg, current_tid, &stack, detached);
+ return REAL(CreateThread)(security, stack_size, asan_thread_start, t,
+ thr_flags, tid);
+}
+
+// }}}
+
+namespace __asan {
+
+void InitializePlatformInterceptors() {
+ // The interceptors were not designed to be removable, so we have to keep this
+ // module alive for the life of the process.
+ HMODULE pinned;
+ CHECK(GetModuleHandleExW(
+ GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS | GET_MODULE_HANDLE_EX_FLAG_PIN,
+ (LPCWSTR)&InitializePlatformInterceptors, &pinned));
+
+ ASAN_INTERCEPT_FUNC(CreateThread);
+ ASAN_INTERCEPT_FUNC(SetUnhandledExceptionFilter);
+
+#ifdef _WIN64
+ ASAN_INTERCEPT_FUNC(__C_specific_handler);
+#else
+ ASAN_INTERCEPT_FUNC(_except_handler3);
+ ASAN_INTERCEPT_FUNC(_except_handler4);
+#endif
+
+ // Try to intercept kernel32!RaiseException, and if that fails, intercept
+ // ntdll!RtlRaiseException instead.
+ if (!::__interception::OverrideFunction("RaiseException",
+ (uptr)WRAP(RaiseException),
+ (uptr *)&REAL(RaiseException))) {
+ CHECK(::__interception::OverrideFunction("RtlRaiseException",
+ (uptr)WRAP(RtlRaiseException),
+ (uptr *)&REAL(RtlRaiseException)));
+ }
+}
+
+void AsanApplyToGlobals(globals_op_fptr op, const void *needle) {
+ UNIMPLEMENTED();
+}
+
+// ---------------------- TSD ---------------- {{{
+static bool tsd_key_inited = false;
+
+static __declspec(thread) void *fake_tsd = 0;
+
+// https://docs.microsoft.com/en-us/windows/desktop/api/winternl/ns-winternl-_teb
+// "[This structure may be altered in future versions of Windows. Applications
+// should use the alternate functions listed in this topic.]"
+typedef struct _TEB {
+ PVOID Reserved1[12];
+ // PVOID ThreadLocalStoragePointer; lives here, as the last field of Reserved1.
+ PVOID ProcessEnvironmentBlock;
+ PVOID Reserved2[399];
+ BYTE Reserved3[1952];
+ PVOID TlsSlots[64];
+ BYTE Reserved4[8];
+ PVOID Reserved5[26];
+ PVOID ReservedForOle;
+ PVOID Reserved6[4];
+ PVOID TlsExpansionSlots;
+} TEB, *PTEB;
+
+constexpr size_t TEB_RESERVED_FIELDS_THREAD_LOCAL_STORAGE_OFFSET = 11;
+BOOL IsTlsInitialized() {
+ PTEB teb = (PTEB)NtCurrentTeb();
+ return teb->Reserved1[TEB_RESERVED_FIELDS_THREAD_LOCAL_STORAGE_OFFSET] !=
+ nullptr;
+}
+
+void AsanTSDInit(void (*destructor)(void *tsd)) {
+ // FIXME: we're ignoring the destructor for now.
+ tsd_key_inited = true;
+}
+
+void *AsanTSDGet() {
+ CHECK(tsd_key_inited);
+ return IsTlsInitialized() ? fake_tsd : nullptr;
+}
+
+void AsanTSDSet(void *tsd) {
+ CHECK(tsd_key_inited);
+ fake_tsd = tsd;
+}
+
+void PlatformTSDDtor(void *tsd) { AsanThread::TSDDtor(tsd); }
+// }}}
+
+// ---------------------- Various stuff ---------------- {{{
+void *AsanDoesNotSupportStaticLinkage() {
+#if defined(_DEBUG)
+#error Please build the runtime with a non-debug CRT: /MD or /MT
+#endif
+ return 0;
+}
+
+uptr FindDynamicShadowStart() {
+ uptr granularity = GetMmapGranularity();
+ uptr alignment = 8 * granularity;
+ uptr left_padding = granularity;
+ uptr space_size = kHighShadowEnd + left_padding;
+ uptr shadow_start = FindAvailableMemoryRange(space_size, alignment,
+ granularity, nullptr, nullptr);
+ CHECK_NE((uptr)0, shadow_start);
+ CHECK(IsAligned(shadow_start, alignment));
+ return shadow_start;
+}
+
+void AsanCheckDynamicRTPrereqs() {}
+
+void AsanCheckIncompatibleRT() {}
+
+void ReadContextStack(void *context, uptr *stack, uptr *ssize) {
+ UNIMPLEMENTED();
+}
+
+void AsanOnDeadlySignal(int, void *siginfo, void *context) { UNIMPLEMENTED(); }
+
+#if SANITIZER_WINDOWS64
+// Exception handler for dealing with shadow memory.
+static LONG CALLBACK
+ShadowExceptionHandler(PEXCEPTION_POINTERS exception_pointers) {
+ uptr page_size = GetPageSizeCached();
+ // Only handle access violations.
+ if (exception_pointers->ExceptionRecord->ExceptionCode !=
+ EXCEPTION_ACCESS_VIOLATION ||
+ exception_pointers->ExceptionRecord->NumberParameters < 2) {
+ __asan_handle_no_return();
+ return EXCEPTION_CONTINUE_SEARCH;
+ }
+
+ // Only handle access violations that land within the shadow memory.
+ uptr addr =
+ (uptr)(exception_pointers->ExceptionRecord->ExceptionInformation[1]);
+
+ // Check valid shadow range.
+ if (!AddrIsInShadow(addr)) {
+ __asan_handle_no_return();
+ return EXCEPTION_CONTINUE_SEARCH;
+ }
+
+ // This is an access violation while trying to read from the shadow. Commit
+ // the relevant page and let execution continue.
+
+ // Determine the address of the page that is being accessed.
+ uptr page = RoundDownTo(addr, page_size);
+
+ // Commit the page.
+ uptr result =
+ (uptr)::VirtualAlloc((LPVOID)page, page_size, MEM_COMMIT, PAGE_READWRITE);
+ if (result != page)
+ return EXCEPTION_CONTINUE_SEARCH;
+
+ // The page mapping succeeded, so continue execution as usual.
+ return EXCEPTION_CONTINUE_EXECUTION;
+}
+
+#endif
+
+void InitializePlatformExceptionHandlers() {
+#if SANITIZER_WINDOWS64
+ // On Win64, we map shadow memory on demand via an access violation handler.
+ // Install our exception handler.
+ CHECK(AddVectoredExceptionHandler(TRUE, &ShadowExceptionHandler));
+#endif
+}
+
+bool IsSystemHeapAddress(uptr addr) {
+ return ::HeapValidate(GetProcessHeap(), 0, (void *)addr) != FALSE;
+}
+
+// We want to install our own exception handler (EH) to print helpful reports
+// on access violations and whatnot. Unfortunately, the CRT initializers assume
+// they are run before any user code and drop any previously-installed EHs on
+// the floor, so we can't install our handler inside __asan_init.
+// (See crt0dat.c in the CRT sources for the details)
+//
+// Things get even more complicated with the dynamic runtime, as it finishes its
+// initialization before the .exe module CRT begins to initialize.
+//
+// For the static runtime (-MT), it's enough to put a callback to
+// __asan_set_seh_filter in the last section for C initializers.
+//
+// For the dynamic runtime (-MD), we want to link the same
+// asan_dynamic_runtime_thunk.lib into all the modules, so __asan_set_seh_filter
+// will be called for each instrumented module. This ensures that at least one
+// __asan_set_seh_filter call happens after the .exe module CRT is initialized.
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE int __asan_set_seh_filter() {
+ // We should only store the previous handler if it's not our own handler in
+ // order to avoid loops in the EH chain.
+ auto prev_seh_handler = SetUnhandledExceptionFilter(SEHHandler);
+ if (prev_seh_handler != &SEHHandler)
+ default_seh_handler = prev_seh_handler;
+ return 0;
+}
+
+bool HandleDlopenInit() {
+ // Not supported on this platform.
+ static_assert(!SANITIZER_SUPPORTS_INIT_FOR_DLOPEN,
+ "Expected SANITIZER_SUPPORTS_INIT_FOR_DLOPEN to be false");
+ return false;
+}
+
+#if !ASAN_DYNAMIC
+// The CRT runs initializers in this order:
+// - C initializers, from XIA to XIZ
+// - C++ initializers, from XCA to XCZ
+// Prior to 2015, the CRT set the unhandled exception filter at priority XIY,
+// near the end of C initialization. Starting in 2015, it was moved to the
+// beginning of C++ initialization. We set our priority to XCAB to run
+// immediately after the CRT runs. This way, our exception filter is called
+// first and we can delegate to their filter if appropriate.
+#pragma section(".CRT$XCAB", long, read) // NOLINT
+__declspec(allocate(".CRT$XCAB")) int (*__intercept_seh)() =
+ __asan_set_seh_filter;
+
+// Piggyback on the TLS initialization callback directory to initialize asan as
+// early as possible. Initializers in .CRT$XL* are called directly by ntdll
+// and run before the CRT. Users also add code to .CRT$XLC, so it's important
+// to run our initializers first.
+static void NTAPI asan_thread_init(void *module, DWORD reason, void *reserved) {
+ if (reason == DLL_PROCESS_ATTACH)
+ __asan_init();
+}
+
+#pragma section(".CRT$XLAB", long, read) // NOLINT
+__declspec(allocate(".CRT$XLAB")) void(NTAPI *__asan_tls_init)(
+ void *, unsigned long, void *) = asan_thread_init;
+#endif
+
+static void NTAPI asan_thread_exit(void *module, DWORD reason, void *reserved) {
+ if (reason == DLL_THREAD_DETACH) {
+ // Unpoison the thread's stack because the memory may be re-used.
+ NT_TIB *tib = (NT_TIB *)NtCurrentTeb();
+ uptr stackSize = (uptr)tib->StackBase - (uptr)tib->StackLimit;
+ __asan_unpoison_memory_region(tib->StackLimit, stackSize);
+ }
+}
+
+#pragma section(".CRT$XLY", long, read) // NOLINT
+__declspec(allocate(".CRT$XLY")) void(NTAPI *__asan_tls_exit)(
+ void *, unsigned long, void *) = asan_thread_exit;
+
+WIN_FORCE_LINK(__asan_dso_reg_hook)
+
+// }}}
+} // namespace __asan
+
+#endif // SANITIZER_WINDOWS
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_win_dll_thunk.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_win_dll_thunk.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_win_dll_thunk.cc (revision 351984)
@@ -0,0 +1,152 @@
+//===-- asan_win_dll_thunk.cc ---------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// This file defines a family of thunks that should be statically linked into
+// the DLLs that have ASan instrumentation in order to delegate the calls to the
+// shared runtime that lives in the main binary.
+// See https://github.com/google/sanitizers/issues/209 for the details.
+//===----------------------------------------------------------------------===//
+
+#ifdef SANITIZER_DLL_THUNK
+#include "asan_init_version.h"
+#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_win_defs.h"
+#include "sanitizer_common/sanitizer_win_dll_thunk.h"
+#include "sanitizer_common/sanitizer_platform_interceptors.h"
+
+// ASan's own interface functions.
+#define INTERFACE_FUNCTION(Name) INTERCEPT_SANITIZER_FUNCTION(Name)
+#define INTERFACE_WEAK_FUNCTION(Name) INTERCEPT_SANITIZER_WEAK_FUNCTION(Name)
+#include "asan_interface.inc"
+
+// Memory allocation functions.
+INTERCEPT_WRAP_V_W(free)
+INTERCEPT_WRAP_V_W(_free_base)
+INTERCEPT_WRAP_V_WW(_free_dbg)
+
+INTERCEPT_WRAP_W_W(malloc)
+INTERCEPT_WRAP_W_W(_malloc_base)
+INTERCEPT_WRAP_W_WWWW(_malloc_dbg)
+
+INTERCEPT_WRAP_W_WW(calloc)
+INTERCEPT_WRAP_W_WW(_calloc_base)
+INTERCEPT_WRAP_W_WWWWW(_calloc_dbg)
+INTERCEPT_WRAP_W_WWW(_calloc_impl)
+
+INTERCEPT_WRAP_W_WW(realloc)
+INTERCEPT_WRAP_W_WW(_realloc_base)
+INTERCEPT_WRAP_W_WWW(_realloc_dbg)
+INTERCEPT_WRAP_W_WWW(_recalloc)
+INTERCEPT_WRAP_W_WWW(_recalloc_base)
+
+INTERCEPT_WRAP_W_W(_msize)
+INTERCEPT_WRAP_W_W(_msize_base)
+INTERCEPT_WRAP_W_W(_expand)
+INTERCEPT_WRAP_W_W(_expand_dbg)
+
+// TODO(timurrrr): Might want to add support for _aligned_* allocation
+// functions to detect a bit more bugs. Those functions seem to wrap malloc().
+
+// TODO(timurrrr): Do we need to add _Crt* stuff here? (see asan_malloc_win.cc).
+
+INTERCEPT_LIBRARY_FUNCTION(atoi);
+INTERCEPT_LIBRARY_FUNCTION(atol);
+INTERCEPT_LIBRARY_FUNCTION(frexp);
+INTERCEPT_LIBRARY_FUNCTION(longjmp);
+#if SANITIZER_INTERCEPT_MEMCHR
+INTERCEPT_LIBRARY_FUNCTION(memchr);
+#endif
+INTERCEPT_LIBRARY_FUNCTION(memcmp);
+INTERCEPT_LIBRARY_FUNCTION(memcpy);
+INTERCEPT_LIBRARY_FUNCTION(memmove);
+INTERCEPT_LIBRARY_FUNCTION(memset);
+INTERCEPT_LIBRARY_FUNCTION(strcat); // NOLINT
+INTERCEPT_LIBRARY_FUNCTION(strchr);
+INTERCEPT_LIBRARY_FUNCTION(strcmp);
+INTERCEPT_LIBRARY_FUNCTION(strcpy); // NOLINT
+INTERCEPT_LIBRARY_FUNCTION(strcspn);
+INTERCEPT_LIBRARY_FUNCTION(strdup);
+INTERCEPT_LIBRARY_FUNCTION(strlen);
+INTERCEPT_LIBRARY_FUNCTION(strncat);
+INTERCEPT_LIBRARY_FUNCTION(strncmp);
+INTERCEPT_LIBRARY_FUNCTION(strncpy);
+INTERCEPT_LIBRARY_FUNCTION(strnlen);
+INTERCEPT_LIBRARY_FUNCTION(strpbrk);
+INTERCEPT_LIBRARY_FUNCTION(strrchr);
+INTERCEPT_LIBRARY_FUNCTION(strspn);
+INTERCEPT_LIBRARY_FUNCTION(strstr);
+INTERCEPT_LIBRARY_FUNCTION(strtok);
+INTERCEPT_LIBRARY_FUNCTION(strtol);
+INTERCEPT_LIBRARY_FUNCTION(wcslen);
+INTERCEPT_LIBRARY_FUNCTION(wcsnlen);
+
+#ifdef _WIN64
+INTERCEPT_LIBRARY_FUNCTION(__C_specific_handler);
+#else
+INTERCEPT_LIBRARY_FUNCTION(_except_handler3);
+// _except_handler4 checks -GS cookie which is different for each module, so we
+// can't use INTERCEPT_LIBRARY_FUNCTION(_except_handler4).
+INTERCEPTOR(int, _except_handler4, void *a, void *b, void *c, void *d) {
+ __asan_handle_no_return();
+ return REAL(_except_handler4)(a, b, c, d);
+}
+#endif
+
+// Windows specific functions not included in asan_interface.inc.
+INTERCEPT_WRAP_W_V(__asan_should_detect_stack_use_after_return)
+INTERCEPT_WRAP_W_V(__asan_get_shadow_memory_dynamic_address)
+INTERCEPT_WRAP_W_W(__asan_unhandled_exception_filter)
+
+using namespace __sanitizer;
+
+extern "C" {
+int __asan_option_detect_stack_use_after_return;
+uptr __asan_shadow_memory_dynamic_address;
+} // extern "C"
+
+static int asan_dll_thunk_init() {
+ typedef void (*fntype)();
+ static fntype fn = 0;
+ // asan_dll_thunk_init is expected to be called by only one thread.
+ if (fn) return 0;
+
+ // Ensure all interception was executed.
+ __dll_thunk_init();
+
+ fn = (fntype) dllThunkGetRealAddrOrDie("__asan_init");
+ fn();
+ __asan_option_detect_stack_use_after_return =
+ (__asan_should_detect_stack_use_after_return() != 0);
+ __asan_shadow_memory_dynamic_address =
+ (uptr)__asan_get_shadow_memory_dynamic_address();
+
+#ifndef _WIN64
+ INTERCEPT_FUNCTION(_except_handler4);
+#endif
+ // In DLLs, the callbacks are expected to return 0,
+ // otherwise CRT initialization fails.
+ return 0;
+}
+
+#pragma section(".CRT$XIB", long, read) // NOLINT
+__declspec(allocate(".CRT$XIB")) int (*__asan_preinit)() = asan_dll_thunk_init;
+
+static void WINAPI asan_thread_init(void *mod, unsigned long reason,
+ void *reserved) {
+ if (reason == /*DLL_PROCESS_ATTACH=*/1) asan_dll_thunk_init();
+}
+
+#pragma section(".CRT$XLAB", long, read) // NOLINT
+__declspec(allocate(".CRT$XLAB")) void (WINAPI *__asan_tls_init)(void *,
+ unsigned long, void *) = asan_thread_init;
+
+WIN_FORCE_LINK(__asan_dso_reg_hook)
+
+#endif // SANITIZER_DLL_THUNK
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_win_dll_thunk.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_win_dynamic_runtime_thunk.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_win_dynamic_runtime_thunk.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_win_dynamic_runtime_thunk.cc (revision 351984)
@@ -0,0 +1,130 @@
+//===-- asan_win_dynamic_runtime_thunk.cc ---------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// This file defines things that need to be present in the application modules
+// to interact with the ASan DLL runtime correctly and can't be implemented
+// using the default "import library" generated when linking the DLL RTL.
+//
+// This includes:
+// - creating weak aliases to default implementation imported from asan dll.
+// - forwarding the detect_stack_use_after_return runtime option
+// - working around deficiencies of the MD runtime
+// - installing a custom SEH handler
+//
+//===----------------------------------------------------------------------===//
+
+#ifdef SANITIZER_DYNAMIC_RUNTIME_THUNK
+#define SANITIZER_IMPORT_INTERFACE 1
+#include "sanitizer_common/sanitizer_win_defs.h"
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+
+// Define weak alias for all weak functions imported from asan dll.
+#define INTERFACE_FUNCTION(Name)
+#define INTERFACE_WEAK_FUNCTION(Name) WIN_WEAK_IMPORT_DEF(Name)
+#include "asan_interface.inc"
+
+// First, declare CRT sections we'll be using in this file
+#pragma section(".CRT$XIB", long, read) // NOLINT
+#pragma section(".CRT$XID", long, read) // NOLINT
+#pragma section(".CRT$XCAB", long, read) // NOLINT
+#pragma section(".CRT$XTW", long, read) // NOLINT
+#pragma section(".CRT$XTY", long, read) // NOLINT
+#pragma section(".CRT$XLAB", long, read) // NOLINT
+
+////////////////////////////////////////////////////////////////////////////////
+// Define a copy of __asan_option_detect_stack_use_after_return that should be
+// used when linking an MD runtime with a set of object files on Windows.
+//
+// The ASan MD runtime dllexports '__asan_option_detect_stack_use_after_return',
+// so normally we would just dllimport it. Unfortunately, the dllimport
+// attribute adds an __imp_ prefix to the symbol name of a variable.
+// Since in general we don't know if a given TU is going to be used
+// with an MT or MD runtime, and we don't want to use ugly __imp_ names on
+// Windows just to work around this issue, let's clone the variable, which is
+// constant after initialization anyway.
+extern "C" {
+__declspec(dllimport) int __asan_should_detect_stack_use_after_return();
+int __asan_option_detect_stack_use_after_return;
+
+__declspec(dllimport) void* __asan_get_shadow_memory_dynamic_address();
+void* __asan_shadow_memory_dynamic_address;
+}
+
+static int InitializeClonedVariables() {
+ __asan_option_detect_stack_use_after_return =
+ __asan_should_detect_stack_use_after_return();
+ __asan_shadow_memory_dynamic_address =
+ __asan_get_shadow_memory_dynamic_address();
+ return 0;
+}
+
+static void NTAPI asan_thread_init(void *mod, unsigned long reason,
+ void *reserved) {
+ if (reason == DLL_PROCESS_ATTACH) InitializeClonedVariables();
+}
+
+// Our cloned variables must be initialized before C/C++ constructors. If TLS
+// is used, our .CRT$XLAB initializer will run first. If not, our .CRT$XIB
+// initializer is needed as a backup.
+__declspec(allocate(".CRT$XIB")) int (*__asan_initialize_cloned_variables)() =
+ InitializeClonedVariables;
+__declspec(allocate(".CRT$XLAB")) void (NTAPI *__asan_tls_init)(void *,
+ unsigned long, void *) = asan_thread_init;
+
+////////////////////////////////////////////////////////////////////////////////
+// For some reason, the MD CRT doesn't call the C/C++ terminators on DLL
+// unload or on exit. ASan relies on LLVM global_dtors to call
+// __asan_unregister_globals on these events, which unfortunately doesn't work
+// with the MD runtime, see PR22545 for the details.
+// To work around this, for each DLL we use atexit() to schedule a call to
+// UnregisterGlobals, which runs the small range of C terminators
+// where LLVM global_dtors is placed. Fingers crossed, no other C terminators
+// are there.
+extern "C" int __cdecl atexit(void (__cdecl *f)(void));
+extern "C" void __cdecl _initterm(void *a, void *b);
+
+namespace {
+__declspec(allocate(".CRT$XTW")) void* before_global_dtors = 0;
+__declspec(allocate(".CRT$XTY")) void* after_global_dtors = 0;
+
+void UnregisterGlobals() {
+ _initterm(&before_global_dtors, &after_global_dtors);
+}
+
+int ScheduleUnregisterGlobals() {
+ return atexit(UnregisterGlobals);
+}
+} // namespace
+
+// We need to call 'atexit(UnregisterGlobals);' as early as possible, but after
+// atexit() is initialized (.CRT$XIC). As this is executed before C++
+// initializers (think ctors for globals), UnregisterGlobals gets executed after
+// dtors for C++ globals.
+__declspec(allocate(".CRT$XID"))
+int (*__asan_schedule_unregister_globals)() = ScheduleUnregisterGlobals;
+
+////////////////////////////////////////////////////////////////////////////////
+// ASan SEH handling.
+// We need to set the ASan-specific SEH handler at the end of CRT initialization
+// of each module (see also asan_win.cc).
+extern "C" {
+__declspec(dllimport) int __asan_set_seh_filter();
+static int SetSEHFilter() { return __asan_set_seh_filter(); }
+
+// Unfortunately, putting a pointer to __asan_set_seh_filter into
+// __asan_intercept_seh gets optimized out, so we have to use an extra function.
+__declspec(allocate(".CRT$XCAB")) int (*__asan_seh_interceptor)() =
+ SetSEHFilter;
+}
+
+WIN_FORCE_LINK(__asan_dso_reg_hook)
+
+#endif // SANITIZER_DYNAMIC_RUNTIME_THUNK
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_win_dynamic_runtime_thunk.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_win_weak_interception.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_win_weak_interception.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_win_weak_interception.cc (revision 351984)
@@ -0,0 +1,22 @@
+//===-- asan_win_weak_interception.cc -------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// This module should be included in AddressSanitizer when it is implemented as
+// a shared library on Windows (DLL), in order to delegate calls to weak
+// functions to the implementations in the main executable when strong
+// definitions are provided.
+//===----------------------------------------------------------------------===//
+#ifdef SANITIZER_DYNAMIC
+#include "sanitizer_common/sanitizer_win_weak_interception.h"
+#include "asan_interface_internal.h"
+// Check if strong definitions for weak functions are present in the main
+// executable. If that is the case, override dll functions to point to strong
+// implementations.
+#define INTERFACE_FUNCTION(Name)
+#define INTERFACE_WEAK_FUNCTION(Name) INTERCEPT_SANITIZER_WEAK_FUNCTION(Name)
+#include "asan_interface.inc"
+#endif // SANITIZER_DYNAMIC
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_win_weak_interception.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/weak_symbols.txt
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/weak_symbols.txt (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/weak_symbols.txt (revision 351984)
@@ -0,0 +1,12 @@
+___asan_default_options
+___asan_default_suppressions
+___asan_on_error
+___asan_set_shadow_00
+___asan_set_shadow_f1
+___asan_set_shadow_f2
+___asan_set_shadow_f3
+___asan_set_shadow_f4
+___asan_set_shadow_f5
+___asan_set_shadow_f6
+___asan_set_shadow_f7
+___asan_set_shadow_f8
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/weak_symbols.txt
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan.syms.extra
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan.syms.extra (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan.syms.extra (revision 351984)
@@ -0,0 +1,4 @@
+__asan_*
+__lsan_*
+__ubsan_*
+__sancov_*
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/README.txt
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/README.txt (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/README.txt (revision 351984)
@@ -0,0 +1,26 @@
+AddressSanitizer RT
+================================
+This directory contains sources of the AddressSanitizer (ASan) runtime library.
+
+Directory structure:
+README.txt : This file.
+Makefile.mk : Legacy file for the make-based build (CMake is the supported build).
+CMakeLists.txt : File for cmake-based build.
+asan_*.{cc,h} : Sources of the asan runtime library.
+scripts/* : Helper scripts.
+tests/* : ASan unit tests.
+
+The ASan runtime also needs the following libraries:
+lib/interception/ : Machinery used to intercept function calls.
+lib/sanitizer_common/ : Code shared between various sanitizers.
+
+The ASan runtime currently also embeds part of the LeakSanitizer runtime for
+leak detection (lib/lsan/lsan_common.{cc,h}).
+
+The ASan runtime can only be built with CMake. You can run the ASan tests
+from the root of your CMake build tree:
+
+make check-asan
+
+For more instructions see:
+https://github.com/google/sanitizers/wiki/AddressSanitizerHowToBuild
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_blacklist.txt
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_blacklist.txt (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_blacklist.txt (revision 351984)
@@ -0,0 +1,13 @@
+# Blacklist for AddressSanitizer. Turns off instrumentation of particular
+# functions or sources. Use with care. You may set the location of the
+# blacklist at compile time using the -fsanitize-blacklist=<path> flag.
+
+# Example usage:
+# fun:*bad_function_name*
+# src:file_with_tricky_code.cc
+# global:*global_with_bad_access_or_initialization*
+# global:*global_with_initialization_issues*=init
+# type:*Namespace::ClassName*=init
+
+# Stack buffer overflow in VC/INCLUDE/xlocnum, see http://goo.gl/L4qqUG
+fun:*_Find_elem@*@std*
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/asan/asan_lock.h
===================================================================
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan.cc (revision 351984)
@@ -0,0 +1,675 @@
+//===-- msan.cc -----------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of MemorySanitizer.
+//
+// MemorySanitizer runtime.
+//===----------------------------------------------------------------------===//
+
+#include "msan.h"
+#include "msan_chained_origin_depot.h"
+#include "msan_origin.h"
+#include "msan_report.h"
+#include "msan_thread.h"
+#include "msan_poisoning.h"
+#include "sanitizer_common/sanitizer_atomic.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_flag_parser.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_procmaps.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "sanitizer_common/sanitizer_symbolizer.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "ubsan/ubsan_flags.h"
+#include "ubsan/ubsan_init.h"
+
+// ATTENTION! No system header includes in this file.
+
+using namespace __sanitizer;
+
+// Globals.
+static THREADLOCAL int msan_expect_umr = 0;
+static THREADLOCAL int msan_expected_umr_found = 0;
+
+// Function argument shadow. Each argument starts at the next available 8-byte
+// aligned address.
+SANITIZER_INTERFACE_ATTRIBUTE
+THREADLOCAL u64 __msan_param_tls[kMsanParamTlsSize / sizeof(u64)];
+
+// Function argument origin. Each argument starts at the same offset as the
+// corresponding shadow in (__msan_param_tls). Slightly weird, but changing this
+// would break compatibility with older prebuilt binaries.
+SANITIZER_INTERFACE_ATTRIBUTE
+THREADLOCAL u32 __msan_param_origin_tls[kMsanParamTlsSize / sizeof(u32)];
+
+SANITIZER_INTERFACE_ATTRIBUTE
+THREADLOCAL u64 __msan_retval_tls[kMsanRetvalTlsSize / sizeof(u64)];
+
+SANITIZER_INTERFACE_ATTRIBUTE
+THREADLOCAL u32 __msan_retval_origin_tls;
+
+SANITIZER_INTERFACE_ATTRIBUTE
+ALIGNED(16) THREADLOCAL u64 __msan_va_arg_tls[kMsanParamTlsSize / sizeof(u64)];
+
+SANITIZER_INTERFACE_ATTRIBUTE
+ALIGNED(16)
+THREADLOCAL u32 __msan_va_arg_origin_tls[kMsanParamTlsSize / sizeof(u32)];
+
+SANITIZER_INTERFACE_ATTRIBUTE
+THREADLOCAL u64 __msan_va_arg_overflow_size_tls;
+
+SANITIZER_INTERFACE_ATTRIBUTE
+THREADLOCAL u32 __msan_origin_tls;
+
+static THREADLOCAL int is_in_symbolizer;
+
+extern "C" SANITIZER_WEAK_ATTRIBUTE const int __msan_track_origins;
+
+int __msan_get_track_origins() {
+ return &__msan_track_origins ? __msan_track_origins : 0;
+}
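+// Note: __msan_track_origins is a weak symbol emitted by the MSan
+// instrumentation, so taking its address tests whether the binary was built
+// with origin tracking. A hedged usage sketch (illustrative only):
+//   if (__msan_get_track_origins())  // 0 = off, 1 = basic, 2 = chained
+//     /* origin tracking is available */;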
+
+extern "C" SANITIZER_WEAK_ATTRIBUTE const int __msan_keep_going;
+
+namespace __msan {
+
+void EnterSymbolizer() { ++is_in_symbolizer; }
+void ExitSymbolizer() { --is_in_symbolizer; }
+bool IsInSymbolizer() { return is_in_symbolizer; }
+
+static Flags msan_flags;
+
+Flags *flags() {
+ return &msan_flags;
+}
+
+int msan_inited = 0;
+bool msan_init_is_running;
+
+int msan_report_count = 0;
+
+// Array of stack origins.
+// FIXME: make it resizable.
+static const uptr kNumStackOriginDescrs = 1024 * 1024;
+static const char *StackOriginDescr[kNumStackOriginDescrs];
+static uptr StackOriginPC[kNumStackOriginDescrs];
+static atomic_uint32_t NumStackOriginDescrs;
+
+void Flags::SetDefaults() {
+#define MSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
+#include "msan_flags.inc"
+#undef MSAN_FLAG
+}
+
+// keep_going is an old name for halt_on_error,
+// and it has the inverse meaning.
+class FlagHandlerKeepGoing : public FlagHandlerBase {
+ bool *halt_on_error_;
+
+ public:
+ explicit FlagHandlerKeepGoing(bool *halt_on_error)
+ : halt_on_error_(halt_on_error) {}
+ bool Parse(const char *value) final {
+ bool tmp;
+ FlagHandler<bool> h(&tmp);
+ if (!h.Parse(value)) return false;
+ *halt_on_error_ = !tmp;
+ return true;
+ }
+};
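+// So, for example (illustrative), MSAN_OPTIONS=keep_going=1 is parsed as
+// halt_on_error=0, and MSAN_OPTIONS=keep_going=0 as halt_on_error=1.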
+
+static void RegisterMsanFlags(FlagParser *parser, Flags *f) {
+#define MSAN_FLAG(Type, Name, DefaultValue, Description) \
+ RegisterFlag(parser, #Name, Description, &f->Name);
+#include "msan_flags.inc"
+#undef MSAN_FLAG
+
+ FlagHandlerKeepGoing *fh_keep_going = new (FlagParser::Alloc) // NOLINT
+ FlagHandlerKeepGoing(&f->halt_on_error);
+ parser->RegisterHandler("keep_going", fh_keep_going,
+ "deprecated, use halt_on_error");
+}
+
+static void InitializeFlags() {
+ SetCommonFlagsDefaults();
+ {
+ CommonFlags cf;
+ cf.CopyFrom(*common_flags());
+ cf.external_symbolizer_path = GetEnv("MSAN_SYMBOLIZER_PATH");
+ cf.malloc_context_size = 20;
+ cf.handle_ioctl = true;
+ // FIXME: test and enable.
+ cf.check_printf = false;
+ cf.intercept_tls_get_addr = true;
+ cf.exitcode = 77;
+ OverrideCommonFlags(cf);
+ }
+
+ Flags *f = flags();
+ f->SetDefaults();
+
+ FlagParser parser;
+ RegisterMsanFlags(&parser, f);
+ RegisterCommonFlags(&parser);
+
+#if MSAN_CONTAINS_UBSAN
+ __ubsan::Flags *uf = __ubsan::flags();
+ uf->SetDefaults();
+
+ FlagParser ubsan_parser;
+ __ubsan::RegisterUbsanFlags(&ubsan_parser, uf);
+ RegisterCommonFlags(&ubsan_parser);
+#endif
+
+ // Override from user-specified string.
+ if (__msan_default_options)
+ parser.ParseString(__msan_default_options());
+#if MSAN_CONTAINS_UBSAN
+ const char *ubsan_default_options = __ubsan::MaybeCallUbsanDefaultOptions();
+ ubsan_parser.ParseString(ubsan_default_options);
+#endif
+
+ parser.ParseStringFromEnv("MSAN_OPTIONS");
+#if MSAN_CONTAINS_UBSAN
+ ubsan_parser.ParseStringFromEnv("UBSAN_OPTIONS");
+#endif
+
+ InitializeCommonFlags();
+
+ if (Verbosity()) ReportUnrecognizedFlags();
+
+ if (common_flags()->help) parser.PrintFlagDescriptions();
+
+ // Check if deprecated exit_code MSan flag is set.
+ if (f->exit_code != -1) {
+ if (Verbosity())
+ Printf("MSAN_OPTIONS=exit_code is deprecated! "
+ "Please use MSAN_OPTIONS=exitcode instead.\n");
+ CommonFlags cf;
+ cf.CopyFrom(*common_flags());
+ cf.exitcode = f->exit_code;
+ OverrideCommonFlags(cf);
+ }
+
+ // Check flag values:
+ if (f->origin_history_size < 0 ||
+ f->origin_history_size > Origin::kMaxDepth) {
+ Printf(
+ "Origin history size invalid: %d. Must be 0 (unlimited) or in [1, %d] "
+ "range.\n",
+ f->origin_history_size, Origin::kMaxDepth);
+ Die();
+ }
+ // Limiting to kStackDepotMaxUseCount / 2 to avoid overflow in
+ // StackDepotHandle::inc_use_count_unsafe.
+ if (f->origin_history_per_stack_limit < 0 ||
+ f->origin_history_per_stack_limit > kStackDepotMaxUseCount / 2) {
+ Printf(
+ "Origin per-stack limit invalid: %d. Must be 0 (unlimited) or in [1, "
+ "%d] range.\n",
+ f->origin_history_per_stack_limit, kStackDepotMaxUseCount / 2);
+ Die();
+ }
+ if (f->store_context_size < 1) f->store_context_size = 1;
+}
+
+void PrintWarning(uptr pc, uptr bp) {
+ PrintWarningWithOrigin(pc, bp, __msan_origin_tls);
+}
+
+void PrintWarningWithOrigin(uptr pc, uptr bp, u32 origin) {
+ if (msan_expect_umr) {
+ // Printf("Expected UMR\n");
+ __msan_origin_tls = origin;
+ msan_expected_umr_found = 1;
+ return;
+ }
+
+ ++msan_report_count;
+
+ GET_FATAL_STACK_TRACE_PC_BP(pc, bp);
+
+ u32 report_origin =
+ (__msan_get_track_origins() && Origin::isValidId(origin)) ? origin : 0;
+ ReportUMR(&stack, report_origin);
+
+ if (__msan_get_track_origins() && !Origin::isValidId(origin)) {
+ Printf(
+ " ORIGIN: invalid (%x). Might be a bug in MemorySanitizer origin "
+ "tracking.\n This could still be a bug in your code, too!\n",
+ origin);
+ }
+}
+
+void UnpoisonParam(uptr n) {
+ internal_memset(__msan_param_tls, 0, n * sizeof(*__msan_param_tls));
+}
+
+// Backup MSan runtime TLS state.
+// Implementation must be async-signal-safe.
+// Instances of this class may live on the signal handler stack, and data size
+// may be an issue.
+void ScopedThreadLocalStateBackup::Backup() {
+ va_arg_overflow_size_tls = __msan_va_arg_overflow_size_tls;
+}
+
+void ScopedThreadLocalStateBackup::Restore() {
+ // A lame implementation that only keeps essential state and resets the rest.
+ __msan_va_arg_overflow_size_tls = va_arg_overflow_size_tls;
+
+ internal_memset(__msan_param_tls, 0, sizeof(__msan_param_tls));
+ internal_memset(__msan_retval_tls, 0, sizeof(__msan_retval_tls));
+ internal_memset(__msan_va_arg_tls, 0, sizeof(__msan_va_arg_tls));
+ internal_memset(__msan_va_arg_origin_tls, 0,
+ sizeof(__msan_va_arg_origin_tls));
+
+ if (__msan_get_track_origins()) {
+ internal_memset(&__msan_retval_origin_tls, 0,
+ sizeof(__msan_retval_origin_tls));
+ internal_memset(__msan_param_origin_tls, 0,
+ sizeof(__msan_param_origin_tls));
+ }
+}
+
+void UnpoisonThreadLocalState() {
+}
+
+const char *GetStackOriginDescr(u32 id, uptr *pc) {
+ CHECK_LT(id, kNumStackOriginDescrs);
+ if (pc) *pc = StackOriginPC[id];
+ return StackOriginDescr[id];
+}
+
+u32 ChainOrigin(u32 id, StackTrace *stack) {
+ MsanThread *t = GetCurrentThread();
+ if (t && t->InSignalHandler())
+ return id;
+
+ Origin o = Origin::FromRawId(id);
+ stack->tag = StackTrace::TAG_UNKNOWN;
+ Origin chained = Origin::CreateChainedOrigin(o, stack);
+ return chained.raw_id();
+}
+
+} // namespace __msan
+
+void __sanitizer::BufferedStackTrace::UnwindImpl(
+ uptr pc, uptr bp, void *context, bool request_fast, u32 max_depth) {
+ using namespace __msan;
+ MsanThread *t = GetCurrentThread();
+ if (!t || !StackTrace::WillUseFastUnwind(request_fast)) {
+ // Block reports from our interceptors during _Unwind_Backtrace.
+ SymbolizerScope sym_scope;
+ return Unwind(max_depth, pc, bp, context, 0, 0, false);
+ }
+ if (StackTrace::WillUseFastUnwind(request_fast))
+ Unwind(max_depth, pc, bp, nullptr, t->stack_top(), t->stack_bottom(), true);
+ else
+ Unwind(max_depth, pc, 0, context, 0, 0, false);
+}
+
+// Interface.
+
+using namespace __msan;
+
+#define MSAN_MAYBE_WARNING(type, size) \
+ void __msan_maybe_warning_##size(type s, u32 o) { \
+ GET_CALLER_PC_BP_SP; \
+ (void) sp; \
+ if (UNLIKELY(s)) { \
+ PrintWarningWithOrigin(pc, bp, o); \
+ if (__msan::flags()->halt_on_error) { \
+ Printf("Exiting\n"); \
+ Die(); \
+ } \
+ } \
+ }
+
+MSAN_MAYBE_WARNING(u8, 1)
+MSAN_MAYBE_WARNING(u16, 2)
+MSAN_MAYBE_WARNING(u32, 4)
+MSAN_MAYBE_WARNING(u64, 8)
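+// The instrumentation calls these helpers on a use of a possibly
+// uninitialized value, passing that value's shadow and origin; roughly
+// (an illustrative sketch, not the exact lowering):
+//   u32 shadow = ...;  // shadow of the 4-byte value being used
+//   __msan_maybe_warning_4(shadow, origin);  // reports iff shadow != 0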
+
+#define MSAN_MAYBE_STORE_ORIGIN(type, size) \
+ void __msan_maybe_store_origin_##size(type s, void *p, u32 o) { \
+ if (UNLIKELY(s)) { \
+ if (__msan_get_track_origins() > 1) { \
+ GET_CALLER_PC_BP_SP; \
+ (void) sp; \
+ GET_STORE_STACK_TRACE_PC_BP(pc, bp); \
+ o = ChainOrigin(o, &stack); \
+ } \
+ *(u32 *)MEM_TO_ORIGIN((uptr)p & ~3UL) = o; \
+ } \
+ }
+
+MSAN_MAYBE_STORE_ORIGIN(u8, 1)
+MSAN_MAYBE_STORE_ORIGIN(u16, 2)
+MSAN_MAYBE_STORE_ORIGIN(u32, 4)
+MSAN_MAYBE_STORE_ORIGIN(u64, 8)
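+// Roughly, an instrumented 8-byte store of a value v to p becomes (an
+// illustrative sketch, not the exact lowering):
+//   *(u64 *)MEM_TO_SHADOW((uptr)p) = shadow_of_v;
+//   __msan_maybe_store_origin_8(shadow_of_v, p, origin_of_v);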
+
+void __msan_warning() {
+ GET_CALLER_PC_BP_SP;
+ (void)sp;
+ PrintWarning(pc, bp);
+ if (__msan::flags()->halt_on_error) {
+ if (__msan::flags()->print_stats)
+ ReportStats();
+ Printf("Exiting\n");
+ Die();
+ }
+}
+
+void __msan_warning_noreturn() {
+ GET_CALLER_PC_BP_SP;
+ (void)sp;
+ PrintWarning(pc, bp);
+ if (__msan::flags()->print_stats)
+ ReportStats();
+ Printf("Exiting\n");
+ Die();
+}
+
+static void OnStackUnwind(const SignalContext &sig, const void *,
+ BufferedStackTrace *stack) {
+ stack->Unwind(sig.pc, sig.bp, sig.context,
+ common_flags()->fast_unwind_on_fatal);
+}
+
+static void MsanOnDeadlySignal(int signo, void *siginfo, void *context) {
+ HandleDeadlySignal(siginfo, context, GetTid(), &OnStackUnwind, nullptr);
+}
+
+static void MsanCheckFailed(const char *file, int line, const char *cond,
+ u64 v1, u64 v2) {
+ Report("MemorySanitizer CHECK failed: %s:%d \"%s\" (0x%zx, 0x%zx)\n", file,
+ line, cond, (uptr)v1, (uptr)v2);
+ PRINT_CURRENT_STACK_CHECK();
+ Die();
+}
+
+void __msan_init() {
+ CHECK(!msan_init_is_running);
+ if (msan_inited) return;
+ msan_init_is_running = 1;
+ SanitizerToolName = "MemorySanitizer";
+
+ AvoidCVE_2016_2143();
+
+ CacheBinaryName();
+ InitializeFlags();
+
+ // Install tool-specific callbacks in sanitizer_common.
+ SetCheckFailedCallback(MsanCheckFailed);
+
+ __sanitizer_set_report_path(common_flags()->log_path);
+
+ InitializeInterceptors();
+ CheckASLR();
+ InitTlsSize();
+ InstallDeadlySignalHandlers(MsanOnDeadlySignal);
+ InstallAtExitHandler(); // Needs __cxa_atexit interceptor.
+
+ DisableCoreDumperIfNecessary();
+ if (StackSizeIsUnlimited()) {
+ VPrintf(1, "Unlimited stack, doing reexec\n");
+    // A reasonably large stack size. It is bigger than the usual 8MB, because
+    // the program could have been run with an unlimited stack for a reason.
+ SetStackSizeLimitInBytes(32 * 1024 * 1024);
+ ReExec();
+ }
+
+ __msan_clear_on_return();
+ if (__msan_get_track_origins())
+ VPrintf(1, "msan_track_origins\n");
+ if (!InitShadow(__msan_get_track_origins())) {
+ Printf("FATAL: MemorySanitizer can not mmap the shadow memory.\n");
+ Printf("FATAL: Make sure to compile with -fPIE and to link with -pie.\n");
+ Printf("FATAL: Disabling ASLR is known to cause this error.\n");
+ Printf("FATAL: If running under GDB, try "
+ "'set disable-randomization off'.\n");
+ DumpProcessMap();
+ Die();
+ }
+
+ Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer);
+
+ InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir);
+
+ MsanTSDInit(MsanTSDDtor);
+
+ MsanAllocatorInit();
+
+ MsanThread *main_thread = MsanThread::Create(nullptr, nullptr);
+ SetCurrentThread(main_thread);
+ main_thread->ThreadStart();
+
+#if MSAN_CONTAINS_UBSAN
+ __ubsan::InitAsPlugin();
+#endif
+
+ VPrintf(1, "MemorySanitizer init done\n");
+
+ msan_init_is_running = 0;
+ msan_inited = 1;
+}
+
+void __msan_set_keep_going(int keep_going) {
+ flags()->halt_on_error = !keep_going;
+}
+
+void __msan_set_expect_umr(int expect_umr) {
+ if (expect_umr) {
+ msan_expected_umr_found = 0;
+ } else if (!msan_expected_umr_found) {
+ GET_CALLER_PC_BP_SP;
+ (void)sp;
+ GET_FATAL_STACK_TRACE_PC_BP(pc, bp);
+ ReportExpectedUMRNotFound(&stack);
+ Die();
+ }
+ msan_expect_umr = expect_umr;
+}
+
+void __msan_print_shadow(const void *x, uptr size) {
+ if (!MEM_IS_APP(x)) {
+ Printf("Not a valid application address: %p\n", x);
+ return;
+ }
+
+ DescribeMemoryRange(x, size);
+}
+
+void __msan_dump_shadow(const void *x, uptr size) {
+ if (!MEM_IS_APP(x)) {
+ Printf("Not a valid application address: %p\n", x);
+ return;
+ }
+
+ unsigned char *s = (unsigned char*)MEM_TO_SHADOW(x);
+ for (uptr i = 0; i < size; i++)
+ Printf("%x%x ", s[i] >> 4, s[i] & 0xf);
+ Printf("\n");
+}
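+// Illustrative usage from an instrumented program (assumed values):
+//   int x[2];
+//   x[0] = 0;
+//   __msan_dump_shadow(x, sizeof(x));  // e.g. "00 00 00 00 ff ff ff ff"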
+
+sptr __msan_test_shadow(const void *x, uptr size) {
+ if (!MEM_IS_APP(x)) return -1;
+ unsigned char *s = (unsigned char *)MEM_TO_SHADOW((uptr)x);
+ for (uptr i = 0; i < size; ++i)
+ if (s[i])
+ return i;
+ return -1;
+}
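+// Illustrative usage (assumed values):
+//   char buf[8];
+//   buf[0] = 'a';
+//   sptr off = __msan_test_shadow(buf, sizeof(buf));
+//   // off == 1: offset of the first uninitialized byte; -1 means none.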
+
+void __msan_check_mem_is_initialized(const void *x, uptr size) {
+ if (!__msan::flags()->report_umrs) return;
+ sptr offset = __msan_test_shadow(x, size);
+ if (offset < 0)
+ return;
+
+ GET_CALLER_PC_BP_SP;
+ (void)sp;
+ ReportUMRInsideAddressRange(__func__, x, size, offset);
+ __msan::PrintWarningWithOrigin(pc, bp,
+ __msan_get_origin(((const char *)x) + offset));
+ if (__msan::flags()->halt_on_error) {
+ Printf("Exiting\n");
+ Die();
+ }
+}
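+// Illustrative usage: passing a partially initialized object triggers a
+// report at the offset of the first uninitialized byte (assumed layout):
+//   struct { int a; int b; } s;
+//   s.a = 1;
+//   __msan_check_mem_is_initialized(&s, sizeof(s));  // UMR at offset 4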
+
+int __msan_set_poison_in_malloc(int do_poison) {
+ int old = flags()->poison_in_malloc;
+ flags()->poison_in_malloc = do_poison;
+ return old;
+}
+
+int __msan_has_dynamic_component() { return false; }
+
+NOINLINE
+void __msan_clear_on_return() {
+ __msan_param_tls[0] = 0;
+}
+
+void __msan_partial_poison(const void* data, void* shadow, uptr size) {
+ internal_memcpy((void*)MEM_TO_SHADOW((uptr)data), shadow, size);
+}
+
+void __msan_load_unpoisoned(const void *src, uptr size, void *dst) {
+ internal_memcpy(dst, src, size);
+ __msan_unpoison(dst, size);
+}
+
+void __msan_set_origin(const void *a, uptr size, u32 origin) {
+ if (__msan_get_track_origins()) SetOrigin(a, size, origin);
+}
+
+// 'descr' is created at compile time and starts with '----'.
+// When we see descr for the first time we replace '----' with a unique id
+// and set the origin to (id with the 31st bit set).
+void __msan_set_alloca_origin(void *a, uptr size, char *descr) {
+ __msan_set_alloca_origin4(a, size, descr, 0);
+}
+
+void __msan_set_alloca_origin4(void *a, uptr size, char *descr, uptr pc) {
+ static const u32 dash = '-';
+ static const u32 first_timer =
+ dash + (dash << 8) + (dash << 16) + (dash << 24);
+ u32 *id_ptr = (u32*)descr;
+ bool print = false; // internal_strstr(descr + 4, "AllocaTOTest") != 0;
+ u32 id = *id_ptr;
+ if (id == first_timer) {
+ u32 idx = atomic_fetch_add(&NumStackOriginDescrs, 1, memory_order_relaxed);
+ CHECK_LT(idx, kNumStackOriginDescrs);
+ StackOriginDescr[idx] = descr + 4;
+#if SANITIZER_PPC64V1
+ // On PowerPC64 ELFv1, the address of a function actually points to a
+ // three-doubleword data structure with the first field containing
+ // the address of the function's code.
+ if (pc)
+ pc = *reinterpret_cast<uptr*>(pc);
+#endif
+ StackOriginPC[idx] = pc;
+ id = Origin::CreateStackOrigin(idx).raw_id();
+ *id_ptr = id;
+ if (print)
+ Printf("First time: idx=%d id=%d %s %p \n", idx, id, descr + 4, pc);
+ }
+ if (print)
+ Printf("__msan_set_alloca_origin: descr=%s id=%x\n", descr + 4, id);
+ __msan_set_origin(a, size, id);
+}
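+// For illustration, the compiler may emit descr as a literal such as
+// "----x@main" (a hypothetical local x in main): the leading four dashes are
+// overwritten in place with the stack-origin id, and descr + 4 keeps the
+// human-readable name used in reports.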
+
+u32 __msan_chain_origin(u32 id) {
+ GET_CALLER_PC_BP_SP;
+ (void)sp;
+ GET_STORE_STACK_TRACE_PC_BP(pc, bp);
+ return ChainOrigin(id, &stack);
+}
+
+u32 __msan_get_origin(const void *a) {
+ if (!__msan_get_track_origins()) return 0;
+ uptr x = (uptr)a;
+ uptr aligned = x & ~3ULL;
+ uptr origin_ptr = MEM_TO_ORIGIN(aligned);
+ return *(u32*)origin_ptr;
+}
+
+int __msan_origin_is_descendant_or_same(u32 this_id, u32 prev_id) {
+ Origin o = Origin::FromRawId(this_id);
+ while (o.raw_id() != prev_id && o.isChainedOrigin())
+ o = o.getNextChainedOrigin(nullptr);
+ return o.raw_id() == prev_id;
+}
+
+u32 __msan_get_umr_origin() {
+ return __msan_origin_tls;
+}
+
+u16 __sanitizer_unaligned_load16(const uu16 *p) {
+ *(uu16 *)&__msan_retval_tls[0] = *(uu16 *)MEM_TO_SHADOW((uptr)p);
+ if (__msan_get_track_origins())
+ __msan_retval_origin_tls = GetOriginIfPoisoned((uptr)p, sizeof(*p));
+ return *p;
+}
+u32 __sanitizer_unaligned_load32(const uu32 *p) {
+ *(uu32 *)&__msan_retval_tls[0] = *(uu32 *)MEM_TO_SHADOW((uptr)p);
+ if (__msan_get_track_origins())
+ __msan_retval_origin_tls = GetOriginIfPoisoned((uptr)p, sizeof(*p));
+ return *p;
+}
+u64 __sanitizer_unaligned_load64(const uu64 *p) {
+ __msan_retval_tls[0] = *(uu64 *)MEM_TO_SHADOW((uptr)p);
+ if (__msan_get_track_origins())
+ __msan_retval_origin_tls = GetOriginIfPoisoned((uptr)p, sizeof(*p));
+ return *p;
+}
+void __sanitizer_unaligned_store16(uu16 *p, u16 x) {
+ u16 s = *(uu16 *)&__msan_param_tls[1];
+ *(uu16 *)MEM_TO_SHADOW((uptr)p) = s;
+ if (s && __msan_get_track_origins())
+ if (uu32 o = __msan_param_origin_tls[2])
+ SetOriginIfPoisoned((uptr)p, (uptr)&s, sizeof(s), o);
+ *p = x;
+}
+void __sanitizer_unaligned_store32(uu32 *p, u32 x) {
+ u32 s = *(uu32 *)&__msan_param_tls[1];
+ *(uu32 *)MEM_TO_SHADOW((uptr)p) = s;
+ if (s && __msan_get_track_origins())
+ if (uu32 o = __msan_param_origin_tls[2])
+ SetOriginIfPoisoned((uptr)p, (uptr)&s, sizeof(s), o);
+ *p = x;
+}
+void __sanitizer_unaligned_store64(uu64 *p, u64 x) {
+ u64 s = __msan_param_tls[1];
+ *(uu64 *)MEM_TO_SHADOW((uptr)p) = s;
+ if (s && __msan_get_track_origins())
+ if (uu32 o = __msan_param_origin_tls[2])
+ SetOriginIfPoisoned((uptr)p, (uptr)&s, sizeof(s), o);
+ *p = x;
+}
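+// These helpers mirror the regular MSan calling convention: the caller's
+// shadow for argument x lives at byte offset 8 of __msan_param_tls (slot 1,
+// since slot 0 holds the pointer argument), and loads return their shadow
+// in __msan_retval_tls[0].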
+
+void __msan_set_death_callback(void (*callback)(void)) {
+ SetUserDieCallback(callback);
+}
+
+#if !SANITIZER_SUPPORTS_WEAK_HOOKS
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+const char* __msan_default_options() { return ""; }
+} // extern "C"
+#endif
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_print_stack_trace() {
+ GET_FATAL_STACK_TRACE_PC_BP(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME());
+ stack.Print();
+}
+} // extern "C"
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_linux.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_linux.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_linux.cc (revision 351984)
@@ -0,0 +1,262 @@
+//===-- msan_linux.cc -----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of MemorySanitizer.
+//
+// Linux-, NetBSD- and FreeBSD-specific code.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD
+
+#include "msan.h"
+#include "msan_report.h"
+#include "msan_thread.h"
+
+#include <elf.h>
+#include <link.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <signal.h>
+#include <unistd.h>
+#include <unwind.h>
+#include <execinfo.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_procmaps.h"
+
+namespace __msan {
+
+void ReportMapRange(const char *descr, uptr beg, uptr size) {
+ if (size > 0) {
+ uptr end = beg + size - 1;
+ VPrintf(1, "%s : %p - %p\n", descr, beg, end);
+ }
+}
+
+static bool CheckMemoryRangeAvailability(uptr beg, uptr size) {
+ if (size > 0) {
+ uptr end = beg + size - 1;
+ if (!MemoryRangeIsAvailable(beg, end)) {
+ Printf("FATAL: Memory range %p - %p is not available.\n", beg, end);
+ return false;
+ }
+ }
+ return true;
+}
+
+static bool ProtectMemoryRange(uptr beg, uptr size, const char *name) {
+ if (size > 0) {
+ void *addr = MmapFixedNoAccess(beg, size, name);
+ if (beg == 0 && addr) {
+ // Depending on the kernel configuration, we may not be able to protect
+ // the page at address zero.
+ uptr gap = 16 * GetPageSizeCached();
+ beg += gap;
+ size -= gap;
+ addr = MmapFixedNoAccess(beg, size, name);
+ }
+ if ((uptr)addr != beg) {
+ uptr end = beg + size - 1;
+ Printf("FATAL: Cannot protect memory range %p - %p (%s).\n", beg, end,
+ name);
+ return false;
+ }
+ }
+ return true;
+}
+
+static void CheckMemoryLayoutSanity() {
+ uptr prev_end = 0;
+ for (unsigned i = 0; i < kMemoryLayoutSize; ++i) {
+ uptr start = kMemoryLayout[i].start;
+ uptr end = kMemoryLayout[i].end;
+ MappingDesc::Type type = kMemoryLayout[i].type;
+ CHECK_LT(start, end);
+ CHECK_EQ(prev_end, start);
+ CHECK(addr_is_type(start, type));
+ CHECK(addr_is_type((start + end) / 2, type));
+ CHECK(addr_is_type(end - 1, type));
+ if (type == MappingDesc::APP) {
+ uptr addr = start;
+ CHECK(MEM_IS_SHADOW(MEM_TO_SHADOW(addr)));
+ CHECK(MEM_IS_ORIGIN(MEM_TO_ORIGIN(addr)));
+ CHECK_EQ(MEM_TO_ORIGIN(addr), SHADOW_TO_ORIGIN(MEM_TO_SHADOW(addr)));
+
+ addr = (start + end) / 2;
+ CHECK(MEM_IS_SHADOW(MEM_TO_SHADOW(addr)));
+ CHECK(MEM_IS_ORIGIN(MEM_TO_ORIGIN(addr)));
+ CHECK_EQ(MEM_TO_ORIGIN(addr), SHADOW_TO_ORIGIN(MEM_TO_SHADOW(addr)));
+
+ addr = end - 1;
+ CHECK(MEM_IS_SHADOW(MEM_TO_SHADOW(addr)));
+ CHECK(MEM_IS_ORIGIN(MEM_TO_ORIGIN(addr)));
+ CHECK_EQ(MEM_TO_ORIGIN(addr), SHADOW_TO_ORIGIN(MEM_TO_SHADOW(addr)));
+ }
+ prev_end = end;
+ }
+}
+
+bool InitShadow(bool init_origins) {
+ // Let user know mapping parameters first.
+ VPrintf(1, "__msan_init %p\n", &__msan_init);
+ for (unsigned i = 0; i < kMemoryLayoutSize; ++i)
+ VPrintf(1, "%s: %zx - %zx\n", kMemoryLayout[i].name, kMemoryLayout[i].start,
+ kMemoryLayout[i].end - 1);
+
+ CheckMemoryLayoutSanity();
+
+ if (!MEM_IS_APP(&__msan_init)) {
+ Printf("FATAL: Code %p is out of application range. Non-PIE build?\n",
+ (uptr)&__msan_init);
+ return false;
+ }
+
+ const uptr maxVirtualAddress = GetMaxUserVirtualAddress();
+
+ for (unsigned i = 0; i < kMemoryLayoutSize; ++i) {
+ uptr start = kMemoryLayout[i].start;
+ uptr end = kMemoryLayout[i].end;
+    uptr size = end - start;
+ MappingDesc::Type type = kMemoryLayout[i].type;
+
+ // Check if the segment should be mapped based on platform constraints.
+ if (start >= maxVirtualAddress)
+ continue;
+
+ bool map = type == MappingDesc::SHADOW ||
+ (init_origins && type == MappingDesc::ORIGIN);
+ bool protect = type == MappingDesc::INVALID ||
+ (!init_origins && type == MappingDesc::ORIGIN);
+ CHECK(!(map && protect));
+ if (!map && !protect)
+ CHECK(type == MappingDesc::APP);
+ if (map) {
+ if (!CheckMemoryRangeAvailability(start, size))
+ return false;
+ if (!MmapFixedNoReserve(start, size, kMemoryLayout[i].name))
+ return false;
+ if (common_flags()->use_madv_dontdump)
+ DontDumpShadowMemory(start, size);
+ }
+ if (protect) {
+ if (!CheckMemoryRangeAvailability(start, size))
+ return false;
+ if (!ProtectMemoryRange(start, size, kMemoryLayout[i].name))
+ return false;
+ }
+ }
+
+ return true;
+}
+
+static void MsanAtExit(void) {
+ if (flags()->print_stats && (flags()->atexit || msan_report_count > 0))
+ ReportStats();
+ if (msan_report_count > 0) {
+ ReportAtExitStatistics();
+ if (common_flags()->exitcode)
+ internal__exit(common_flags()->exitcode);
+ }
+}
+
+void InstallAtExitHandler() {
+ atexit(MsanAtExit);
+}
+
+// ---------------------- TSD ---------------- {{{1
+
+#if SANITIZER_NETBSD
+// Thread-specific data cannot be used in early init on NetBSD. Reuse the
+// MSan TSD API for compatibility with existing code, with an alternative
+// implementation.
+
+static void (*tsd_destructor)(void *tsd) = nullptr;
+
+struct tsd_key {
+ tsd_key() : key(nullptr) {}
+ ~tsd_key() {
+ CHECK(tsd_destructor);
+ if (key)
+ (*tsd_destructor)(key);
+ }
+ MsanThread *key;
+};
+
+static thread_local struct tsd_key key;
+
+void MsanTSDInit(void (*destructor)(void *tsd)) {
+ CHECK(!tsd_destructor);
+ tsd_destructor = destructor;
+}
+
+MsanThread *GetCurrentThread() {
+ CHECK(tsd_destructor);
+ return key.key;
+}
+
+void SetCurrentThread(MsanThread *tsd) {
+ CHECK(tsd_destructor);
+ CHECK(tsd);
+ CHECK(!key.key);
+ key.key = tsd;
+}
+
+void MsanTSDDtor(void *tsd) {
+ CHECK(tsd_destructor);
+ CHECK_EQ(key.key, tsd);
+ key.key = nullptr;
+  // Make sure that a signal handler cannot see a stale current thread
+  // pointer.
+ atomic_signal_fence(memory_order_seq_cst);
+ MsanThread::TSDDtor(tsd);
+}
+#else
+static pthread_key_t tsd_key;
+static bool tsd_key_inited = false;
+
+void MsanTSDInit(void (*destructor)(void *tsd)) {
+ CHECK(!tsd_key_inited);
+ tsd_key_inited = true;
+ CHECK_EQ(0, pthread_key_create(&tsd_key, destructor));
+}
+
+static THREADLOCAL MsanThread* msan_current_thread;
+
+MsanThread *GetCurrentThread() {
+ return msan_current_thread;
+}
+
+void SetCurrentThread(MsanThread *t) {
+ // Make sure we do not reset the current MsanThread.
+ CHECK_EQ(0, msan_current_thread);
+ msan_current_thread = t;
+ // Make sure that MsanTSDDtor gets called at the end.
+ CHECK(tsd_key_inited);
+ pthread_setspecific(tsd_key, (void *)t);
+}
+
+void MsanTSDDtor(void *tsd) {
+ MsanThread *t = (MsanThread*)tsd;
+ if (t->destructor_iterations_ > 1) {
+ t->destructor_iterations_--;
+ CHECK_EQ(0, pthread_setspecific(tsd_key, tsd));
+ return;
+ }
+ msan_current_thread = nullptr;
+  // Make sure that a signal handler cannot see a stale current thread
+  // pointer.
+ atomic_signal_fence(memory_order_seq_cst);
+ MsanThread::TSDDtor(tsd);
+}
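+// Note: pthreads may invoke TSD destructors several times (up to
+// PTHREAD_DESTRUCTOR_ITERATIONS); re-registering the value above keeps the
+// MsanThread alive so TSD destructors of the application that run after
+// ours still see a valid current thread.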
+#endif
+
+} // namespace __msan
+
+#endif // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan.h (revision 351984)
@@ -0,0 +1,397 @@
+//===-- msan.h --------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of MemorySanitizer.
+//
+// Private MSan header.
+//===----------------------------------------------------------------------===//
+
+#ifndef MSAN_H
+#define MSAN_H
+
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "msan_interface_internal.h"
+#include "msan_flags.h"
+#include "ubsan/ubsan_platform.h"
+
+#ifndef MSAN_REPLACE_OPERATORS_NEW_AND_DELETE
+# define MSAN_REPLACE_OPERATORS_NEW_AND_DELETE 1
+#endif
+
+#ifndef MSAN_CONTAINS_UBSAN
+# define MSAN_CONTAINS_UBSAN CAN_SANITIZE_UB
+#endif
+
+struct MappingDesc {
+ uptr start;
+ uptr end;
+ enum Type {
+ INVALID, APP, SHADOW, ORIGIN
+ } type;
+ const char *name;
+};
+
+
+#if SANITIZER_LINUX && defined(__mips64)
+
+// MIPS64 maps:
+// - 0x0000000000-0x0200000000: the program's own segments
+// - 0xa200000000-0xc000000000: PIE program segments
+// - 0xe200000000-0xffffffffff: library segments.
+const MappingDesc kMemoryLayout[] = {
+ {0x000000000000ULL, 0x000200000000ULL, MappingDesc::APP, "app-1"},
+ {0x000200000000ULL, 0x002200000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x002200000000ULL, 0x004000000000ULL, MappingDesc::SHADOW, "shadow-2"},
+ {0x004000000000ULL, 0x004200000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x004200000000ULL, 0x006000000000ULL, MappingDesc::ORIGIN, "origin-2"},
+ {0x006000000000ULL, 0x006200000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x006200000000ULL, 0x008000000000ULL, MappingDesc::SHADOW, "shadow-3"},
+ {0x008000000000ULL, 0x008200000000ULL, MappingDesc::SHADOW, "shadow-1"},
+ {0x008200000000ULL, 0x00a000000000ULL, MappingDesc::ORIGIN, "origin-3"},
+ {0x00a000000000ULL, 0x00a200000000ULL, MappingDesc::ORIGIN, "origin-1"},
+ {0x00a200000000ULL, 0x00c000000000ULL, MappingDesc::APP, "app-2"},
+ {0x00c000000000ULL, 0x00e200000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x00e200000000ULL, 0x00ffffffffffULL, MappingDesc::APP, "app-3"}};
+
+#define MEM_TO_SHADOW(mem) (((uptr)(mem)) ^ 0x8000000000ULL)
+#define SHADOW_TO_ORIGIN(shadow) (((uptr)(shadow)) + 0x2000000000ULL)
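+// A worked example (illustrative): app address 0x000012345678 in "app-1"
+// maps to shadow 0x000012345678 ^ 0x8000000000 = 0x008012345678 (inside
+// "shadow-1") and to origin 0x008012345678 + 0x2000000000 = 0x00a012345678
+// (inside "origin-1").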
+
+#elif SANITIZER_LINUX && defined(__aarch64__)
+
+// The mapping describes the 39-bit, 42-bit, and 48-bit VMAs. AArch64
+// maps:
+// - 0x0000000000000-0x0000010000000: 39/42/48-bit program's own segments
+// - 0x0005500000000-0x0005600000000: 39-bit PIE program segments
+// - 0x0007f80000000-0x0007fffffffff: 39-bit library segments
+// - 0x002aa00000000-0x002ab00000000: 42-bit PIE program segments
+// - 0x003ff00000000-0x003ffffffffff: 42-bit library segments
+// - 0x0aaaaa0000000-0x0aaab00000000: 48-bit PIE program segments
+// - 0xffff000000000-0x1000000000000: 48-bit library segments
+// It is fragmented into multiple segments to increase the memory available
+// on 42-bit VMAs (12.21% of the total VMA is available for 42 bits and
+// 13.28% for 39 bits). The 48-bit segments only cover the usual PIE/default
+// segments plus some more segments (262144GB in total, 0.39% of the VMA).
+const MappingDesc kMemoryLayout[] = {
+ {0x00000000000ULL, 0x01000000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x01000000000ULL, 0x02000000000ULL, MappingDesc::SHADOW, "shadow-2"},
+ {0x02000000000ULL, 0x03000000000ULL, MappingDesc::ORIGIN, "origin-2"},
+ {0x03000000000ULL, 0x04000000000ULL, MappingDesc::SHADOW, "shadow-1"},
+ {0x04000000000ULL, 0x05000000000ULL, MappingDesc::ORIGIN, "origin-1"},
+ {0x05000000000ULL, 0x06000000000ULL, MappingDesc::APP, "app-1"},
+ {0x06000000000ULL, 0x07000000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x07000000000ULL, 0x08000000000ULL, MappingDesc::APP, "app-2"},
+ {0x08000000000ULL, 0x09000000000ULL, MappingDesc::INVALID, "invalid"},
+  // The mappings below are used only for the 42-bit VMA.
+ {0x09000000000ULL, 0x0A000000000ULL, MappingDesc::SHADOW, "shadow-3"},
+ {0x0A000000000ULL, 0x0B000000000ULL, MappingDesc::ORIGIN, "origin-3"},
+ {0x0B000000000ULL, 0x0F000000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x0F000000000ULL, 0x10000000000ULL, MappingDesc::APP, "app-3"},
+ {0x10000000000ULL, 0x11000000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x11000000000ULL, 0x12000000000ULL, MappingDesc::APP, "app-4"},
+ {0x12000000000ULL, 0x17000000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x17000000000ULL, 0x18000000000ULL, MappingDesc::SHADOW, "shadow-4"},
+ {0x18000000000ULL, 0x19000000000ULL, MappingDesc::ORIGIN, "origin-4"},
+ {0x19000000000ULL, 0x20000000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x20000000000ULL, 0x21000000000ULL, MappingDesc::APP, "app-5"},
+ {0x21000000000ULL, 0x26000000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x26000000000ULL, 0x27000000000ULL, MappingDesc::SHADOW, "shadow-5"},
+ {0x27000000000ULL, 0x28000000000ULL, MappingDesc::ORIGIN, "origin-5"},
+ {0x28000000000ULL, 0x29000000000ULL, MappingDesc::SHADOW, "shadow-7"},
+ {0x29000000000ULL, 0x2A000000000ULL, MappingDesc::ORIGIN, "origin-7"},
+ {0x2A000000000ULL, 0x2B000000000ULL, MappingDesc::APP, "app-6"},
+ {0x2B000000000ULL, 0x2C000000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x2C000000000ULL, 0x2D000000000ULL, MappingDesc::SHADOW, "shadow-6"},
+ {0x2D000000000ULL, 0x2E000000000ULL, MappingDesc::ORIGIN, "origin-6"},
+ {0x2E000000000ULL, 0x2F000000000ULL, MappingDesc::APP, "app-7"},
+ {0x2F000000000ULL, 0x39000000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x39000000000ULL, 0x3A000000000ULL, MappingDesc::SHADOW, "shadow-9"},
+ {0x3A000000000ULL, 0x3B000000000ULL, MappingDesc::ORIGIN, "origin-9"},
+ {0x3B000000000ULL, 0x3C000000000ULL, MappingDesc::APP, "app-8"},
+ {0x3C000000000ULL, 0x3D000000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x3D000000000ULL, 0x3E000000000ULL, MappingDesc::SHADOW, "shadow-8"},
+ {0x3E000000000ULL, 0x3F000000000ULL, MappingDesc::ORIGIN, "origin-8"},
+ {0x3F000000000ULL, 0x40000000000ULL, MappingDesc::APP, "app-9"},
+  // The mappings below are used only for the 48-bit VMA.
+  // TODO(unknown): the 48-bit mapping only covers the usual PIE and non-PIE
+  // segments plus some more segments totaling 262144GB of VMA (which covers
+  // only 0.32% of the full 48-bit VMA). Memory availability can be increased
+  // by adding multiple application segments like the 39- and 42-bit mappings.
+ {0x0040000000000ULL, 0x0041000000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x0041000000000ULL, 0x0042000000000ULL, MappingDesc::APP, "app-10"},
+ {0x0042000000000ULL, 0x0047000000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x0047000000000ULL, 0x0048000000000ULL, MappingDesc::SHADOW, "shadow-10"},
+ {0x0048000000000ULL, 0x0049000000000ULL, MappingDesc::ORIGIN, "origin-10"},
+ {0x0049000000000ULL, 0x0050000000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x0050000000000ULL, 0x0051000000000ULL, MappingDesc::APP, "app-11"},
+ {0x0051000000000ULL, 0x0056000000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x0056000000000ULL, 0x0057000000000ULL, MappingDesc::SHADOW, "shadow-11"},
+ {0x0057000000000ULL, 0x0058000000000ULL, MappingDesc::ORIGIN, "origin-11"},
+ {0x0058000000000ULL, 0x0059000000000ULL, MappingDesc::APP, "app-12"},
+ {0x0059000000000ULL, 0x005E000000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x005E000000000ULL, 0x005F000000000ULL, MappingDesc::SHADOW, "shadow-12"},
+ {0x005F000000000ULL, 0x0060000000000ULL, MappingDesc::ORIGIN, "origin-12"},
+ {0x0060000000000ULL, 0x0061000000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x0061000000000ULL, 0x0062000000000ULL, MappingDesc::APP, "app-13"},
+ {0x0062000000000ULL, 0x0067000000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x0067000000000ULL, 0x0068000000000ULL, MappingDesc::SHADOW, "shadow-13"},
+ {0x0068000000000ULL, 0x0069000000000ULL, MappingDesc::ORIGIN, "origin-13"},
+ {0x0069000000000ULL, 0x0AAAAA0000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x0AAAAA0000000ULL, 0x0AAAB00000000ULL, MappingDesc::APP, "app-14"},
+ {0x0AAAB00000000ULL, 0x0AACAA0000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x0AACAA0000000ULL, 0x0AACB00000000ULL, MappingDesc::SHADOW, "shadow-14"},
+ {0x0AACB00000000ULL, 0x0AADAA0000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x0AADAA0000000ULL, 0x0AADB00000000ULL, MappingDesc::ORIGIN, "origin-14"},
+ {0x0AADB00000000ULL, 0x0FF9F00000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x0FF9F00000000ULL, 0x0FFA000000000ULL, MappingDesc::SHADOW, "shadow-15"},
+ {0x0FFA000000000ULL, 0x0FFAF00000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x0FFAF00000000ULL, 0x0FFB000000000ULL, MappingDesc::ORIGIN, "origin-15"},
+ {0x0FFB000000000ULL, 0x0FFFF00000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x0FFFF00000000ULL, 0x1000000000000ULL, MappingDesc::APP, "app-15"},
+};
+# define MEM_TO_SHADOW(mem) ((uptr)mem ^ 0x6000000000ULL)
+# define SHADOW_TO_ORIGIN(shadow) (((uptr)(shadow)) + 0x1000000000ULL)
+
+#elif SANITIZER_LINUX && SANITIZER_PPC64
+const MappingDesc kMemoryLayout[] = {
+ {0x000000000000ULL, 0x000200000000ULL, MappingDesc::APP, "low memory"},
+ {0x000200000000ULL, 0x080000000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x080000000000ULL, 0x180200000000ULL, MappingDesc::SHADOW, "shadow"},
+ {0x180200000000ULL, 0x1C0000000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x1C0000000000ULL, 0x2C0200000000ULL, MappingDesc::ORIGIN, "origin"},
+ {0x2C0200000000ULL, 0x300000000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x300000000000ULL, 0x800000000000ULL, MappingDesc::APP, "high memory"}};
+
+// Various kernels use different low-end ranges, but we can combine them into
+// one big range. They also use different high-end ranges, but we can map them
+// all to one range.
+// Maps low and high app ranges to contiguous space with zero base:
+// Low: 0000 0000 0000 - 0001 ffff ffff -> 1000 0000 0000 - 1001 ffff ffff
+// High: 3000 0000 0000 - 3fff ffff ffff -> 0000 0000 0000 - 0fff ffff ffff
+// High: 4000 0000 0000 - 4fff ffff ffff -> 0000 0000 0000 - 0fff ffff ffff
+// High: 7000 0000 0000 - 7fff ffff ffff -> 0000 0000 0000 - 0fff ffff ffff
+#define LINEARIZE_MEM(mem) \
+ (((uptr)(mem) & ~0xE00000000000ULL) ^ 0x100000000000ULL)
+#define MEM_TO_SHADOW(mem) (LINEARIZE_MEM((mem)) + 0x080000000000ULL)
+#define SHADOW_TO_ORIGIN(shadow) (((uptr)(shadow)) + 0x140000000000ULL)
+
+#elif SANITIZER_FREEBSD && SANITIZER_WORDSIZE == 64
+
+// Low memory: main binary, MAP_32BIT mappings and modules
+// High memory: heap, modules and main thread stack
+const MappingDesc kMemoryLayout[] = {
+ {0x000000000000ULL, 0x010000000000ULL, MappingDesc::APP, "low memory"},
+ {0x010000000000ULL, 0x100000000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x100000000000ULL, 0x310000000000ULL, MappingDesc::SHADOW, "shadow"},
+ {0x310000000000ULL, 0x380000000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x380000000000ULL, 0x590000000000ULL, MappingDesc::ORIGIN, "origin"},
+ {0x590000000000ULL, 0x600000000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x600000000000ULL, 0x800000000000ULL, MappingDesc::APP, "high memory"}};
+
+// Maps low and high app ranges to contiguous space with zero base:
+// Low: 0000 0000 0000 - 00ff ffff ffff -> 2000 0000 0000 - 20ff ffff ffff
+// High: 6000 0000 0000 - 7fff ffff ffff -> 0000 0000 0000 - 1fff ffff ffff
+#define LINEARIZE_MEM(mem) \
+ (((uptr)(mem) & ~0xc00000000000ULL) ^ 0x200000000000ULL)
+#define MEM_TO_SHADOW(mem) (LINEARIZE_MEM((mem)) + 0x100000000000ULL)
+#define SHADOW_TO_ORIGIN(shadow) (((uptr)(shadow)) + 0x280000000000)
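+// A worked example (illustrative): low-memory address 0x000012340000 is
+// linearized to (0x000012340000 & ~0xc00000000000) ^ 0x200000000000 =
+// 0x200012340000, so its shadow is 0x300012340000 (inside "shadow") and its
+// origin is 0x300012340000 + 0x280000000000 = 0x580012340000 (inside
+// "origin").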
+
+#elif SANITIZER_NETBSD || (SANITIZER_LINUX && SANITIZER_WORDSIZE == 64)
+
+#ifdef MSAN_LINUX_X86_64_OLD_MAPPING
+// Requires a PIE binary and ASLR enabled.
+// Main thread stack and DSOs at 0x7f0000000000 (sometimes 0x7e0000000000).
+// Heap at 0x600000000000.
+const MappingDesc kMemoryLayout[] = {
+ {0x000000000000ULL, 0x200000000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x200000000000ULL, 0x400000000000ULL, MappingDesc::SHADOW, "shadow"},
+ {0x400000000000ULL, 0x600000000000ULL, MappingDesc::ORIGIN, "origin"},
+ {0x600000000000ULL, 0x800000000000ULL, MappingDesc::APP, "app"}};
+
+#define MEM_TO_SHADOW(mem) (((uptr)(mem)) & ~0x400000000000ULL)
+#define SHADOW_TO_ORIGIN(mem) (((uptr)(mem)) + 0x200000000000ULL)
+#else // MSAN_LINUX_X86_64_OLD_MAPPING
+// All of the following configurations are supported.
+// ASLR disabled: main executable and DSOs at 0x555550000000
+// PIE and ASLR: main executable and DSOs at 0x7f0000000000
+// non-PIE: main executable below 0x100000000, DSOs at 0x7f0000000000
+// Heap at 0x700000000000.
+const MappingDesc kMemoryLayout[] = {
+ {0x000000000000ULL, 0x010000000000ULL, MappingDesc::APP, "app-1"},
+ {0x010000000000ULL, 0x100000000000ULL, MappingDesc::SHADOW, "shadow-2"},
+ {0x100000000000ULL, 0x110000000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x110000000000ULL, 0x200000000000ULL, MappingDesc::ORIGIN, "origin-2"},
+ {0x200000000000ULL, 0x300000000000ULL, MappingDesc::SHADOW, "shadow-3"},
+ {0x300000000000ULL, 0x400000000000ULL, MappingDesc::ORIGIN, "origin-3"},
+ {0x400000000000ULL, 0x500000000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x500000000000ULL, 0x510000000000ULL, MappingDesc::SHADOW, "shadow-1"},
+ {0x510000000000ULL, 0x600000000000ULL, MappingDesc::APP, "app-2"},
+ {0x600000000000ULL, 0x610000000000ULL, MappingDesc::ORIGIN, "origin-1"},
+ {0x610000000000ULL, 0x700000000000ULL, MappingDesc::INVALID, "invalid"},
+ {0x700000000000ULL, 0x800000000000ULL, MappingDesc::APP, "app-3"}};
+#define MEM_TO_SHADOW(mem) (((uptr)(mem)) ^ 0x500000000000ULL)
+#define SHADOW_TO_ORIGIN(mem) (((uptr)(mem)) + 0x100000000000ULL)
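+// A worked example (illustrative): heap address 0x7fff12345678 in "app-3"
+// maps to shadow 0x7fff12345678 ^ 0x500000000000 = 0x2fff12345678 (inside
+// "shadow-3") and to origin 0x2fff12345678 + 0x100000000000 =
+// 0x3fff12345678 (inside "origin-3").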
+#endif // MSAN_LINUX_X86_64_OLD_MAPPING
+
+#else
+#error "Unsupported platform"
+#endif
+
+const uptr kMemoryLayoutSize = sizeof(kMemoryLayout) / sizeof(kMemoryLayout[0]);
+
+#define MEM_TO_ORIGIN(mem) (SHADOW_TO_ORIGIN(MEM_TO_SHADOW((mem))))
+
+#ifndef __clang__
+__attribute__((optimize("unroll-loops")))
+#endif
+inline bool addr_is_type(uptr addr, MappingDesc::Type mapping_type) {
+// It is critical for performance that this loop is unrolled (because then it is
+// simplified into just a few constant comparisons).
+#ifdef __clang__
+#pragma unroll
+#endif
+ for (unsigned i = 0; i < kMemoryLayoutSize; ++i)
+ if (kMemoryLayout[i].type == mapping_type &&
+ addr >= kMemoryLayout[i].start && addr < kMemoryLayout[i].end)
+ return true;
+ return false;
+}
+
+#define MEM_IS_APP(mem) addr_is_type((uptr)(mem), MappingDesc::APP)
+#define MEM_IS_SHADOW(mem) addr_is_type((uptr)(mem), MappingDesc::SHADOW)
+#define MEM_IS_ORIGIN(mem) addr_is_type((uptr)(mem), MappingDesc::ORIGIN)
+
+// These constants must be kept in sync with the ones in MemorySanitizer.cc.
+const int kMsanParamTlsSize = 800;
+const int kMsanRetvalTlsSize = 800;
+
+namespace __msan {
+extern int msan_inited;
+extern bool msan_init_is_running;
+extern int msan_report_count;
+
+bool ProtectRange(uptr beg, uptr end);
+bool InitShadow(bool init_origins);
+char *GetProcSelfMaps();
+void InitializeInterceptors();
+
+void MsanAllocatorInit();
+void MsanAllocatorThreadFinish();
+void MsanDeallocate(StackTrace *stack, void *ptr);
+
+void *msan_malloc(uptr size, StackTrace *stack);
+void *msan_calloc(uptr nmemb, uptr size, StackTrace *stack);
+void *msan_realloc(void *ptr, uptr size, StackTrace *stack);
+void *msan_reallocarray(void *ptr, uptr nmemb, uptr size, StackTrace *stack);
+void *msan_valloc(uptr size, StackTrace *stack);
+void *msan_pvalloc(uptr size, StackTrace *stack);
+void *msan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack);
+void *msan_memalign(uptr alignment, uptr size, StackTrace *stack);
+int msan_posix_memalign(void **memptr, uptr alignment, uptr size,
+ StackTrace *stack);
+
+void InstallTrapHandler();
+void InstallAtExitHandler();
+
+const char *GetStackOriginDescr(u32 id, uptr *pc);
+
+void EnterSymbolizer();
+void ExitSymbolizer();
+bool IsInSymbolizer();
+
+struct SymbolizerScope {
+ SymbolizerScope() { EnterSymbolizer(); }
+ ~SymbolizerScope() { ExitSymbolizer(); }
+};
+
+void PrintWarning(uptr pc, uptr bp);
+void PrintWarningWithOrigin(uptr pc, uptr bp, u32 origin);
+
+// Unpoison the first n function arguments.
+void UnpoisonParam(uptr n);
+void UnpoisonThreadLocalState();
+
+// Returns a "chained" origin id, pointing to the given stack trace followed by
+// the previous origin id.
+u32 ChainOrigin(u32 id, StackTrace *stack);
+
+const int STACK_TRACE_TAG_POISON = StackTrace::TAG_CUSTOM + 1;
+
+#define GET_MALLOC_STACK_TRACE \
+ BufferedStackTrace stack; \
+ if (__msan_get_track_origins() && msan_inited) \
+ stack.Unwind(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), \
+ nullptr, common_flags()->fast_unwind_on_malloc, \
+ common_flags()->malloc_context_size)
+
+// For platforms that support only the slow unwinder, we restrict the store
+// context size to 1, basically only storing the current pc. We do this
+// because the slow unwinder, which is based on libunwind, is not
+// async-signal-safe and causes random freezes in forking applications as
+// well as in signal handlers.
+#define GET_STORE_STACK_TRACE_PC_BP(pc, bp) \
+ BufferedStackTrace stack; \
+ if (__msan_get_track_origins() > 1 && msan_inited) { \
+ int size = flags()->store_context_size; \
+ if (!SANITIZER_CAN_FAST_UNWIND) \
+ size = Min(size, 1); \
+ stack.Unwind(pc, bp, nullptr, common_flags()->fast_unwind_on_malloc, size);\
+ }
+
+#define GET_STORE_STACK_TRACE \
+ GET_STORE_STACK_TRACE_PC_BP(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME())
+
+#define GET_FATAL_STACK_TRACE_PC_BP(pc, bp) \
+ BufferedStackTrace stack; \
+ if (msan_inited) \
+ stack.Unwind(pc, bp, nullptr, common_flags()->fast_unwind_on_fatal)
+
+#define GET_FATAL_STACK_TRACE_HERE \
+ GET_FATAL_STACK_TRACE_PC_BP(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME())
+
+#define PRINT_CURRENT_STACK_CHECK() \
+ { \
+ GET_FATAL_STACK_TRACE_HERE; \
+ stack.Print(); \
+ }
+
+class ScopedThreadLocalStateBackup {
+ public:
+ ScopedThreadLocalStateBackup() { Backup(); }
+ ~ScopedThreadLocalStateBackup() { Restore(); }
+ void Backup();
+ void Restore();
+ private:
+ u64 va_arg_overflow_size_tls;
+};
+
+void MsanTSDInit(void (*destructor)(void *tsd));
+void *MsanTSDGet();
+void MsanTSDSet(void *tsd);
+void MsanTSDDtor(void *tsd);
+
+} // namespace __msan
+
+#define MSAN_MALLOC_HOOK(ptr, size) \
+ do { \
+ if (&__sanitizer_malloc_hook) { \
+ UnpoisonParam(2); \
+ __sanitizer_malloc_hook(ptr, size); \
+ } \
+ RunMallocHooks(ptr, size); \
+ } while (false)
+#define MSAN_FREE_HOOK(ptr) \
+ do { \
+ if (&__sanitizer_free_hook) { \
+ UnpoisonParam(1); \
+ __sanitizer_free_hook(ptr); \
+ } \
+ RunFreeHooks(ptr); \
+ } while (false)
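+// A user program can observe allocations through these weak hooks; a hedged
+// sketch (the public declarations are expected in
+// <sanitizer/allocator_interface.h>):
+//   extern "C" void __sanitizer_malloc_hook(const volatile void *ptr,
+//                                           size_t size) { /* ... */ }
+//   extern "C" void __sanitizer_free_hook(const volatile void *ptr) {
+//     /* ... */ }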
+
+#endif // MSAN_H
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_allocator.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_allocator.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_allocator.cc (revision 351984)
@@ -0,0 +1,349 @@
+//===-- msan_allocator.cc --------------------------- ---------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of MemorySanitizer.
+//
+// MemorySanitizer allocator.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_allocator_checks.h"
+#include "sanitizer_common/sanitizer_allocator_interface.h"
+#include "sanitizer_common/sanitizer_allocator_report.h"
+#include "sanitizer_common/sanitizer_errno.h"
+#include "msan.h"
+#include "msan_allocator.h"
+#include "msan_origin.h"
+#include "msan_thread.h"
+#include "msan_poisoning.h"
+
+namespace __msan {
+
+struct Metadata {
+ uptr requested_size;
+};
+
+struct MsanMapUnmapCallback {
+ void OnMap(uptr p, uptr size) const {}
+ void OnUnmap(uptr p, uptr size) const {
+ __msan_unpoison((void *)p, size);
+
+ // We are about to unmap a chunk of user memory.
+ // Mark the corresponding shadow memory as not needed.
+ uptr shadow_p = MEM_TO_SHADOW(p);
+ ReleaseMemoryPagesToOS(shadow_p, shadow_p + size);
+ if (__msan_get_track_origins()) {
+ uptr origin_p = MEM_TO_ORIGIN(p);
+ ReleaseMemoryPagesToOS(origin_p, origin_p + size);
+ }
+ }
+};
+
+#if defined(__mips64)
+static const uptr kMaxAllowedMallocSize = 2UL << 30;
+
+struct AP32 {
+ static const uptr kSpaceBeg = 0;
+ static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
+ static const uptr kMetadataSize = sizeof(Metadata);
+ typedef __sanitizer::CompactSizeClassMap SizeClassMap;
+ static const uptr kRegionSizeLog = 20;
+ using AddressSpaceView = LocalAddressSpaceView;
+ typedef MsanMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+};
+typedef SizeClassAllocator32<AP32> PrimaryAllocator;
+#elif defined(__x86_64__)
+#if SANITIZER_NETBSD || \
+ (SANITIZER_LINUX && !defined(MSAN_LINUX_X86_64_OLD_MAPPING))
+static const uptr kAllocatorSpace = 0x700000000000ULL;
+#else
+static const uptr kAllocatorSpace = 0x600000000000ULL;
+#endif
+static const uptr kMaxAllowedMallocSize = 8UL << 30;
+
+struct AP64 { // Allocator64 parameters. Deliberately using a short name.
+ static const uptr kSpaceBeg = kAllocatorSpace;
+ static const uptr kSpaceSize = 0x40000000000; // 4T.
+ static const uptr kMetadataSize = sizeof(Metadata);
+ typedef DefaultSizeClassMap SizeClassMap;
+ typedef MsanMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+ using AddressSpaceView = LocalAddressSpaceView;
+};
+
+typedef SizeClassAllocator64<AP64> PrimaryAllocator;
+
+#elif defined(__powerpc64__)
+static const uptr kMaxAllowedMallocSize = 2UL << 30; // 2G
+
+struct AP64 { // Allocator64 parameters. Deliberately using a short name.
+ static const uptr kSpaceBeg = 0x300000000000;
+ static const uptr kSpaceSize = 0x020000000000; // 2T.
+ static const uptr kMetadataSize = sizeof(Metadata);
+ typedef DefaultSizeClassMap SizeClassMap;
+ typedef MsanMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+ using AddressSpaceView = LocalAddressSpaceView;
+};
+
+typedef SizeClassAllocator64<AP64> PrimaryAllocator;
+#elif defined(__aarch64__)
+static const uptr kMaxAllowedMallocSize = 2UL << 30; // 2G
+
+struct AP32 {
+ static const uptr kSpaceBeg = 0;
+ static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
+ static const uptr kMetadataSize = sizeof(Metadata);
+ typedef __sanitizer::CompactSizeClassMap SizeClassMap;
+ static const uptr kRegionSizeLog = 20;
+ using AddressSpaceView = LocalAddressSpaceView;
+ typedef MsanMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+};
+typedef SizeClassAllocator32<AP32> PrimaryAllocator;
+#endif
+typedef CombinedAllocator<PrimaryAllocator> Allocator;
+typedef Allocator::AllocatorCache AllocatorCache;
+
+static Allocator allocator;
+static AllocatorCache fallback_allocator_cache;
+static StaticSpinMutex fallback_mutex;
+
+void MsanAllocatorInit() {
+ SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
+ allocator.Init(common_flags()->allocator_release_to_os_interval_ms);
+}
+
+AllocatorCache *GetAllocatorCache(MsanThreadLocalMallocStorage *ms) {
+ CHECK(ms);
+ CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator_cache));
+ return reinterpret_cast<AllocatorCache *>(ms->allocator_cache);
+}
+
+void MsanThreadLocalMallocStorage::CommitBack() {
+ allocator.SwallowCache(GetAllocatorCache(this));
+}
+
+static void *MsanAllocate(StackTrace *stack, uptr size, uptr alignment,
+ bool zeroise) {
+ if (size > kMaxAllowedMallocSize) {
+ if (AllocatorMayReturnNull()) {
+ Report("WARNING: MemorySanitizer failed to allocate 0x%zx bytes\n", size);
+ return nullptr;
+ }
+ ReportAllocationSizeTooBig(size, kMaxAllowedMallocSize, stack);
+ }
+ MsanThread *t = GetCurrentThread();
+ void *allocated;
+ if (t) {
+ AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
+ allocated = allocator.Allocate(cache, size, alignment);
+ } else {
+ SpinMutexLock l(&fallback_mutex);
+ AllocatorCache *cache = &fallback_allocator_cache;
+ allocated = allocator.Allocate(cache, size, alignment);
+ }
+ if (UNLIKELY(!allocated)) {
+ SetAllocatorOutOfMemory();
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportOutOfMemory(size, stack);
+ }
+ Metadata *meta =
+ reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
+ meta->requested_size = size;
+ if (zeroise) {
+ __msan_clear_and_unpoison(allocated, size);
+ } else if (flags()->poison_in_malloc) {
+ __msan_poison(allocated, size);
+ if (__msan_get_track_origins()) {
+ stack->tag = StackTrace::TAG_ALLOC;
+ Origin o = Origin::CreateHeapOrigin(stack);
+ __msan_set_origin(allocated, size, o.raw_id());
+ }
+ }
+ MSAN_MALLOC_HOOK(allocated, size);
+ return allocated;
+}
+
+void MsanDeallocate(StackTrace *stack, void *p) {
+ CHECK(p);
+ MSAN_FREE_HOOK(p);
+ Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(p));
+ uptr size = meta->requested_size;
+ meta->requested_size = 0;
+ // This memory will not be reused by anyone else, so we are free to keep it
+ // poisoned.
+ if (flags()->poison_in_free) {
+ __msan_poison(p, size);
+ if (__msan_get_track_origins()) {
+ stack->tag = StackTrace::TAG_DEALLOC;
+ Origin o = Origin::CreateHeapOrigin(stack);
+ __msan_set_origin(p, size, o.raw_id());
+ }
+ }
+ MsanThread *t = GetCurrentThread();
+ if (t) {
+ AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
+ allocator.Deallocate(cache, p);
+ } else {
+ SpinMutexLock l(&fallback_mutex);
+ AllocatorCache *cache = &fallback_allocator_cache;
+ allocator.Deallocate(cache, p);
+ }
+}
+
+void *MsanReallocate(StackTrace *stack, void *old_p, uptr new_size,
+ uptr alignment) {
+ Metadata *meta = reinterpret_cast<Metadata*>(allocator.GetMetaData(old_p));
+ uptr old_size = meta->requested_size;
+ uptr actually_allocated_size = allocator.GetActuallyAllocatedSize(old_p);
+ if (new_size <= actually_allocated_size) {
+ // We are not reallocating here.
+ meta->requested_size = new_size;
+ if (new_size > old_size) {
+ if (flags()->poison_in_malloc) {
+ stack->tag = StackTrace::TAG_ALLOC;
+ PoisonMemory((char *)old_p + old_size, new_size - old_size, stack);
+ }
+ }
+ return old_p;
+ }
+ uptr memcpy_size = Min(new_size, old_size);
+ void *new_p = MsanAllocate(stack, new_size, alignment, false /*zeroise*/);
+ if (new_p) {
+ CopyMemory(new_p, old_p, memcpy_size, stack);
+ MsanDeallocate(stack, old_p);
+ }
+ return new_p;
+}
+
+void *MsanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
+ if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportCallocOverflow(nmemb, size, stack);
+ }
+ return MsanAllocate(stack, nmemb * size, sizeof(u64), true);
+}
+
+static uptr AllocationSize(const void *p) {
+ if (!p) return 0;
+ const void *beg = allocator.GetBlockBegin(p);
+ if (beg != p) return 0;
+ Metadata *b = (Metadata *)allocator.GetMetaData(p);
+ return b->requested_size;
+}
+
+void *msan_malloc(uptr size, StackTrace *stack) {
+ return SetErrnoOnNull(MsanAllocate(stack, size, sizeof(u64), false));
+}
+
+void *msan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
+ return SetErrnoOnNull(MsanCalloc(stack, nmemb, size));
+}
+
+void *msan_realloc(void *ptr, uptr size, StackTrace *stack) {
+ if (!ptr)
+ return SetErrnoOnNull(MsanAllocate(stack, size, sizeof(u64), false));
+ if (size == 0) {
+ MsanDeallocate(stack, ptr);
+ return nullptr;
+ }
+ return SetErrnoOnNull(MsanReallocate(stack, ptr, size, sizeof(u64)));
+}
+
+void *msan_reallocarray(void *ptr, uptr nmemb, uptr size, StackTrace *stack) {
+ if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
+ errno = errno_ENOMEM;
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportReallocArrayOverflow(nmemb, size, stack);
+ }
+ return msan_realloc(ptr, nmemb * size, stack);
+}
+
+void *msan_valloc(uptr size, StackTrace *stack) {
+ return SetErrnoOnNull(MsanAllocate(stack, size, GetPageSizeCached(), false));
+}
+
+void *msan_pvalloc(uptr size, StackTrace *stack) {
+ uptr PageSize = GetPageSizeCached();
+ if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
+ errno = errno_ENOMEM;
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportPvallocOverflow(size, stack);
+ }
+ // pvalloc(0) should allocate one page.
+ size = size ? RoundUpTo(size, PageSize) : PageSize;
+ return SetErrnoOnNull(MsanAllocate(stack, size, PageSize, false));
+}
+
+void *msan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack) {
+ if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
+ errno = errno_EINVAL;
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportInvalidAlignedAllocAlignment(size, alignment, stack);
+ }
+ return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false));
+}
+
+void *msan_memalign(uptr alignment, uptr size, StackTrace *stack) {
+ if (UNLIKELY(!IsPowerOfTwo(alignment))) {
+ errno = errno_EINVAL;
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportInvalidAllocationAlignment(alignment, stack);
+ }
+ return SetErrnoOnNull(MsanAllocate(stack, size, alignment, false));
+}
+
+int msan_posix_memalign(void **memptr, uptr alignment, uptr size,
+ StackTrace *stack) {
+ if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
+ if (AllocatorMayReturnNull())
+ return errno_EINVAL;
+ ReportInvalidPosixMemalignAlignment(alignment, stack);
+ }
+ void *ptr = MsanAllocate(stack, size, alignment, false);
+ if (UNLIKELY(!ptr))
+ // OOM error is already taken care of by MsanAllocate.
+ return errno_ENOMEM;
+ CHECK(IsAligned((uptr)ptr, alignment));
+ *memptr = ptr;
+ return 0;
+}
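
The wrappers above encode the usual libc error conventions: the malloc family reports failure by returning null and setting errno (via SetErrnoOnNull), while posix_memalign reports failure through its return value and leaves errno alone. An illustrative caller's view; the alignment and size values are arbitrary examples:

  #include <cerrno>
  #include <cstdlib>
  void example() {
    void *p = nullptr;
    int rc = posix_memalign(&p, 64, 1024);  // 0 on success, error code otherwise
    if (rc == 0) free(p);
    errno = 0;
    void *q = aligned_alloc(3, 100);        // invalid: 3 is not a power of two
    // With allocator_may_return_null=1, q == nullptr and errno == EINVAL;
    // otherwise the run aborts with a report.
    if (q) free(q);
  }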
+
+} // namespace __msan
+
+using namespace __msan;
+
+uptr __sanitizer_get_current_allocated_bytes() {
+ uptr stats[AllocatorStatCount];
+ allocator.GetStats(stats);
+ return stats[AllocatorStatAllocated];
+}
+
+uptr __sanitizer_get_heap_size() {
+ uptr stats[AllocatorStatCount];
+ allocator.GetStats(stats);
+ return stats[AllocatorStatMapped];
+}
+
+uptr __sanitizer_get_free_bytes() { return 1; }
+
+uptr __sanitizer_get_unmapped_bytes() { return 1; }
+
+uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }
+
+int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }
+
+uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }
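
Note that __sanitizer_get_free_bytes and __sanitizer_get_unmapped_bytes are deliberate stubs above (they return 1); the ownership and size queries are real. A short usage sketch of the working part of this interface:

  #include <sanitizer/allocator_interface.h>
  #include <cassert>
  #include <cstdlib>
  int main() {
    void *p = malloc(100);
    assert(__sanitizer_get_ownership(p));               // p came from this allocator
    assert(__sanitizer_get_allocated_size(p) == 100);   // the requested size
    free(p);
    return 0;
  }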
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_allocator.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_allocator.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_allocator.h (revision 351984)
@@ -0,0 +1,32 @@
+//===-- msan_allocator.h ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of MemorySanitizer.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MSAN_ALLOCATOR_H
+#define MSAN_ALLOCATOR_H
+
+#include "sanitizer_common/sanitizer_common.h"
+
+namespace __msan {
+
+struct MsanThreadLocalMallocStorage {
+ uptr quarantine_cache[16];
+ // Allocator cache contains atomic_uint64_t which must be 8-byte aligned.
+ ALIGNED(8) uptr allocator_cache[96 * (512 * 8 + 16)]; // Opaque.
+ void CommitBack();
+
+ private:
+ // These objects are allocated via mmap() and are zero-initialized.
+ MsanThreadLocalMallocStorage() {}
+};
+
+} // namespace __msan
+#endif // MSAN_ALLOCATOR_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_allocator.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_chained_origin_depot.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_chained_origin_depot.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_chained_origin_depot.cc (revision 351984)
@@ -0,0 +1,131 @@
+//===-- msan_chained_origin_depot.cc -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// A storage for chained origins.
+//===----------------------------------------------------------------------===//
+
+#include "msan_chained_origin_depot.h"
+
+#include "sanitizer_common/sanitizer_stackdepotbase.h"
+
+namespace __msan {
+
+struct ChainedOriginDepotDesc {
+ u32 here_id;
+ u32 prev_id;
+};
+
+struct ChainedOriginDepotNode {
+ ChainedOriginDepotNode *link;
+ u32 id;
+ u32 here_id;
+ u32 prev_id;
+
+ typedef ChainedOriginDepotDesc args_type;
+
+ bool eq(u32 hash, const args_type &args) const {
+ return here_id == args.here_id && prev_id == args.prev_id;
+ }
+
+ static uptr storage_size(const args_type &args) {
+ return sizeof(ChainedOriginDepotNode);
+ }
+
+ /* This is the murmur2 hash for the 64->32 bit case.
+ It does not behave all that well because the keys have a very biased
+ distribution (I've seen 7-element buckets with the table only 14% full).
+
+ here_id is built of
+ * (1 bit) Reserved, zero.
+ * (8 bits) Part id = bits 13..20 of the hash value of here_id's key.
+ * (23 bits) Sequential number (each part has its own sequence).
+
+ prev_id has either the same distribution as here_id (but with a 3:8:21
+ split), or one of two reserved values (-1) or (-2). Either case can
+ dominate depending on the workload.
+ */
+ static u32 hash(const args_type &args) {
+ const u32 m = 0x5bd1e995;
+ const u32 seed = 0x9747b28c;
+ const u32 r = 24;
+ u32 h = seed;
+ u32 k = args.here_id;
+ k *= m;
+ k ^= k >> r;
+ k *= m;
+ h *= m;
+ h ^= k;
+
+ k = args.prev_id;
+ k *= m;
+ k ^= k >> r;
+ k *= m;
+ h *= m;
+ h ^= k;
+
+ h ^= h >> 13;
+ h *= m;
+ h ^= h >> 15;
+ return h;
+ }
+ static bool is_valid(const args_type &args) { return true; }
+ void store(const args_type &args, u32 other_hash) {
+ here_id = args.here_id;
+ prev_id = args.prev_id;
+ }
+
+ args_type load() const {
+ args_type ret = {here_id, prev_id};
+ return ret;
+ }
+
+ struct Handle {
+ ChainedOriginDepotNode *node_;
+ Handle() : node_(nullptr) {}
+ explicit Handle(ChainedOriginDepotNode *node) : node_(node) {}
+ bool valid() { return node_; }
+ u32 id() { return node_->id; }
+ int here_id() { return node_->here_id; }
+ int prev_id() { return node_->prev_id; }
+ };
+
+ Handle get_handle() { return Handle(this); }
+
+ typedef Handle handle_type;
+};
+
+static StackDepotBase<ChainedOriginDepotNode, 4, 20> chainedOriginDepot;
+
+StackDepotStats *ChainedOriginDepotGetStats() {
+ return chainedOriginDepot.GetStats();
+}
+
+bool ChainedOriginDepotPut(u32 here_id, u32 prev_id, u32 *new_id) {
+ ChainedOriginDepotDesc desc = {here_id, prev_id};
+ bool inserted;
+ ChainedOriginDepotNode::Handle h = chainedOriginDepot.Put(desc, &inserted);
+ *new_id = h.valid() ? h.id() : 0;
+ return inserted;
+}
+
+// Retrieves the stored (here_id, prev_id) pair by id: returns here_id and
+// stores prev_id in *other.
+u32 ChainedOriginDepotGet(u32 id, u32 *other) {
+ ChainedOriginDepotDesc desc = chainedOriginDepot.Get(id);
+ *other = desc.prev_id;
+ return desc.here_id;
+}
+
+void ChainedOriginDepotLockAll() {
+ chainedOriginDepot.LockAll();
+}
+
+void ChainedOriginDepotUnlockAll() {
+ chainedOriginDepot.UnlockAll();
+}
+
+} // namespace __msan
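
Put and Get together make each stored id behave like a link in a singly linked chain of origins: Put deduplicates a (here_id, prev_id) pair and returns an id for it, Get recovers the pair. A schematic walk over such a chain (illustrative only; the real traversal, depth limits, and handling of the reserved prev_id sentinels live in msan_origin.h):

  // Hypothetical: visit every stack-depot id along a chain, newest first.
  void WalkChain(u32 id) {
    while (id) {   // stopping condition simplified for illustration
      u32 prev_id;
      u32 here_id = ChainedOriginDepotGet(id, &prev_id);
      Printf("link: stack id %u\n", here_id);  // one stack trace per hop
      id = prev_id;
    }
  }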
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_chained_origin_depot.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_chained_origin_depot.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_chained_origin_depot.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_chained_origin_depot.h (revision 351984)
@@ -0,0 +1,28 @@
+//===-- msan_chained_origin_depot.h --------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// A storage for chained origins.
+//===----------------------------------------------------------------------===//
+#ifndef MSAN_CHAINED_ORIGIN_DEPOT_H
+#define MSAN_CHAINED_ORIGIN_DEPOT_H
+
+#include "sanitizer_common/sanitizer_common.h"
+
+namespace __msan {
+
+StackDepotStats *ChainedOriginDepotGetStats();
+bool ChainedOriginDepotPut(u32 here_id, u32 prev_id, u32 *new_id);
+// Retrieves the stored (here_id, prev_id) pair by id: returns here_id and
+// stores prev_id in *other.
+u32 ChainedOriginDepotGet(u32 id, u32 *other);
+
+void ChainedOriginDepotLockAll();
+void ChainedOriginDepotUnlockAll();
+
+} // namespace __msan
+
+#endif // MSAN_CHAINED_ORIGIN_DEPOT_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_chained_origin_depot.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_flags.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_flags.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_flags.h (revision 351984)
@@ -0,0 +1,29 @@
+//===-- msan_flags.h --------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of MemorySanitizer.
+//
+//===----------------------------------------------------------------------===//
+#ifndef MSAN_FLAGS_H
+#define MSAN_FLAGS_H
+
+namespace __msan {
+
+struct Flags {
+#define MSAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
+#include "msan_flags.inc"
+#undef MSAN_FLAG
+
+ void SetDefaults();
+};
+
+Flags *flags();
+
+} // namespace __msan
+
+#endif // MSAN_FLAGS_H
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_flags.inc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_flags.inc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_flags.inc (revision 351984)
@@ -0,0 +1,34 @@
+//===-- msan_flags.inc ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// MSan runtime flags.
+//
+//===----------------------------------------------------------------------===//
+#ifndef MSAN_FLAG
+# error "Define MSAN_FLAG prior to including this file!"
+#endif
+
+// MSAN_FLAG(Type, Name, DefaultValue, Description)
+// See COMMON_FLAG in sanitizer_flags.inc for more details.
+
+MSAN_FLAG(int, exit_code, -1,
+ "DEPRECATED. Use exitcode from common flags instead.")
+MSAN_FLAG(int, origin_history_size, Origin::kMaxDepth, "")
+MSAN_FLAG(int, origin_history_per_stack_limit, 20000, "")
+MSAN_FLAG(bool, poison_heap_with_zeroes, false, "")
+MSAN_FLAG(bool, poison_stack_with_zeroes, false, "")
+MSAN_FLAG(bool, poison_in_malloc, true, "")
+MSAN_FLAG(bool, poison_in_free, true, "")
+MSAN_FLAG(bool, poison_in_dtor, false, "")
+MSAN_FLAG(bool, report_umrs, true, "")
+MSAN_FLAG(bool, wrap_signals, true, "")
+MSAN_FLAG(bool, print_stats, false, "")
+MSAN_FLAG(bool, halt_on_error, !&__msan_keep_going, "")
+MSAN_FLAG(bool, atexit, false, "")
+MSAN_FLAG(int, store_context_size, 20,
+ "Like malloc_context_size, but for uninit stores.")
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_flags.inc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_interceptors.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_interceptors.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_interceptors.cc (revision 351984)
@@ -0,0 +1,1715 @@
+//===-- msan_interceptors.cc ----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of MemorySanitizer.
+//
+// Interceptors for standard library functions.
+//
+// FIXME: move as many interceptors as possible into
+// sanitizer_common/sanitizer_common_interceptors.h
+//===----------------------------------------------------------------------===//
+
+#include "interception/interception.h"
+#include "msan.h"
+#include "msan_chained_origin_depot.h"
+#include "msan_origin.h"
+#include "msan_report.h"
+#include "msan_thread.h"
+#include "msan_poisoning.h"
+#include "sanitizer_common/sanitizer_platform_limits_posix.h"
+#include "sanitizer_common/sanitizer_platform_limits_netbsd.h"
+#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_allocator_interface.h"
+#include "sanitizer_common/sanitizer_allocator_internal.h"
+#include "sanitizer_common/sanitizer_atomic.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_errno.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_linux.h"
+#include "sanitizer_common/sanitizer_tls_get_addr.h"
+#include "sanitizer_common/sanitizer_vector.h"
+
+#if SANITIZER_NETBSD
+#define fstat __fstat50
+#define gettimeofday __gettimeofday50
+#define getrusage __getrusage50
+#define tzset __tzset50
+#endif
+
+#include <stdarg.h>
+// ACHTUNG! No other system header includes in this file.
+// Ideally, we should get rid of stdarg.h as well.
+
+using namespace __msan;
+
+using __sanitizer::memory_order;
+using __sanitizer::atomic_load;
+using __sanitizer::atomic_store;
+using __sanitizer::atomic_uintptr_t;
+
+DECLARE_REAL(SIZE_T, strlen, const char *s)
+DECLARE_REAL(SIZE_T, strnlen, const char *s, SIZE_T maxlen)
+DECLARE_REAL(void *, memcpy, void *dest, const void *src, uptr n)
+DECLARE_REAL(void *, memset, void *dest, int c, uptr n)
+
+// True if this is a nested interceptor.
+static THREADLOCAL int in_interceptor_scope;
+
+void __msan_scoped_disable_interceptor_checks() { ++in_interceptor_scope; }
+void __msan_scoped_enable_interceptor_checks() { --in_interceptor_scope; }
+
+struct InterceptorScope {
+ InterceptorScope() { ++in_interceptor_scope; }
+ ~InterceptorScope() { --in_interceptor_scope; }
+};
+
+bool IsInInterceptorScope() {
+ return in_interceptor_scope;
+}
+
+static uptr allocated_for_dlsym;
+static const uptr kDlsymAllocPoolSize = 1024;
+static uptr alloc_memory_for_dlsym[kDlsymAllocPoolSize];
+
+static bool IsInDlsymAllocPool(const void *ptr) {
+ uptr off = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
+ return off < sizeof(alloc_memory_for_dlsym);
+}
+
+static void *AllocateFromLocalPool(uptr size_in_bytes) {
+ uptr size_in_words = RoundUpTo(size_in_bytes, kWordSize) / kWordSize;
+ void *mem = (void *)&alloc_memory_for_dlsym[allocated_for_dlsym];
+ allocated_for_dlsym += size_in_words;
+ CHECK_LT(allocated_for_dlsym, kDlsymAllocPoolSize);
+ return mem;
+}
+
+#define ENSURE_MSAN_INITED() do { \
+ CHECK(!msan_init_is_running); \
+ if (!msan_inited) { \
+ __msan_init(); \
+ } \
+} while (0)
+
+// Check that [x, x+n) range is unpoisoned.
+#define CHECK_UNPOISONED_0(x, n) \
+ do { \
+ sptr __offset = __msan_test_shadow(x, n); \
+ if (__msan::IsInSymbolizer()) break; \
+ if (__offset >= 0 && __msan::flags()->report_umrs) { \
+ GET_CALLER_PC_BP_SP; \
+ (void)sp; \
+ ReportUMRInsideAddressRange(__func__, x, n, __offset); \
+ __msan::PrintWarningWithOrigin( \
+ pc, bp, __msan_get_origin((const char *)x + __offset)); \
+ if (__msan::flags()->halt_on_error) { \
+ Printf("Exiting\n"); \
+ Die(); \
+ } \
+ } \
+ } while (0)
+
+// Check that [x, x+n) range is unpoisoned unless we are in a nested
+// interceptor.
+#define CHECK_UNPOISONED(x, n) \
+ do { \
+ if (!IsInInterceptorScope()) CHECK_UNPOISONED_0(x, n); \
+ } while (0)
+
+#define CHECK_UNPOISONED_STRING_OF_LEN(x, len, n) \
+ CHECK_UNPOISONED((x), \
+ common_flags()->strict_string_checks ? (len) + 1 : (n) )
+
+#define CHECK_UNPOISONED_STRING(x, n) \
+ CHECK_UNPOISONED_STRING_OF_LEN((x), internal_strlen(x), (n))
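
These macros fix the shape that most interceptors below follow: make sure the runtime is initialized, check that poisoned bytes are not flowing into libc, call the real function, then unpoison whatever it wrote. A schematic skeleton for a hypothetical function foo (not an interceptor from this file):

  INTERCEPTOR(int, foo, char *out, const char *in) {
    ENSURE_MSAN_INITED();
    CHECK_UNPOISONED_STRING(in, 0);  // report if the caller passed poisoned input
    int res = REAL(foo)(out, in);
    if (!res)
      __msan_unpoison(out, REAL(strlen)(out) + 1);  // output is now initialized
    return res;
  }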
+
+#if !SANITIZER_FREEBSD && !SANITIZER_NETBSD
+INTERCEPTOR(SIZE_T, fread_unlocked, void *ptr, SIZE_T size, SIZE_T nmemb,
+ void *file) {
+ ENSURE_MSAN_INITED();
+ SIZE_T res = REAL(fread_unlocked)(ptr, size, nmemb, file);
+ if (res > 0)
+ __msan_unpoison(ptr, res * size);
+ return res;
+}
+#define MSAN_MAYBE_INTERCEPT_FREAD_UNLOCKED INTERCEPT_FUNCTION(fread_unlocked)
+#else
+#define MSAN_MAYBE_INTERCEPT_FREAD_UNLOCKED
+#endif
+
+#if !SANITIZER_NETBSD
+INTERCEPTOR(void *, mempcpy, void *dest, const void *src, SIZE_T n) {
+ return (char *)__msan_memcpy(dest, src, n) + n;
+}
+#define MSAN_MAYBE_INTERCEPT_MEMPCPY INTERCEPT_FUNCTION(mempcpy)
+#else
+#define MSAN_MAYBE_INTERCEPT_MEMPCPY
+#endif
+
+INTERCEPTOR(void *, memccpy, void *dest, const void *src, int c, SIZE_T n) {
+ ENSURE_MSAN_INITED();
+ void *res = REAL(memccpy)(dest, src, c, n);
+ CHECK(!res || (res >= dest && res <= (char *)dest + n));
+ SIZE_T sz = res ? (char *)res - (char *)dest : n;
+ CHECK_UNPOISONED(src, sz);
+ __msan_unpoison(dest, sz);
+ return res;
+}
+
+INTERCEPTOR(void *, bcopy, const void *src, void *dest, SIZE_T n) {
+ return __msan_memmove(dest, src, n);
+}
+
+INTERCEPTOR(int, posix_memalign, void **memptr, SIZE_T alignment, SIZE_T size) {
+ GET_MALLOC_STACK_TRACE;
+ CHECK_NE(memptr, 0);
+ int res = msan_posix_memalign(memptr, alignment, size, &stack);
+ if (!res)
+ __msan_unpoison(memptr, sizeof(*memptr));
+ return res;
+}
+
+#if !SANITIZER_FREEBSD && !SANITIZER_NETBSD
+INTERCEPTOR(void *, memalign, SIZE_T alignment, SIZE_T size) {
+ GET_MALLOC_STACK_TRACE;
+ return msan_memalign(alignment, size, &stack);
+}
+#define MSAN_MAYBE_INTERCEPT_MEMALIGN INTERCEPT_FUNCTION(memalign)
+#else
+#define MSAN_MAYBE_INTERCEPT_MEMALIGN
+#endif
+
+INTERCEPTOR(void *, aligned_alloc, SIZE_T alignment, SIZE_T size) {
+ GET_MALLOC_STACK_TRACE;
+ return msan_aligned_alloc(alignment, size, &stack);
+}
+
+#if !SANITIZER_NETBSD
+INTERCEPTOR(void *, __libc_memalign, SIZE_T alignment, SIZE_T size) {
+ GET_MALLOC_STACK_TRACE;
+ void *ptr = msan_memalign(alignment, size, &stack);
+ if (ptr)
+ DTLS_on_libc_memalign(ptr, size);
+ return ptr;
+}
+#define MSAN_MAYBE_INTERCEPT___LIBC_MEMALIGN INTERCEPT_FUNCTION(__libc_memalign)
+#else
+#define MSAN_MAYBE_INTERCEPT___LIBC_MEMALIGN
+#endif
+
+INTERCEPTOR(void *, valloc, SIZE_T size) {
+ GET_MALLOC_STACK_TRACE;
+ return msan_valloc(size, &stack);
+}
+
+#if !SANITIZER_FREEBSD && !SANITIZER_NETBSD
+INTERCEPTOR(void *, pvalloc, SIZE_T size) {
+ GET_MALLOC_STACK_TRACE;
+ return msan_pvalloc(size, &stack);
+}
+#define MSAN_MAYBE_INTERCEPT_PVALLOC INTERCEPT_FUNCTION(pvalloc)
+#else
+#define MSAN_MAYBE_INTERCEPT_PVALLOC
+#endif
+
+INTERCEPTOR(void, free, void *ptr) {
+ GET_MALLOC_STACK_TRACE;
+ if (!ptr || UNLIKELY(IsInDlsymAllocPool(ptr))) return;
+ MsanDeallocate(&stack, ptr);
+}
+
+#if !SANITIZER_FREEBSD && !SANITIZER_NETBSD
+INTERCEPTOR(void, cfree, void *ptr) {
+ GET_MALLOC_STACK_TRACE;
+ if (!ptr || UNLIKELY(IsInDlsymAllocPool(ptr))) return;
+ MsanDeallocate(&stack, ptr);
+}
+#define MSAN_MAYBE_INTERCEPT_CFREE INTERCEPT_FUNCTION(cfree)
+#else
+#define MSAN_MAYBE_INTERCEPT_CFREE
+#endif
+
+#if !SANITIZER_NETBSD
+INTERCEPTOR(uptr, malloc_usable_size, void *ptr) {
+ return __sanitizer_get_allocated_size(ptr);
+}
+#define MSAN_MAYBE_INTERCEPT_MALLOC_USABLE_SIZE \
+ INTERCEPT_FUNCTION(malloc_usable_size)
+#else
+#define MSAN_MAYBE_INTERCEPT_MALLOC_USABLE_SIZE
+#endif
+
+#if !SANITIZER_FREEBSD && !SANITIZER_NETBSD
+// This function actually returns a struct by value, but we can't unpoison a
+// temporary! The following is equivalent on all supported platforms but
+// aarch64 (which uses a different register for sret value). We have a test
+// to confirm that.
+INTERCEPTOR(void, mallinfo, __sanitizer_struct_mallinfo *sret) {
+#ifdef __aarch64__
+ uptr r8;
+ asm volatile("mov %0,x8" : "=r" (r8));
+ sret = reinterpret_cast<__sanitizer_struct_mallinfo*>(r8);
+#endif
+ REAL(memset)(sret, 0, sizeof(*sret));
+ __msan_unpoison(sret, sizeof(*sret));
+}
+#define MSAN_MAYBE_INTERCEPT_MALLINFO INTERCEPT_FUNCTION(mallinfo)
+#else
+#define MSAN_MAYBE_INTERCEPT_MALLINFO
+#endif
+
+#if !SANITIZER_FREEBSD && !SANITIZER_NETBSD
+INTERCEPTOR(int, mallopt, int cmd, int value) {
+ return 0;
+}
+#define MSAN_MAYBE_INTERCEPT_MALLOPT INTERCEPT_FUNCTION(mallopt)
+#else
+#define MSAN_MAYBE_INTERCEPT_MALLOPT
+#endif
+
+#if !SANITIZER_FREEBSD && !SANITIZER_NETBSD
+INTERCEPTOR(void, malloc_stats, void) {
+ // FIXME: implement, but don't call REAL(malloc_stats)!
+}
+#define MSAN_MAYBE_INTERCEPT_MALLOC_STATS INTERCEPT_FUNCTION(malloc_stats)
+#else
+#define MSAN_MAYBE_INTERCEPT_MALLOC_STATS
+#endif
+
+INTERCEPTOR(char *, strcpy, char *dest, const char *src) { // NOLINT
+ ENSURE_MSAN_INITED();
+ GET_STORE_STACK_TRACE;
+ SIZE_T n = REAL(strlen)(src);
+ CHECK_UNPOISONED_STRING(src + n, 0);
+ char *res = REAL(strcpy)(dest, src); // NOLINT
+ CopyShadowAndOrigin(dest, src, n + 1, &stack);
+ return res;
+}
+
+INTERCEPTOR(char *, strncpy, char *dest, const char *src, SIZE_T n) { // NOLINT
+ ENSURE_MSAN_INITED();
+ GET_STORE_STACK_TRACE;
+ SIZE_T copy_size = REAL(strnlen)(src, n);
+ if (copy_size < n)
+ copy_size++; // trailing \0
+ char *res = REAL(strncpy)(dest, src, n); // NOLINT
+ CopyShadowAndOrigin(dest, src, copy_size, &stack);
+ __msan_unpoison(dest + copy_size, n - copy_size);
+ return res;
+}
+
+#if !SANITIZER_NETBSD
+INTERCEPTOR(char *, stpcpy, char *dest, const char *src) { // NOLINT
+ ENSURE_MSAN_INITED();
+ GET_STORE_STACK_TRACE;
+ SIZE_T n = REAL(strlen)(src);
+ CHECK_UNPOISONED_STRING(src + n, 0);
+ char *res = REAL(stpcpy)(dest, src); // NOLINT
+ CopyShadowAndOrigin(dest, src, n + 1, &stack);
+ return res;
+}
+#define MSAN_MAYBE_INTERCEPT_STPCPY INTERCEPT_FUNCTION(stpcpy)
+#else
+#define MSAN_MAYBE_INTERCEPT_STPCPY
+#endif
+
+INTERCEPTOR(char *, strdup, char *src) {
+ ENSURE_MSAN_INITED();
+ GET_STORE_STACK_TRACE;
+ // On FreeBSD strdup() leverages strlen().
+ InterceptorScope interceptor_scope;
+ SIZE_T n = REAL(strlen)(src);
+ CHECK_UNPOISONED_STRING(src + n, 0);
+ char *res = REAL(strdup)(src);
+ CopyShadowAndOrigin(res, src, n + 1, &stack);
+ return res;
+}
+
+#if !SANITIZER_FREEBSD && !SANITIZER_NETBSD
+INTERCEPTOR(char *, __strdup, char *src) {
+ ENSURE_MSAN_INITED();
+ GET_STORE_STACK_TRACE;
+ SIZE_T n = REAL(strlen)(src);
+ CHECK_UNPOISONED_STRING(src + n, 0);
+ char *res = REAL(__strdup)(src);
+ CopyShadowAndOrigin(res, src, n + 1, &stack);
+ return res;
+}
+#define MSAN_MAYBE_INTERCEPT___STRDUP INTERCEPT_FUNCTION(__strdup)
+#else
+#define MSAN_MAYBE_INTERCEPT___STRDUP
+#endif
+
+#if !SANITIZER_NETBSD
+INTERCEPTOR(char *, gcvt, double number, SIZE_T ndigit, char *buf) {
+ ENSURE_MSAN_INITED();
+ char *res = REAL(gcvt)(number, ndigit, buf);
+ SIZE_T n = REAL(strlen)(buf);
+ __msan_unpoison(buf, n + 1);
+ return res;
+}
+#define MSAN_MAYBE_INTERCEPT_GCVT INTERCEPT_FUNCTION(gcvt)
+#else
+#define MSAN_MAYBE_INTERCEPT_GCVT
+#endif
+
+INTERCEPTOR(char *, strcat, char *dest, const char *src) { // NOLINT
+ ENSURE_MSAN_INITED();
+ GET_STORE_STACK_TRACE;
+ SIZE_T src_size = REAL(strlen)(src);
+ SIZE_T dest_size = REAL(strlen)(dest);
+ CHECK_UNPOISONED_STRING(src + src_size, 0);
+ CHECK_UNPOISONED_STRING(dest + dest_size, 0);
+ char *res = REAL(strcat)(dest, src); // NOLINT
+ CopyShadowAndOrigin(dest + dest_size, src, src_size + 1, &stack);
+ return res;
+}
+
+INTERCEPTOR(char *, strncat, char *dest, const char *src, SIZE_T n) { // NOLINT
+ ENSURE_MSAN_INITED();
+ GET_STORE_STACK_TRACE;
+ SIZE_T dest_size = REAL(strlen)(dest);
+ SIZE_T copy_size = REAL(strnlen)(src, n);
+ CHECK_UNPOISONED_STRING(dest + dest_size, 0);
+ char *res = REAL(strncat)(dest, src, n); // NOLINT
+ CopyShadowAndOrigin(dest + dest_size, src, copy_size, &stack);
+ __msan_unpoison(dest + dest_size + copy_size, 1); // \0
+ return res;
+}
+
+// Hack: always pass nptr and endptr as part of __VA_ARGS__ to avoid having to
+// deal with an empty __VA_ARGS__ in the case of INTERCEPTOR_STRTO.
+#define INTERCEPTOR_STRTO_BODY(ret_type, func, ...) \
+ ENSURE_MSAN_INITED(); \
+ ret_type res = REAL(func)(__VA_ARGS__); \
+ __msan_unpoison(endptr, sizeof(*endptr)); \
+ return res;
+
+#define INTERCEPTOR_STRTO(ret_type, func, char_type) \
+ INTERCEPTOR(ret_type, func, const char_type *nptr, char_type **endptr) { \
+ INTERCEPTOR_STRTO_BODY(ret_type, func, nptr, endptr); \
+ }
+
+#define INTERCEPTOR_STRTO_BASE(ret_type, func, char_type) \
+ INTERCEPTOR(ret_type, func, const char_type *nptr, char_type **endptr, \
+ int base) { \
+ INTERCEPTOR_STRTO_BODY(ret_type, func, nptr, endptr, base); \
+ }
+
+#define INTERCEPTOR_STRTO_LOC(ret_type, func, char_type) \
+ INTERCEPTOR(ret_type, func, const char_type *nptr, char_type **endptr, \
+ void *loc) { \
+ INTERCEPTOR_STRTO_BODY(ret_type, func, nptr, endptr, loc); \
+ }
+
+#define INTERCEPTOR_STRTO_BASE_LOC(ret_type, func, char_type) \
+ INTERCEPTOR(ret_type, func, const char_type *nptr, char_type **endptr, \
+ int base, void *loc) { \
+ INTERCEPTOR_STRTO_BODY(ret_type, func, nptr, endptr, base, loc); \
+ }
+
+#if SANITIZER_NETBSD
+#define INTERCEPTORS_STRTO(ret_type, func, char_type) \
+ INTERCEPTOR_STRTO(ret_type, func, char_type) \
+ INTERCEPTOR_STRTO_LOC(ret_type, func##_l, char_type)
+
+#define INTERCEPTORS_STRTO_BASE(ret_type, func, char_type) \
+ INTERCEPTOR_STRTO_BASE(ret_type, func, char_type) \
+ INTERCEPTOR_STRTO_BASE_LOC(ret_type, func##_l, char_type)
+
+#else
+#define INTERCEPTORS_STRTO(ret_type, func, char_type) \
+ INTERCEPTOR_STRTO(ret_type, func, char_type) \
+ INTERCEPTOR_STRTO_LOC(ret_type, func##_l, char_type) \
+ INTERCEPTOR_STRTO_LOC(ret_type, __##func##_l, char_type) \
+ INTERCEPTOR_STRTO_LOC(ret_type, __##func##_internal, char_type)
+
+#define INTERCEPTORS_STRTO_BASE(ret_type, func, char_type) \
+ INTERCEPTOR_STRTO_BASE(ret_type, func, char_type) \
+ INTERCEPTOR_STRTO_BASE_LOC(ret_type, func##_l, char_type) \
+ INTERCEPTOR_STRTO_BASE_LOC(ret_type, __##func##_l, char_type) \
+ INTERCEPTOR_STRTO_BASE_LOC(ret_type, __##func##_internal, char_type)
+#endif
+
+INTERCEPTORS_STRTO(double, strtod, char) // NOLINT
+INTERCEPTORS_STRTO(float, strtof, char) // NOLINT
+INTERCEPTORS_STRTO(long double, strtold, char) // NOLINT
+INTERCEPTORS_STRTO_BASE(long, strtol, char) // NOLINT
+INTERCEPTORS_STRTO_BASE(long long, strtoll, char) // NOLINT
+INTERCEPTORS_STRTO_BASE(unsigned long, strtoul, char) // NOLINT
+INTERCEPTORS_STRTO_BASE(unsigned long long, strtoull, char) // NOLINT
+INTERCEPTORS_STRTO_BASE(u64, strtouq, char) // NOLINT
+
+INTERCEPTORS_STRTO(double, wcstod, wchar_t) // NOLINT
+INTERCEPTORS_STRTO(float, wcstof, wchar_t) // NOLINT
+INTERCEPTORS_STRTO(long double, wcstold, wchar_t) // NOLINT
+INTERCEPTORS_STRTO_BASE(long, wcstol, wchar_t) // NOLINT
+INTERCEPTORS_STRTO_BASE(long long, wcstoll, wchar_t) // NOLINT
+INTERCEPTORS_STRTO_BASE(unsigned long, wcstoul, wchar_t) // NOLINT
+INTERCEPTORS_STRTO_BASE(unsigned long long, wcstoull, wchar_t) // NOLINT
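
For reference, INTERCEPTORS_STRTO_BASE(long, strtol, char) expands, for the base function and modulo the _l/__internal variants, to roughly the following (written out by hand):

  INTERCEPTOR(long, strtol, const char *nptr, char **endptr, int base) {
    ENSURE_MSAN_INITED();
    long res = REAL(strtol)(nptr, endptr, base);
    __msan_unpoison(endptr, sizeof(*endptr));  // the out-parameter was written
    return res;
  }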
+
+#if SANITIZER_NETBSD
+#define INTERCEPT_STRTO(func) \
+ INTERCEPT_FUNCTION(func); \
+ INTERCEPT_FUNCTION(func##_l);
+#else
+#define INTERCEPT_STRTO(func) \
+ INTERCEPT_FUNCTION(func); \
+ INTERCEPT_FUNCTION(func##_l); \
+ INTERCEPT_FUNCTION(__##func##_l); \
+ INTERCEPT_FUNCTION(__##func##_internal);
+#endif
+
+
+// FIXME: support *wprintf in common format interceptors.
+INTERCEPTOR(int, vswprintf, void *str, uptr size, void *format, va_list ap) {
+ ENSURE_MSAN_INITED();
+ int res = REAL(vswprintf)(str, size, format, ap);
+ if (res >= 0) {
+ __msan_unpoison(str, sizeof(wchar_t) * (res + 1));
+ }
+ return res;
+}
+
+INTERCEPTOR(int, swprintf, void *str, uptr size, void *format, ...) {
+ ENSURE_MSAN_INITED();
+ va_list ap;
+ va_start(ap, format);
+ int res = vswprintf(str, size, format, ap);
+ va_end(ap);
+ return res;
+}
+
+#define INTERCEPTOR_STRFTIME_BODY(char_type, ret_type, func, s, ...) \
+ ENSURE_MSAN_INITED(); \
+ InterceptorScope interceptor_scope; \
+ ret_type res = REAL(func)(s, __VA_ARGS__); \
+ if (s) __msan_unpoison(s, sizeof(char_type) * (res + 1)); \
+ return res;
+
+INTERCEPTOR(SIZE_T, strftime, char *s, SIZE_T max, const char *format,
+ __sanitizer_tm *tm) {
+ INTERCEPTOR_STRFTIME_BODY(char, SIZE_T, strftime, s, max, format, tm);
+}
+
+INTERCEPTOR(SIZE_T, strftime_l, char *s, SIZE_T max, const char *format,
+ __sanitizer_tm *tm, void *loc) {
+ INTERCEPTOR_STRFTIME_BODY(char, SIZE_T, strftime_l, s, max, format, tm, loc);
+}
+
+#if !SANITIZER_FREEBSD && !SANITIZER_NETBSD
+INTERCEPTOR(SIZE_T, __strftime_l, char *s, SIZE_T max, const char *format,
+ __sanitizer_tm *tm, void *loc) {
+ INTERCEPTOR_STRFTIME_BODY(char, SIZE_T, __strftime_l, s, max, format, tm,
+ loc);
+}
+#define MSAN_MAYBE_INTERCEPT___STRFTIME_L INTERCEPT_FUNCTION(__strftime_l)
+#else
+#define MSAN_MAYBE_INTERCEPT___STRFTIME_L
+#endif
+
+INTERCEPTOR(SIZE_T, wcsftime, wchar_t *s, SIZE_T max, const wchar_t *format,
+ __sanitizer_tm *tm) {
+ INTERCEPTOR_STRFTIME_BODY(wchar_t, SIZE_T, wcsftime, s, max, format, tm);
+}
+
+INTERCEPTOR(SIZE_T, wcsftime_l, wchar_t *s, SIZE_T max, const wchar_t *format,
+ __sanitizer_tm *tm, void *loc) {
+ INTERCEPTOR_STRFTIME_BODY(wchar_t, SIZE_T, wcsftime_l, s, max, format, tm,
+ loc);
+}
+
+#if !SANITIZER_FREEBSD && !SANITIZER_NETBSD
+INTERCEPTOR(SIZE_T, __wcsftime_l, wchar_t *s, SIZE_T max, const wchar_t *format,
+ __sanitizer_tm *tm, void *loc) {
+ INTERCEPTOR_STRFTIME_BODY(wchar_t, SIZE_T, __wcsftime_l, s, max, format, tm,
+ loc);
+}
+#define MSAN_MAYBE_INTERCEPT___WCSFTIME_L INTERCEPT_FUNCTION(__wcsftime_l)
+#else
+#define MSAN_MAYBE_INTERCEPT___WCSFTIME_L
+#endif
+
+INTERCEPTOR(int, mbtowc, wchar_t *dest, const char *src, SIZE_T n) {
+ ENSURE_MSAN_INITED();
+ int res = REAL(mbtowc)(dest, src, n);
+ if (res != -1 && dest) __msan_unpoison(dest, sizeof(wchar_t));
+ return res;
+}
+
+INTERCEPTOR(SIZE_T, mbrtowc, wchar_t *dest, const char *src, SIZE_T n,
+ void *ps) {
+ ENSURE_MSAN_INITED();
+ SIZE_T res = REAL(mbrtowc)(dest, src, n, ps);
+ if (res != (SIZE_T)-1 && dest) __msan_unpoison(dest, sizeof(wchar_t));
+ return res;
+}
+
+// wchar_t *wmemcpy(wchar_t *dest, const wchar_t *src, SIZE_T n);
+INTERCEPTOR(wchar_t *, wmemcpy, wchar_t *dest, const wchar_t *src, SIZE_T n) {
+ ENSURE_MSAN_INITED();
+ GET_STORE_STACK_TRACE;
+ wchar_t *res = REAL(wmemcpy)(dest, src, n);
+ CopyShadowAndOrigin(dest, src, n * sizeof(wchar_t), &stack);
+ return res;
+}
+
+#if !SANITIZER_NETBSD
+INTERCEPTOR(wchar_t *, wmempcpy, wchar_t *dest, const wchar_t *src, SIZE_T n) {
+ ENSURE_MSAN_INITED();
+ GET_STORE_STACK_TRACE;
+ wchar_t *res = REAL(wmempcpy)(dest, src, n);
+ CopyShadowAndOrigin(dest, src, n * sizeof(wchar_t), &stack);
+ return res;
+}
+#define MSAN_MAYBE_INTERCEPT_WMEMPCPY INTERCEPT_FUNCTION(wmempcpy)
+#else
+#define MSAN_MAYBE_INTERCEPT_WMEMPCPY
+#endif
+
+INTERCEPTOR(wchar_t *, wmemset, wchar_t *s, wchar_t c, SIZE_T n) {
+ CHECK(MEM_IS_APP(s));
+ ENSURE_MSAN_INITED();
+ wchar_t *res = REAL(wmemset)(s, c, n);
+ __msan_unpoison(s, n * sizeof(wchar_t));
+ return res;
+}
+
+INTERCEPTOR(wchar_t *, wmemmove, wchar_t *dest, const wchar_t *src, SIZE_T n) {
+ ENSURE_MSAN_INITED();
+ GET_STORE_STACK_TRACE;
+ wchar_t *res = REAL(wmemmove)(dest, src, n);
+ MoveShadowAndOrigin(dest, src, n * sizeof(wchar_t), &stack);
+ return res;
+}
+
+INTERCEPTOR(int, wcscmp, const wchar_t *s1, const wchar_t *s2) {
+ ENSURE_MSAN_INITED();
+ int res = REAL(wcscmp)(s1, s2);
+ return res;
+}
+
+INTERCEPTOR(int, gettimeofday, void *tv, void *tz) {
+ ENSURE_MSAN_INITED();
+ int res = REAL(gettimeofday)(tv, tz);
+ if (tv)
+ __msan_unpoison(tv, 16);
+ if (tz)
+ __msan_unpoison(tz, 8);
+ return res;
+}
+
+#if !SANITIZER_NETBSD
+INTERCEPTOR(char *, fcvt, double x, int a, int *b, int *c) {
+ ENSURE_MSAN_INITED();
+ char *res = REAL(fcvt)(x, a, b, c);
+ __msan_unpoison(b, sizeof(*b));
+ __msan_unpoison(c, sizeof(*c));
+ if (res) __msan_unpoison(res, REAL(strlen)(res) + 1);
+ return res;
+}
+#define MSAN_MAYBE_INTERCEPT_FCVT INTERCEPT_FUNCTION(fcvt)
+#else
+#define MSAN_MAYBE_INTERCEPT_FCVT
+#endif
+
+INTERCEPTOR(char *, getenv, char *name) {
+ if (msan_init_is_running)
+ return REAL(getenv)(name);
+ ENSURE_MSAN_INITED();
+ char *res = REAL(getenv)(name);
+ if (res) __msan_unpoison(res, REAL(strlen)(res) + 1);
+ return res;
+}
+
+extern char **environ;
+
+static void UnpoisonEnviron() {
+ char **envp = environ;
+ for (; *envp; ++envp) {
+ __msan_unpoison(envp, sizeof(*envp));
+ __msan_unpoison(*envp, REAL(strlen)(*envp) + 1);
+ }
+ // Trailing NULL pointer.
+ __msan_unpoison(envp, sizeof(*envp));
+}
+
+INTERCEPTOR(int, setenv, const char *name, const char *value, int overwrite) {
+ ENSURE_MSAN_INITED();
+ CHECK_UNPOISONED_STRING(name, 0);
+ int res = REAL(setenv)(name, value, overwrite);
+ if (!res) UnpoisonEnviron();
+ return res;
+}
+
+INTERCEPTOR(int, putenv, char *string) {
+ ENSURE_MSAN_INITED();
+ int res = REAL(putenv)(string);
+ if (!res) UnpoisonEnviron();
+ return res;
+}
+
+#if SANITIZER_FREEBSD || SANITIZER_NETBSD
+INTERCEPTOR(int, fstat, int fd, void *buf) {
+ ENSURE_MSAN_INITED();
+ int res = REAL(fstat)(fd, buf);
+ if (!res)
+ __msan_unpoison(buf, __sanitizer::struct_stat_sz);
+ return res;
+}
+#define MSAN_MAYBE_INTERCEPT_FSTAT INTERCEPT_FUNCTION(fstat)
+#else
+#define MSAN_MAYBE_INTERCEPT_FSTAT
+#endif
+
+#if !SANITIZER_FREEBSD && !SANITIZER_NETBSD
+INTERCEPTOR(int, __fxstat, int magic, int fd, void *buf) {
+ ENSURE_MSAN_INITED();
+ int res = REAL(__fxstat)(magic, fd, buf);
+ if (!res)
+ __msan_unpoison(buf, __sanitizer::struct_stat_sz);
+ return res;
+}
+#define MSAN_MAYBE_INTERCEPT___FXSTAT INTERCEPT_FUNCTION(__fxstat)
+#else
+#define MSAN_MAYBE_INTERCEPT___FXSTAT
+#endif
+
+#if !SANITIZER_FREEBSD && !SANITIZER_NETBSD
+INTERCEPTOR(int, __fxstat64, int magic, int fd, void *buf) {
+ ENSURE_MSAN_INITED();
+ int res = REAL(__fxstat64)(magic, fd, buf);
+ if (!res)
+ __msan_unpoison(buf, __sanitizer::struct_stat64_sz);
+ return res;
+}
+#define MSAN_MAYBE_INTERCEPT___FXSTAT64 INTERCEPT_FUNCTION(__fxstat64)
+#else
+#define MSAN_MAYBE_INTERCEPT___FXSTAT64
+#endif
+
+#if SANITIZER_FREEBSD || SANITIZER_NETBSD
+INTERCEPTOR(int, fstatat, int fd, char *pathname, void *buf, int flags) {
+ ENSURE_MSAN_INITED();
+ int res = REAL(fstatat)(fd, pathname, buf, flags);
+ if (!res) __msan_unpoison(buf, __sanitizer::struct_stat_sz);
+ return res;
+}
+# define MSAN_INTERCEPT_FSTATAT INTERCEPT_FUNCTION(fstatat)
+#else
+INTERCEPTOR(int, __fxstatat, int magic, int fd, char *pathname, void *buf,
+ int flags) {
+ ENSURE_MSAN_INITED();
+ int res = REAL(__fxstatat)(magic, fd, pathname, buf, flags);
+ if (!res) __msan_unpoison(buf, __sanitizer::struct_stat_sz);
+ return res;
+}
+# define MSAN_INTERCEPT_FSTATAT INTERCEPT_FUNCTION(__fxstatat)
+#endif
+
+#if !SANITIZER_FREEBSD && !SANITIZER_NETBSD
+INTERCEPTOR(int, __fxstatat64, int magic, int fd, char *pathname, void *buf,
+ int flags) {
+ ENSURE_MSAN_INITED();
+ int res = REAL(__fxstatat64)(magic, fd, pathname, buf, flags);
+ if (!res) __msan_unpoison(buf, __sanitizer::struct_stat64_sz);
+ return res;
+}
+#define MSAN_MAYBE_INTERCEPT___FXSTATAT64 INTERCEPT_FUNCTION(__fxstatat64)
+#else
+#define MSAN_MAYBE_INTERCEPT___FXSTATAT64
+#endif
+
+INTERCEPTOR(int, pipe, int pipefd[2]) {
+ if (msan_init_is_running)
+ return REAL(pipe)(pipefd);
+ ENSURE_MSAN_INITED();
+ int res = REAL(pipe)(pipefd);
+ if (!res)
+ __msan_unpoison(pipefd, sizeof(int[2]));
+ return res;
+}
+
+INTERCEPTOR(int, pipe2, int pipefd[2], int flags) {
+ ENSURE_MSAN_INITED();
+ int res = REAL(pipe2)(pipefd, flags);
+ if (!res)
+ __msan_unpoison(pipefd, sizeof(int[2]));
+ return res;
+}
+
+INTERCEPTOR(int, socketpair, int domain, int type, int protocol, int sv[2]) {
+ ENSURE_MSAN_INITED();
+ int res = REAL(socketpair)(domain, type, protocol, sv);
+ if (!res)
+ __msan_unpoison(sv, sizeof(int[2]));
+ return res;
+}
+
+#if !SANITIZER_FREEBSD && !SANITIZER_NETBSD
+INTERCEPTOR(char *, fgets_unlocked, char *s, int size, void *stream) {
+ ENSURE_MSAN_INITED();
+ char *res = REAL(fgets_unlocked)(s, size, stream);
+ if (res)
+ __msan_unpoison(s, REAL(strlen)(s) + 1);
+ return res;
+}
+#define MSAN_MAYBE_INTERCEPT_FGETS_UNLOCKED INTERCEPT_FUNCTION(fgets_unlocked)
+#else
+#define MSAN_MAYBE_INTERCEPT_FGETS_UNLOCKED
+#endif
+
+INTERCEPTOR(int, getrlimit, int resource, void *rlim) {
+ if (msan_init_is_running)
+ return REAL(getrlimit)(resource, rlim);
+ ENSURE_MSAN_INITED();
+ int res = REAL(getrlimit)(resource, rlim);
+ if (!res)
+ __msan_unpoison(rlim, __sanitizer::struct_rlimit_sz);
+ return res;
+}
+
+#if !SANITIZER_FREEBSD && !SANITIZER_NETBSD
+INTERCEPTOR(int, getrlimit64, int resource, void *rlim) {
+ if (msan_init_is_running) return REAL(getrlimit64)(resource, rlim);
+ ENSURE_MSAN_INITED();
+ int res = REAL(getrlimit64)(resource, rlim);
+ if (!res) __msan_unpoison(rlim, __sanitizer::struct_rlimit64_sz);
+ return res;
+}
+
+INTERCEPTOR(int, prlimit, int pid, int resource, void *new_rlimit,
+ void *old_rlimit) {
+ if (msan_init_is_running)
+ return REAL(prlimit)(pid, resource, new_rlimit, old_rlimit);
+ ENSURE_MSAN_INITED();
+ CHECK_UNPOISONED(new_rlimit, __sanitizer::struct_rlimit_sz);
+ int res = REAL(prlimit)(pid, resource, new_rlimit, old_rlimit);
+ if (!res) __msan_unpoison(old_rlimit, __sanitizer::struct_rlimit_sz);
+ return res;
+}
+
+INTERCEPTOR(int, prlimit64, int pid, int resource, void *new_rlimit,
+ void *old_rlimit) {
+ if (msan_init_is_running)
+ return REAL(prlimit64)(pid, resource, new_rlimit, old_rlimit);
+ ENSURE_MSAN_INITED();
+ CHECK_UNPOISONED(new_rlimit, __sanitizer::struct_rlimit64_sz);
+ int res = REAL(prlimit64)(pid, resource, new_rlimit, old_rlimit);
+ if (!res) __msan_unpoison(old_rlimit, __sanitizer::struct_rlimit64_sz);
+ return res;
+}
+
+#define MSAN_MAYBE_INTERCEPT_GETRLIMIT64 INTERCEPT_FUNCTION(getrlimit64)
+#define MSAN_MAYBE_INTERCEPT_PRLIMIT INTERCEPT_FUNCTION(prlimit)
+#define MSAN_MAYBE_INTERCEPT_PRLIMIT64 INTERCEPT_FUNCTION(prlimit64)
+#else
+#define MSAN_MAYBE_INTERCEPT_GETRLIMIT64
+#define MSAN_MAYBE_INTERCEPT_PRLIMIT
+#define MSAN_MAYBE_INTERCEPT_PRLIMIT64
+#endif
+
+#if SANITIZER_FREEBSD
+// FreeBSD's <sys/utsname.h> define uname() as
+// static __inline int uname(struct utsname *name) {
+// return __xuname(SYS_NMLN, (void*)name);
+// }
+INTERCEPTOR(int, __xuname, int size, void *utsname) {
+ ENSURE_MSAN_INITED();
+ int res = REAL(__xuname)(size, utsname);
+ if (!res)
+ __msan_unpoison(utsname, __sanitizer::struct_utsname_sz);
+ return res;
+}
+#define MSAN_INTERCEPT_UNAME INTERCEPT_FUNCTION(__xuname)
+#else
+INTERCEPTOR(int, uname, struct utsname *utsname) {
+ ENSURE_MSAN_INITED();
+ int res = REAL(uname)(utsname);
+ if (!res)
+ __msan_unpoison(utsname, __sanitizer::struct_utsname_sz);
+ return res;
+}
+#define MSAN_INTERCEPT_UNAME INTERCEPT_FUNCTION(uname)
+#endif
+
+INTERCEPTOR(int, gethostname, char *name, SIZE_T len) {
+ ENSURE_MSAN_INITED();
+ int res = REAL(gethostname)(name, len);
+ if (!res) {
+ SIZE_T real_len = REAL(strnlen)(name, len);
+ if (real_len < len)
+ ++real_len;
+ __msan_unpoison(name, real_len);
+ }
+ return res;
+}
+
+#if !SANITIZER_FREEBSD && !SANITIZER_NETBSD
+INTERCEPTOR(int, epoll_wait, int epfd, void *events, int maxevents,
+ int timeout) {
+ ENSURE_MSAN_INITED();
+ int res = REAL(epoll_wait)(epfd, events, maxevents, timeout);
+ if (res > 0) {
+ __msan_unpoison(events, __sanitizer::struct_epoll_event_sz * res);
+ }
+ return res;
+}
+#define MSAN_MAYBE_INTERCEPT_EPOLL_WAIT INTERCEPT_FUNCTION(epoll_wait)
+#else
+#define MSAN_MAYBE_INTERCEPT_EPOLL_WAIT
+#endif
+
+#if !SANITIZER_FREEBSD && !SANITIZER_NETBSD
+INTERCEPTOR(int, epoll_pwait, int epfd, void *events, int maxevents,
+ int timeout, void *sigmask) {
+ ENSURE_MSAN_INITED();
+ int res = REAL(epoll_pwait)(epfd, events, maxevents, timeout, sigmask);
+ if (res > 0) {
+ __msan_unpoison(events, __sanitizer::struct_epoll_event_sz * res);
+ }
+ return res;
+}
+#define MSAN_MAYBE_INTERCEPT_EPOLL_PWAIT INTERCEPT_FUNCTION(epoll_pwait)
+#else
+#define MSAN_MAYBE_INTERCEPT_EPOLL_PWAIT
+#endif
+
+INTERCEPTOR(void *, calloc, SIZE_T nmemb, SIZE_T size) {
+ GET_MALLOC_STACK_TRACE;
+ if (UNLIKELY(!msan_inited))
+ // Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
+ return AllocateFromLocalPool(nmemb * size);
+ return msan_calloc(nmemb, size, &stack);
+}
+
+INTERCEPTOR(void *, realloc, void *ptr, SIZE_T size) {
+ GET_MALLOC_STACK_TRACE;
+ if (UNLIKELY(IsInDlsymAllocPool(ptr))) {
+ uptr offset = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
+ uptr copy_size = Min(size, kDlsymAllocPoolSize - offset);
+ void *new_ptr;
+ if (UNLIKELY(!msan_inited)) {
+ new_ptr = AllocateFromLocalPool(copy_size);
+ } else {
+ copy_size = size;
+ new_ptr = msan_malloc(copy_size, &stack);
+ }
+ internal_memcpy(new_ptr, ptr, copy_size);
+ return new_ptr;
+ }
+ return msan_realloc(ptr, size, &stack);
+}
+
+INTERCEPTOR(void *, reallocarray, void *ptr, SIZE_T nmemb, SIZE_T size) {
+ GET_MALLOC_STACK_TRACE;
+ return msan_reallocarray(ptr, nmemb, size, &stack);
+}
+
+INTERCEPTOR(void *, malloc, SIZE_T size) {
+ GET_MALLOC_STACK_TRACE;
+ if (UNLIKELY(!msan_inited))
+ // Hack: dlsym calls malloc before REAL(malloc) is retrieved from dlsym.
+ return AllocateFromLocalPool(size);
+ return msan_malloc(size, &stack);
+}
+
+void __msan_allocated_memory(const void *data, uptr size) {
+ GET_MALLOC_STACK_TRACE;
+ if (flags()->poison_in_malloc) {
+ stack.tag = STACK_TRACE_TAG_POISON;
+ PoisonMemory(data, size, &stack);
+ }
+}
+
+void __msan_copy_shadow(void *dest, const void *src, uptr n) {
+ GET_STORE_STACK_TRACE;
+ MoveShadowAndOrigin(dest, src, n, &stack);
+}
+
+void __sanitizer_dtor_callback(const void *data, uptr size) {
+ GET_MALLOC_STACK_TRACE;
+ if (flags()->poison_in_dtor) {
+ stack.tag = STACK_TRACE_TAG_POISON;
+ PoisonMemory(data, size, &stack);
+ }
+}
+
+template <class Mmap>
+static void *mmap_interceptor(Mmap real_mmap, void *addr, SIZE_T length,
+ int prot, int flags, int fd, OFF64_T offset) {
+ if (addr && !MEM_IS_APP(addr)) {
+ if (flags & map_fixed) {
+ errno = errno_EINVAL;
+ return (void *)-1;
+ } else {
+ addr = nullptr;
+ }
+ }
+ void *res = real_mmap(addr, length, prot, flags, fd, offset);
+ if (res != (void *)-1) __msan_unpoison(res, RoundUpTo(length, GetPageSize()));
+ return res;
+}
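
The helper above enforces MSan's address-space layout: a MAP_FIXED request outside the app range fails with EINVAL, a plain hint outside the range is dropped, and any mapping that succeeds comes back fully unpoisoned. A hedged illustration; the hint address is a made-up value assumed not to be an app address on the platform at hand:

  #include <sys/mman.h>
  void example() {
    void *hint = (void *)0x100000000000ULL;  // assumption: not MEM_IS_APP
    void *a = mmap(hint, 4096, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
    // a == MAP_FAILED with errno == EINVAL: rejected by the interceptor.
    void *b = mmap(hint, 4096, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    // b: the hint is ignored, the kernel picks an address, and the new
    // pages are unpoisoned up to the page-rounded length.
  }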
+
+INTERCEPTOR(int, getrusage, int who, void *usage) {
+ ENSURE_MSAN_INITED();
+ int res = REAL(getrusage)(who, usage);
+ if (res == 0) {
+ __msan_unpoison(usage, __sanitizer::struct_rusage_sz);
+ }
+ return res;
+}
+
+class SignalHandlerScope {
+ public:
+ SignalHandlerScope() {
+ if (MsanThread *t = GetCurrentThread())
+ t->EnterSignalHandler();
+ }
+ ~SignalHandlerScope() {
+ if (MsanThread *t = GetCurrentThread())
+ t->LeaveSignalHandler();
+ }
+};
+
+// sigactions_mu guarantees atomicity of sigaction() and signal() calls.
+// Access to sigactions[] is done with relaxed atomics to avoid a data race
+// with the signal handler.
+const int kMaxSignals = 1024;
+static atomic_uintptr_t sigactions[kMaxSignals];
+static StaticSpinMutex sigactions_mu;
+
+static void SignalHandler(int signo) {
+ SignalHandlerScope signal_handler_scope;
+ ScopedThreadLocalStateBackup stlsb;
+ UnpoisonParam(1);
+
+ typedef void (*signal_cb)(int x);
+ signal_cb cb =
+ (signal_cb)atomic_load(&sigactions[signo], memory_order_relaxed);
+ cb(signo);
+}
+
+static void SignalAction(int signo, void *si, void *uc) {
+ SignalHandlerScope signal_handler_scope;
+ ScopedThreadLocalStateBackup stlsb;
+ UnpoisonParam(3);
+ __msan_unpoison(si, sizeof(__sanitizer_sigaction));
+ __msan_unpoison(uc, __sanitizer::ucontext_t_sz);
+
+ typedef void (*sigaction_cb)(int, void *, void *);
+ sigaction_cb cb =
+ (sigaction_cb)atomic_load(&sigactions[signo], memory_order_relaxed);
+ cb(signo, si, uc);
+}
+
+static void read_sigaction(const __sanitizer_sigaction *act) {
+ CHECK_UNPOISONED(&act->sa_flags, sizeof(act->sa_flags));
+ if (act->sa_flags & __sanitizer::sa_siginfo)
+ CHECK_UNPOISONED(&act->sigaction, sizeof(act->sigaction));
+ else
+ CHECK_UNPOISONED(&act->handler, sizeof(act->handler));
+ CHECK_UNPOISONED(&act->sa_mask, sizeof(act->sa_mask));
+}
+
+extern "C" int pthread_attr_init(void *attr);
+extern "C" int pthread_attr_destroy(void *attr);
+
+static void *MsanThreadStartFunc(void *arg) {
+ MsanThread *t = (MsanThread *)arg;
+ SetCurrentThread(t);
+ return t->ThreadStart();
+}
+
+INTERCEPTOR(int, pthread_create, void *th, void *attr, void *(*callback)(void*),
+ void * param) {
+ ENSURE_MSAN_INITED(); // for GetTlsSize()
+ __sanitizer_pthread_attr_t myattr;
+ if (!attr) {
+ pthread_attr_init(&myattr);
+ attr = &myattr;
+ }
+
+ AdjustStackSize(attr);
+
+ MsanThread *t = MsanThread::Create(callback, param);
+
+ int res = REAL(pthread_create)(th, attr, MsanThreadStartFunc, t);
+
+ if (attr == &myattr)
+ pthread_attr_destroy(&myattr);
+ if (!res) {
+ __msan_unpoison(th, __sanitizer::pthread_t_sz);
+ }
+ return res;
+}
+
+INTERCEPTOR(int, pthread_key_create, __sanitizer_pthread_key_t *key,
+ void (*dtor)(void *value)) {
+ if (msan_init_is_running) return REAL(pthread_key_create)(key, dtor);
+ ENSURE_MSAN_INITED();
+ int res = REAL(pthread_key_create)(key, dtor);
+ if (!res && key)
+ __msan_unpoison(key, sizeof(*key));
+ return res;
+}
+
+#if SANITIZER_NETBSD
+INTERCEPTOR(void, __libc_thr_keycreate, void *m, void (*dtor)(void *value)) \
+ ALIAS(WRAPPER_NAME(pthread_key_create));
+#endif
+
+INTERCEPTOR(int, pthread_join, void *th, void **retval) {
+ ENSURE_MSAN_INITED();
+ int res = REAL(pthread_join)(th, retval);
+ if (!res && retval)
+ __msan_unpoison(retval, sizeof(*retval));
+ return res;
+}
+
+extern char *tzname[2];
+
+INTERCEPTOR(void, tzset, int fake) {
+ ENSURE_MSAN_INITED();
+ InterceptorScope interceptor_scope;
+ REAL(tzset)(fake);
+ if (tzname[0])
+ __msan_unpoison(tzname[0], REAL(strlen)(tzname[0]) + 1);
+ if (tzname[1])
+ __msan_unpoison(tzname[1], REAL(strlen)(tzname[1]) + 1);
+ return;
+}
+
+struct MSanAtExitRecord {
+ void (*func)(void *arg);
+ void *arg;
+};
+
+struct InterceptorContext {
+ BlockingMutex atexit_mu;
+ Vector<struct MSanAtExitRecord *> AtExitStack;
+
+ InterceptorContext()
+ : AtExitStack() {
+ }
+};
+
+static ALIGNED(64) char interceptor_placeholder[sizeof(InterceptorContext)];
+InterceptorContext *interceptor_ctx() {
+ return reinterpret_cast<InterceptorContext*>(&interceptor_placeholder[0]);
+}
+
+void MSanAtExitWrapper() {
+ MSanAtExitRecord *r;
+ {
+ BlockingMutexLock l(&interceptor_ctx()->atexit_mu);
+
+ uptr element = interceptor_ctx()->AtExitStack.Size() - 1;
+ r = interceptor_ctx()->AtExitStack[element];
+ interceptor_ctx()->AtExitStack.PopBack();
+ }
+
+ UnpoisonParam(1);
+ ((void(*)())r->func)();
+ InternalFree(r);
+}
+
+void MSanCxaAtExitWrapper(void *arg) {
+ UnpoisonParam(1);
+ MSanAtExitRecord *r = (MSanAtExitRecord *)arg;
+ // glibc before 2.27 had a race that caused occasional double handler execution:
+ // https://sourceware.org/ml/libc-alpha/2017-08/msg01204.html
+ if (!r->func)
+ return;
+ r->func(r->arg);
+ r->func = nullptr;
+}
+
+static int setup_at_exit_wrapper(void(*f)(), void *arg, void *dso);
+
+// Unpoison argument shadow for C++ module destructors.
+INTERCEPTOR(int, __cxa_atexit, void (*func)(void *), void *arg,
+ void *dso_handle) {
+ if (msan_init_is_running) return REAL(__cxa_atexit)(func, arg, dso_handle);
+ return setup_at_exit_wrapper((void(*)())func, arg, dso_handle);
+}
+
+// Unpoison argument shadow for C++ module destructors.
+INTERCEPTOR(int, atexit, void (*func)()) {
+ // Avoid calling the real atexit, as it is unreachable on Linux at least.
+ if (msan_init_is_running)
+ return REAL(__cxa_atexit)((void (*)(void *a))func, 0, 0);
+ return setup_at_exit_wrapper((void(*)())func, 0, 0);
+}
+
+static int setup_at_exit_wrapper(void(*f)(), void *arg, void *dso) {
+ ENSURE_MSAN_INITED();
+ MSanAtExitRecord *r =
+ (MSanAtExitRecord *)InternalAlloc(sizeof(MSanAtExitRecord));
+ r->func = (void(*)(void *a))f;
+ r->arg = arg;
+ int res;
+ if (!dso) {
+ // NetBSD does not preserve the 2nd argument if dso is equal to 0
+ // Store ctx in a local stack-like structure
+
+ BlockingMutexLock l(&interceptor_ctx()->atexit_mu);
+
+ res = REAL(__cxa_atexit)((void (*)(void *a))MSanAtExitWrapper, 0, 0);
+ if (!res) {
+ interceptor_ctx()->AtExitStack.PushBack(r);
+ }
+ } else {
+ res = REAL(__cxa_atexit)(MSanCxaAtExitWrapper, r, dso);
+ }
+ return res;
+}
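
The dso == nullptr branch exists because, as the comment notes, NetBSD's __cxa_atexit does not preserve the second argument in that case; the record is parked on AtExitStack and MSanAtExitWrapper pops it instead. Since __cxa_atexit runs handlers in reverse registration order, popping from the back hands each wrapper invocation the matching record. Schematically (comments only, not code from this file):

  // atexit(f1); atexit(f2);   // pushes r(f1), then r(f2)
  // process exit: wrapper runs, pops r(f2) -> f2()
  //               wrapper runs, pops r(f1) -> f1()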
+
+static void BeforeFork() {
+ StackDepotLockAll();
+ ChainedOriginDepotLockAll();
+}
+
+static void AfterFork() {
+ ChainedOriginDepotUnlockAll();
+ StackDepotUnlockAll();
+}
+
+INTERCEPTOR(int, fork, void) {
+ ENSURE_MSAN_INITED();
+ BeforeFork();
+ int pid = REAL(fork)();
+ AfterFork();
+ return pid;
+}
+
+// NetBSD ships openpty(3) in -lutil, which would need to be rebuilt with
+// MSan explicitly, so intercept it only on Linux.
+#if SANITIZER_LINUX
+INTERCEPTOR(int, openpty, int *aparent, int *aworker, char *name,
+ const void *termp, const void *winp) {
+ ENSURE_MSAN_INITED();
+ InterceptorScope interceptor_scope;
+ int res = REAL(openpty)(aparent, aworker, name, termp, winp);
+ if (!res) {
+ __msan_unpoison(aparent, sizeof(*aparent));
+ __msan_unpoison(aworker, sizeof(*aworker));
+ }
+ return res;
+}
+#define MSAN_MAYBE_INTERCEPT_OPENPTY INTERCEPT_FUNCTION(openpty)
+#else
+#define MSAN_MAYBE_INTERCEPT_OPENPTY
+#endif
+
+// NetBSD ships forkpty(3) in -lutil, which would need to be rebuilt with
+// MSan explicitly, so intercept it only on Linux.
+#if SANITIZER_LINUX
+INTERCEPTOR(int, forkpty, int *aparent, char *name, const void *termp,
+ const void *winp) {
+ ENSURE_MSAN_INITED();
+ InterceptorScope interceptor_scope;
+ int res = REAL(forkpty)(aparent, name, termp, winp);
+ if (res != -1)
+ __msan_unpoison(aparent, sizeof(*aparent));
+ return res;
+}
+#define MSAN_MAYBE_INTERCEPT_FORKPTY INTERCEPT_FUNCTION(forkpty)
+#else
+#define MSAN_MAYBE_INTERCEPT_FORKPTY
+#endif
+
+struct MSanInterceptorContext {
+ bool in_interceptor_scope;
+};
+
+namespace __msan {
+
+int OnExit() {
+ // FIXME: ask frontend whether we need to return failure.
+ return 0;
+}
+
+} // namespace __msan
+
+// A version of CHECK_UNPOISONED using a saved scope value. Used in common
+// interceptors.
+#define CHECK_UNPOISONED_CTX(ctx, x, n) \
+ do { \
+ if (!((MSanInterceptorContext *)ctx)->in_interceptor_scope) \
+ CHECK_UNPOISONED_0(x, n); \
+ } while (0)
+
+#define MSAN_INTERCEPT_FUNC(name) \
+ do { \
+ if (!INTERCEPT_FUNCTION(name)) \
+ VReport(1, "MemorySanitizer: failed to intercept '%s'\n'", #name); \
+ } while (0)
+
+#define MSAN_INTERCEPT_FUNC_VER(name, ver) \
+ do { \
+ if (!INTERCEPT_FUNCTION_VER(name, ver)) \
+ VReport(1, "MemorySanitizer: failed to intercept '%s@@%s'\n", #name, \
+ #ver); \
+ } while (0)
+
+#define COMMON_INTERCEPT_FUNCTION(name) MSAN_INTERCEPT_FUNC(name)
+#define COMMON_INTERCEPT_FUNCTION_VER(name, ver) \
+ MSAN_INTERCEPT_FUNC_VER(name, ver)
+#define COMMON_INTERCEPTOR_UNPOISON_PARAM(count) \
+ UnpoisonParam(count)
+#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
+ __msan_unpoison(ptr, size)
+#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \
+ CHECK_UNPOISONED_CTX(ctx, ptr, size)
+#define COMMON_INTERCEPTOR_INITIALIZE_RANGE(ptr, size) \
+ __msan_unpoison(ptr, size)
+#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
+ if (msan_init_is_running) return REAL(func)(__VA_ARGS__); \
+ ENSURE_MSAN_INITED(); \
+ MSanInterceptorContext msan_ctx = {IsInInterceptorScope()}; \
+ ctx = (void *)&msan_ctx; \
+ (void)ctx; \
+ InterceptorScope interceptor_scope; \
+ __msan_unpoison(__errno_location(), sizeof(int)); /* NOLINT */
+#define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \
+ do { \
+ } while (false)
+#define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \
+ do { \
+ } while (false)
+#define COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd) \
+ do { \
+ } while (false)
+#define COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, newfd) \
+ do { \
+ } while (false)
+#define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) \
+ do { \
+ } while (false) // FIXME
+#define COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name) \
+ do { \
+ } while (false) // FIXME
+#define COMMON_INTERCEPTOR_BLOCK_REAL(name) REAL(name)
+#define COMMON_INTERCEPTOR_ON_EXIT(ctx) OnExit()
+#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) \
+ do { \
+ link_map *map = GET_LINK_MAP_BY_DLOPEN_HANDLE((handle)); \
+ if (filename && map) \
+ ForEachMappedRegion(map, __msan_unpoison); \
+ } while (false)
+
+#define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end) \
+ if (MsanThread *t = GetCurrentThread()) { \
+ *begin = t->tls_begin(); \
+ *end = t->tls_end(); \
+ } else { \
+ *begin = *end = 0; \
+ }
+
+#define COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size) \
+ { \
+ (void)ctx; \
+ return __msan_memset(block, c, size); \
+ }
+#define COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size) \
+ { \
+ (void)ctx; \
+ return __msan_memmove(to, from, size); \
+ }
+#define COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size) \
+ { \
+ (void)ctx; \
+ return __msan_memcpy(to, from, size); \
+ }
+
+#define COMMON_INTERCEPTOR_COPY_STRING(ctx, to, from, size) \
+ do { \
+ GET_STORE_STACK_TRACE; \
+ CopyShadowAndOrigin(to, from, size, &stack); \
+ __msan_unpoison(to + size, 1); \
+ } while (false)
+
+#define COMMON_INTERCEPTOR_MMAP_IMPL(ctx, mmap, addr, length, prot, flags, fd, \
+ offset) \
+ do { \
+ return mmap_interceptor(REAL(mmap), addr, length, prot, flags, fd, offset); \
+ } while (false)
+
+#include "sanitizer_common/sanitizer_platform_interceptors.h"
+#include "sanitizer_common/sanitizer_common_interceptors.inc"
+
+static uptr signal_impl(int signo, uptr cb);
+static int sigaction_impl(int signo, const __sanitizer_sigaction *act,
+ __sanitizer_sigaction *oldact);
+
+#define SIGNAL_INTERCEPTOR_SIGACTION_IMPL(signo, act, oldact) \
+ { return sigaction_impl(signo, act, oldact); }
+
+#define SIGNAL_INTERCEPTOR_SIGNAL_IMPL(func, signo, handler) \
+ { \
+ handler = signal_impl(signo, handler); \
+ InterceptorScope interceptor_scope; \
+ return REAL(func)(signo, handler); \
+ }
+
+#include "sanitizer_common/sanitizer_signal_interceptors.inc"
+
+static int sigaction_impl(int signo, const __sanitizer_sigaction *act,
+ __sanitizer_sigaction *oldact) {
+ ENSURE_MSAN_INITED();
+ if (act) read_sigaction(act);
+ int res;
+ if (flags()->wrap_signals) {
+ SpinMutexLock lock(&sigactions_mu);
+ CHECK_LT(signo, kMaxSignals);
+ uptr old_cb = atomic_load(&sigactions[signo], memory_order_relaxed);
+ __sanitizer_sigaction new_act;
+ __sanitizer_sigaction *pnew_act = act ? &new_act : nullptr;
+ if (act) {
+ REAL(memcpy)(pnew_act, act, sizeof(__sanitizer_sigaction));
+ uptr cb = (uptr)pnew_act->sigaction;
+ uptr new_cb = (pnew_act->sa_flags & __sanitizer::sa_siginfo)
+ ? (uptr)SignalAction
+ : (uptr)SignalHandler;
+ if (cb != __sanitizer::sig_ign && cb != __sanitizer::sig_dfl) {
+ atomic_store(&sigactions[signo], cb, memory_order_relaxed);
+ pnew_act->sigaction = (decltype(pnew_act->sigaction))new_cb;
+ }
+ }
+ res = REAL(SIGACTION_SYMNAME)(signo, pnew_act, oldact);
+ if (res == 0 && oldact) {
+ uptr cb = (uptr)oldact->sigaction;
+ if (cb == (uptr)SignalAction || cb == (uptr)SignalHandler) {
+ oldact->sigaction = (decltype(oldact->sigaction))old_cb;
+ }
+ }
+ } else {
+ res = REAL(SIGACTION_SYMNAME)(signo, act, oldact);
+ }
+
+ if (res == 0 && oldact) {
+ __msan_unpoison(oldact, sizeof(__sanitizer_sigaction));
+ }
+ return res;
+}
+
+static uptr signal_impl(int signo, uptr cb) {
+ ENSURE_MSAN_INITED();
+ if (flags()->wrap_signals) {
+ CHECK_LT(signo, kMaxSignals);
+ SpinMutexLock lock(&sigactions_mu);
+ if (cb != __sanitizer::sig_ign && cb != __sanitizer::sig_dfl) {
+ atomic_store(&sigactions[signo], cb, memory_order_relaxed);
+ cb = (uptr)&SignalHandler;
+ }
+ }
+ return cb;
+}
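
When wrap_signals is set, both entry points above divert user callbacks through MSan's own trampolines (SignalHandler/SignalAction) so that handler arguments can be unpoisoned before user code runs. A minimal sketch of that wrapping pattern; g_user_handlers, Trampoline and wrapped_signal are illustrative names, not MSan's:

  #include <csignal>
  #include <atomic>

  // One remembered user callback per signal number (sketch only).
  static std::atomic<void (*)(int)> g_user_handlers[64];

  static void Trampoline(int signo) {
    // MSan's real trampoline unpoisons the handler arguments here; this
    // sketch only forwards the call to the remembered user callback.
    if (auto h = g_user_handlers[signo].load()) h(signo);
  }

  // Mirrors signal_impl: remember the user callback, install the wrapper.
  void (*wrapped_signal(int signo, void (*cb)(int)))(int) {
    if (signo >= 0 && signo < 64 && cb != SIG_IGN && cb != SIG_DFL) {
      g_user_handlers[signo].store(cb);
      cb = Trampoline;
    }
    return std::signal(signo, cb);
  }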
+
+#define COMMON_SYSCALL_PRE_READ_RANGE(p, s) CHECK_UNPOISONED(p, s)
+#define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) \
+ do { \
+ } while (false)
+#define COMMON_SYSCALL_POST_READ_RANGE(p, s) \
+ do { \
+ } while (false)
+#define COMMON_SYSCALL_POST_WRITE_RANGE(p, s) __msan_unpoison(p, s)
+#include "sanitizer_common/sanitizer_common_syscalls.inc"
+#include "sanitizer_common/sanitizer_syscalls_netbsd.inc"
+
+struct dlinfo {
+ char *dli_fname;
+ void *dli_fbase;
+ char *dli_sname;
+ void *dli_saddr;
+};
+
+INTERCEPTOR(int, dladdr, void *addr, dlinfo *info) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, dladdr, addr, info);
+ int res = REAL(dladdr)(addr, info);
+ if (res != 0) {
+ __msan_unpoison(info, sizeof(*info));
+ if (info->dli_fname)
+ __msan_unpoison(info->dli_fname, REAL(strlen)(info->dli_fname) + 1);
+ if (info->dli_sname)
+ __msan_unpoison(info->dli_sname, REAL(strlen)(info->dli_sname) + 1);
+ }
+ return res;
+}
+
+INTERCEPTOR(char *, dlerror, int fake) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, dlerror, fake);
+ char *res = REAL(dlerror)(fake);
+ if (res) __msan_unpoison(res, REAL(strlen)(res) + 1);
+ return res;
+}
+
+typedef int (*dl_iterate_phdr_cb)(__sanitizer_dl_phdr_info *info, SIZE_T size,
+ void *data);
+struct dl_iterate_phdr_data {
+ dl_iterate_phdr_cb callback;
+ void *data;
+};
+
+static int msan_dl_iterate_phdr_cb(__sanitizer_dl_phdr_info *info, SIZE_T size,
+ void *data) {
+ if (info) {
+ __msan_unpoison(info, size);
+ if (info->dlpi_phdr && info->dlpi_phnum)
+ __msan_unpoison(info->dlpi_phdr, struct_ElfW_Phdr_sz * info->dlpi_phnum);
+ if (info->dlpi_name)
+ __msan_unpoison(info->dlpi_name, REAL(strlen)(info->dlpi_name) + 1);
+ }
+ dl_iterate_phdr_data *cbdata = (dl_iterate_phdr_data *)data;
+ UnpoisonParam(3);
+ return cbdata->callback(info, size, cbdata->data);
+}
+
+INTERCEPTOR(void *, shmat, int shmid, const void *shmaddr, int shmflg) {
+ ENSURE_MSAN_INITED();
+ void *p = REAL(shmat)(shmid, shmaddr, shmflg);
+ if (p != (void *)-1) {
+ __sanitizer_shmid_ds ds;
+ int res = REAL(shmctl)(shmid, shmctl_ipc_stat, &ds);
+ if (!res) {
+ __msan_unpoison(p, ds.shm_segsz);
+ }
+ }
+ return p;
+}
+
+INTERCEPTOR(int, dl_iterate_phdr, dl_iterate_phdr_cb callback, void *data) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, dl_iterate_phdr, callback, data);
+ dl_iterate_phdr_data cbdata;
+ cbdata.callback = callback;
+ cbdata.data = data;
+ int res = REAL(dl_iterate_phdr)(msan_dl_iterate_phdr_cb, (void *)&cbdata);
+ return res;
+}
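
For context, a typical dl_iterate_phdr caller dereferences libc-filled fields inside its callback; the shim above unpoisons those fields first so such reads are not flagged. A hedged, plain-glibc usage example (no MSan internals assumed):

  #include <link.h>
  #include <cstddef>
  #include <cstdio>

  // These loads are exactly what msan_dl_iterate_phdr_cb makes visible as
  // initialized before the user callback runs.
  static int print_module(struct dl_phdr_info *info, size_t, void *) {
    std::printf("%s: %u segments\n", info->dlpi_name,
                (unsigned)info->dlpi_phnum);
    return 0;  // 0 = keep iterating
  }

  int main() { return dl_iterate_phdr(print_module, nullptr); }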
+
+// wchar_t *wcschr(const wchar_t *wcs, wchar_t wc);
+INTERCEPTOR(wchar_t *, wcschr, void *s, wchar_t wc, void *ps) {
+ ENSURE_MSAN_INITED();
+ wchar_t *res = REAL(wcschr)(s, wc, ps);
+ return res;
+}
+
+// wchar_t *wcscpy(wchar_t *dest, const wchar_t *src);
+INTERCEPTOR(wchar_t *, wcscpy, wchar_t *dest, const wchar_t *src) {
+ ENSURE_MSAN_INITED();
+ GET_STORE_STACK_TRACE;
+ wchar_t *res = REAL(wcscpy)(dest, src);
+ CopyShadowAndOrigin(dest, src, sizeof(wchar_t) * (REAL(wcslen)(src) + 1),
+ &stack);
+ return res;
+}
+
+INTERCEPTOR(wchar_t *, wcsncpy, wchar_t *dest, const wchar_t *src,
+ SIZE_T n) { // NOLINT
+ ENSURE_MSAN_INITED();
+ GET_STORE_STACK_TRACE;
+ SIZE_T copy_size = REAL(wcsnlen)(src, n);
+ if (copy_size < n) copy_size++; // trailing \0
+ wchar_t *res = REAL(wcsncpy)(dest, src, n); // NOLINT
+ CopyShadowAndOrigin(dest, src, copy_size * sizeof(wchar_t), &stack);
+ __msan_unpoison(dest + copy_size, (n - copy_size) * sizeof(wchar_t));
+ return res;
+}
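
The arithmetic above copies shadow only for the characters actually taken from src (including the terminator when it fits) and marks the zero-filled tail as initialized, so the whole destination ends up clean. A hedged check of that contract via the public interface (assumes building and linking with -fsanitize=memory):

  #include <sanitizer/msan_interface.h>
  #include <wchar.h>
  #include <assert.h>

  int main() {
    wchar_t dst[8];
    wcsncpy(dst, L"abc", 8);  // copies L"abc\0", zero-fills the last 4 slots
    // -1 means no poisoned byte was found anywhere in the range.
    assert(__msan_test_shadow(dst, sizeof(dst)) == -1);
    return 0;
  }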
+
+// These interface functions reside here so that they can use
+// REAL(memset), etc.
+void __msan_unpoison(const void *a, uptr size) {
+ if (!MEM_IS_APP(a)) return;
+ SetShadow(a, size, 0);
+}
+
+void __msan_poison(const void *a, uptr size) {
+ if (!MEM_IS_APP(a)) return;
+ SetShadow(a, size, __msan::flags()->poison_heap_with_zeroes ? 0 : -1);
+}
+
+void __msan_poison_stack(void *a, uptr size) {
+ if (!MEM_IS_APP(a)) return;
+ SetShadow(a, size, __msan::flags()->poison_stack_with_zeroes ? 0 : -1);
+}
+
+void __msan_unpoison_param(uptr n) { UnpoisonParam(n); }
+
+void __msan_clear_and_unpoison(void *a, uptr size) {
+ REAL(memset)(a, 0, size);
+ SetShadow(a, size, 0);
+}
+
+void *__msan_memcpy(void *dest, const void *src, SIZE_T n) {
+ if (!msan_inited) return internal_memcpy(dest, src, n);
+ if (msan_init_is_running || __msan::IsInSymbolizer())
+ return REAL(memcpy)(dest, src, n);
+ ENSURE_MSAN_INITED();
+ GET_STORE_STACK_TRACE;
+ void *res = REAL(memcpy)(dest, src, n);
+ CopyShadowAndOrigin(dest, src, n, &stack);
+ return res;
+}
+
+void *__msan_memset(void *s, int c, SIZE_T n) {
+ if (!msan_inited) return internal_memset(s, c, n);
+ if (msan_init_is_running) return REAL(memset)(s, c, n);
+ ENSURE_MSAN_INITED();
+ void *res = REAL(memset)(s, c, n);
+ __msan_unpoison(s, n);
+ return res;
+}
+
+void *__msan_memmove(void *dest, const void *src, SIZE_T n) {
+ if (!msan_inited) return internal_memmove(dest, src, n);
+ if (msan_init_is_running) return REAL(memmove)(dest, src, n);
+ ENSURE_MSAN_INITED();
+ GET_STORE_STACK_TRACE;
+ void *res = REAL(memmove)(dest, src, n);
+ MoveShadowAndOrigin(dest, src, n, &stack);
+ return res;
+}
+
+void __msan_unpoison_string(const char* s) {
+ if (!MEM_IS_APP(s)) return;
+ __msan_unpoison(s, REAL(strlen)(s) + 1);
+}
+
+namespace __msan {
+
+void InitializeInterceptors() {
+ static int inited = 0;
+ CHECK_EQ(inited, 0);
+
+ new(interceptor_ctx()) InterceptorContext();
+
+ InitializeCommonInterceptors();
+ InitializeSignalInterceptors();
+
+ INTERCEPT_FUNCTION(posix_memalign);
+ MSAN_MAYBE_INTERCEPT_MEMALIGN;
+ MSAN_MAYBE_INTERCEPT___LIBC_MEMALIGN;
+ INTERCEPT_FUNCTION(valloc);
+ MSAN_MAYBE_INTERCEPT_PVALLOC;
+ INTERCEPT_FUNCTION(malloc);
+ INTERCEPT_FUNCTION(calloc);
+ INTERCEPT_FUNCTION(realloc);
+ INTERCEPT_FUNCTION(reallocarray);
+ INTERCEPT_FUNCTION(free);
+ MSAN_MAYBE_INTERCEPT_CFREE;
+ MSAN_MAYBE_INTERCEPT_MALLOC_USABLE_SIZE;
+ MSAN_MAYBE_INTERCEPT_MALLINFO;
+ MSAN_MAYBE_INTERCEPT_MALLOPT;
+ MSAN_MAYBE_INTERCEPT_MALLOC_STATS;
+ INTERCEPT_FUNCTION(fread);
+ MSAN_MAYBE_INTERCEPT_FREAD_UNLOCKED;
+ INTERCEPT_FUNCTION(memccpy);
+ MSAN_MAYBE_INTERCEPT_MEMPCPY;
+ INTERCEPT_FUNCTION(bcopy);
+ INTERCEPT_FUNCTION(wmemset);
+ INTERCEPT_FUNCTION(wmemcpy);
+ MSAN_MAYBE_INTERCEPT_WMEMPCPY;
+ INTERCEPT_FUNCTION(wmemmove);
+ INTERCEPT_FUNCTION(strcpy); // NOLINT
+ MSAN_MAYBE_INTERCEPT_STPCPY; // NOLINT
+ INTERCEPT_FUNCTION(strdup);
+ MSAN_MAYBE_INTERCEPT___STRDUP;
+ INTERCEPT_FUNCTION(strncpy); // NOLINT
+ MSAN_MAYBE_INTERCEPT_GCVT;
+ INTERCEPT_FUNCTION(strcat); // NOLINT
+ INTERCEPT_FUNCTION(strncat); // NOLINT
+ INTERCEPT_STRTO(strtod);
+ INTERCEPT_STRTO(strtof);
+ INTERCEPT_STRTO(strtold);
+ INTERCEPT_STRTO(strtol);
+ INTERCEPT_STRTO(strtoul);
+ INTERCEPT_STRTO(strtoll);
+ INTERCEPT_STRTO(strtoull);
+ INTERCEPT_STRTO(strtouq);
+ INTERCEPT_STRTO(wcstod);
+ INTERCEPT_STRTO(wcstof);
+ INTERCEPT_STRTO(wcstold);
+ INTERCEPT_STRTO(wcstol);
+ INTERCEPT_STRTO(wcstoul);
+ INTERCEPT_STRTO(wcstoll);
+ INTERCEPT_STRTO(wcstoull);
+#ifdef SANITIZER_NLDBL_VERSION
+ INTERCEPT_FUNCTION_VER(vswprintf, SANITIZER_NLDBL_VERSION);
+ INTERCEPT_FUNCTION_VER(swprintf, SANITIZER_NLDBL_VERSION);
+#else
+ INTERCEPT_FUNCTION(vswprintf);
+ INTERCEPT_FUNCTION(swprintf);
+#endif
+ INTERCEPT_FUNCTION(strftime);
+ INTERCEPT_FUNCTION(strftime_l);
+ MSAN_MAYBE_INTERCEPT___STRFTIME_L;
+ INTERCEPT_FUNCTION(wcsftime);
+ INTERCEPT_FUNCTION(wcsftime_l);
+ MSAN_MAYBE_INTERCEPT___WCSFTIME_L;
+ INTERCEPT_FUNCTION(mbtowc);
+ INTERCEPT_FUNCTION(mbrtowc);
+ INTERCEPT_FUNCTION(wcslen);
+ INTERCEPT_FUNCTION(wcsnlen);
+ INTERCEPT_FUNCTION(wcschr);
+ INTERCEPT_FUNCTION(wcscpy);
+ INTERCEPT_FUNCTION(wcsncpy);
+ INTERCEPT_FUNCTION(wcscmp);
+ INTERCEPT_FUNCTION(getenv);
+ INTERCEPT_FUNCTION(setenv);
+ INTERCEPT_FUNCTION(putenv);
+ INTERCEPT_FUNCTION(gettimeofday);
+ MSAN_MAYBE_INTERCEPT_FCVT;
+ MSAN_MAYBE_INTERCEPT_FSTAT;
+ MSAN_MAYBE_INTERCEPT___FXSTAT;
+ MSAN_INTERCEPT_FSTATAT;
+ MSAN_MAYBE_INTERCEPT___FXSTAT64;
+ MSAN_MAYBE_INTERCEPT___FXSTATAT64;
+ INTERCEPT_FUNCTION(pipe);
+ INTERCEPT_FUNCTION(pipe2);
+ INTERCEPT_FUNCTION(socketpair);
+ MSAN_MAYBE_INTERCEPT_FGETS_UNLOCKED;
+ INTERCEPT_FUNCTION(getrlimit);
+ MSAN_MAYBE_INTERCEPT_GETRLIMIT64;
+ MSAN_MAYBE_INTERCEPT_PRLIMIT;
+ MSAN_MAYBE_INTERCEPT_PRLIMIT64;
+ MSAN_INTERCEPT_UNAME;
+ INTERCEPT_FUNCTION(gethostname);
+ MSAN_MAYBE_INTERCEPT_EPOLL_WAIT;
+ MSAN_MAYBE_INTERCEPT_EPOLL_PWAIT;
+ INTERCEPT_FUNCTION(dladdr);
+ INTERCEPT_FUNCTION(dlerror);
+ INTERCEPT_FUNCTION(dl_iterate_phdr);
+ INTERCEPT_FUNCTION(getrusage);
+#if defined(__mips__)
+ INTERCEPT_FUNCTION_VER(pthread_create, "GLIBC_2.2");
+#else
+ INTERCEPT_FUNCTION(pthread_create);
+#endif
+ INTERCEPT_FUNCTION(pthread_key_create);
+
+#if SANITIZER_NETBSD
+ INTERCEPT_FUNCTION(__libc_thr_keycreate);
+#endif
+
+ INTERCEPT_FUNCTION(pthread_join);
+ INTERCEPT_FUNCTION(tzset);
+ INTERCEPT_FUNCTION(atexit);
+ INTERCEPT_FUNCTION(__cxa_atexit);
+ INTERCEPT_FUNCTION(shmat);
+ INTERCEPT_FUNCTION(fork);
+ MSAN_MAYBE_INTERCEPT_OPENPTY;
+ MSAN_MAYBE_INTERCEPT_FORKPTY;
+
+ inited = 1;
+}
+} // namespace __msan
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_interface_internal.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_interface_internal.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_interface_internal.h (revision 351984)
@@ -0,0 +1,186 @@
+//===-- msan_interface_internal.h -------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of MemorySanitizer.
+//
+// Private MSan interface header.
+//===----------------------------------------------------------------------===//
+
+#ifndef MSAN_INTERFACE_INTERNAL_H
+#define MSAN_INTERFACE_INTERNAL_H
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+
+extern "C" {
+// FIXME: document all interface functions.
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __msan_get_track_origins();
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __msan_init();
+
+// Print a warning and maybe return.
+// This function can die based on common_flags()->exitcode.
+SANITIZER_INTERFACE_ATTRIBUTE
+void __msan_warning();
+
+// Print a warning and die.
+// Instrumentation inserts calls to this function when building in "fast" mode
+// (i.e. -mllvm -msan-keep-going).
+SANITIZER_INTERFACE_ATTRIBUTE __attribute__((noreturn))
+void __msan_warning_noreturn();
+
+using __sanitizer::uptr;
+using __sanitizer::sptr;
+using __sanitizer::uu64;
+using __sanitizer::uu32;
+using __sanitizer::uu16;
+using __sanitizer::u64;
+using __sanitizer::u32;
+using __sanitizer::u16;
+using __sanitizer::u8;
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __msan_maybe_warning_1(u8 s, u32 o);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __msan_maybe_warning_2(u16 s, u32 o);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __msan_maybe_warning_4(u32 s, u32 o);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __msan_maybe_warning_8(u64 s, u32 o);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __msan_maybe_store_origin_1(u8 s, void *p, u32 o);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __msan_maybe_store_origin_2(u16 s, void *p, u32 o);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __msan_maybe_store_origin_4(u32 s, void *p, u32 o);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __msan_maybe_store_origin_8(u64 s, void *p, u32 o);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __msan_unpoison(const void *a, uptr size);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __msan_unpoison_string(const char *s);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __msan_unpoison_param(uptr n);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __msan_clear_and_unpoison(void *a, uptr size);
+SANITIZER_INTERFACE_ATTRIBUTE
+void* __msan_memcpy(void *dst, const void *src, uptr size);
+SANITIZER_INTERFACE_ATTRIBUTE
+void* __msan_memset(void *s, int c, uptr n);
+SANITIZER_INTERFACE_ATTRIBUTE
+void* __msan_memmove(void* dest, const void* src, uptr n);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __msan_poison(const void *a, uptr size);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __msan_poison_stack(void *a, uptr size);
+
+// Copy size bytes from src to dst and unpoison the result.
+// Useful to implement unsafe loads.
+SANITIZER_INTERFACE_ATTRIBUTE
+void __msan_load_unpoisoned(void *src, uptr size, void *dst);
+
+// Returns the offset of the first (at least partially) poisoned byte,
+// or -1 if the whole range is good.
+SANITIZER_INTERFACE_ATTRIBUTE
+sptr __msan_test_shadow(const void *x, uptr size);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __msan_check_mem_is_initialized(const void *x, uptr size);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __msan_set_origin(const void *a, uptr size, u32 origin);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __msan_set_alloca_origin(void *a, uptr size, char *descr);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __msan_set_alloca_origin4(void *a, uptr size, char *descr, uptr pc);
+SANITIZER_INTERFACE_ATTRIBUTE
+u32 __msan_chain_origin(u32 id);
+SANITIZER_INTERFACE_ATTRIBUTE
+u32 __msan_get_origin(const void *a);
+
+// Test that this_id is a descendant of prev_id (or that they are simply
+// equal). "Descendant" here means they are part of the same chain, created
+// with __msan_chain_origin.
+SANITIZER_INTERFACE_ATTRIBUTE
+int __msan_origin_is_descendant_or_same(u32 this_id, u32 prev_id);
+
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __msan_clear_on_return();
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __msan_set_keep_going(int keep_going);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __msan_set_poison_in_malloc(int do_poison);
+
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+/* OPTIONAL */ const char* __msan_default_options();
+
+// For testing.
+SANITIZER_INTERFACE_ATTRIBUTE
+void __msan_set_expect_umr(int expect_umr);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __msan_print_shadow(const void *x, uptr size);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __msan_dump_shadow(const void *x, uptr size);
+SANITIZER_INTERFACE_ATTRIBUTE
+int __msan_has_dynamic_component();
+
+// For testing.
+SANITIZER_INTERFACE_ATTRIBUTE
+u32 __msan_get_umr_origin();
+SANITIZER_INTERFACE_ATTRIBUTE
+void __msan_partial_poison(const void* data, void* shadow, uptr size);
+
+// Tell MSan about newly allocated memory (e.g. from a custom allocator).
+// Memory will be marked uninitialized, with origin at the call site.
+SANITIZER_INTERFACE_ATTRIBUTE
+void __msan_allocated_memory(const void* data, uptr size);
+
+// Tell MSan about newly destroyed memory. Memory will be marked
+// uninitialized.
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_dtor_callback(const void* data, uptr size);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+u16 __sanitizer_unaligned_load16(const uu16 *p);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+u32 __sanitizer_unaligned_load32(const uu32 *p);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+u64 __sanitizer_unaligned_load64(const uu64 *p);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_unaligned_store16(uu16 *p, u16 x);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_unaligned_store32(uu32 *p, u32 x);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_unaligned_store64(uu64 *p, u64 x);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __msan_set_death_callback(void (*callback)(void));
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __msan_copy_shadow(void *dst, const void *src, uptr size);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __msan_scoped_disable_interceptor_checks();
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __msan_scoped_enable_interceptor_checks();
+} // extern "C"
+
+#endif // MSAN_INTERFACE_INTERNAL_H
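
The __msan_allocated_memory hook declared above is the piece a custom allocator uses to hand freshly carved-out chunks to MSan as uninitialized-with-origin. A hedged sketch, using the public header rather than this internal one; Pool and take are illustrative names:

  #include <sanitizer/msan_interface.h>  // public counterpart of the above
  #include <cstddef>

  struct Pool {
    alignas(16) unsigned char buf[4096];
    std::size_t used = 0;

    void *take(std::size_t n) {
      void *p = buf + used;
      used += (n + 15) & ~std::size_t(15);  // keep 16-byte alignment
      // Hand the chunk to MSan as uninitialized, with this call site
      // recorded as its allocation origin, as the comment above describes.
      __msan_allocated_memory(p, n);
      return p;
    }
  };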
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_new_delete.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_new_delete.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_new_delete.cc (revision 351984)
@@ -0,0 +1,108 @@
+//===-- msan_new_delete.cc ------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of MemorySanitizer.
+//
+// Interceptors for operators new and delete.
+//===----------------------------------------------------------------------===//
+
+#include "msan.h"
+#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_allocator_report.h"
+
+#if MSAN_REPLACE_OPERATORS_NEW_AND_DELETE
+
+#include <stddef.h>
+
+using namespace __msan; // NOLINT
+
+// Fake std::nothrow_t and std::align_val_t to avoid including <new>.
+namespace std {
+ struct nothrow_t {};
+ enum class align_val_t: size_t {};
+} // namespace std
+
+
+// TODO(alekseys): throw std::bad_alloc instead of dying on OOM.
+#define OPERATOR_NEW_BODY(nothrow) \
+ GET_MALLOC_STACK_TRACE; \
+ void *res = msan_malloc(size, &stack);\
+ if (!nothrow && UNLIKELY(!res)) ReportOutOfMemory(size, &stack);\
+ return res
+#define OPERATOR_NEW_BODY_ALIGN(nothrow) \
+ GET_MALLOC_STACK_TRACE;\
+ void *res = msan_memalign((uptr)align, size, &stack);\
+ if (!nothrow && UNLIKELY(!res)) ReportOutOfMemory(size, &stack);\
+ return res;
+
+INTERCEPTOR_ATTRIBUTE
+void *operator new(size_t size) { OPERATOR_NEW_BODY(false /*nothrow*/); }
+INTERCEPTOR_ATTRIBUTE
+void *operator new[](size_t size) { OPERATOR_NEW_BODY(false /*nothrow*/); }
+INTERCEPTOR_ATTRIBUTE
+void *operator new(size_t size, std::nothrow_t const&) {
+ OPERATOR_NEW_BODY(true /*nothrow*/);
+}
+INTERCEPTOR_ATTRIBUTE
+void *operator new[](size_t size, std::nothrow_t const&) {
+ OPERATOR_NEW_BODY(true /*nothrow*/);
+}
+INTERCEPTOR_ATTRIBUTE
+void *operator new(size_t size, std::align_val_t align)
+{ OPERATOR_NEW_BODY_ALIGN(false /*nothrow*/); }
+INTERCEPTOR_ATTRIBUTE
+void *operator new[](size_t size, std::align_val_t align)
+{ OPERATOR_NEW_BODY_ALIGN(false /*nothrow*/); }
+INTERCEPTOR_ATTRIBUTE
+void *operator new(size_t size, std::align_val_t align, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY_ALIGN(true /*nothrow*/); }
+INTERCEPTOR_ATTRIBUTE
+void *operator new[](size_t size, std::align_val_t align, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY_ALIGN(true /*nothrow*/); }
+
+#define OPERATOR_DELETE_BODY \
+ GET_MALLOC_STACK_TRACE; \
+ if (ptr) MsanDeallocate(&stack, ptr)
+
+INTERCEPTOR_ATTRIBUTE
+void operator delete(void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete[](void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete(void *ptr, std::nothrow_t const&) { OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete[](void *ptr, std::nothrow_t const&) {
+ OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE
+void operator delete(void *ptr, size_t size) NOEXCEPT { OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete[](void *ptr, size_t size) NOEXCEPT
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete(void *ptr, std::align_val_t align) NOEXCEPT
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete[](void *ptr, std::align_val_t align) NOEXCEPT
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete(void *ptr, std::align_val_t align, std::nothrow_t const&)
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete[](void *ptr, std::align_val_t align, std::nothrow_t const&)
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete(void *ptr, size_t size, std::align_val_t align) NOEXCEPT
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete[](void *ptr, size_t size, std::align_val_t align) NOEXCEPT
+{ OPERATOR_DELETE_BODY; }
+
+
+#endif // MSAN_REPLACE_OPERATORS_NEW_AND_DELETE
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_origin.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_origin.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_origin.h (revision 351984)
@@ -0,0 +1,168 @@
+//===-- msan_origin.h ----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Origin id utils.
+//===----------------------------------------------------------------------===//
+#ifndef MSAN_ORIGIN_H
+#define MSAN_ORIGIN_H
+
+#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "msan_chained_origin_depot.h"
+
+namespace __msan {
+
+// Origin handling.
+//
+// Origin is a 32-bit identifier that is attached to any uninitialized value in
+// the program and describes, more or less exactly, how this memory came to be
+// uninitialized.
+//
+// There are 3 kinds of origin ids:
+// 0xxx xxxx xxxx xxxx heap origin id
+// 1000 xxxx xxxx xxxx stack origin id
+// 1zzz xxxx xxxx xxxx chained origin id (zzz != 000)
+//
+// Heap origin id describes a heap memory allocation and contains (in the xxx
+// part) a value of StackDepot.
+//
+// Stack origin id describes a stack memory allocation and contains (in the xxx
+// part) an index into StackOriginDescr and StackOriginPC. We don't store a
+// stack trace for such origins for performance reasons.
+//
+// Chained origin id describes an event of storing an uninitialized value to
+// memory. The xxx part is a value of ChainedOriginDepot, which is a mapping of
+// (stack_id, prev_id) -> id, where
+// * stack_id describes the event.
+// StackDepot keeps a mapping between those and corresponding stack traces.
+// * prev_id is another origin id that describes the earlier part of the
+// uninitialized value history.
+// Following a chain of prev_id provides the full recorded history of an
+// uninitialized value.
+//
+// This, effectively, defines a tree (or 2 trees, see below) where nodes are
+// points in value history marked with origin ids, and edges are events that are
+// marked with stack_id.
+//
+// The "zzz" bits of chained origin id are used to store the length (or depth)
+// of the origin chain.
+
+class Origin {
+ public:
+ static bool isValidId(u32 id) { return id != 0 && id != (u32)-1; }
+
+ u32 raw_id() const { return raw_id_; }
+ bool isHeapOrigin() const {
+ // 0xxx xxxx xxxx xxxx
+ return raw_id_ >> kHeapShift == 0;
+ }
+ bool isStackOrigin() const {
+ // 1000 xxxx xxxx xxxx
+ return (raw_id_ >> kDepthShift) == (1 << kDepthBits);
+ }
+ bool isChainedOrigin() const {
+ // 1zzz xxxx xxxx xxxx, zzz != 000
+ return (raw_id_ >> kDepthShift) > (1 << kDepthBits);
+ }
+ u32 getChainedId() const {
+ CHECK(isChainedOrigin());
+ return raw_id_ & kChainedIdMask;
+ }
+ u32 getStackId() const {
+ CHECK(isStackOrigin());
+ return raw_id_ & kChainedIdMask;
+ }
+ u32 getHeapId() const {
+ CHECK(isHeapOrigin());
+ return raw_id_ & kHeapIdMask;
+ }
+
+ // Returns the next origin in the chain and the current stack trace.
+ Origin getNextChainedOrigin(StackTrace *stack) const {
+ CHECK(isChainedOrigin());
+ u32 prev_id;
+ u32 stack_id = ChainedOriginDepotGet(getChainedId(), &prev_id);
+ if (stack) *stack = StackDepotGet(stack_id);
+ return Origin(prev_id);
+ }
+
+ StackTrace getStackTraceForHeapOrigin() const {
+ return StackDepotGet(getHeapId());
+ }
+
+ static Origin CreateStackOrigin(u32 id) {
+ CHECK((id & kStackIdMask) == id);
+ return Origin((1 << kHeapShift) | id);
+ }
+
+ static Origin CreateHeapOrigin(StackTrace *stack) {
+ u32 stack_id = StackDepotPut(*stack);
+ CHECK(stack_id);
+ CHECK((stack_id & kHeapIdMask) == stack_id);
+ return Origin(stack_id);
+ }
+
+ static Origin CreateChainedOrigin(Origin prev, StackTrace *stack) {
+ int depth = prev.isChainedOrigin() ? prev.depth() : 0;
+ // depth is the length of the chain minus 1.
+ // origin_history_size of 0 means unlimited depth.
+ if (flags()->origin_history_size > 0) {
+ if (depth + 1 >= flags()->origin_history_size) {
+ return prev;
+ } else {
+ ++depth;
+ CHECK(depth < (1 << kDepthBits));
+ }
+ }
+
+ StackDepotHandle h = StackDepotPut_WithHandle(*stack);
+ if (!h.valid()) return prev;
+
+ if (flags()->origin_history_per_stack_limit > 0) {
+ int use_count = h.use_count();
+ if (use_count > flags()->origin_history_per_stack_limit) return prev;
+ }
+
+ u32 chained_id;
+ bool inserted = ChainedOriginDepotPut(h.id(), prev.raw_id(), &chained_id);
+ CHECK((chained_id & kChainedIdMask) == chained_id);
+
+ if (inserted && flags()->origin_history_per_stack_limit > 0)
+ h.inc_use_count_unsafe();
+
+ return Origin((1 << kHeapShift) | (depth << kDepthShift) | chained_id);
+ }
+
+ static Origin FromRawId(u32 id) {
+ return Origin(id);
+ }
+
+ private:
+ static const int kDepthBits = 3;
+ static const int kDepthShift = 32 - kDepthBits - 1;
+
+ static const int kHeapShift = 31;
+ static const u32 kChainedIdMask = ((u32)-1) >> (32 - kDepthShift);
+ static const u32 kStackIdMask = ((u32)-1) >> (32 - kDepthShift);
+ static const u32 kHeapIdMask = ((u32)-1) >> (32 - kHeapShift);
+
+ u32 raw_id_;
+
+ explicit Origin(u32 raw_id) : raw_id_(raw_id) {}
+
+ int depth() const {
+ CHECK(isChainedOrigin());
+ return (raw_id_ >> kDepthShift) & ((1 << kDepthBits) - 1);
+ }
+
+ public:
+ static const int kMaxDepth = (1 << kDepthBits) - 1;
+};
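
A small self-contained rehearsal of this encoding may help; the constants mirror those in class Origin above, and the raw id 0xA0001234 is made up:

  #include <cstdint>
  #include <cstdio>

  int main() {
    const int kDepthBits = 3;
    const int kDepthShift = 32 - kDepthBits - 1;   // 28, as in class Origin
    uint32_t raw = 0xA0001234;                     // top nibble 1010: chained, depth 2
    uint32_t top = raw >> kDepthShift;
    uint32_t id_mask = (1u << kDepthShift) - 1;    // kChainedIdMask/kStackIdMask
    if (top < (1u << kDepthBits))                  // 0xxx: heap origin
      std::printf("heap, depot id %u\n", raw & 0x7fffffffu);
    else if (top == (1u << kDepthBits))            // 1000: stack origin
      std::printf("stack, descr index %u\n", raw & id_mask);
    else                                           // 1zzz, zzz != 000: chained
      std::printf("chained, depth %u, depot id %u\n",
                  top & ((1u << kDepthBits) - 1), raw & id_mask);
    return 0;
  }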
+
+} // namespace __msan
+
+#endif // MSAN_ORIGIN_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_origin.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_poisoning.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_poisoning.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_poisoning.cc (revision 351984)
@@ -0,0 +1,174 @@
+//===-- msan_poisoning.cc ---------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of MemorySanitizer.
+//
+//===----------------------------------------------------------------------===//
+
+#include "msan_poisoning.h"
+
+#include "interception/interception.h"
+#include "msan_origin.h"
+#include "sanitizer_common/sanitizer_common.h"
+
+DECLARE_REAL(void *, memset, void *dest, int c, uptr n)
+DECLARE_REAL(void *, memcpy, void *dest, const void *src, uptr n)
+DECLARE_REAL(void *, memmove, void *dest, const void *src, uptr n)
+
+namespace __msan {
+
+u32 GetOriginIfPoisoned(uptr addr, uptr size) {
+ unsigned char *s = (unsigned char *)MEM_TO_SHADOW(addr);
+ for (uptr i = 0; i < size; ++i)
+ if (s[i]) return *(u32 *)SHADOW_TO_ORIGIN(((uptr)s + i) & ~3UL);
+ return 0;
+}
+
+void SetOriginIfPoisoned(uptr addr, uptr src_shadow, uptr size,
+ u32 src_origin) {
+ uptr dst_s = MEM_TO_SHADOW(addr);
+ uptr src_s = src_shadow;
+ uptr src_s_end = src_s + size;
+
+ for (; src_s < src_s_end; ++dst_s, ++src_s)
+ if (*(u8 *)src_s) *(u32 *)SHADOW_TO_ORIGIN(dst_s & ~3UL) = src_origin;
+}
+
+void CopyOrigin(const void *dst, const void *src, uptr size,
+ StackTrace *stack) {
+ if (!MEM_IS_APP(dst) || !MEM_IS_APP(src)) return;
+
+ uptr d = (uptr)dst;
+ uptr beg = d & ~3UL;
+ // Copy left unaligned origin if that memory is poisoned.
+ if (beg < d) {
+ u32 o = GetOriginIfPoisoned((uptr)src, d - beg);
+ if (o) {
+ if (__msan_get_track_origins() > 1) o = ChainOrigin(o, stack);
+ *(u32 *)MEM_TO_ORIGIN(beg) = o;
+ }
+ beg += 4;
+ }
+
+ uptr end = (d + size) & ~3UL;
+ // If both ends fall into the same 4-byte slot, we are done.
+ if (end < beg) return;
+
+ // Copy right unaligned origin if that memory is poisoned.
+ if (end < d + size) {
+ u32 o = GetOriginIfPoisoned((uptr)src + (end - d), (d + size) - end);
+ if (o) {
+ if (__msan_get_track_origins() > 1) o = ChainOrigin(o, stack);
+ *(u32 *)MEM_TO_ORIGIN(end) = o;
+ }
+ }
+
+ if (beg < end) {
+ // Align src up.
+ uptr s = ((uptr)src + 3) & ~3UL;
+ // FIXME: factor out to msan_copy_origin_aligned
+ if (__msan_get_track_origins() > 1) {
+ u32 *src = (u32 *)MEM_TO_ORIGIN(s);
+ u32 *src_s = (u32 *)MEM_TO_SHADOW(s);
+ u32 *src_end = (u32 *)MEM_TO_ORIGIN(s + (end - beg));
+ u32 *dst = (u32 *)MEM_TO_ORIGIN(beg);
+ u32 src_o = 0;
+ u32 dst_o = 0;
+ for (; src < src_end; ++src, ++src_s, ++dst) {
+ if (!*src_s) continue;
+ if (*src != src_o) {
+ src_o = *src;
+ dst_o = ChainOrigin(src_o, stack);
+ }
+ *dst = dst_o;
+ }
+ } else {
+ REAL(memcpy)((void *)MEM_TO_ORIGIN(beg), (void *)MEM_TO_ORIGIN(s),
+ end - beg);
+ }
+ }
+}
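
To make the alignment bookkeeping concrete, this hedged standalone rehearsal runs the same beg/end math for a made-up unaligned copy; only the arithmetic matches CopyOrigin:

  #include <cstdint>
  #include <cstdio>

  int main() {
    uintptr_t d = 0x1006, size = 9;              // 9-byte copy to ...6: unaligned
    uintptr_t beg = d & ~uintptr_t(3);           // 0x1004, left 4-byte slot
    if (beg < d) beg += 4;                       // left slot is partial -> 0x1008
    uintptr_t end = (d + size) & ~uintptr_t(3);  // 0x100c, right slot boundary
    // The aligned middle [0x1008, 0x100c) is copied in bulk; the partial
    // slots on either side take the GetOriginIfPoisoned byte-scan path.
    std::printf("bulk range [%#lx, %#lx)\n",
                (unsigned long)beg, (unsigned long)end);
    return 0;
  }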
+
+void MoveShadowAndOrigin(const void *dst, const void *src, uptr size,
+ StackTrace *stack) {
+ if (!MEM_IS_APP(dst)) return;
+ if (!MEM_IS_APP(src)) return;
+ if (src == dst) return;
+ REAL(memmove)((void *)MEM_TO_SHADOW((uptr)dst),
+ (void *)MEM_TO_SHADOW((uptr)src), size);
+ if (__msan_get_track_origins()) CopyOrigin(dst, src, size, stack);
+}
+
+void CopyShadowAndOrigin(const void *dst, const void *src, uptr size,
+ StackTrace *stack) {
+ if (!MEM_IS_APP(dst)) return;
+ if (!MEM_IS_APP(src)) return;
+ REAL(memcpy)((void *)MEM_TO_SHADOW((uptr)dst),
+ (void *)MEM_TO_SHADOW((uptr)src), size);
+ if (__msan_get_track_origins()) CopyOrigin(dst, src, size, stack);
+}
+
+void CopyMemory(void *dst, const void *src, uptr size, StackTrace *stack) {
+ REAL(memcpy)(dst, src, size);
+ CopyShadowAndOrigin(dst, src, size, stack);
+}
+
+void SetShadow(const void *ptr, uptr size, u8 value) {
+ uptr PageSize = GetPageSizeCached();
+ uptr shadow_beg = MEM_TO_SHADOW(ptr);
+ uptr shadow_end = shadow_beg + size;
+ if (value ||
+ shadow_end - shadow_beg < common_flags()->clear_shadow_mmap_threshold) {
+ REAL(memset)((void *)shadow_beg, value, shadow_end - shadow_beg);
+ } else {
+ uptr page_beg = RoundUpTo(shadow_beg, PageSize);
+ uptr page_end = RoundDownTo(shadow_end, PageSize);
+
+ if (page_beg >= page_end) {
+ REAL(memset)((void *)shadow_beg, 0, shadow_end - shadow_beg);
+ } else {
+ if (page_beg != shadow_beg) {
+ REAL(memset)((void *)shadow_beg, 0, page_beg - shadow_beg);
+ }
+ if (page_end != shadow_end) {
+ REAL(memset)((void *)page_end, 0, shadow_end - page_end);
+ }
+ if (!MmapFixedNoReserve(page_beg, page_end - page_beg))
+ Die();
+ }
+ }
+}
+
+void SetOrigin(const void *dst, uptr size, u32 origin) {
+ // Origin mapping is 4 bytes per 4 bytes of application memory.
+ // Here we extend the range such that its left and right bounds are both
+ // 4-byte aligned.
+ uptr x = MEM_TO_ORIGIN((uptr)dst);
+ uptr beg = x & ~3UL; // align down.
+ uptr end = (x + size + 3) & ~3UL; // align up.
+ u64 origin64 = ((u64)origin << 32) | origin;
+ // This is like memset, but the value is 32-bit. We unroll by 2 to write
+ // 64 bits at once. May want to unroll further to get 128-bit stores.
+ if (beg & 7ULL) {
+ *(u32 *)beg = origin;
+ beg += 4;
+ }
+ for (uptr addr = beg; addr < (end & ~7UL); addr += 8) *(u64 *)addr = origin64;
+ if (end & 7ULL) *(u32 *)(end - 4) = origin;
+}
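
The 64-bit replication trick above can be verified in isolation; this hedged snippet only demonstrates that one origin64 store lays down the same 32-bit id twice, independent of endianness:

  #include <cstdint>
  #include <cstring>
  #include <cassert>

  int main() {
    uint32_t origin = 0xdeadbeef;
    uint64_t origin64 = ((uint64_t)origin << 32) | origin;
    uint32_t halves[2];
    std::memcpy(halves, &origin64, sizeof(halves));
    // One 8-byte store covers two 4-byte origin slots with the same id.
    assert(halves[0] == origin && halves[1] == origin);
    return 0;
  }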
+
+void PoisonMemory(const void *dst, uptr size, StackTrace *stack) {
+ SetShadow(dst, size, (u8)-1);
+
+ if (__msan_get_track_origins()) {
+ Origin o = Origin::CreateHeapOrigin(stack);
+ SetOrigin(dst, size, o.raw_id());
+ }
+}
+
+} // namespace __msan
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_poisoning.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_poisoning.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_poisoning.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_poisoning.h (revision 351984)
@@ -0,0 +1,58 @@
+//===-- msan_poisoning.h ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of MemorySanitizer.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MSAN_POISONING_H
+#define MSAN_POISONING_H
+
+#include "msan.h"
+
+namespace __msan {
+
+// Return origin for the first poisoned byte in the memory range, or 0.
+u32 GetOriginIfPoisoned(uptr addr, uptr size);
+
+// Walk [addr, addr+size) app memory region, copying origin tags from the
+// corresponding positions in [src_origin, src_origin+size) where the
+// corresponding shadow in [src_shadow, src_shadow+size) is non-zero.
+void SetOriginIfPoisoned(uptr addr, uptr src_shadow, uptr size, u32 src_origin);
+
+// Copy origin from src (app address) to dst (app address), creating chained
+// origin ids as necessary, without overriding origin for fully initialized
+// quads.
+void CopyOrigin(const void *dst, const void *src, uptr size, StackTrace *stack);
+
+// memmove() shadow and origin. Dst and src are application addresses.
+// See CopyOrigin() for the origin copying logic.
+void MoveShadowAndOrigin(const void *dst, const void *src, uptr size,
+ StackTrace *stack);
+
+// memcpy() shadow and origin. Dst and src are application addresses.
+// See CopyOrigin() for the origin copying logic.
+void CopyShadowAndOrigin(const void *dst, const void *src, uptr size,
+ StackTrace *stack);
+
+// memcpy() app memory, and do "the right thing" to the corresponding shadow and
+// origin regions.
+void CopyMemory(void *dst, const void *src, uptr size, StackTrace *stack);
+
+// Fill shadow with value. Ptr is an application address.
+void SetShadow(const void *ptr, uptr size, u8 value);
+
+// Set origin for the memory region.
+void SetOrigin(const void *dst, uptr size, u32 origin);
+
+// Mark memory region uninitialized, with origins.
+void PoisonMemory(const void *dst, uptr size, StackTrace *stack);
+
+} // namespace __msan
+
+#endif // MSAN_POISONING_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_poisoning.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_report.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_report.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_report.cc (revision 351984)
@@ -0,0 +1,272 @@
+//===-- msan_report.cc ----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of MemorySanitizer.
+//
+// Error reporting.
+//===----------------------------------------------------------------------===//
+
+#include "msan.h"
+#include "msan_chained_origin_depot.h"
+#include "msan_origin.h"
+#include "msan_report.h"
+#include "sanitizer_common/sanitizer_allocator_internal.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_mutex.h"
+#include "sanitizer_common/sanitizer_report_decorator.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "sanitizer_common/sanitizer_symbolizer.h"
+
+using namespace __sanitizer;
+
+namespace __msan {
+
+class Decorator: public __sanitizer::SanitizerCommonDecorator {
+ public:
+ Decorator() : SanitizerCommonDecorator() { }
+ const char *Origin() const { return Magenta(); }
+ const char *Name() const { return Green(); }
+};
+
+static void DescribeStackOrigin(const char *so, uptr pc) {
+ Decorator d;
+ char *s = internal_strdup(so);
+ char *sep = internal_strchr(s, '@');
+ CHECK(sep);
+ *sep = '\0';
+ Printf("%s", d.Origin());
+ Printf(
+ " %sUninitialized value was created by an allocation of '%s%s%s'"
+ " in the stack frame of function '%s%s%s'%s\n",
+ d.Origin(), d.Name(), s, d.Origin(), d.Name(), sep + 1, d.Origin(),
+ d.Default());
+ InternalFree(s);
+
+ if (pc) {
+ // For some reason the function address in LLVM IR is 1 less than the
+ // address of the first instruction.
+ pc = StackTrace::GetNextInstructionPc(pc);
+ StackTrace(&pc, 1).Print();
+ }
+}
+
+static void DescribeOrigin(u32 id) {
+ VPrintf(1, " raw origin id: %d\n", id);
+ Decorator d;
+ Origin o = Origin::FromRawId(id);
+ while (o.isChainedOrigin()) {
+ StackTrace stack;
+ o = o.getNextChainedOrigin(&stack);
+ Printf(" %sUninitialized value was stored to memory at%s\n", d.Origin(),
+ d.Default());
+ stack.Print();
+ }
+ if (o.isStackOrigin()) {
+ uptr pc;
+ const char *so = GetStackOriginDescr(o.getStackId(), &pc);
+ DescribeStackOrigin(so, pc);
+ } else {
+ StackTrace stack = o.getStackTraceForHeapOrigin();
+ switch (stack.tag) {
+ case StackTrace::TAG_ALLOC:
+ Printf(" %sUninitialized value was created by a heap allocation%s\n",
+ d.Origin(), d.Default());
+ break;
+ case StackTrace::TAG_DEALLOC:
+ Printf(" %sUninitialized value was created by a heap deallocation%s\n",
+ d.Origin(), d.Default());
+ break;
+ case STACK_TRACE_TAG_POISON:
+ Printf(" %sMemory was marked as uninitialized%s\n", d.Origin(),
+ d.Default());
+ break;
+ default:
+ Printf(" %sUninitialized value was created%s\n", d.Origin(),
+ d.Default());
+ break;
+ }
+ stack.Print();
+ }
+}
+
+void ReportUMR(StackTrace *stack, u32 origin) {
+ if (!__msan::flags()->report_umrs) return;
+
+ ScopedErrorReportLock l;
+
+ Decorator d;
+ Printf("%s", d.Warning());
+ Report("WARNING: MemorySanitizer: use-of-uninitialized-value\n");
+ Printf("%s", d.Default());
+ stack->Print();
+ if (origin) {
+ DescribeOrigin(origin);
+ }
+ ReportErrorSummary("use-of-uninitialized-value", stack);
+}
+
+void ReportExpectedUMRNotFound(StackTrace *stack) {
+ ScopedErrorReportLock l;
+
+ Printf("WARNING: Expected use of uninitialized value not found\n");
+ stack->Print();
+}
+
+void ReportStats() {
+ ScopedErrorReportLock l;
+
+ if (__msan_get_track_origins() > 0) {
+ StackDepotStats *stack_depot_stats = StackDepotGetStats();
+ // FIXME: we want this at normal exit, too!
+ // FIXME: but only with verbosity=1 or something
+ Printf("Unique heap origins: %zu\n", stack_depot_stats->n_uniq_ids);
+ Printf("Stack depot allocated bytes: %zu\n", stack_depot_stats->allocated);
+
+ StackDepotStats *chained_origin_depot_stats = ChainedOriginDepotGetStats();
+ Printf("Unique origin histories: %zu\n",
+ chained_origin_depot_stats->n_uniq_ids);
+ Printf("History depot allocated bytes: %zu\n",
+ chained_origin_depot_stats->allocated);
+ }
+}
+
+void ReportAtExitStatistics() {
+ ScopedErrorReportLock l;
+
+ if (msan_report_count > 0) {
+ Decorator d;
+ Printf("%s", d.Warning());
+ Printf("MemorySanitizer: %d warnings reported.\n", msan_report_count);
+ Printf("%s", d.Default());
+ }
+}
+
+class OriginSet {
+ public:
+ OriginSet() : next_id_(0) {}
+ int insert(u32 o) {
+ // Scan from the end for better locality.
+ for (int i = next_id_ - 1; i >= 0; --i)
+ if (origins_[i] == o) return i;
+ if (next_id_ == kMaxSize_) return OVERFLOW;
+ int id = next_id_++;
+ origins_[id] = o;
+ return id;
+ }
+ int size() { return next_id_; }
+ u32 get(int id) { return origins_[id]; }
+ static char asChar(int id) {
+ switch (id) {
+ case MISSING:
+ return '.';
+ case OVERFLOW:
+ return '*';
+ default:
+ return 'A' + id;
+ }
+ }
+ static const int OVERFLOW = -1;
+ static const int MISSING = -2;
+
+ private:
+ static const int kMaxSize_ = 'Z' - 'A' + 1;
+ u32 origins_[kMaxSize_];
+ int next_id_;
+};
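
A quick hedged illustration of how DescribeMemoryRange below consumes this set: each distinct origin id receives a letter, repeats return their existing slot, and clean quads print as '.':

  // Illustrative only; OriginSet is the class directly above.
  static void OriginSetExample() {
    OriginSet set;
    int a = set.insert(0x100);                        // slot 0 -> printed as 'A'
    int b = set.insert(0x200);                        // slot 1 -> 'B'
    int again = set.insert(0x100);                    // 0 again: already present
    char c1 = OriginSet::asChar(again);               // 'A'
    char c2 = OriginSet::asChar(OriginSet::MISSING);  // '.' for a clean quad
    (void)a; (void)b; (void)c1; (void)c2;
  }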
+
+void DescribeMemoryRange(const void *x, uptr size) {
+ // Real limits.
+ uptr start = MEM_TO_SHADOW(x);
+ uptr end = start + size;
+ // Scan limits: align start down to 4; align size up to 16.
+ uptr s = start & ~3UL;
+ size = end - s;
+ size = (size + 15) & ~15UL;
+ uptr e = s + size;
+
+ // Single letter names to origin id mapping.
+ OriginSet origin_set;
+
+ uptr pos = 0; // Offset from aligned start.
+ bool with_origins = __msan_get_track_origins();
+ // True if there is at least 1 poisoned bit in the last 4-byte group.
+ bool last_quad_poisoned;
+ int origin_ids[4]; // Single letter origin ids for the current line.
+
+ Decorator d;
+ Printf("%s", d.Warning());
+ Printf("Shadow map of [%p, %p), %zu bytes:\n", start, end, end - start);
+ Printf("%s", d.Default());
+ while (s < e) {
+ // Line start.
+ if (pos % 16 == 0) {
+ for (int i = 0; i < 4; ++i) origin_ids[i] = -1;
+ Printf("%p:", s);
+ }
+ // Group start.
+ if (pos % 4 == 0) {
+ Printf(" ");
+ last_quad_poisoned = false;
+ }
+ // Print shadow byte.
+ if (s < start || s >= end) {
+ Printf("..");
+ } else {
+ unsigned char v = *(unsigned char *)s;
+ if (v) last_quad_poisoned = true;
+ Printf("%x%x", v >> 4, v & 0xf);
+ }
+ // Group end.
+ if (pos % 4 == 3 && with_origins) {
+ int id = OriginSet::MISSING;
+ if (last_quad_poisoned) {
+ u32 o = *(u32 *)SHADOW_TO_ORIGIN(s - 3);
+ id = origin_set.insert(o);
+ }
+ origin_ids[(pos % 16) / 4] = id;
+ }
+ // Line end.
+ if (pos % 16 == 15) {
+ if (with_origins) {
+ Printf(" |");
+ for (int i = 0; i < 4; ++i) {
+ char c = OriginSet::asChar(origin_ids[i]);
+ Printf("%c", c);
+ if (i != 3) Printf(" ");
+ }
+ Printf("|");
+ }
+ Printf("\n");
+ }
+ size--;
+ s++;
+ pos++;
+ }
+
+ Printf("\n");
+
+ for (int i = 0; i < origin_set.size(); ++i) {
+ u32 o = origin_set.get(i);
+ Printf("Origin %c (origin_id %x):\n", OriginSet::asChar(i), o);
+ DescribeOrigin(o);
+ }
+}
+
+void ReportUMRInsideAddressRange(const char *what, const void *start, uptr size,
+ uptr offset) {
+ Decorator d;
+ Printf("%s", d.Warning());
+ Printf("%sUninitialized bytes in %s%s%s at offset %zu inside [%p, %zu)%s\n",
+ d.Warning(), d.Name(), what, d.Warning(), offset, start, size,
+ d.Default());
+ if (__sanitizer::Verbosity())
+ DescribeMemoryRange(start, size);
+}
+
+} // namespace __msan
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_report.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_report.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_report.h (revision 351984)
@@ -0,0 +1,33 @@
+//===-- msan_report.h -------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file is a part of MemorySanitizer. MSan-private header for error
+/// reporting functions.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef MSAN_REPORT_H
+#define MSAN_REPORT_H
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+
+namespace __msan {
+
+void ReportUMR(StackTrace *stack, u32 origin);
+void ReportExpectedUMRNotFound(StackTrace *stack);
+void ReportStats();
+void ReportAtExitStatistics();
+void DescribeMemoryRange(const void *x, uptr size);
+void ReportUMRInsideAddressRange(const char *what, const void *start, uptr size,
+ uptr offset);
+
+} // namespace __msan
+
+#endif // MSAN_REPORT_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_report.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_thread.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_thread.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_thread.h (revision 351984)
@@ -0,0 +1,70 @@
+//===-- msan_thread.h -------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of MemorySanitizer.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef MSAN_THREAD_H
+#define MSAN_THREAD_H
+
+#include "msan_allocator.h"
+#include "sanitizer_common/sanitizer_common.h"
+
+namespace __msan {
+
+class MsanThread {
+ public:
+ static MsanThread *Create(thread_callback_t start_routine, void *arg);
+ static void TSDDtor(void *tsd);
+ void Destroy();
+
+ void Init(); // Should be called from the thread itself.
+ thread_return_t ThreadStart();
+
+ uptr stack_top() { return stack_top_; }
+ uptr stack_bottom() { return stack_bottom_; }
+ uptr tls_begin() { return tls_begin_; }
+ uptr tls_end() { return tls_end_; }
+ bool IsMainThread() { return start_routine_ == nullptr; }
+
+ bool AddrIsInStack(uptr addr) {
+ return addr >= stack_bottom_ && addr < stack_top_;
+ }
+
+ bool InSignalHandler() { return in_signal_handler_; }
+ void EnterSignalHandler() { in_signal_handler_++; }
+ void LeaveSignalHandler() { in_signal_handler_--; }
+
+ MsanThreadLocalMallocStorage &malloc_storage() { return malloc_storage_; }
+
+ int destructor_iterations_;
+
+ private:
+ // NOTE: There is no MsanThread constructor. It is allocated
+ // via mmap() and *must* be valid in zero-initialized state.
+ void SetThreadStackAndTls();
+ void ClearShadowForThreadStackAndTLS();
+ thread_callback_t start_routine_;
+ void *arg_;
+ uptr stack_top_;
+ uptr stack_bottom_;
+ uptr tls_begin_;
+ uptr tls_end_;
+
+ unsigned in_signal_handler_;
+
+ MsanThreadLocalMallocStorage malloc_storage_;
+};
+
+MsanThread *GetCurrentThread();
+void SetCurrentThread(MsanThread *t);
+
+} // namespace __msan
+
+#endif // MSAN_THREAD_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_thread.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_thread.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_thread.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_thread.cc (revision 351984)
@@ -0,0 +1,82 @@
+
+#include "msan.h"
+#include "msan_thread.h"
+#include "msan_interface_internal.h"
+
+#include "sanitizer_common/sanitizer_tls_get_addr.h"
+
+namespace __msan {
+
+MsanThread *MsanThread::Create(thread_callback_t start_routine,
+ void *arg) {
+ uptr PageSize = GetPageSizeCached();
+ uptr size = RoundUpTo(sizeof(MsanThread), PageSize);
+ MsanThread *thread = (MsanThread*)MmapOrDie(size, __func__);
+ thread->start_routine_ = start_routine;
+ thread->arg_ = arg;
+ thread->destructor_iterations_ = GetPthreadDestructorIterations();
+
+ return thread;
+}
+
+void MsanThread::SetThreadStackAndTls() {
+ uptr tls_size = 0;
+ uptr stack_size = 0;
+ GetThreadStackAndTls(IsMainThread(), &stack_bottom_, &stack_size,
+ &tls_begin_, &tls_size);
+ stack_top_ = stack_bottom_ + stack_size;
+ tls_end_ = tls_begin_ + tls_size;
+
+ int local;
+ CHECK(AddrIsInStack((uptr)&local));
+}
+
+void MsanThread::ClearShadowForThreadStackAndTLS() {
+ __msan_unpoison((void *)stack_bottom_, stack_top_ - stack_bottom_);
+ if (tls_begin_ != tls_end_)
+ __msan_unpoison((void *)tls_begin_, tls_end_ - tls_begin_);
+ DTLS *dtls = DTLS_Get();
+ CHECK_NE(dtls, 0);
+ for (uptr i = 0; i < dtls->dtv_size; ++i)
+ __msan_unpoison((void *)(dtls->dtv[i].beg), dtls->dtv[i].size);
+}
+
+void MsanThread::Init() {
+ SetThreadStackAndTls();
+ CHECK(MEM_IS_APP(stack_bottom_));
+ CHECK(MEM_IS_APP(stack_top_ - 1));
+ ClearShadowForThreadStackAndTLS();
+}
+
+void MsanThread::TSDDtor(void *tsd) {
+ MsanThread *t = (MsanThread*)tsd;
+ t->Destroy();
+}
+
+void MsanThread::Destroy() {
+ malloc_storage().CommitBack();
+ // We also clear the shadow on thread destruction because
+ // some code may still be executing in later TSD destructors
+ // and we don't want it to have any poisoned stack.
+ ClearShadowForThreadStackAndTLS();
+ uptr size = RoundUpTo(sizeof(MsanThread), GetPageSizeCached());
+ UnmapOrDie(this, size);
+ DTLS_Destroy();
+}
+
+thread_return_t MsanThread::ThreadStart() {
+ Init();
+
+ if (!start_routine_) {
+ // start_routine_ == 0 if we're on the main thread or on one of the
+ // OS X libdispatch worker threads. But nobody is supposed to call
+ // ThreadStart() for the worker threads.
+ return 0;
+ }
+
+ thread_return_t res = start_routine_(arg_);
+
+ return res;
+}
+
+} // namespace __msan
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_thread.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan.syms.extra
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan.syms.extra (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan.syms.extra (revision 351984)
@@ -0,0 +1,2 @@
+__msan_*
+__ubsan_*
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_blacklist.txt
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_blacklist.txt (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/msan/msan_blacklist.txt (revision 351984)
@@ -0,0 +1,7 @@
+# Blacklist for MemorySanitizer. Turns off instrumentation of particular
+# functions or sources. Use with care. You may set the location of the
+# blacklist at compile time using the -fsanitize-blacklist=<path> flag.
+
+# Example usage:
+# fun:*bad_function_name*
+# src:file_with_tricky_code.cc
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_linux.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_linux.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_linux.cc (revision 351984)
@@ -0,0 +1,2135 @@
+//===-- sanitizer_linux.cc ------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries and implements linux-specific functions from
+// sanitizer_libc.h.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+
+#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
+ SANITIZER_OPENBSD || SANITIZER_SOLARIS
+
+#include "sanitizer_common.h"
+#include "sanitizer_flags.h"
+#include "sanitizer_getauxval.h"
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_linux.h"
+#include "sanitizer_mutex.h"
+#include "sanitizer_placement_new.h"
+#include "sanitizer_procmaps.h"
+
+#if SANITIZER_LINUX
+#include <asm/param.h>
+#endif
+
+// For mips64, syscall(__NR_stat) fills the buffer in the 'struct kernel_stat'
+// format. Struct kernel_stat is defined as 'struct stat' in asm/stat.h. To
+// access stat from asm/stat.h without conflicting with the definition in
+// sys/stat.h, we use this trick.
+#if defined(__mips64)
+#include <asm/unistd.h>
+#include <sys/types.h>
+#define stat kernel_stat
+#include <asm/stat.h>
+#undef stat
+#endif
+
+#include <dlfcn.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <link.h>
+#include <pthread.h>
+#include <sched.h>
+#include <signal.h>
+#include <sys/mman.h>
+#include <sys/param.h>
+#if !SANITIZER_SOLARIS
+#include <sys/ptrace.h>
+#endif
+#include <sys/resource.h>
+#include <sys/stat.h>
+#include <sys/syscall.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#if !SANITIZER_OPENBSD
+#include <ucontext.h>
+#endif
+#if SANITIZER_OPENBSD
+#include <sys/futex.h>
+#include <sys/sysctl.h>
+#endif
+#include <unistd.h>
+
+#if SANITIZER_LINUX
+#include <sys/utsname.h>
+#endif
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+#include <sys/personality.h>
+#endif
+
+#if SANITIZER_FREEBSD
+#include <sys/exec.h>
+#include <sys/sysctl.h>
+#include <machine/atomic.h>
+extern "C" {
+// <sys/umtx.h> must be included after <errno.h> and <sys/types.h> on
+// FreeBSD 9.2 and 10.0.
+#include <sys/umtx.h>
+}
+#include <sys/thr.h>
+#endif // SANITIZER_FREEBSD
+
+#if SANITIZER_NETBSD
+#include <limits.h> // For NAME_MAX
+#include <sys/sysctl.h>
+#include <sys/exec.h>
+extern struct ps_strings *__ps_strings;
+#endif // SANITIZER_NETBSD
+
+#if SANITIZER_SOLARIS
+#include <stdlib.h>
+#include <thread.h>
+#define environ _environ
+#endif
+
+extern char **environ;
+
+#if SANITIZER_LINUX
+// <linux/time.h>
+struct kernel_timeval {
+ long tv_sec;
+ long tv_usec;
+};
+
+// <linux/futex.h> is broken on some linux distributions.
+const int FUTEX_WAIT = 0;
+const int FUTEX_WAKE = 1;
+const int FUTEX_PRIVATE_FLAG = 128;
+const int FUTEX_WAIT_PRIVATE = FUTEX_WAIT | FUTEX_PRIVATE_FLAG;
+const int FUTEX_WAKE_PRIVATE = FUTEX_WAKE | FUTEX_PRIVATE_FLAG;
+#endif // SANITIZER_LINUX
+
+// Are we using 32-bit or 64-bit Linux syscalls?
+// x32 (which defines __x86_64__) has SANITIZER_WORDSIZE == 32
+// but it still needs to use 64-bit syscalls.
+#if SANITIZER_LINUX && (defined(__x86_64__) || defined(__powerpc64__) || \
+ SANITIZER_WORDSIZE == 64)
+# define SANITIZER_LINUX_USES_64BIT_SYSCALLS 1
+#else
+# define SANITIZER_LINUX_USES_64BIT_SYSCALLS 0
+#endif
+
+// Note: FreeBSD has implemented both the Linux and OpenBSD APIs, which will
+// most likely become available starting with the 12.x releases.
+#if SANITIZER_LINUX && defined(__NR_getrandom)
+# if !defined(GRND_NONBLOCK)
+# define GRND_NONBLOCK 1
+# endif
+# define SANITIZER_USE_GETRANDOM 1
+#else
+# define SANITIZER_USE_GETRANDOM 0
+#endif // SANITIZER_LINUX && defined(__NR_getrandom)
+
+#if SANITIZER_OPENBSD
+# define SANITIZER_USE_GETENTROPY 1
+#else
+# if SANITIZER_FREEBSD && __FreeBSD_version >= 1200000
+# define SANITIZER_USE_GETENTROPY 1
+# else
+# define SANITIZER_USE_GETENTROPY 0
+# endif
+#endif // SANITIZER_USE_GETENTROPY
+
+namespace __sanitizer {
+
+#if SANITIZER_LINUX && defined(__x86_64__)
+#include "sanitizer_syscall_linux_x86_64.inc"
+#elif SANITIZER_LINUX && defined(__aarch64__)
+#include "sanitizer_syscall_linux_aarch64.inc"
+#elif SANITIZER_LINUX && defined(__arm__)
+#include "sanitizer_syscall_linux_arm.inc"
+#else
+#include "sanitizer_syscall_generic.inc"
+#endif
+
+// --------------- sanitizer_libc.h
+#if !SANITIZER_SOLARIS && !SANITIZER_NETBSD
+#if !SANITIZER_S390 && !SANITIZER_OPENBSD
+uptr internal_mmap(void *addr, uptr length, int prot, int flags, int fd,
+ OFF_T offset) {
+#if SANITIZER_FREEBSD || SANITIZER_LINUX_USES_64BIT_SYSCALLS
+ return internal_syscall(SYSCALL(mmap), (uptr)addr, length, prot, flags, fd,
+ offset);
+#else
+ // mmap2 specifies file offset in 4096-byte units.
+ CHECK(IsAligned(offset, 4096));
+ return internal_syscall(SYSCALL(mmap2), addr, length, prot, flags, fd,
+ offset / 4096);
+#endif
+}
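+// A usage sketch (illustrative only, not called anywhere): an anonymous
+// mapping through the raw wrapper above; errors are detected with
+// internal_iserror() as elsewhere in this file.
+//
+//   uptr p = internal_mmap(nullptr, 4096, PROT_READ | PROT_WRITE,
+//                          MAP_PRIVATE | MAP_ANON, -1, 0);
+//   if (!internal_iserror(p)) internal_munmap((void *)p, 4096);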
+#endif // !SANITIZER_S390 && !SANITIZER_OPENBSD
+
+#if !SANITIZER_OPENBSD
+uptr internal_munmap(void *addr, uptr length) {
+ return internal_syscall(SYSCALL(munmap), (uptr)addr, length);
+}
+
+int internal_mprotect(void *addr, uptr length, int prot) {
+ return internal_syscall(SYSCALL(mprotect), (uptr)addr, length, prot);
+}
+#endif
+
+uptr internal_close(fd_t fd) {
+ return internal_syscall(SYSCALL(close), fd);
+}
+
+uptr internal_open(const char *filename, int flags) {
+#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+ return internal_syscall(SYSCALL(openat), AT_FDCWD, (uptr)filename, flags);
+#else
+ return internal_syscall(SYSCALL(open), (uptr)filename, flags);
+#endif
+}
+
+uptr internal_open(const char *filename, int flags, u32 mode) {
+#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+ return internal_syscall(SYSCALL(openat), AT_FDCWD, (uptr)filename, flags,
+ mode);
+#else
+ return internal_syscall(SYSCALL(open), (uptr)filename, flags, mode);
+#endif
+}
+
+uptr internal_read(fd_t fd, void *buf, uptr count) {
+ sptr res;
+ HANDLE_EINTR(res,
+ (sptr)internal_syscall(SYSCALL(read), fd, (uptr)buf, count));
+ return res;
+}
+
+uptr internal_write(fd_t fd, const void *buf, uptr count) {
+ sptr res;
+ HANDLE_EINTR(res,
+ (sptr)internal_syscall(SYSCALL(write), fd, (uptr)buf, count));
+ return res;
+}
+
+uptr internal_ftruncate(fd_t fd, uptr size) {
+ sptr res;
+ HANDLE_EINTR(res, (sptr)internal_syscall(SYSCALL(ftruncate), fd,
+ (OFF_T)size));
+ return res;
+}
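+// A usage sketch for the fd wrappers above (illustrative only; the path is
+// hypothetical):
+//
+//   uptr fd = internal_open("/tmp/example", O_RDONLY);
+//   if (!internal_iserror(fd)) {
+//     char buf[64];
+//     uptr n = internal_read(fd, buf, sizeof(buf));
+//     internal_close(fd);
+//   }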
+
+#if !SANITIZER_LINUX_USES_64BIT_SYSCALLS && SANITIZER_LINUX
+static void stat64_to_stat(struct stat64 *in, struct stat *out) {
+ internal_memset(out, 0, sizeof(*out));
+ out->st_dev = in->st_dev;
+ out->st_ino = in->st_ino;
+ out->st_mode = in->st_mode;
+ out->st_nlink = in->st_nlink;
+ out->st_uid = in->st_uid;
+ out->st_gid = in->st_gid;
+ out->st_rdev = in->st_rdev;
+ out->st_size = in->st_size;
+ out->st_blksize = in->st_blksize;
+ out->st_blocks = in->st_blocks;
+ out->st_atime = in->st_atime;
+ out->st_mtime = in->st_mtime;
+ out->st_ctime = in->st_ctime;
+}
+#endif
+
+#if defined(__mips64)
+// Undefine compatibility macros from <sys/stat.h>
+// so that they do not clash with the kernel_stat
+// st_[a|m|c]time fields.
+#undef st_atime
+#undef st_mtime
+#undef st_ctime
+#if SANITIZER_ANDROID
+// Bionic sys/stat.h defines additional macros
+// for compatibility with the old NDKs and
+// they clash with the kernel_stat structure
+// st_[a|m|c]time_nsec fields.
+#undef st_atime_nsec
+#undef st_mtime_nsec
+#undef st_ctime_nsec
+#endif
+static void kernel_stat_to_stat(struct kernel_stat *in, struct stat *out) {
+ internal_memset(out, 0, sizeof(*out));
+ out->st_dev = in->st_dev;
+ out->st_ino = in->st_ino;
+ out->st_mode = in->st_mode;
+ out->st_nlink = in->st_nlink;
+ out->st_uid = in->st_uid;
+ out->st_gid = in->st_gid;
+ out->st_rdev = in->st_rdev;
+ out->st_size = in->st_size;
+ out->st_blksize = in->st_blksize;
+ out->st_blocks = in->st_blocks;
+#if defined(__USE_MISC) || \
+    defined(__USE_XOPEN2K8) || \
+    SANITIZER_ANDROID
+ out->st_atim.tv_sec = in->st_atime;
+ out->st_atim.tv_nsec = in->st_atime_nsec;
+ out->st_mtim.tv_sec = in->st_mtime;
+ out->st_mtim.tv_nsec = in->st_mtime_nsec;
+ out->st_ctim.tv_sec = in->st_ctime;
+ out->st_ctim.tv_nsec = in->st_ctime_nsec;
+#else
+ out->st_atime = in->st_atime;
+ out->st_atimensec = in->st_atime_nsec;
+ out->st_mtime = in->st_mtime;
+ out->st_mtimensec = in->st_mtime_nsec;
+ out->st_ctime = in->st_ctime;
+  out->st_ctimensec = in->st_ctime_nsec;
+#endif
+}
+#endif
+
+uptr internal_stat(const char *path, void *buf) {
+#if SANITIZER_FREEBSD || SANITIZER_OPENBSD
+ return internal_syscall(SYSCALL(fstatat), AT_FDCWD, (uptr)path, (uptr)buf, 0);
+#elif SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+ return internal_syscall(SYSCALL(newfstatat), AT_FDCWD, (uptr)path, (uptr)buf,
+ 0);
+#elif SANITIZER_LINUX_USES_64BIT_SYSCALLS
+# if defined(__mips64)
+ // For mips64, stat syscall fills buffer in the format of kernel_stat
+ struct kernel_stat kbuf;
+ int res = internal_syscall(SYSCALL(stat), path, &kbuf);
+ kernel_stat_to_stat(&kbuf, (struct stat *)buf);
+ return res;
+# else
+ return internal_syscall(SYSCALL(stat), (uptr)path, (uptr)buf);
+# endif
+#else
+ struct stat64 buf64;
+ int res = internal_syscall(SYSCALL(stat64), path, &buf64);
+ stat64_to_stat(&buf64, (struct stat *)buf);
+ return res;
+#endif
+}
+
+uptr internal_lstat(const char *path, void *buf) {
+#if SANITIZER_FREEBSD || SANITIZER_OPENBSD
+ return internal_syscall(SYSCALL(fstatat), AT_FDCWD, (uptr)path, (uptr)buf,
+ AT_SYMLINK_NOFOLLOW);
+#elif SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+ return internal_syscall(SYSCALL(newfstatat), AT_FDCWD, (uptr)path, (uptr)buf,
+ AT_SYMLINK_NOFOLLOW);
+#elif SANITIZER_LINUX_USES_64BIT_SYSCALLS
+# if SANITIZER_MIPS64
+ // For mips64, lstat syscall fills buffer in the format of kernel_stat
+ struct kernel_stat kbuf;
+ int res = internal_syscall(SYSCALL(lstat), path, &kbuf);
+ kernel_stat_to_stat(&kbuf, (struct stat *)buf);
+ return res;
+# else
+ return internal_syscall(SYSCALL(lstat), (uptr)path, (uptr)buf);
+# endif
+#else
+ struct stat64 buf64;
+ int res = internal_syscall(SYSCALL(lstat64), path, &buf64);
+ stat64_to_stat(&buf64, (struct stat *)buf);
+ return res;
+#endif
+}
+
+uptr internal_fstat(fd_t fd, void *buf) {
+#if SANITIZER_FREEBSD || SANITIZER_OPENBSD || \
+ SANITIZER_LINUX_USES_64BIT_SYSCALLS
+# if SANITIZER_MIPS64 && !SANITIZER_OPENBSD
+ // For mips64, fstat syscall fills buffer in the format of kernel_stat
+ struct kernel_stat kbuf;
+ int res = internal_syscall(SYSCALL(fstat), fd, &kbuf);
+ kernel_stat_to_stat(&kbuf, (struct stat *)buf);
+ return res;
+# else
+ return internal_syscall(SYSCALL(fstat), fd, (uptr)buf);
+# endif
+#else
+ struct stat64 buf64;
+ int res = internal_syscall(SYSCALL(fstat64), fd, &buf64);
+ stat64_to_stat(&buf64, (struct stat *)buf);
+ return res;
+#endif
+}
+
+uptr internal_filesize(fd_t fd) {
+ struct stat st;
+ if (internal_fstat(fd, &st))
+ return -1;
+ return (uptr)st.st_size;
+}
+
+uptr internal_dup(int oldfd) {
+ return internal_syscall(SYSCALL(dup), oldfd);
+}
+
+uptr internal_dup2(int oldfd, int newfd) {
+#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+ return internal_syscall(SYSCALL(dup3), oldfd, newfd, 0);
+#else
+ return internal_syscall(SYSCALL(dup2), oldfd, newfd);
+#endif
+}
+
+uptr internal_readlink(const char *path, char *buf, uptr bufsize) {
+#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+ return internal_syscall(SYSCALL(readlinkat), AT_FDCWD, (uptr)path, (uptr)buf,
+ bufsize);
+#elif SANITIZER_OPENBSD
+ return internal_syscall(SYSCALL(readlinkat), AT_FDCWD, (uptr)path, (uptr)buf,
+ bufsize);
+#else
+ return internal_syscall(SYSCALL(readlink), (uptr)path, (uptr)buf, bufsize);
+#endif
+}
+
+uptr internal_unlink(const char *path) {
+#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS || SANITIZER_OPENBSD
+ return internal_syscall(SYSCALL(unlinkat), AT_FDCWD, (uptr)path, 0);
+#else
+ return internal_syscall(SYSCALL(unlink), (uptr)path);
+#endif
+}
+
+uptr internal_rename(const char *oldpath, const char *newpath) {
+#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS || SANITIZER_OPENBSD
+ return internal_syscall(SYSCALL(renameat), AT_FDCWD, (uptr)oldpath, AT_FDCWD,
+ (uptr)newpath);
+#else
+ return internal_syscall(SYSCALL(rename), (uptr)oldpath, (uptr)newpath);
+#endif
+}
+
+uptr internal_sched_yield() {
+ return internal_syscall(SYSCALL(sched_yield));
+}
+
+void internal__exit(int exitcode) {
+#if SANITIZER_FREEBSD || SANITIZER_OPENBSD
+ internal_syscall(SYSCALL(exit), exitcode);
+#else
+ internal_syscall(SYSCALL(exit_group), exitcode);
+#endif
+ Die(); // Unreachable.
+}
+
+unsigned int internal_sleep(unsigned int seconds) {
+ struct timespec ts;
+ ts.tv_sec = seconds;
+ ts.tv_nsec = 0;
+ int res = internal_syscall(SYSCALL(nanosleep), &ts, &ts);
+ if (res) return ts.tv_sec;
+ return 0;
+}
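+// Note: like sleep(3), the function above returns 0 after a full sleep and
+// the remaining whole seconds if the underlying nanosleep was interrupted.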
+
+uptr internal_execve(const char *filename, char *const argv[],
+ char *const envp[]) {
+ return internal_syscall(SYSCALL(execve), (uptr)filename, (uptr)argv,
+ (uptr)envp);
+}
+#endif // !SANITIZER_SOLARIS && !SANITIZER_NETBSD
+
+// ----------------- sanitizer_common.h
+bool FileExists(const char *filename) {
+ if (ShouldMockFailureToOpen(filename))
+ return false;
+ struct stat st;
+#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+ if (internal_syscall(SYSCALL(newfstatat), AT_FDCWD, filename, &st, 0))
+#else
+ if (internal_stat(filename, &st))
+#endif
+ return false;
+ // Sanity check: filename is a regular file.
+ return S_ISREG(st.st_mode);
+}
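+// For example, FileExists("/proc/self/maps") is expected to be true on
+// Linux, while FileExists on a directory or a missing path returns false
+// (illustrative only).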
+
+#if !SANITIZER_NETBSD
+tid_t GetTid() {
+#if SANITIZER_FREEBSD
+ long Tid;
+ thr_self(&Tid);
+ return Tid;
+#elif SANITIZER_OPENBSD
+ return internal_syscall(SYSCALL(getthrid));
+#elif SANITIZER_SOLARIS
+ return thr_self();
+#else
+ return internal_syscall(SYSCALL(gettid));
+#endif
+}
+
+int TgKill(pid_t pid, tid_t tid, int sig) {
+#if SANITIZER_LINUX
+ return internal_syscall(SYSCALL(tgkill), pid, tid, sig);
+#elif SANITIZER_FREEBSD
+ return internal_syscall(SYSCALL(thr_kill2), pid, tid, sig);
+#elif SANITIZER_OPENBSD
+ (void)pid;
+ return internal_syscall(SYSCALL(thrkill), tid, sig, nullptr);
+#elif SANITIZER_SOLARIS
+ (void)pid;
+ return thr_kill(tid, sig);
+#endif
+}
+#endif
+
+#if !SANITIZER_SOLARIS && !SANITIZER_NETBSD
+u64 NanoTime() {
+#if SANITIZER_FREEBSD || SANITIZER_OPENBSD
+ timeval tv;
+#else
+ kernel_timeval tv;
+#endif
+ internal_memset(&tv, 0, sizeof(tv));
+ internal_syscall(SYSCALL(gettimeofday), &tv, 0);
+ return (u64)tv.tv_sec * 1000*1000*1000 + tv.tv_usec * 1000;
+}
+
+uptr internal_clock_gettime(__sanitizer_clockid_t clk_id, void *tp) {
+ return internal_syscall(SYSCALL(clock_gettime), clk_id, tp);
+}
+#endif // !SANITIZER_SOLARIS && !SANITIZER_NETBSD
+
+// Like getenv, but reads env directly from /proc (on Linux) or parses the
+// 'environ' array (on the BSDs and Solaris) and does not use libc. This
+// function should be called first inside __asan_init.
+const char *GetEnv(const char *name) {
+#if SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_OPENBSD || \
+ SANITIZER_SOLARIS
+ if (::environ != 0) {
+ uptr NameLen = internal_strlen(name);
+ for (char **Env = ::environ; *Env != 0; Env++) {
+ if (internal_strncmp(*Env, name, NameLen) == 0 && (*Env)[NameLen] == '=')
+ return (*Env) + NameLen + 1;
+ }
+ }
+ return 0; // Not found.
+#elif SANITIZER_LINUX
+ static char *environ;
+ static uptr len;
+ static bool inited;
+ if (!inited) {
+ inited = true;
+ uptr environ_size;
+ if (!ReadFileToBuffer("/proc/self/environ", &environ, &environ_size, &len))
+ environ = nullptr;
+ }
+ if (!environ || len == 0) return nullptr;
+ uptr namelen = internal_strlen(name);
+ const char *p = environ;
+  while (*p != '\0') { // Ends at the \0\0 that terminates the buffer.
+    // The proc file has the format NAME=value\0NAME=value\0NAME=value\0...
+    const char* endp =
+        (char*)internal_memchr(p, '\0', len - (p - environ));
+    if (!endp) // This entry isn't NUL terminated.
+      return nullptr;
+    else if (!internal_memcmp(p, name, namelen) && p[namelen] == '=') // Match.
+      return p + namelen + 1; // Point after the '='.
+ p = endp + 1;
+ }
+ return nullptr; // Not found.
+#else
+#error "Unsupported platform"
+#endif
+}
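+// A usage sketch (illustrative only):
+//
+//   const char *options = GetEnv("ASAN_OPTIONS");
+//   if (options) Report("ASAN_OPTIONS=%s\n", options);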
+
+#if !SANITIZER_FREEBSD && !SANITIZER_NETBSD && !SANITIZER_OPENBSD
+extern "C" {
+SANITIZER_WEAK_ATTRIBUTE extern void *__libc_stack_end;
+}
+#endif
+
+#if !SANITIZER_GO && !SANITIZER_FREEBSD && !SANITIZER_NETBSD && \
+ !SANITIZER_OPENBSD
+static void ReadNullSepFileToArray(const char *path, char ***arr,
+ int arr_size) {
+ char *buff;
+ uptr buff_size;
+ uptr buff_len;
+ *arr = (char **)MmapOrDie(arr_size * sizeof(char *), "NullSepFileArray");
+ if (!ReadFileToBuffer(path, &buff, &buff_size, &buff_len, 1024 * 1024)) {
+ (*arr)[0] = nullptr;
+ return;
+ }
+ (*arr)[0] = buff;
+ int count, i;
+ for (count = 1, i = 1; ; i++) {
+ if (buff[i] == 0) {
+ if (buff[i+1] == 0) break;
+      // Check before writing so that neither the entry nor the terminating
+      // nullptr below can land past the end of the array.
+      CHECK_LT(count, arr_size - 1); // FIXME: make this more flexible.
+      (*arr)[count] = &buff[i+1];
+ count++;
+ }
+ }
+ (*arr)[count] = nullptr;
+}
+#endif
+
+#if !SANITIZER_OPENBSD
+static void GetArgsAndEnv(char ***argv, char ***envp) {
+#if SANITIZER_FREEBSD
+ // On FreeBSD, retrieving the argument and environment arrays is done via the
+ // kern.ps_strings sysctl, which returns a pointer to a structure containing
+ // this information. See also <sys/exec.h>.
+ ps_strings *pss;
+ uptr sz = sizeof(pss);
+ if (internal_sysctlbyname("kern.ps_strings", &pss, &sz, NULL, 0) == -1) {
+ Printf("sysctl kern.ps_strings failed\n");
+ Die();
+ }
+ *argv = pss->ps_argvstr;
+ *envp = pss->ps_envstr;
+#elif SANITIZER_NETBSD
+ *argv = __ps_strings->ps_argvstr;
+ *envp = __ps_strings->ps_envstr;
+#else // SANITIZER_FREEBSD
+#if !SANITIZER_GO
+ if (&__libc_stack_end) {
+#endif // !SANITIZER_GO
+ uptr* stack_end = (uptr*)__libc_stack_end;
+ int argc = *stack_end;
+ *argv = (char**)(stack_end + 1);
+ *envp = (char**)(stack_end + argc + 2);
+#if !SANITIZER_GO
+ } else {
+ static const int kMaxArgv = 2000, kMaxEnvp = 2000;
+ ReadNullSepFileToArray("/proc/self/cmdline", argv, kMaxArgv);
+ ReadNullSepFileToArray("/proc/self/environ", envp, kMaxEnvp);
+ }
+#endif // !SANITIZER_GO
+#endif // SANITIZER_FREEBSD
+}
+
+char **GetArgv() {
+ char **argv, **envp;
+ GetArgsAndEnv(&argv, &envp);
+ return argv;
+}
+
+char **GetEnviron() {
+ char **argv, **envp;
+ GetArgsAndEnv(&argv, &envp);
+ return envp;
+}
+
+#endif // !SANITIZER_OPENBSD
+
+#if !SANITIZER_SOLARIS
+enum MutexState {
+ MtxUnlocked = 0,
+ MtxLocked = 1,
+ MtxSleeping = 2
+};
+
+BlockingMutex::BlockingMutex() {
+ internal_memset(this, 0, sizeof(*this));
+}
+
+void BlockingMutex::Lock() {
+ CHECK_EQ(owner_, 0);
+ atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
+ if (atomic_exchange(m, MtxLocked, memory_order_acquire) == MtxUnlocked)
+ return;
+ while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked) {
+#if SANITIZER_FREEBSD
+ _umtx_op(m, UMTX_OP_WAIT_UINT, MtxSleeping, 0, 0);
+#elif SANITIZER_NETBSD
+ sched_yield(); /* No userspace futex-like synchronization */
+#else
+ internal_syscall(SYSCALL(futex), (uptr)m, FUTEX_WAIT_PRIVATE, MtxSleeping,
+ 0, 0, 0);
+#endif
+ }
+}
+
+void BlockingMutex::Unlock() {
+ atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
+ u32 v = atomic_exchange(m, MtxUnlocked, memory_order_release);
+ CHECK_NE(v, MtxUnlocked);
+ if (v == MtxSleeping) {
+#if SANITIZER_FREEBSD
+ _umtx_op(m, UMTX_OP_WAKE, 1, 0, 0);
+#elif SANITIZER_NETBSD
+ /* No userspace futex-like synchronization */
+#else
+ internal_syscall(SYSCALL(futex), (uptr)m, FUTEX_WAKE_PRIVATE, 1, 0, 0, 0);
+#endif
+ }
+}
+
+void BlockingMutex::CheckLocked() {
+ atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
+ CHECK_NE(MtxUnlocked, atomic_load(m, memory_order_relaxed));
+}
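+// A usage sketch (illustrative only): this mutex is normally taken through
+// the RAII helper from sanitizer_mutex.h, roughly:
+//
+//   static BlockingMutex mu(LINKER_INITIALIZED);
+//   { BlockingMutexLock l(&mu); /* critical section */ }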
+#endif // !SANITIZER_SOLARIS
+
+// ----------------- sanitizer_linux.h
+// The actual size of this structure is specified by d_reclen.
+// Note that getdents64 uses a different structure format. We only provide the
+// 32-bit syscall here.
+#if SANITIZER_NETBSD
+// Not used
+#elif SANITIZER_OPENBSD
+// struct dirent differs between Linux and OpenBSD. At the moment we use only
+// d_fileno (which Linux calls d_ino), d_reclen, and d_name.
+struct linux_dirent {
+ u64 d_ino; // d_fileno
+ u16 d_reclen;
+ u16 d_namlen; // not used
+ u8 d_type; // not used
+ char d_name[NAME_MAX + 1];
+};
+#else
+struct linux_dirent {
+#if SANITIZER_X32 || defined(__aarch64__)
+ u64 d_ino;
+ u64 d_off;
+#else
+ unsigned long d_ino;
+ unsigned long d_off;
+#endif
+ unsigned short d_reclen;
+#ifdef __aarch64__
+ unsigned char d_type;
+#endif
+ char d_name[256];
+};
+#endif
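+// Entries returned by internal_getdents() are packed back to back, with
+// d_reclen giving each entry's actual size. An iteration sketch
+// (illustrative only; 'buf' and 'nread' are hypothetical, see ThreadLister
+// below for real use):
+//
+//   for (uptr p = (uptr)buf, end = p + nread; p < end;) {
+//     struct linux_dirent *e = (struct linux_dirent *)p;
+//     p += e->d_reclen;  // Advance by the entry's actual size.
+//   }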
+
+#if !SANITIZER_SOLARIS && !SANITIZER_NETBSD
+// Syscall wrappers.
+uptr internal_ptrace(int request, int pid, void *addr, void *data) {
+ return internal_syscall(SYSCALL(ptrace), request, pid, (uptr)addr,
+ (uptr)data);
+}
+
+uptr internal_waitpid(int pid, int *status, int options) {
+ return internal_syscall(SYSCALL(wait4), pid, (uptr)status, options,
+ 0 /* rusage */);
+}
+
+uptr internal_getpid() {
+ return internal_syscall(SYSCALL(getpid));
+}
+
+uptr internal_getppid() {
+ return internal_syscall(SYSCALL(getppid));
+}
+
+uptr internal_getdents(fd_t fd, struct linux_dirent *dirp, unsigned int count) {
+#if SANITIZER_FREEBSD
+ return internal_syscall(SYSCALL(getdirentries), fd, (uptr)dirp, count, NULL);
+#elif SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+ return internal_syscall(SYSCALL(getdents64), fd, (uptr)dirp, count);
+#else
+ return internal_syscall(SYSCALL(getdents), fd, (uptr)dirp, count);
+#endif
+}
+
+uptr internal_lseek(fd_t fd, OFF_T offset, int whence) {
+ return internal_syscall(SYSCALL(lseek), fd, offset, whence);
+}
+
+#if SANITIZER_LINUX
+uptr internal_prctl(int option, uptr arg2, uptr arg3, uptr arg4, uptr arg5) {
+ return internal_syscall(SYSCALL(prctl), option, arg2, arg3, arg4, arg5);
+}
+#endif
+
+uptr internal_sigaltstack(const void *ss, void *oss) {
+ return internal_syscall(SYSCALL(sigaltstack), (uptr)ss, (uptr)oss);
+}
+
+int internal_fork() {
+#if SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+ return internal_syscall(SYSCALL(clone), SIGCHLD, 0);
+#else
+ return internal_syscall(SYSCALL(fork));
+#endif
+}
+
+#if SANITIZER_FREEBSD || SANITIZER_OPENBSD
+int internal_sysctl(const int *name, unsigned int namelen, void *oldp,
+ uptr *oldlenp, const void *newp, uptr newlen) {
+#if SANITIZER_OPENBSD
+ return sysctl(name, namelen, oldp, (size_t *)oldlenp, (void *)newp,
+ (size_t)newlen);
+#else
+ return internal_syscall(SYSCALL(__sysctl), name, namelen, oldp,
+ (size_t *)oldlenp, newp, (size_t)newlen);
+#endif
+}
+
+#if SANITIZER_FREEBSD
+int internal_sysctlbyname(const char *sname, void *oldp, uptr *oldlenp,
+ const void *newp, uptr newlen) {
+ static decltype(sysctlbyname) *real = nullptr;
+ if (!real)
+ real = (decltype(sysctlbyname) *)dlsym(RTLD_NEXT, "sysctlbyname");
+ CHECK(real);
+ return real(sname, oldp, (size_t *)oldlenp, newp, (size_t)newlen);
+}
+#endif
+#endif
+
+#if SANITIZER_LINUX
+#define SA_RESTORER 0x04000000
+// Doesn't set sa_restorer if the caller did not set it, so use with caution
+// (see below).
+int internal_sigaction_norestorer(int signum, const void *act, void *oldact) {
+ __sanitizer_kernel_sigaction_t k_act, k_oldact;
+ internal_memset(&k_act, 0, sizeof(__sanitizer_kernel_sigaction_t));
+ internal_memset(&k_oldact, 0, sizeof(__sanitizer_kernel_sigaction_t));
+ const __sanitizer_sigaction *u_act = (const __sanitizer_sigaction *)act;
+ __sanitizer_sigaction *u_oldact = (__sanitizer_sigaction *)oldact;
+ if (u_act) {
+ k_act.handler = u_act->handler;
+ k_act.sigaction = u_act->sigaction;
+ internal_memcpy(&k_act.sa_mask, &u_act->sa_mask,
+ sizeof(__sanitizer_kernel_sigset_t));
+ // Without SA_RESTORER kernel ignores the calls (probably returns EINVAL).
+ k_act.sa_flags = u_act->sa_flags | SA_RESTORER;
+ // FIXME: most often sa_restorer is unset, however the kernel requires it
+ // to point to a valid signal restorer that calls the rt_sigreturn syscall.
+ // If sa_restorer passed to the kernel is NULL, the program may crash upon
+ // signal delivery or fail to unwind the stack in the signal handler.
+ // libc implementation of sigaction() passes its own restorer to
+ // rt_sigaction, so we need to do the same (we'll need to reimplement the
+ // restorers; for x86_64 the restorer address can be obtained from
+ // oldact->sa_restorer upon a call to sigaction(xxx, NULL, oldact).
+#if !SANITIZER_ANDROID || !SANITIZER_MIPS32
+ k_act.sa_restorer = u_act->sa_restorer;
+#endif
+ }
+
+ uptr result = internal_syscall(SYSCALL(rt_sigaction), (uptr)signum,
+ (uptr)(u_act ? &k_act : nullptr),
+ (uptr)(u_oldact ? &k_oldact : nullptr),
+ (uptr)sizeof(__sanitizer_kernel_sigset_t));
+
+ if ((result == 0) && u_oldact) {
+ u_oldact->handler = k_oldact.handler;
+ u_oldact->sigaction = k_oldact.sigaction;
+ internal_memcpy(&u_oldact->sa_mask, &k_oldact.sa_mask,
+ sizeof(__sanitizer_kernel_sigset_t));
+ u_oldact->sa_flags = k_oldact.sa_flags;
+#if !SANITIZER_ANDROID || !SANITIZER_MIPS32
+ u_oldact->sa_restorer = k_oldact.sa_restorer;
+#endif
+ }
+ return result;
+}
+#endif // SANITIZER_LINUX
+
+uptr internal_sigprocmask(int how, __sanitizer_sigset_t *set,
+ __sanitizer_sigset_t *oldset) {
+#if SANITIZER_FREEBSD || SANITIZER_OPENBSD
+ return internal_syscall(SYSCALL(sigprocmask), how, set, oldset);
+#else
+ __sanitizer_kernel_sigset_t *k_set = (__sanitizer_kernel_sigset_t *)set;
+ __sanitizer_kernel_sigset_t *k_oldset = (__sanitizer_kernel_sigset_t *)oldset;
+ return internal_syscall(SYSCALL(rt_sigprocmask), (uptr)how,
+ (uptr)&k_set->sig[0], (uptr)&k_oldset->sig[0],
+ sizeof(__sanitizer_kernel_sigset_t));
+#endif
+}
+
+void internal_sigfillset(__sanitizer_sigset_t *set) {
+ internal_memset(set, 0xff, sizeof(*set));
+}
+
+void internal_sigemptyset(__sanitizer_sigset_t *set) {
+ internal_memset(set, 0, sizeof(*set));
+}
+
+#if SANITIZER_LINUX
+void internal_sigdelset(__sanitizer_sigset_t *set, int signum) {
+ signum -= 1;
+ CHECK_GE(signum, 0);
+ CHECK_LT(signum, sizeof(*set) * 8);
+ __sanitizer_kernel_sigset_t *k_set = (__sanitizer_kernel_sigset_t *)set;
+ const uptr idx = signum / (sizeof(k_set->sig[0]) * 8);
+ const uptr bit = signum % (sizeof(k_set->sig[0]) * 8);
+ k_set->sig[idx] &= ~(1 << bit);
+}
+
+bool internal_sigismember(__sanitizer_sigset_t *set, int signum) {
+ signum -= 1;
+ CHECK_GE(signum, 0);
+ CHECK_LT(signum, sizeof(*set) * 8);
+ __sanitizer_kernel_sigset_t *k_set = (__sanitizer_kernel_sigset_t *)set;
+ const uptr idx = signum / (sizeof(k_set->sig[0]) * 8);
+ const uptr bit = signum % (sizeof(k_set->sig[0]) * 8);
+ return k_set->sig[idx] & (1 << bit);
+}
+#elif SANITIZER_FREEBSD
+void internal_sigdelset(__sanitizer_sigset_t *set, int signum) {
+ sigset_t *rset = reinterpret_cast<sigset_t *>(set);
+ sigdelset(rset, signum);
+}
+
+bool internal_sigismember(__sanitizer_sigset_t *set, int signum) {
+ sigset_t *rset = reinterpret_cast<sigset_t *>(set);
+ return sigismember(rset, signum);
+}
+#endif
+#endif // !SANITIZER_SOLARIS
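+// Worked example for the Linux bit arithmetic above (illustrative): for
+// signal 33 (glibc's internal SIGSETXID, see internal_start_thread below),
+// signum becomes 32 after the decrement, and idx/bit split that position
+// across the sig[] words of the raw kernel sigset.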
+
+#if !SANITIZER_NETBSD
+// ThreadLister implementation.
+ThreadLister::ThreadLister(pid_t pid) : pid_(pid), buffer_(4096) {
+ char task_directory_path[80];
+ internal_snprintf(task_directory_path, sizeof(task_directory_path),
+ "/proc/%d/task/", pid);
+ descriptor_ = internal_open(task_directory_path, O_RDONLY | O_DIRECTORY);
+ if (internal_iserror(descriptor_)) {
+ Report("Can't open /proc/%d/task for reading.\n", pid);
+ }
+}
+
+ThreadLister::Result ThreadLister::ListThreads(
+ InternalMmapVector<tid_t> *threads) {
+ if (internal_iserror(descriptor_))
+ return Error;
+ internal_lseek(descriptor_, 0, SEEK_SET);
+ threads->clear();
+
+ Result result = Ok;
+ for (bool first_read = true;; first_read = false) {
+ // Resize to max capacity if it was downsized by IsAlive.
+ buffer_.resize(buffer_.capacity());
+ CHECK_GE(buffer_.size(), 4096);
+ uptr read = internal_getdents(
+ descriptor_, (struct linux_dirent *)buffer_.data(), buffer_.size());
+ if (!read)
+ return result;
+ if (internal_iserror(read)) {
+ Report("Can't read directory entries from /proc/%d/task.\n", pid_);
+ return Error;
+ }
+
+ for (uptr begin = (uptr)buffer_.data(), end = begin + read; begin < end;) {
+ struct linux_dirent *entry = (struct linux_dirent *)begin;
+ begin += entry->d_reclen;
+ if (entry->d_ino == 1) {
+        // Inode 1 is for bad blocks and is also emitted when the kernel tried
+        // to output a terminating thread; either can cause an early return.
+        // See the proc_task_readdir implementation in Linux.
+ result = Incomplete;
+ }
+ if (entry->d_ino && *entry->d_name >= '0' && *entry->d_name <= '9')
+ threads->push_back(internal_atoll(entry->d_name));
+ }
+
+    // Now detect a short read or an early EOF. In such cases Linux can
+    // return an inconsistent list that is missing alive threads.
+    // The code just remembers that the list may be incomplete but keeps
+    // reading to return as much as possible.
+    if (!first_read) {
+      // Needing more than one read means the first one was short by
+      // definition.
+ result = Incomplete;
+ } else if (read > buffer_.size() - 1024) {
+ // Read was close to the buffer size. So double the size and assume the
+ // worst.
+ buffer_.resize(buffer_.size() * 2);
+ result = Incomplete;
+ } else if (!threads->empty() && !IsAlive(threads->back())) {
+ // Maybe Linux early returned from read on terminated thread (!pid_alive)
+ // and failed to restore read position.
+ // See next_tid and proc_task_instantiate in Linux.
+ result = Incomplete;
+ }
+ }
+}
+
+bool ThreadLister::IsAlive(int tid) {
+ // /proc/%d/task/%d/status uses same call to detect alive threads as
+ // proc_task_readdir. See task_state implementation in Linux.
+ char path[80];
+ internal_snprintf(path, sizeof(path), "/proc/%d/task/%d/status", pid_, tid);
+ if (!ReadFileToVector(path, &buffer_) || buffer_.empty())
+ return false;
+ buffer_.push_back(0);
+ static const char kPrefix[] = "\nPPid:";
+ const char *field = internal_strstr(buffer_.data(), kPrefix);
+ if (!field)
+ return false;
+ field += internal_strlen(kPrefix);
+ return (int)internal_atoll(field) != 0;
+}
+
+ThreadLister::~ThreadLister() {
+ if (!internal_iserror(descriptor_))
+ internal_close(descriptor_);
+}
+#endif
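+// A usage sketch (illustrative only):
+//
+//   ThreadLister lister(internal_getpid());
+//   InternalMmapVector<tid_t> tids;
+//   if (lister.ListThreads(&tids) != ThreadLister::Error) {
+//     // 'tids' now holds the live thread ids; the result may be Incomplete.
+//   }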
+
+#if SANITIZER_WORDSIZE == 32
+// Take care of unusable kernel area in top gigabyte.
+static uptr GetKernelAreaSize() {
+#if SANITIZER_LINUX && !SANITIZER_X32
+ const uptr gbyte = 1UL << 30;
+
+  // First, check whether any writable segments (e.g. the stack) are mapped
+  // in the top gigabyte.
+ MemoryMappingLayout proc_maps(/*cache_enabled*/true);
+ if (proc_maps.Error())
+ return 0;
+ MemoryMappedSegment segment;
+ while (proc_maps.Next(&segment)) {
+ if ((segment.end >= 3 * gbyte) && segment.IsWritable()) return 0;
+ }
+
+#if !SANITIZER_ANDROID
+  // Even if nothing is mapped, the top gigabyte may still be accessible
+  // if we are running on a 64-bit kernel.
+  // Uname may report misleading results if the personality type
+  // is modified (e.g. under schroot), so check this as well.
+ struct utsname uname_info;
+ int pers = personality(0xffffffffUL);
+ if (!(pers & PER_MASK)
+ && uname(&uname_info) == 0
+ && internal_strstr(uname_info.machine, "64"))
+ return 0;
+#endif // !SANITIZER_ANDROID
+
+ // Top gigabyte is reserved for kernel.
+ return gbyte;
+#else
+ return 0;
+#endif // SANITIZER_LINUX && !SANITIZER_X32
+}
+#endif // SANITIZER_WORDSIZE == 32
+
+uptr GetMaxVirtualAddress() {
+#if (SANITIZER_NETBSD || SANITIZER_OPENBSD) && defined(__x86_64__)
+ return 0x7f7ffffff000ULL; // (0x00007f8000000000 - PAGE_SIZE)
+#elif SANITIZER_WORDSIZE == 64
+# if defined(__powerpc64__) || defined(__aarch64__)
+ // On PowerPC64 we have two different address space layouts: 44- and 46-bit.
+ // We somehow need to figure out which one we are using now and choose
+ // one of 0x00000fffffffffffUL and 0x00003fffffffffffUL.
+ // Note that with 'ulimit -s unlimited' the stack is moved away from the top
+ // of the address space, so simply checking the stack address is not enough.
+ // This should (does) work for both PowerPC64 Endian modes.
+ // Similarly, aarch64 has multiple address space layouts: 39, 42 and 47-bit.
+ return (1ULL << (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1)) - 1;
+# elif defined(__mips64)
+ return (1ULL << 40) - 1; // 0x000000ffffffffffUL;
+# elif defined(__s390x__)
+ return (1ULL << 53) - 1; // 0x001fffffffffffffUL;
+#elif defined(__sparc__)
+ return ~(uptr)0;
+# else
+ return (1ULL << 47) - 1; // 0x00007fffffffffffUL;
+# endif
+#else // SANITIZER_WORDSIZE == 32
+# if defined(__s390__)
+ return (1ULL << 31) - 1; // 0x7fffffff;
+# else
+ return (1ULL << 32) - 1; // 0xffffffff;
+# endif
+#endif // SANITIZER_WORDSIZE
+}
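+// Worked example for the frame-address trick above (illustrative): under a
+// 48-bit aarch64 layout the current frame lives near the top of the 47-bit
+// user half, its most significant set bit is at index 46, and the function
+// returns (1ULL << 47) - 1 == 0x00007fffffffffff.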
+
+uptr GetMaxUserVirtualAddress() {
+ uptr addr = GetMaxVirtualAddress();
+#if SANITIZER_WORDSIZE == 32 && !defined(__s390__)
+ if (!common_flags()->full_address_space)
+ addr -= GetKernelAreaSize();
+ CHECK_LT(reinterpret_cast<uptr>(&addr), addr);
+#endif
+ return addr;
+}
+
+#if !SANITIZER_ANDROID
+uptr GetPageSize() {
+#if SANITIZER_LINUX && (defined(__x86_64__) || defined(__i386__))
+ return EXEC_PAGESIZE;
+#elif SANITIZER_USE_GETAUXVAL
+ return getauxval(AT_PAGESZ);
+#elif SANITIZER_FREEBSD || SANITIZER_NETBSD
+  // Use sysctl as sysconf can trigger interceptors internally.
+ int pz = 0;
+ uptr pzl = sizeof(pz);
+ int mib[2] = {CTL_HW, HW_PAGESIZE};
+ int rv = internal_sysctl(mib, 2, &pz, &pzl, nullptr, 0);
+ CHECK_EQ(rv, 0);
+ return (uptr)pz;
+#else
+ return sysconf(_SC_PAGESIZE); // EXEC_PAGESIZE may not be trustworthy.
+#endif
+}
+#endif // !SANITIZER_ANDROID
+
+#if !SANITIZER_OPENBSD
+uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) {
+#if SANITIZER_SOLARIS
+ const char *default_module_name = getexecname();
+ CHECK_NE(default_module_name, NULL);
+ return internal_snprintf(buf, buf_len, "%s", default_module_name);
+#else
+#if SANITIZER_FREEBSD || SANITIZER_NETBSD
+#if SANITIZER_FREEBSD
+ const int Mib[4] = {CTL_KERN, KERN_PROC, KERN_PROC_PATHNAME, -1};
+#else
+ const int Mib[4] = {CTL_KERN, KERN_PROC_ARGS, -1, KERN_PROC_PATHNAME};
+#endif
+ const char *default_module_name = "kern.proc.pathname";
+ uptr Size = buf_len;
+ bool IsErr =
+ (internal_sysctl(Mib, ARRAY_SIZE(Mib), buf, &Size, NULL, 0) != 0);
+ int readlink_error = IsErr ? errno : 0;
+ uptr module_name_len = Size;
+#else
+ const char *default_module_name = "/proc/self/exe";
+ uptr module_name_len = internal_readlink(
+ default_module_name, buf, buf_len);
+ int readlink_error;
+ bool IsErr = internal_iserror(module_name_len, &readlink_error);
+#endif // SANITIZER_SOLARIS
+ if (IsErr) {
+    // We can't read the binary name for some reason; assume it's unknown.
+ Report("WARNING: reading executable name failed with errno %d, "
+ "some stack frames may not be symbolized\n", readlink_error);
+ module_name_len = internal_snprintf(buf, buf_len, "%s",
+ default_module_name);
+ CHECK_LT(module_name_len, buf_len);
+ }
+ return module_name_len;
+#endif
+}
+#endif // !SANITIZER_OPENBSD
+
+uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len) {
+#if SANITIZER_LINUX
+ char *tmpbuf;
+ uptr tmpsize;
+ uptr tmplen;
+ if (ReadFileToBuffer("/proc/self/cmdline", &tmpbuf, &tmpsize, &tmplen,
+ 1024 * 1024)) {
+ internal_strncpy(buf, tmpbuf, buf_len);
+ UnmapOrDie(tmpbuf, tmpsize);
+ return internal_strlen(buf);
+ }
+#endif
+ return ReadBinaryName(buf, buf_len);
+}
+
+// Match full names of the form /path/to/base_name{-,.}*
+bool LibraryNameIs(const char *full_name, const char *base_name) {
+ const char *name = full_name;
+ // Strip path.
+ while (*name != '\0') name++;
+ while (name > full_name && *name != '/') name--;
+ if (*name == '/') name++;
+ uptr base_name_length = internal_strlen(base_name);
+ if (internal_strncmp(name, base_name, base_name_length)) return false;
+ return (name[base_name_length] == '-' || name[base_name_length] == '.');
+}
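+// For example (illustrative): LibraryNameIs("/usr/lib/libfoo-1.2.so",
+// "libfoo") and LibraryNameIs("/usr/lib/libfoo.so", "libfoo") both match,
+// while LibraryNameIs("/usr/lib/libfoobar.so", "libfoo") does not.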
+
+#if !SANITIZER_ANDROID
+// Call cb for each region mapped by map.
+void ForEachMappedRegion(link_map *map, void (*cb)(const void *, uptr)) {
+ CHECK_NE(map, nullptr);
+#if !SANITIZER_FREEBSD && !SANITIZER_OPENBSD
+ typedef ElfW(Phdr) Elf_Phdr;
+ typedef ElfW(Ehdr) Elf_Ehdr;
+#endif // !SANITIZER_FREEBSD && !SANITIZER_OPENBSD
+ char *base = (char *)map->l_addr;
+ Elf_Ehdr *ehdr = (Elf_Ehdr *)base;
+ char *phdrs = base + ehdr->e_phoff;
+ char *phdrs_end = phdrs + ehdr->e_phnum * ehdr->e_phentsize;
+
+ // Find the segment with the minimum base so we can "relocate" the p_vaddr
+ // fields. Typically ET_DYN objects (DSOs) have base of zero and ET_EXEC
+ // objects have a non-zero base.
+ uptr preferred_base = (uptr)-1;
+ for (char *iter = phdrs; iter != phdrs_end; iter += ehdr->e_phentsize) {
+ Elf_Phdr *phdr = (Elf_Phdr *)iter;
+ if (phdr->p_type == PT_LOAD && preferred_base > (uptr)phdr->p_vaddr)
+ preferred_base = (uptr)phdr->p_vaddr;
+ }
+
+ // Compute the delta from the real base to get a relocation delta.
+ sptr delta = (uptr)base - preferred_base;
+ // Now we can figure out what the loader really mapped.
+ for (char *iter = phdrs; iter != phdrs_end; iter += ehdr->e_phentsize) {
+ Elf_Phdr *phdr = (Elf_Phdr *)iter;
+ if (phdr->p_type == PT_LOAD) {
+ uptr seg_start = phdr->p_vaddr + delta;
+ uptr seg_end = seg_start + phdr->p_memsz;
+ // None of these values are aligned. We consider the ragged edges of the
+ // load command as defined, since they are mapped from the file.
+ seg_start = RoundDownTo(seg_start, GetPageSizeCached());
+ seg_end = RoundUpTo(seg_end, GetPageSizeCached());
+ cb((void *)seg_start, seg_end - seg_start);
+ }
+ }
+}
+#endif
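+// A usage sketch (illustrative only; the callback name is hypothetical):
+//
+//   static void PrintRegion(const void *p, uptr size) {
+//     Printf("mapped %p, %zu bytes\n", p, size);
+//   }
+//   ...
+//   ForEachMappedRegion(map, PrintRegion);  // 'map' is a link_map *.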
+
+#if defined(__x86_64__) && SANITIZER_LINUX
+// We cannot use glibc's clone wrapper, because it messes with the child
+// task's TLS. It writes the PID and TID of the child task to its thread
+// descriptor, but in our case the child task shares the thread descriptor with
+// the parent (because we don't know how to allocate a new thread
+// descriptor to keep glibc happy). So the stock version of clone(), when
+// used with CLONE_VM, would end up corrupting the parent's thread descriptor.
+uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
+ int *parent_tidptr, void *newtls, int *child_tidptr) {
+ long long res;
+ if (!fn || !child_stack)
+ return -EINVAL;
+ CHECK_EQ(0, (uptr)child_stack % 16);
+ child_stack = (char *)child_stack - 2 * sizeof(unsigned long long);
+ ((unsigned long long *)child_stack)[0] = (uptr)fn;
+ ((unsigned long long *)child_stack)[1] = (uptr)arg;
+ register void *r8 __asm__("r8") = newtls;
+ register int *r10 __asm__("r10") = child_tidptr;
+ __asm__ __volatile__(
+ /* %rax = syscall(%rax = SYSCALL(clone),
+ * %rdi = flags,
+ * %rsi = child_stack,
+ * %rdx = parent_tidptr,
+ * %r8 = new_tls,
+ * %r10 = child_tidptr)
+ */
+ "syscall\n"
+
+ /* if (%rax != 0)
+ * return;
+ */
+ "testq %%rax,%%rax\n"
+ "jnz 1f\n"
+
+ /* In the child. Terminate unwind chain. */
+ // XXX: We should also terminate the CFI unwind chain
+ // here. Unfortunately clang 3.2 doesn't support the
+ // necessary CFI directives, so we skip that part.
+ "xorq %%rbp,%%rbp\n"
+
+ /* Call "fn(arg)". */
+ "popq %%rax\n"
+ "popq %%rdi\n"
+ "call *%%rax\n"
+
+ /* Call _exit(%rax). */
+ "movq %%rax,%%rdi\n"
+ "movq %2,%%rax\n"
+ "syscall\n"
+
+ /* Return to parent. */
+ "1:\n"
+ : "=a" (res)
+ : "a"(SYSCALL(clone)), "i"(SYSCALL(exit)),
+ "S"(child_stack),
+ "D"(flags),
+ "d"(parent_tidptr),
+ "r"(r8),
+ "r"(r10)
+ : "memory", "r11", "rcx");
+ return res;
+}
+#elif defined(__mips__)
+uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
+ int *parent_tidptr, void *newtls, int *child_tidptr) {
+ long long res;
+ if (!fn || !child_stack)
+ return -EINVAL;
+ CHECK_EQ(0, (uptr)child_stack % 16);
+ child_stack = (char *)child_stack - 2 * sizeof(unsigned long long);
+ ((unsigned long long *)child_stack)[0] = (uptr)fn;
+ ((unsigned long long *)child_stack)[1] = (uptr)arg;
+ register void *a3 __asm__("$7") = newtls;
+ register int *a4 __asm__("$8") = child_tidptr;
+  // We don't have proper CFI directives here because it requires a lot of
+  // code for very marginal benefit.
+ __asm__ __volatile__(
+ /* $v0 = syscall($v0 = __NR_clone,
+ * $a0 = flags,
+ * $a1 = child_stack,
+ * $a2 = parent_tidptr,
+ * $a3 = new_tls,
+ * $a4 = child_tidptr)
+ */
+ ".cprestore 16;\n"
+ "move $4,%1;\n"
+ "move $5,%2;\n"
+ "move $6,%3;\n"
+ "move $7,%4;\n"
+ /* Store the fifth argument on stack
+ * if we are using 32-bit abi.
+ */
+#if SANITIZER_WORDSIZE == 32
+ "lw %5,16($29);\n"
+#else
+ "move $8,%5;\n"
+#endif
+ "li $2,%6;\n"
+ "syscall;\n"
+
+ /* if ($v0 != 0)
+ * return;
+ */
+ "bnez $2,1f;\n"
+
+ /* Call "fn(arg)". */
+#if SANITIZER_WORDSIZE == 32
+#ifdef __BIG_ENDIAN__
+ "lw $25,4($29);\n"
+ "lw $4,12($29);\n"
+#else
+ "lw $25,0($29);\n"
+ "lw $4,8($29);\n"
+#endif
+#else
+ "ld $25,0($29);\n"
+ "ld $4,8($29);\n"
+#endif
+ "jal $25;\n"
+
+ /* Call _exit($v0). */
+ "move $4,$2;\n"
+ "li $2,%7;\n"
+ "syscall;\n"
+
+ /* Return to parent. */
+ "1:\n"
+ : "=r" (res)
+ : "r"(flags),
+ "r"(child_stack),
+ "r"(parent_tidptr),
+ "r"(a3),
+ "r"(a4),
+ "i"(__NR_clone),
+ "i"(__NR_exit)
+ : "memory", "$29" );
+ return res;
+}
+#elif defined(__aarch64__)
+uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
+ int *parent_tidptr, void *newtls, int *child_tidptr) {
+ long long res;
+ if (!fn || !child_stack)
+ return -EINVAL;
+ CHECK_EQ(0, (uptr)child_stack % 16);
+ child_stack = (char *)child_stack - 2 * sizeof(unsigned long long);
+ ((unsigned long long *)child_stack)[0] = (uptr)fn;
+ ((unsigned long long *)child_stack)[1] = (uptr)arg;
+
+ register int (*__fn)(void *) __asm__("x0") = fn;
+ register void *__stack __asm__("x1") = child_stack;
+ register int __flags __asm__("x2") = flags;
+ register void *__arg __asm__("x3") = arg;
+ register int *__ptid __asm__("x4") = parent_tidptr;
+ register void *__tls __asm__("x5") = newtls;
+ register int *__ctid __asm__("x6") = child_tidptr;
+
+ __asm__ __volatile__(
+ "mov x0,x2\n" /* flags */
+ "mov x2,x4\n" /* ptid */
+ "mov x3,x5\n" /* tls */
+ "mov x4,x6\n" /* ctid */
+ "mov x8,%9\n" /* clone */
+
+ "svc 0x0\n"
+
+ /* if (%r0 != 0)
+ * return %r0;
+ */
+ "cmp x0, #0\n"
+ "bne 1f\n"
+
+ /* In the child, now. Call "fn(arg)". */
+ "ldp x1, x0, [sp], #16\n"
+ "blr x1\n"
+
+ /* Call _exit(%r0). */
+ "mov x8, %10\n"
+ "svc 0x0\n"
+ "1:\n"
+
+ : "=r" (res)
+ : "i"(-EINVAL),
+ "r"(__fn), "r"(__stack), "r"(__flags), "r"(__arg),
+ "r"(__ptid), "r"(__tls), "r"(__ctid),
+ "i"(__NR_clone), "i"(__NR_exit)
+ : "x30", "memory");
+ return res;
+}
+#elif defined(__powerpc64__)
+uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
+ int *parent_tidptr, void *newtls, int *child_tidptr) {
+ long long res;
+// Stack frame structure.
+#if SANITIZER_PPC64V1
+// Back chain == 0 (SP + 112)
+// Frame (112 bytes):
+// Parameter save area (SP + 48), 8 doublewords
+// TOC save area (SP + 40)
+// Link editor doubleword (SP + 32)
+// Compiler doubleword (SP + 24)
+// LR save area (SP + 16)
+// CR save area (SP + 8)
+// Back chain (SP + 0)
+# define FRAME_SIZE 112
+# define FRAME_TOC_SAVE_OFFSET 40
+#elif SANITIZER_PPC64V2
+// Back chain == 0 (SP + 32)
+// Frame (32 bytes):
+// TOC save area (SP + 24)
+// LR save area (SP + 16)
+// CR save area (SP + 8)
+// Back chain (SP + 0)
+# define FRAME_SIZE 32
+# define FRAME_TOC_SAVE_OFFSET 24
+#else
+# error "Unsupported PPC64 ABI"
+#endif
+ if (!fn || !child_stack)
+ return -EINVAL;
+ CHECK_EQ(0, (uptr)child_stack % 16);
+
+ register int (*__fn)(void *) __asm__("r3") = fn;
+ register void *__cstack __asm__("r4") = child_stack;
+ register int __flags __asm__("r5") = flags;
+ register void *__arg __asm__("r6") = arg;
+ register int *__ptidptr __asm__("r7") = parent_tidptr;
+ register void *__newtls __asm__("r8") = newtls;
+ register int *__ctidptr __asm__("r9") = child_tidptr;
+
+ __asm__ __volatile__(
+ /* fn and arg are saved across the syscall */
+ "mr 28, %5\n\t"
+ "mr 27, %8\n\t"
+
+ /* syscall
+ r0 == __NR_clone
+ r3 == flags
+ r4 == child_stack
+ r5 == parent_tidptr
+ r6 == newtls
+ r7 == child_tidptr */
+ "mr 3, %7\n\t"
+ "mr 5, %9\n\t"
+ "mr 6, %10\n\t"
+ "mr 7, %11\n\t"
+ "li 0, %3\n\t"
+ "sc\n\t"
+
+ /* Test if syscall was successful */
+ "cmpdi cr1, 3, 0\n\t"
+ "crandc cr1*4+eq, cr1*4+eq, cr0*4+so\n\t"
+ "bne- cr1, 1f\n\t"
+
+ /* Set up stack frame */
+ "li 29, 0\n\t"
+ "stdu 29, -8(1)\n\t"
+ "stdu 1, -%12(1)\n\t"
+ /* Do the function call */
+ "std 2, %13(1)\n\t"
+#if SANITIZER_PPC64V1
+ "ld 0, 0(28)\n\t"
+ "ld 2, 8(28)\n\t"
+ "mtctr 0\n\t"
+#elif SANITIZER_PPC64V2
+ "mr 12, 28\n\t"
+ "mtctr 12\n\t"
+#else
+# error "Unsupported PPC64 ABI"
+#endif
+ "mr 3, 27\n\t"
+ "bctrl\n\t"
+ "ld 2, %13(1)\n\t"
+
+ /* Call _exit(r3) */
+ "li 0, %4\n\t"
+ "sc\n\t"
+
+ /* Return to parent */
+ "1:\n\t"
+ "mr %0, 3\n\t"
+ : "=r" (res)
+ : "0" (-1),
+ "i" (EINVAL),
+ "i" (__NR_clone),
+ "i" (__NR_exit),
+ "r" (__fn),
+ "r" (__cstack),
+ "r" (__flags),
+ "r" (__arg),
+ "r" (__ptidptr),
+ "r" (__newtls),
+ "r" (__ctidptr),
+ "i" (FRAME_SIZE),
+ "i" (FRAME_TOC_SAVE_OFFSET)
+ : "cr0", "cr1", "memory", "ctr", "r0", "r27", "r28", "r29");
+ return res;
+}
+#elif defined(__i386__) && SANITIZER_LINUX
+uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
+ int *parent_tidptr, void *newtls, int *child_tidptr) {
+ int res;
+ if (!fn || !child_stack)
+ return -EINVAL;
+ CHECK_EQ(0, (uptr)child_stack % 16);
+ child_stack = (char *)child_stack - 7 * sizeof(unsigned int);
+ ((unsigned int *)child_stack)[0] = (uptr)flags;
+ ((unsigned int *)child_stack)[1] = (uptr)0;
+ ((unsigned int *)child_stack)[2] = (uptr)fn;
+ ((unsigned int *)child_stack)[3] = (uptr)arg;
+ __asm__ __volatile__(
+ /* %eax = syscall(%eax = SYSCALL(clone),
+ * %ebx = flags,
+ * %ecx = child_stack,
+ * %edx = parent_tidptr,
+ * %esi = new_tls,
+ * %edi = child_tidptr)
+ */
+
+ /* Obtain flags */
+ "movl (%%ecx), %%ebx\n"
+ /* Do the system call */
+ "pushl %%ebx\n"
+ "pushl %%esi\n"
+ "pushl %%edi\n"
+ /* Remember the flag value. */
+ "movl %%ebx, (%%ecx)\n"
+ "int $0x80\n"
+ "popl %%edi\n"
+ "popl %%esi\n"
+ "popl %%ebx\n"
+
+ /* if (%eax != 0)
+ * return;
+ */
+
+ "test %%eax,%%eax\n"
+ "jnz 1f\n"
+
+ /* terminate the stack frame */
+ "xorl %%ebp,%%ebp\n"
+ /* Call FN. */
+ "call *%%ebx\n"
+#ifdef PIC
+ "call here\n"
+ "here:\n"
+ "popl %%ebx\n"
+ "addl $_GLOBAL_OFFSET_TABLE_+[.-here], %%ebx\n"
+#endif
+ /* Call exit */
+ "movl %%eax, %%ebx\n"
+ "movl %2, %%eax\n"
+ "int $0x80\n"
+ "1:\n"
+ : "=a" (res)
+ : "a"(SYSCALL(clone)), "i"(SYSCALL(exit)),
+ "c"(child_stack),
+ "d"(parent_tidptr),
+ "S"(newtls),
+ "D"(child_tidptr)
+ : "memory");
+ return res;
+}
+#elif defined(__arm__) && SANITIZER_LINUX
+uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
+ int *parent_tidptr, void *newtls, int *child_tidptr) {
+ unsigned int res;
+ if (!fn || !child_stack)
+ return -EINVAL;
+ child_stack = (char *)child_stack - 2 * sizeof(unsigned int);
+ ((unsigned int *)child_stack)[0] = (uptr)fn;
+ ((unsigned int *)child_stack)[1] = (uptr)arg;
+ register int r0 __asm__("r0") = flags;
+ register void *r1 __asm__("r1") = child_stack;
+ register int *r2 __asm__("r2") = parent_tidptr;
+ register void *r3 __asm__("r3") = newtls;
+ register int *r4 __asm__("r4") = child_tidptr;
+ register int r7 __asm__("r7") = __NR_clone;
+
+#if __ARM_ARCH > 4 || defined (__ARM_ARCH_4T__)
+# define ARCH_HAS_BX
+#endif
+#if __ARM_ARCH > 4
+# define ARCH_HAS_BLX
+#endif
+
+#ifdef ARCH_HAS_BX
+# ifdef ARCH_HAS_BLX
+# define BLX(R) "blx " #R "\n"
+# else
+# define BLX(R) "mov lr, pc; bx " #R "\n"
+# endif
+#else
+# define BLX(R) "mov lr, pc; mov pc," #R "\n"
+#endif
+
+ __asm__ __volatile__(
+ /* %r0 = syscall(%r7 = SYSCALL(clone),
+ * %r0 = flags,
+ * %r1 = child_stack,
+ * %r2 = parent_tidptr,
+ * %r3 = new_tls,
+ * %r4 = child_tidptr)
+ */
+
+ /* Do the system call */
+ "swi 0x0\n"
+
+ /* if (%r0 != 0)
+ * return %r0;
+ */
+ "cmp r0, #0\n"
+ "bne 1f\n"
+
+ /* In the child, now. Call "fn(arg)". */
+ "ldr r0, [sp, #4]\n"
+ "ldr ip, [sp], #8\n"
+ BLX(ip)
+ /* Call _exit(%r0). */
+ "mov r7, %7\n"
+ "swi 0x0\n"
+ "1:\n"
+ "mov %0, r0\n"
+ : "=r"(res)
+ : "r"(r0), "r"(r1), "r"(r2), "r"(r3), "r"(r4), "r"(r7),
+ "i"(__NR_exit)
+ : "memory");
+ return res;
+}
+#endif // defined(__x86_64__) && SANITIZER_LINUX
+
+#if SANITIZER_ANDROID
+#if __ANDROID_API__ < 21
+extern "C" __attribute__((weak)) int dl_iterate_phdr(
+ int (*)(struct dl_phdr_info *, size_t, void *), void *);
+#endif
+
+static int dl_iterate_phdr_test_cb(struct dl_phdr_info *info, size_t size,
+ void *data) {
+ // Any name starting with "lib" indicates a bug in L where library base names
+ // are returned instead of paths.
+ if (info->dlpi_name && info->dlpi_name[0] == 'l' &&
+ info->dlpi_name[1] == 'i' && info->dlpi_name[2] == 'b') {
+ *(bool *)data = true;
+ return 1;
+ }
+ return 0;
+}
+
+static atomic_uint32_t android_api_level;
+
+static AndroidApiLevel AndroidDetectApiLevelStatic() {
+#if __ANDROID_API__ <= 19
+ return ANDROID_KITKAT;
+#elif __ANDROID_API__ <= 22
+ return ANDROID_LOLLIPOP_MR1;
+#else
+ return ANDROID_POST_LOLLIPOP;
+#endif
+}
+
+static AndroidApiLevel AndroidDetectApiLevel() {
+ if (!&dl_iterate_phdr)
+ return ANDROID_KITKAT; // K or lower
+ bool base_name_seen = false;
+ dl_iterate_phdr(dl_iterate_phdr_test_cb, &base_name_seen);
+ if (base_name_seen)
+ return ANDROID_LOLLIPOP_MR1; // L MR1
+ return ANDROID_POST_LOLLIPOP; // post-L
+ // Plain L (API level 21) is completely broken wrt ASan and not very
+ // interesting to detect.
+}
+
+extern "C" __attribute__((weak)) void* _DYNAMIC;
+
+AndroidApiLevel AndroidGetApiLevel() {
+ AndroidApiLevel level =
+ (AndroidApiLevel)atomic_load(&android_api_level, memory_order_relaxed);
+ if (level) return level;
+ level = &_DYNAMIC == nullptr ? AndroidDetectApiLevelStatic()
+ : AndroidDetectApiLevel();
+ atomic_store(&android_api_level, level, memory_order_relaxed);
+ return level;
+}
+
+#endif
+
+static HandleSignalMode GetHandleSignalModeImpl(int signum) {
+ switch (signum) {
+ case SIGABRT:
+ return common_flags()->handle_abort;
+ case SIGILL:
+ return common_flags()->handle_sigill;
+ case SIGTRAP:
+ return common_flags()->handle_sigtrap;
+ case SIGFPE:
+ return common_flags()->handle_sigfpe;
+ case SIGSEGV:
+ return common_flags()->handle_segv;
+ case SIGBUS:
+ return common_flags()->handle_sigbus;
+ }
+ return kHandleSignalNo;
+}
+
+HandleSignalMode GetHandleSignalMode(int signum) {
+ HandleSignalMode result = GetHandleSignalModeImpl(signum);
+ if (result == kHandleSignalYes && !common_flags()->allow_user_segv_handler)
+ return kHandleSignalExclusive;
+ return result;
+}
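+// For example (illustrative): with handle_segv=1 and
+// allow_user_segv_handler=0, GetHandleSignalMode(SIGSEGV) upgrades
+// kHandleSignalYes to kHandleSignalExclusive, so user handlers cannot
+// replace the sanitizer's handler.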
+
+#if !SANITIZER_GO
+void *internal_start_thread(void(*func)(void *arg), void *arg) {
+ // Start the thread with signals blocked, otherwise it can steal user signals.
+ __sanitizer_sigset_t set, old;
+ internal_sigfillset(&set);
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+  // Glibc uses the SIGSETXID signal (number 33) during setuid calls. If this
+  // signal is blocked on any thread, the setuid call hangs (see
+  // test/tsan/setuid.c).
+ internal_sigdelset(&set, 33);
+#endif
+ internal_sigprocmask(SIG_SETMASK, &set, &old);
+ void *th;
+ real_pthread_create(&th, nullptr, (void*(*)(void *arg))func, arg);
+ internal_sigprocmask(SIG_SETMASK, &old, nullptr);
+ return th;
+}
+
+void internal_join_thread(void *th) {
+ real_pthread_join(th, nullptr);
+}
+#else
+void *internal_start_thread(void (*func)(void *), void *arg) { return 0; }
+
+void internal_join_thread(void *th) {}
+#endif
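+// A usage sketch (illustrative only; the thread body is hypothetical):
+//
+//   static void BgWork(void *arg) { /* background work */ }
+//   ...
+//   void *th = internal_start_thread(BgWork, nullptr);
+//   internal_join_thread(th);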
+
+#if defined(__aarch64__)
+// Android headers in the older NDK releases miss this definition.
+struct __sanitizer_esr_context {
+ struct _aarch64_ctx head;
+ uint64_t esr;
+};
+
+static bool Aarch64GetESR(ucontext_t *ucontext, u64 *esr) {
+ static const u32 kEsrMagic = 0x45535201;
+ u8 *aux = ucontext->uc_mcontext.__reserved;
+ while (true) {
+ _aarch64_ctx *ctx = (_aarch64_ctx *)aux;
+ if (ctx->size == 0) break;
+ if (ctx->magic == kEsrMagic) {
+ *esr = ((__sanitizer_esr_context *)ctx)->esr;
+ return true;
+ }
+ aux += ctx->size;
+ }
+ return false;
+}
+#endif
+
+#if SANITIZER_OPENBSD
+using Context = sigcontext;
+#else
+using Context = ucontext_t;
+#endif
+
+SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
+ Context *ucontext = (Context *)context;
+#if defined(__x86_64__) || defined(__i386__)
+ static const uptr PF_WRITE = 1U << 1;
+#if SANITIZER_FREEBSD
+ uptr err = ucontext->uc_mcontext.mc_err;
+#elif SANITIZER_NETBSD
+ uptr err = ucontext->uc_mcontext.__gregs[_REG_ERR];
+#elif SANITIZER_OPENBSD
+ uptr err = ucontext->sc_err;
+#elif SANITIZER_SOLARIS && defined(__i386__)
+ const int Err = 13;
+ uptr err = ucontext->uc_mcontext.gregs[Err];
+#else
+ uptr err = ucontext->uc_mcontext.gregs[REG_ERR];
+#endif // SANITIZER_FREEBSD
+ return err & PF_WRITE ? WRITE : READ;
+#elif defined(__mips__)
+ uint32_t *exception_source;
+ uint32_t faulty_instruction;
+ uint32_t op_code;
+
+ exception_source = (uint32_t *)ucontext->uc_mcontext.pc;
+ faulty_instruction = (uint32_t)(*exception_source);
+
+ op_code = (faulty_instruction >> 26) & 0x3f;
+
+ // FIXME: Add support for FPU, microMIPS, DSP, MSA memory instructions.
+ switch (op_code) {
+ case 0x28: // sb
+ case 0x29: // sh
+ case 0x2b: // sw
+ case 0x3f: // sd
+#if __mips_isa_rev < 6
+ case 0x2c: // sdl
+ case 0x2d: // sdr
+ case 0x2a: // swl
+ case 0x2e: // swr
+#endif
+ return SignalContext::WRITE;
+
+ case 0x20: // lb
+ case 0x24: // lbu
+ case 0x21: // lh
+ case 0x25: // lhu
+ case 0x23: // lw
+ case 0x27: // lwu
+ case 0x37: // ld
+#if __mips_isa_rev < 6
+ case 0x1a: // ldl
+ case 0x1b: // ldr
+ case 0x22: // lwl
+ case 0x26: // lwr
+#endif
+ return SignalContext::READ;
+#if __mips_isa_rev == 6
+ case 0x3b: // pcrel
+ op_code = (faulty_instruction >> 19) & 0x3;
+ switch (op_code) {
+ case 0x1: // lwpc
+ case 0x2: // lwupc
+ return SignalContext::READ;
+ }
+#endif
+ }
+ return SignalContext::UNKNOWN;
+#elif defined(__arm__)
+ static const uptr FSR_WRITE = 1U << 11;
+ uptr fsr = ucontext->uc_mcontext.error_code;
+ return fsr & FSR_WRITE ? WRITE : READ;
+#elif defined(__aarch64__)
+ static const u64 ESR_ELx_WNR = 1U << 6;
+ u64 esr;
+ if (!Aarch64GetESR(ucontext, &esr)) return UNKNOWN;
+ return esr & ESR_ELx_WNR ? WRITE : READ;
+#elif defined(__sparc__)
+ // Decode the instruction to determine the access type.
+ // From OpenSolaris $SRC/uts/sun4/os/trap.c (get_accesstype).
+#if SANITIZER_SOLARIS
+ uptr pc = ucontext->uc_mcontext.gregs[REG_PC];
+#else
+ // Historical BSDism here.
+ struct sigcontext *scontext = (struct sigcontext *)context;
+#if defined(__arch64__)
+ uptr pc = scontext->sigc_regs.tpc;
+#else
+ uptr pc = scontext->si_regs.pc;
+#endif
+#endif
+ u32 instr = *(u32 *)pc;
+ return (instr >> 21) & 1 ? WRITE: READ;
+#else
+ (void)ucontext;
+ return UNKNOWN; // FIXME: Implement.
+#endif
+}
+
+void SignalContext::DumpAllRegisters(void *context) {
+ // FIXME: Implement this.
+}
+
+static void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
+#if SANITIZER_NETBSD
+  // This covers all NetBSD architectures.
+ ucontext_t *ucontext = (ucontext_t *)context;
+ *pc = _UC_MACHINE_PC(ucontext);
+ *bp = _UC_MACHINE_FP(ucontext);
+ *sp = _UC_MACHINE_SP(ucontext);
+#elif defined(__arm__)
+ ucontext_t *ucontext = (ucontext_t*)context;
+ *pc = ucontext->uc_mcontext.arm_pc;
+ *bp = ucontext->uc_mcontext.arm_fp;
+ *sp = ucontext->uc_mcontext.arm_sp;
+#elif defined(__aarch64__)
+ ucontext_t *ucontext = (ucontext_t*)context;
+ *pc = ucontext->uc_mcontext.pc;
+ *bp = ucontext->uc_mcontext.regs[29];
+ *sp = ucontext->uc_mcontext.sp;
+#elif defined(__hppa__)
+ ucontext_t *ucontext = (ucontext_t*)context;
+ *pc = ucontext->uc_mcontext.sc_iaoq[0];
+ /* GCC uses %r3 whenever a frame pointer is needed. */
+ *bp = ucontext->uc_mcontext.sc_gr[3];
+ *sp = ucontext->uc_mcontext.sc_gr[30];
+#elif defined(__x86_64__)
+# if SANITIZER_FREEBSD
+ ucontext_t *ucontext = (ucontext_t*)context;
+ *pc = ucontext->uc_mcontext.mc_rip;
+ *bp = ucontext->uc_mcontext.mc_rbp;
+ *sp = ucontext->uc_mcontext.mc_rsp;
+#elif SANITIZER_OPENBSD
+ sigcontext *ucontext = (sigcontext *)context;
+ *pc = ucontext->sc_rip;
+ *bp = ucontext->sc_rbp;
+ *sp = ucontext->sc_rsp;
+# else
+ ucontext_t *ucontext = (ucontext_t*)context;
+ *pc = ucontext->uc_mcontext.gregs[REG_RIP];
+ *bp = ucontext->uc_mcontext.gregs[REG_RBP];
+ *sp = ucontext->uc_mcontext.gregs[REG_RSP];
+# endif
+#elif defined(__i386__)
+# if SANITIZER_FREEBSD
+ ucontext_t *ucontext = (ucontext_t*)context;
+ *pc = ucontext->uc_mcontext.mc_eip;
+ *bp = ucontext->uc_mcontext.mc_ebp;
+ *sp = ucontext->uc_mcontext.mc_esp;
+#elif SANITIZER_OPENBSD
+ sigcontext *ucontext = (sigcontext *)context;
+ *pc = ucontext->sc_eip;
+ *bp = ucontext->sc_ebp;
+ *sp = ucontext->sc_esp;
+# else
+ ucontext_t *ucontext = (ucontext_t*)context;
+# if SANITIZER_SOLARIS
+ /* Use the numeric values: the symbolic ones are undefined by llvm
+ include/llvm/Support/Solaris.h. */
+# ifndef REG_EIP
+# define REG_EIP 14 // REG_PC
+# endif
+# ifndef REG_EBP
+# define REG_EBP 6 // REG_FP
+# endif
+# ifndef REG_ESP
+# define REG_ESP 17 // REG_SP
+# endif
+# endif
+ *pc = ucontext->uc_mcontext.gregs[REG_EIP];
+ *bp = ucontext->uc_mcontext.gregs[REG_EBP];
+ *sp = ucontext->uc_mcontext.gregs[REG_ESP];
+# endif
+#elif defined(__powerpc__) || defined(__powerpc64__)
+ ucontext_t *ucontext = (ucontext_t*)context;
+ *pc = ucontext->uc_mcontext.regs->nip;
+ *sp = ucontext->uc_mcontext.regs->gpr[PT_R1];
+ // The powerpc{,64}-linux ABIs do not specify r31 as the frame
+ // pointer, but GCC always uses r31 when we need a frame pointer.
+ *bp = ucontext->uc_mcontext.regs->gpr[PT_R31];
+#elif defined(__sparc__)
+#if defined(__arch64__) || defined(__sparcv9)
+#define STACK_BIAS 2047
+#else
+#define STACK_BIAS 0
+# endif
+# if SANITIZER_SOLARIS
+ ucontext_t *ucontext = (ucontext_t *)context;
+ *pc = ucontext->uc_mcontext.gregs[REG_PC];
+ *sp = ucontext->uc_mcontext.gregs[REG_O6] + STACK_BIAS;
+#else
+ // Historical BSDism here.
+ struct sigcontext *scontext = (struct sigcontext *)context;
+#if defined(__arch64__)
+ *pc = scontext->sigc_regs.tpc;
+ *sp = scontext->sigc_regs.u_regs[14] + STACK_BIAS;
+#else
+ *pc = scontext->si_regs.pc;
+ *sp = scontext->si_regs.u_regs[14];
+#endif
+# endif
+ *bp = (uptr)((uhwptr *)*sp)[14] + STACK_BIAS;
+#elif defined(__mips__)
+ ucontext_t *ucontext = (ucontext_t*)context;
+ *pc = ucontext->uc_mcontext.pc;
+ *bp = ucontext->uc_mcontext.gregs[30];
+ *sp = ucontext->uc_mcontext.gregs[29];
+#elif defined(__s390__)
+ ucontext_t *ucontext = (ucontext_t*)context;
+# if defined(__s390x__)
+ *pc = ucontext->uc_mcontext.psw.addr;
+# else
+ *pc = ucontext->uc_mcontext.psw.addr & 0x7fffffff;
+# endif
+ *bp = ucontext->uc_mcontext.gregs[11];
+ *sp = ucontext->uc_mcontext.gregs[15];
+#else
+# error "Unsupported arch"
+#endif
+}
+
+void SignalContext::InitPcSpBp() { GetPcSpBp(context, &pc, &sp, &bp); }
+
+void InitializePlatformEarly() {
+ // Do nothing.
+}
+
+void MaybeReexec() {
+ // No need to re-exec on Linux.
+}
+
+void CheckASLR() {
+#if SANITIZER_NETBSD
+ int mib[3];
+ int paxflags;
+ uptr len = sizeof(paxflags);
+
+ mib[0] = CTL_PROC;
+ mib[1] = internal_getpid();
+ mib[2] = PROC_PID_PAXFLAGS;
+
+ if (UNLIKELY(internal_sysctl(mib, 3, &paxflags, &len, NULL, 0) == -1)) {
+ Printf("sysctl failed\n");
+ Die();
+ }
+
+ if (UNLIKELY(paxflags & CTL_PROC_PAXFLAGS_ASLR)) {
+ Printf("This sanitizer is not compatible with enabled ASLR\n");
+ Die();
+ }
+#elif SANITIZER_PPC64V2
+ // Disable ASLR for Linux PPC64LE.
+ int old_personality = personality(0xffffffff);
+ if (old_personality != -1 && (old_personality & ADDR_NO_RANDOMIZE) == 0) {
+ VReport(1, "WARNING: Program is being run with address space layout "
+ "randomization (ASLR) enabled which prevents the thread and "
+ "memory sanitizers from working on powerpc64le.\n"
+ "ASLR will be disabled and the program re-executed.\n");
+ CHECK_NE(personality(old_personality | ADDR_NO_RANDOMIZE), -1);
+ ReExec();
+ }
+#elif SANITIZER_FREEBSD
+ int aslr_pie;
+ uptr len = sizeof(aslr_pie);
+#if SANITIZER_WORDSIZE == 64
+ if (UNLIKELY(internal_sysctlbyname("kern.elf64.aslr.pie_enable",
+ &aslr_pie, &len, NULL, 0) == -1)) {
+    // Be quiet here: the OID is not necessarily present on every FreeBSD
+    // release yet, so treat its absence as non-fatal.
+ return;
+ }
+
+ if (aslr_pie > 0) {
+ Printf("This sanitizer is not compatible with enabled ASLR "
+ "and binaries compiled with PIE\n");
+ Die();
+ }
+#endif
+  // There may also be a 32-bit compat layer on a 64-bit kernel.
+ if (UNLIKELY(internal_sysctlbyname("kern.elf32.aslr.pie_enable",
+ &aslr_pie, &len, NULL, 0) == -1)) {
+ return;
+ }
+
+ if (aslr_pie > 0) {
+ Printf("This sanitizer is not compatible with enabled ASLR "
+ "and binaries compiled with PIE\n");
+ Die();
+ }
+#else
+ // Do nothing
+#endif
+}
+
+void CheckMPROTECT() {
+#if SANITIZER_NETBSD
+ int mib[3];
+ int paxflags;
+ uptr len = sizeof(paxflags);
+
+ mib[0] = CTL_PROC;
+ mib[1] = internal_getpid();
+ mib[2] = PROC_PID_PAXFLAGS;
+
+ if (UNLIKELY(internal_sysctl(mib, 3, &paxflags, &len, NULL, 0) == -1)) {
+ Printf("sysctl failed\n");
+ Die();
+ }
+
+ if (UNLIKELY(paxflags & CTL_PROC_PAXFLAGS_MPROTECT)) {
+ Printf("This sanitizer is not compatible with enabled MPROTECT\n");
+ Die();
+ }
+#else
+ // Do nothing
+#endif
+}
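+
+// A minimal standalone sketch (assumptions: NetBSD headers; not part of
+// upstream) of the PAX-flags query that both CheckASLR and CheckMPROTECT
+// perform above:
+//
+//   #include <sys/param.h>
+//   #include <sys/sysctl.h>
+//   #include <unistd.h>
+//   static int GetPaxFlags(void) {
+//     int mib[3] = {CTL_PROC, (int)getpid(), PROC_PID_PAXFLAGS};
+//     int flags = 0;
+//     size_t len = sizeof(flags);
+//     return sysctl(mib, 3, &flags, &len, NULL, 0) == -1 ? -1 : flags;
+//   }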
+
+void PrintModuleMap() { }
+
+void CheckNoDeepBind(const char *filename, int flag) {
+#ifdef RTLD_DEEPBIND
+ if (flag & RTLD_DEEPBIND) {
+ Report(
+ "You are trying to dlopen a %s shared library with RTLD_DEEPBIND flag"
+ " which is incompatibe with sanitizer runtime "
+ "(see https://github.com/google/sanitizers/issues/611 for details"
+ "). If you want to run %s library under sanitizers please remove "
+ "RTLD_DEEPBIND from dlopen flags.\n",
+ filename, filename);
+ Die();
+ }
+#endif
+}
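+
+// A minimal sketch (hypothetical caller; RTLD_DEEPBIND is a glibc extension;
+// not part of upstream) of the dlopen pattern this check rejects:
+//
+//   #include <dlfcn.h>
+//   void *bad = dlopen("libfoo.so", RTLD_NOW | RTLD_DEEPBIND);  // Die()s here
+//   void *ok  = dlopen("libfoo.so", RTLD_NOW);  // interceptors stay visible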
+
+uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
+ uptr *largest_gap_found,
+ uptr *max_occupied_addr) {
+ UNREACHABLE("FindAvailableMemoryRange is not available");
+ return 0;
+}
+
+bool GetRandom(void *buffer, uptr length, bool blocking) {
+ if (!buffer || !length || length > 256)
+ return false;
+#if SANITIZER_USE_GETENTROPY
+ uptr rnd = getentropy(buffer, length);
+ int rverrno = 0;
+ if (internal_iserror(rnd, &rverrno) && rverrno == EFAULT)
+ return false;
+ else if (rnd == 0)
+ return true;
+#endif // SANITIZER_USE_GETENTROPY
+
+#if SANITIZER_USE_GETRANDOM
+ static atomic_uint8_t skip_getrandom_syscall;
+ if (!atomic_load_relaxed(&skip_getrandom_syscall)) {
+ // Up to 256 bytes, getrandom will not be interrupted.
+ uptr res = internal_syscall(SYSCALL(getrandom), buffer, length,
+ blocking ? 0 : GRND_NONBLOCK);
+ int rverrno = 0;
+ if (internal_iserror(res, &rverrno) && rverrno == ENOSYS)
+ atomic_store_relaxed(&skip_getrandom_syscall, 1);
+ else if (res == length)
+ return true;
+ }
+#endif // SANITIZER_USE_GETRANDOM
+ // Up to 256 bytes, a read off /dev/urandom will not be interrupted.
+  // `blocking` is moot here: O_NONBLOCK has no effect when reading from
+  // /dev/urandom.
+ uptr fd = internal_open("/dev/urandom", O_RDONLY);
+ if (internal_iserror(fd))
+ return false;
+  uptr res = internal_read(fd, buffer, length);
+  // Close the descriptor before the error check so it is not leaked on the
+  // failure path.
+  internal_close(fd);
+  if (internal_iserror(res))
+    return false;
+ return true;
+}
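+
+// A minimal usage sketch (hypothetical caller, not part of upstream): the
+// getentropy -> getrandom(2) -> /dev/urandom fallback chain above is opaque
+// to callers, which may request at most 256 bytes:
+//
+//   u8 seed[32];
+//   if (GetRandom(seed, sizeof(seed), /*blocking=*/false)) {
+//     // seed now holds 32 random bytes.
+//   }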
+
+} // namespace __sanitizer
+
+#endif
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_interceptors_ioctl_netbsd.inc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_interceptors_ioctl_netbsd.inc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_interceptors_ioctl_netbsd.inc (revision 351984)
@@ -0,0 +1,1533 @@
+//===-- sanitizer_interceptors_ioctl_netbsd.inc -----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Ioctl handling in common sanitizer interceptors.
+//===----------------------------------------------------------------------===//
+
+#if SANITIZER_NETBSD
+
+#include "sanitizer_flags.h"
+
+struct ioctl_desc {
+ unsigned req;
+ // FIXME: support read+write arguments. Currently READWRITE and WRITE do the
+ // same thing.
+  // XXX: Unless explicitly noted, the declarations below may therefore use
+  // WRITE where READWRITE would be more precise.
+ enum { NONE, READ, WRITE, READWRITE, CUSTOM } type : 3;
+ unsigned size : 29;
+ const char *name;
+};
+
+const unsigned ioctl_table_max = 1236;
+static ioctl_desc ioctl_table[ioctl_table_max];
+static unsigned ioctl_table_size = 0;
+
+// This cannot be declared as a global, because references to struct_*_sz
+// would require a global initializer, and this table must be available before
+// global initializers are run.
+static void ioctl_table_fill() {
+#define _(rq, tp, sz) \
+ if (IOCTL_##rq != IOCTL_NOT_PRESENT) { \
+ CHECK(ioctl_table_size < ioctl_table_max); \
+ ioctl_table[ioctl_table_size].req = IOCTL_##rq; \
+ ioctl_table[ioctl_table_size].type = ioctl_desc::tp; \
+ ioctl_table[ioctl_table_size].size = sz; \
+ ioctl_table[ioctl_table_size].name = #rq; \
+ ++ioctl_table_size; \
+ }
+
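+  // For illustration (not part of upstream): a table entry such as
+  //   _(FIONREAD, WRITE, sizeof(int));
+  // expands to
+  //   if (IOCTL_FIONREAD != IOCTL_NOT_PRESENT) {
+  //     CHECK(ioctl_table_size < ioctl_table_max);
+  //     ioctl_table[ioctl_table_size].req = IOCTL_FIONREAD;
+  //     ioctl_table[ioctl_table_size].type = ioctl_desc::WRITE;
+  //     ioctl_table[ioctl_table_size].size = sizeof(int);
+  //     ioctl_table[ioctl_table_size].name = "FIONREAD";
+  //     ++ioctl_table_size;
+  //   }
+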
+ /* Entries from file: altq/altq_afmap.h */
+ _(AFM_ADDFMAP, READWRITE, struct_atm_flowmap_sz);
+ _(AFM_DELFMAP, READWRITE, struct_atm_flowmap_sz);
+ _(AFM_CLEANFMAP, READWRITE, struct_atm_flowmap_sz);
+ _(AFM_GETFMAP, READWRITE, struct_atm_flowmap_sz);
+ /* Entries from file: altq/altq.h */
+ _(ALTQGTYPE, READWRITE, struct_altqreq_sz);
+ _(ALTQTBRSET, READ, struct_tbrreq_sz);
+ _(ALTQTBRGET, READWRITE, struct_tbrreq_sz);
+ /* Entries from file: altq/altq_blue.h */
+ _(BLUE_IF_ATTACH, READ, struct_blue_interface_sz);
+ _(BLUE_DISABLE, READ, struct_blue_interface_sz);
+ _(BLUE_CONFIG, READWRITE, struct_blue_conf_sz);
+ _(BLUE_GETSTATS, READWRITE, struct_blue_stats_sz);
+ /* Entries from file: altq/altq_cbq.h */
+ _(CBQ_ENABLE, READ, struct_cbq_interface_sz);
+ _(CBQ_ADD_CLASS, READWRITE, struct_cbq_add_class_sz);
+ _(CBQ_DEL_CLASS, READ, struct_cbq_delete_class_sz);
+ _(CBQ_MODIFY_CLASS, READWRITE, struct_cbq_modify_class_sz);
+ _(CBQ_DEL_FILTER, READ, struct_cbq_delete_filter_sz);
+ _(CBQ_GETSTATS, READWRITE, struct_cbq_getstats_sz);
+ /* Entries from file: altq/altq_cdnr.h */
+ _(CDNR_IF_DETACH, READ, struct_cdnr_interface_sz);
+ _(CDNR_ADD_FILTER, READWRITE, struct_cdnr_add_filter_sz);
+ _(CDNR_GETSTATS, READWRITE, struct_cdnr_get_stats_sz);
+ _(CDNR_ADD_ELEM, READWRITE, struct_cdnr_add_element_sz);
+ _(CDNR_DEL_ELEM, READ, struct_cdnr_delete_element_sz);
+ _(CDNR_ADD_TBM, READWRITE, struct_cdnr_add_tbmeter_sz);
+ _(CDNR_MOD_TBM, READ, struct_cdnr_modify_tbmeter_sz);
+ _(CDNR_TBM_STATS, READWRITE, struct_cdnr_tbmeter_stats_sz);
+ _(CDNR_ADD_TCM, READWRITE, struct_cdnr_add_trtcm_sz);
+ _(CDNR_MOD_TCM, READWRITE, struct_cdnr_modify_trtcm_sz);
+ _(CDNR_TCM_STATS, READWRITE, struct_cdnr_tcm_stats_sz);
+ _(CDNR_ADD_TSW, READWRITE, struct_cdnr_add_tswtcm_sz);
+ _(CDNR_MOD_TSW, READWRITE, struct_cdnr_modify_tswtcm_sz);
+ /* Entries from file: altq/altq_fifoq.h */
+ _(FIFOQ_CONFIG, READWRITE, struct_fifoq_conf_sz);
+ _(FIFOQ_GETSTATS, READWRITE, struct_fifoq_getstats_sz);
+ /* Entries from file: altq/altq_hfsc.h */
+ _(HFSC_CLEAR_HIERARCHY, READ, struct_hfsc_interface_sz);
+ _(HFSC_ADD_CLASS, READWRITE, struct_hfsc_add_class_sz);
+ _(HFSC_GETSTATS, READWRITE, struct_hfsc_class_stats_sz);
+ /* Entries from file: altq/altq_jobs.h */
+ _(JOBS_IF_ATTACH, READ, struct_jobs_attach_sz);
+ _(JOBS_IF_DETACH, READ, struct_jobs_interface_sz);
+ _(JOBS_ENABLE, READ, struct_jobs_interface_sz);
+ _(JOBS_DISABLE, READ, struct_jobs_interface_sz);
+ _(JOBS_CLEAR, READ, struct_jobs_interface_sz);
+ _(JOBS_ADD_CLASS, READWRITE, struct_jobs_add_class_sz);
+ _(JOBS_MOD_CLASS, READ, struct_jobs_modify_class_sz);
+ /* Entries from file: altq/altq_priq.h */
+ _(PRIQ_IF_ATTACH, READ, struct_priq_interface_sz);
+ _(PRIQ_CLEAR, READ, struct_priq_interface_sz);
+ _(PRIQ_ADD_CLASS, READWRITE, struct_priq_add_class_sz);
+ _(PRIQ_DEL_CLASS, READ, struct_priq_delete_class_sz);
+ _(PRIQ_MOD_CLASS, READ, struct_priq_modify_class_sz);
+ _(PRIQ_ADD_FILTER, READWRITE, struct_priq_add_filter_sz);
+ _(PRIQ_DEL_FILTER, READ, struct_priq_delete_filter_sz);
+ _(PRIQ_GETSTATS, READWRITE, struct_priq_class_stats_sz);
+ /* Entries from file: altq/altq_red.h */
+ _(RED_CONFIG, READWRITE, struct_red_conf_sz);
+ _(RED_GETSTATS, READWRITE, struct_red_stats_sz);
+ _(RED_SETDEFAULTS, READ, struct_redparams_sz);
+ /* Entries from file: altq/altq_rio.h */
+ _(RIO_CONFIG, READWRITE, struct_rio_conf_sz);
+ _(RIO_GETSTATS, READWRITE, struct_rio_stats_sz);
+ _(RIO_SETDEFAULTS, READ, struct_redparams_sz);
+ /* Entries from file: altq/altq_wfq.h */
+ _(WFQ_CONFIG, READWRITE, struct_wfq_conf_sz);
+ _(WFQ_GET_QID, READWRITE, struct_wfq_getqid_sz);
+ _(WFQ_SET_WEIGHT, READWRITE, struct_wfq_setweight_sz);
+ /* Entries from file: crypto/cryptodev.h */
+ _(CRIOGET, READWRITE, sizeof(u32));
+ _(CIOCFSESSION, READ, sizeof(u32));
+ _(CIOCKEY, READWRITE, struct_crypt_kop_sz);
+ _(CIOCNFKEYM, READWRITE, struct_crypt_mkop_sz);
+ _(CIOCNFSESSION, READ, struct_crypt_sfop_sz);
+ _(CIOCNCRYPTRETM, READWRITE, struct_cryptret_sz);
+ _(CIOCNCRYPTRET, READWRITE, struct_crypt_result_sz);
+ _(CIOCGSESSION, READWRITE, struct_session_op_sz);
+ _(CIOCNGSESSION, READWRITE, struct_crypt_sgop_sz);
+ _(CIOCCRYPT, READWRITE, struct_crypt_op_sz);
+ _(CIOCNCRYPTM, READWRITE, struct_crypt_mop_sz);
+ _(CIOCASYMFEAT, WRITE, sizeof(u32));
+ /* Entries from file: dev/apm/apmio.h */
+ _(APM_IOC_REJECT, READ, struct_apm_event_info_sz);
+ _(OAPM_IOC_GETPOWER, WRITE, struct_apm_power_info_sz);
+ _(APM_IOC_GETPOWER, READWRITE, struct_apm_power_info_sz);
+ _(APM_IOC_NEXTEVENT, WRITE, struct_apm_event_info_sz);
+ _(APM_IOC_DEV_CTL, READ, struct_apm_ctl_sz);
+ /* Entries from file: dev/dm/netbsd-dm.h */
+ _(NETBSD_DM_IOCTL, READWRITE, struct_plistref_sz);
+ /* Entries from file: dev/dmover/dmover_io.h */
+ _(DMIO_SETFUNC, READ, struct_dmio_setfunc_sz);
+ /* Entries from file: dev/dtv/dtvio_demux.h */
+ _(DMX_START, NONE, 0);
+ _(DMX_STOP, NONE, 0);
+ _(DMX_SET_FILTER, READ, struct_dmx_sct_filter_params_sz);
+ _(DMX_SET_PES_FILTER, READ, struct_dmx_pes_filter_params_sz);
+ _(DMX_SET_BUFFER_SIZE, NONE, 0);
+ _(DMX_GET_STC, READWRITE, struct_dmx_stc_sz);
+ _(DMX_ADD_PID, READ, sizeof(u16));
+ _(DMX_REMOVE_PID, READ, sizeof(u16));
+ _(DMX_GET_CAPS, WRITE, struct_dmx_caps_sz);
+ _(DMX_SET_SOURCE, READ, enum_dmx_source_sz);
+ /* Entries from file: dev/dtv/dtvio_frontend.h */
+ _(FE_READ_STATUS, WRITE, enum_fe_status_sz);
+ _(FE_READ_BER, WRITE, sizeof(u32));
+ _(FE_READ_SNR, WRITE, sizeof(u16));
+ _(FE_READ_SIGNAL_STRENGTH, WRITE, sizeof(u16));
+ _(FE_READ_UNCORRECTED_BLOCKS, WRITE, sizeof(u32));
+ _(FE_SET_FRONTEND, READWRITE, struct_dvb_frontend_parameters_sz);
+ _(FE_GET_FRONTEND, WRITE, struct_dvb_frontend_parameters_sz);
+ _(FE_GET_EVENT, WRITE, struct_dvb_frontend_event_sz);
+ _(FE_GET_INFO, WRITE, struct_dvb_frontend_info_sz);
+ _(FE_DISEQC_RESET_OVERLOAD, NONE, 0);
+ _(FE_DISEQC_SEND_MASTER_CMD, READ, struct_dvb_diseqc_master_cmd_sz);
+ _(FE_DISEQC_RECV_SLAVE_REPLY, WRITE, struct_dvb_diseqc_slave_reply_sz);
+ _(FE_DISEQC_SEND_BURST, READ, enum_fe_sec_mini_cmd_sz);
+ _(FE_SET_TONE, READ, enum_fe_sec_tone_mode_sz);
+ _(FE_SET_VOLTAGE, READ, enum_fe_sec_voltage_sz);
+ _(FE_ENABLE_HIGH_LNB_VOLTAGE, READ, sizeof(int));
+ _(FE_SET_FRONTEND_TUNE_MODE, READ, sizeof(unsigned int));
+ _(FE_DISHNETWORK_SEND_LEGACY_CMD, READ, sizeof(unsigned long));
+ /* Entries from file: dev/filemon/filemon.h */
+ _(FILEMON_SET_FD, READWRITE, sizeof(int));
+ _(FILEMON_SET_PID, READWRITE, sizeof(int));
+ /* Entries from file: dev/hdaudio/hdaudioio.h */
+ _(HDAUDIO_FGRP_INFO, READWRITE, struct_plistref_sz);
+ _(HDAUDIO_FGRP_GETCONFIG, READWRITE, struct_plistref_sz);
+ _(HDAUDIO_FGRP_SETCONFIG, READWRITE, struct_plistref_sz);
+ _(HDAUDIO_FGRP_WIDGET_INFO, READWRITE, struct_plistref_sz);
+ _(HDAUDIO_FGRP_CODEC_INFO, READWRITE, struct_plistref_sz);
+ _(HDAUDIO_AFG_WIDGET_INFO, READWRITE, struct_plistref_sz);
+ _(HDAUDIO_AFG_CODEC_INFO, READWRITE, struct_plistref_sz);
+ /* Entries from file: dev/hdmicec/hdmicecio.h */
+ _(CEC_GET_PHYS_ADDR, WRITE, sizeof(u16));
+ _(CEC_GET_LOG_ADDRS, WRITE, sizeof(u16));
+ _(CEC_SET_LOG_ADDRS, READ, sizeof(u16));
+ _(CEC_GET_VENDOR_ID, WRITE, sizeof(u32));
+ /* Entries from file: dev/hpc/hpcfbio.h */
+ _(HPCFBIO_GCONF, READWRITE, struct_hpcfb_fbconf_sz);
+ _(HPCFBIO_SCONF, READ, struct_hpcfb_fbconf_sz);
+ _(HPCFBIO_GDSPCONF, READWRITE, struct_hpcfb_dspconf_sz);
+ _(HPCFBIO_SDSPCONF, READ, struct_hpcfb_dspconf_sz);
+ _(HPCFBIO_GOP, WRITE, struct_hpcfb_dsp_op_sz);
+ _(HPCFBIO_SOP, READWRITE, struct_hpcfb_dsp_op_sz);
+ /* Entries from file: dev/i2o/iopio.h */
+ _(IOPIOCPT, READWRITE, struct_ioppt_sz);
+ _(IOPIOCGLCT, READWRITE, struct_iovec_sz);
+ _(IOPIOCGSTATUS, READWRITE, struct_iovec_sz);
+ _(IOPIOCRECONFIG, NONE, 0);
+ _(IOPIOCGTIDMAP, READWRITE, struct_iovec_sz);
+ /* Entries from file: dev/ic/athioctl.h */
+ _(SIOCGATHSTATS, READWRITE, struct_ifreq_sz);
+ _(SIOCGATHDIAG, READWRITE, struct_ath_diag_sz);
+ /* Entries from file: dev/ic/bt8xx.h */
+ _(METEORCAPTUR, READ, sizeof(int));
+ _(METEORCAPFRM, READ, struct_meteor_capframe_sz);
+ _(METEORSETGEO, READ, struct_meteor_geomet_sz);
+ _(METEORGETGEO, WRITE, struct_meteor_geomet_sz);
+ _(METEORSTATUS, WRITE, sizeof(unsigned short));
+ _(METEORSHUE, READ, sizeof(signed char));
+ _(METEORGHUE, WRITE, sizeof(signed char));
+ _(METEORSFMT, READ, sizeof(unsigned int));
+ _(METEORGFMT, WRITE, sizeof(unsigned int));
+ _(METEORSINPUT, READ, sizeof(unsigned int));
+ _(METEORGINPUT, WRITE, sizeof(unsigned int));
+ _(METEORSCHCV, READ, sizeof(unsigned char));
+ _(METEORGCHCV, WRITE, sizeof(unsigned char));
+ _(METEORSCOUNT, READ, struct_meteor_counts_sz);
+ _(METEORGCOUNT, WRITE, struct_meteor_counts_sz);
+ _(METEORSFPS, READ, sizeof(unsigned short));
+ _(METEORGFPS, WRITE, sizeof(unsigned short));
+ _(METEORSSIGNAL, READ, sizeof(unsigned int));
+ _(METEORGSIGNAL, WRITE, sizeof(unsigned int));
+ _(METEORSVIDEO, READ, struct_meteor_video_sz);
+ _(METEORGVIDEO, WRITE, struct_meteor_video_sz);
+ _(METEORSBRIG, READ, sizeof(unsigned char));
+ _(METEORGBRIG, WRITE, sizeof(unsigned char));
+ _(METEORSCSAT, READ, sizeof(unsigned char));
+ _(METEORGCSAT, WRITE, sizeof(unsigned char));
+ _(METEORSCONT, READ, sizeof(unsigned char));
+ _(METEORGCONT, WRITE, sizeof(unsigned char));
+ _(METEORSHWS, READ, sizeof(unsigned char));
+ _(METEORGHWS, WRITE, sizeof(unsigned char));
+ _(METEORSVWS, READ, sizeof(unsigned char));
+ _(METEORGVWS, WRITE, sizeof(unsigned char));
+ _(METEORSTS, READ, sizeof(unsigned char));
+ _(METEORGTS, WRITE, sizeof(unsigned char));
+ _(TVTUNER_SETCHNL, READ, sizeof(unsigned int));
+ _(TVTUNER_GETCHNL, WRITE, sizeof(unsigned int));
+ _(TVTUNER_SETTYPE, READ, sizeof(unsigned int));
+ _(TVTUNER_GETTYPE, WRITE, sizeof(unsigned int));
+ _(TVTUNER_GETSTATUS, WRITE, sizeof(unsigned int));
+ _(TVTUNER_SETFREQ, READ, sizeof(unsigned int));
+ _(TVTUNER_GETFREQ, WRITE, sizeof(unsigned int));
+ _(TVTUNER_SETAFC, READ, sizeof(int));
+ _(TVTUNER_GETAFC, WRITE, sizeof(int));
+ _(RADIO_SETMODE, READ, sizeof(unsigned int));
+ _(RADIO_GETMODE, WRITE, sizeof(unsigned char));
+ _(RADIO_SETFREQ, READ, sizeof(unsigned int));
+ _(RADIO_GETFREQ, WRITE, sizeof(unsigned int));
+ _(METEORSACTPIXFMT, READ, sizeof(int));
+ _(METEORGACTPIXFMT, WRITE, sizeof(int));
+ _(METEORGSUPPIXFMT, READWRITE, struct_meteor_pixfmt_sz);
+ _(TVTUNER_GETCHNLSET, READWRITE, struct_bktr_chnlset_sz);
+ _(REMOTE_GETKEY, WRITE, struct_bktr_remote_sz);
+ /* Entries from file: dev/ic/icp_ioctl.h */
+ _(GDT_IOCTL_GENERAL, READWRITE, struct_gdt_ucmd_sz);
+ _(GDT_IOCTL_DRVERS, WRITE, sizeof(int));
+ _(GDT_IOCTL_CTRTYPE, READWRITE, struct_gdt_ctrt_sz);
+ _(GDT_IOCTL_OSVERS, WRITE, struct_gdt_osv_sz);
+ _(GDT_IOCTL_CTRCNT, WRITE, sizeof(int));
+ _(GDT_IOCTL_EVENT, READWRITE, struct_gdt_event_sz);
+ _(GDT_IOCTL_STATIST, WRITE, struct_gdt_statist_sz);
+ _(GDT_IOCTL_RESCAN, READWRITE, struct_gdt_rescan_sz);
+ /* Entries from file: dev/ic/isp_ioctl.h */
+ _(ISP_SDBLEV, READWRITE, sizeof(int));
+ _(ISP_RESETHBA, NONE, 0);
+ _(ISP_RESCAN, NONE, 0);
+ _(ISP_SETROLE, READWRITE, sizeof(int));
+ _(ISP_GETROLE, WRITE, sizeof(int));
+ _(ISP_GET_STATS, WRITE, struct_isp_stats_sz);
+ _(ISP_CLR_STATS, NONE, 0);
+ _(ISP_FC_LIP, NONE, 0);
+ _(ISP_FC_GETDINFO, READWRITE, struct_isp_fc_device_sz);
+ _(ISP_GET_FW_CRASH_DUMP, NONE, 0);
+ _(ISP_FORCE_CRASH_DUMP, NONE, 0);
+ _(ISP_FC_GETHINFO, READWRITE, struct_isp_hba_device_sz);
+ _(ISP_TSK_MGMT, READWRITE, struct_isp_fc_tsk_mgmt_sz);
+ _(ISP_FC_GETDLIST, NONE, 0);
+ /* Entries from file: dev/ic/mlxio.h */
+ _(MLXD_STATUS, WRITE, sizeof(int));
+ _(MLXD_CHECKASYNC, WRITE, sizeof(int));
+ _(MLXD_DETACH, READ, sizeof(int));
+ _(MLX_RESCAN_DRIVES, NONE, 0);
+ _(MLX_PAUSE_CHANNEL, READ, struct_mlx_pause_sz);
+ _(MLX_COMMAND, READWRITE, struct_mlx_usercommand_sz);
+ _(MLX_REBUILDASYNC, READWRITE, struct_mlx_rebuild_request_sz);
+ _(MLX_REBUILDSTAT, WRITE, struct_mlx_rebuild_status_sz);
+ _(MLX_GET_SYSDRIVE, READWRITE, sizeof(int));
+ _(MLX_GET_CINFO, WRITE, struct_mlx_cinfo_sz);
+ /* Entries from file: dev/ic/nvmeio.h */
+ _(NVME_PASSTHROUGH_CMD, READWRITE, struct_nvme_pt_command_sz);
+ /* Entries from file: dev/ic/qemufwcfgio.h */
+ _(FWCFGIO_SET_INDEX, READ, sizeof(u16));
+ /* Entries from file: dev/ir/irdaio.h */
+ _(IRDA_RESET_PARAMS, NONE, 0);
+ _(IRDA_SET_PARAMS, READ, struct_irda_params_sz);
+ _(IRDA_GET_SPEEDMASK, WRITE, sizeof(unsigned int));
+ _(IRDA_GET_TURNAROUNDMASK, WRITE, sizeof(unsigned int));
+ _(IRFRAMETTY_GET_DEVICE, WRITE, sizeof(unsigned int));
+ _(IRFRAMETTY_GET_DONGLE, WRITE, sizeof(unsigned int));
+ _(IRFRAMETTY_SET_DONGLE, READ, sizeof(unsigned int));
+ /* Entries from file: dev/isa/isvio.h */
+ _(ISV_CMD, READWRITE, struct_isv_cmd_sz);
+ /* Entries from file: dev/isa/wtreg.h */
+ _(WTQICMD, NONE, 0);
+ /* Entries from file: dev/iscsi/iscsi_ioctl.h */
+ _(ISCSI_GET_VERSION, READWRITE, struct_iscsi_get_version_parameters_sz);
+ _(ISCSI_LOGIN, READWRITE, struct_iscsi_login_parameters_sz);
+ _(ISCSI_LOGOUT, READWRITE, struct_iscsi_logout_parameters_sz);
+ _(ISCSI_ADD_CONNECTION, READWRITE, struct_iscsi_login_parameters_sz);
+ _(ISCSI_RESTORE_CONNECTION, READWRITE, struct_iscsi_login_parameters_sz);
+ _(ISCSI_REMOVE_CONNECTION, READWRITE, struct_iscsi_remove_parameters_sz);
+ _(ISCSI_CONNECTION_STATUS, READWRITE, struct_iscsi_conn_status_parameters_sz);
+ _(ISCSI_SEND_TARGETS, READWRITE, struct_iscsi_send_targets_parameters_sz);
+ _(ISCSI_SET_NODE_NAME, READWRITE, struct_iscsi_set_node_name_parameters_sz);
+ _(ISCSI_IO_COMMAND, READWRITE, struct_iscsi_iocommand_parameters_sz);
+ _(ISCSI_REGISTER_EVENT, READWRITE, struct_iscsi_register_event_parameters_sz);
+ _(ISCSI_DEREGISTER_EVENT, READWRITE,
+ struct_iscsi_register_event_parameters_sz);
+ _(ISCSI_WAIT_EVENT, READWRITE, struct_iscsi_wait_event_parameters_sz);
+ _(ISCSI_POLL_EVENT, READWRITE, struct_iscsi_wait_event_parameters_sz);
+ /* Entries from file: dev/ofw/openfirmio.h */
+ _(OFIOCGET, READWRITE, struct_ofiocdesc_sz);
+ _(OFIOCSET, READ, struct_ofiocdesc_sz);
+ _(OFIOCNEXTPROP, READWRITE, struct_ofiocdesc_sz);
+ _(OFIOCGETOPTNODE, WRITE, sizeof(int));
+ _(OFIOCGETNEXT, READWRITE, sizeof(int));
+ _(OFIOCGETCHILD, READWRITE, sizeof(int));
+ _(OFIOCFINDDEVICE, READWRITE, struct_ofiocdesc_sz);
+ /* Entries from file: dev/pci/amrio.h */
+ _(AMR_IO_VERSION, WRITE, sizeof(int));
+ _(AMR_IO_COMMAND, READWRITE, struct_amr_user_ioctl_sz);
+ /* Entries from file: dev/pci/mlyio.h */
+ _(MLYIO_COMMAND, READWRITE, struct_mly_user_command_sz);
+ _(MLYIO_HEALTH, READ, struct_mly_user_health_sz);
+ /* Entries from file: dev/pci/pciio.h */
+ _(PCI_IOC_CFGREAD, READWRITE, struct_pciio_cfgreg_sz);
+ _(PCI_IOC_CFGWRITE, READ, struct_pciio_cfgreg_sz);
+ _(PCI_IOC_BDF_CFGREAD, READWRITE, struct_pciio_bdf_cfgreg_sz);
+ _(PCI_IOC_BDF_CFGWRITE, READ, struct_pciio_bdf_cfgreg_sz);
+ _(PCI_IOC_BUSINFO, WRITE, struct_pciio_businfo_sz);
+ _(PCI_IOC_DRVNAME, READWRITE, struct_pciio_drvname_sz);
+ _(PCI_IOC_DRVNAMEONBUS, READWRITE, struct_pciio_drvnameonbus_sz);
+ /* Entries from file: dev/pci/tweio.h */
+ _(TWEIO_COMMAND, READWRITE, struct_twe_usercommand_sz);
+ _(TWEIO_STATS, READWRITE, union_twe_statrequest_sz);
+ _(TWEIO_AEN_POLL, WRITE, sizeof(int));
+ _(TWEIO_AEN_WAIT, WRITE, sizeof(int));
+ _(TWEIO_SET_PARAM, READ, struct_twe_paramcommand_sz);
+ _(TWEIO_GET_PARAM, READ, struct_twe_paramcommand_sz);
+ _(TWEIO_RESET, NONE, 0);
+ _(TWEIO_ADD_UNIT, READ, struct_twe_drivecommand_sz);
+ _(TWEIO_DEL_UNIT, READ, struct_twe_drivecommand_sz);
+ /* Entries from file: dev/pcmcia/if_cnwioctl.h */
+ _(SIOCSCNWDOMAIN, READ, struct_ifreq_sz);
+ _(SIOCGCNWDOMAIN, READWRITE, struct_ifreq_sz);
+ _(SIOCSCNWKEY, READWRITE, struct_ifreq_sz);
+ _(SIOCGCNWSTATUS, READWRITE, struct_cnwstatus_sz);
+ _(SIOCGCNWSTATS, READWRITE, struct_cnwistats_sz);
+ _(SIOCGCNWTRAIL, READWRITE, struct_cnwitrail_sz);
+ /* Entries from file: dev/pcmcia/if_rayreg.h */
+ _(SIOCGRAYSIGLEV, READWRITE, struct_ifreq_sz);
+ /* Entries from file: dev/raidframe/raidframeio.h */
+ _(RAIDFRAME_SHUTDOWN, NONE, 0);
+ _(RAIDFRAME_TUR, READ, sizeof(u64));
+ _(RAIDFRAME_FAIL_DISK, READ, struct_rf_recon_req_sz);
+ _(RAIDFRAME_CHECK_RECON_STATUS, READWRITE, sizeof(int));
+ _(RAIDFRAME_REWRITEPARITY, NONE, 0);
+ _(RAIDFRAME_COPYBACK, NONE, 0);
+ _(RAIDFRAME_SPARET_WAIT, WRITE, struct_RF_SparetWait_sz);
+ _(RAIDFRAME_SEND_SPARET, READ, sizeof(uptr));
+ _(RAIDFRAME_ABORT_SPARET_WAIT, NONE, 0);
+ _(RAIDFRAME_START_ATRACE, NONE, 0);
+ _(RAIDFRAME_STOP_ATRACE, NONE, 0);
+ _(RAIDFRAME_GET_SIZE, WRITE, sizeof(int));
+ _(RAIDFRAME_RESET_ACCTOTALS, NONE, 0);
+ _(RAIDFRAME_KEEP_ACCTOTALS, READ, sizeof(int));
+ _(RAIDFRAME_GET_COMPONENT_LABEL, READWRITE, struct_RF_ComponentLabel_sz);
+ _(RAIDFRAME_SET_COMPONENT_LABEL, READ, struct_RF_ComponentLabel_sz);
+ _(RAIDFRAME_INIT_LABELS, READ, struct_RF_ComponentLabel_sz);
+ _(RAIDFRAME_ADD_HOT_SPARE, READ, struct_RF_SingleComponent_sz);
+ _(RAIDFRAME_REMOVE_HOT_SPARE, READ, struct_RF_SingleComponent_sz);
+ _(RAIDFRAME_REBUILD_IN_PLACE, READ, struct_RF_SingleComponent_sz);
+ _(RAIDFRAME_CHECK_PARITY, READWRITE, sizeof(int));
+ _(RAIDFRAME_CHECK_PARITYREWRITE_STATUS, READWRITE, sizeof(int));
+ _(RAIDFRAME_CHECK_COPYBACK_STATUS, READWRITE, sizeof(int));
+ _(RAIDFRAME_SET_AUTOCONFIG, READWRITE, sizeof(int));
+ _(RAIDFRAME_SET_ROOT, READWRITE, sizeof(int));
+ _(RAIDFRAME_DELETE_COMPONENT, READ, struct_RF_SingleComponent_sz);
+ _(RAIDFRAME_INCORPORATE_HOT_SPARE, READ, struct_RF_SingleComponent_sz);
+ _(RAIDFRAME_CHECK_RECON_STATUS_EXT, READWRITE, struct_RF_ProgressInfo_sz);
+ _(RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT, READWRITE,
+ struct_RF_ProgressInfo_sz);
+ _(RAIDFRAME_CHECK_COPYBACK_STATUS_EXT, READWRITE, struct_RF_ProgressInfo_sz);
+ _(RAIDFRAME_PARITYMAP_STATUS, WRITE, struct_rf_pmstat_sz);
+ _(RAIDFRAME_PARITYMAP_GET_DISABLE, WRITE, sizeof(int));
+ _(RAIDFRAME_PARITYMAP_SET_DISABLE, READ, sizeof(int));
+ _(RAIDFRAME_PARITYMAP_SET_PARAMS, READ, struct_rf_pmparams_sz);
+ _(RAIDFRAME_SET_LAST_UNIT, READ, sizeof(int));
+ _(RAIDFRAME_GET_INFO, READWRITE, sizeof(uptr));
+ _(RAIDFRAME_CONFIGURE, READ, sizeof(uptr));
+ /* Entries from file: dev/sbus/mbppio.h */
+ _(MBPPIOCSPARAM, READ, struct_mbpp_param_sz);
+ _(MBPPIOCGPARAM, WRITE, struct_mbpp_param_sz);
+ _(MBPPIOCGSTAT, WRITE, sizeof(int));
+ /* Entries from file: dev/scsipi/ses.h */
+ _(SESIOC_GETNOBJ, NONE, 0);
+ _(SESIOC_GETOBJMAP, NONE, 0);
+ _(SESIOC_GETENCSTAT, NONE, 0);
+ _(SESIOC_SETENCSTAT, NONE, 0);
+ _(SESIOC_GETOBJSTAT, NONE, 0);
+ _(SESIOC_SETOBJSTAT, NONE, 0);
+ _(SESIOC_GETTEXT, NONE, 0);
+ _(SESIOC_INIT, NONE, 0);
+ /* Entries from file: dev/sun/disklabel.h */
+ _(SUN_DKIOCGGEOM, WRITE, struct_sun_dkgeom_sz);
+ _(SUN_DKIOCINFO, WRITE, struct_sun_dkctlr_sz);
+ _(SUN_DKIOCGPART, WRITE, struct_sun_dkpart_sz);
+ /* Entries from file: dev/sun/fbio.h */
+ _(FBIOGTYPE, WRITE, struct_fbtype_sz);
+ _(FBIOPUTCMAP, READ, struct_fbcmap_sz);
+ _(FBIOGETCMAP, READ, struct_fbcmap_sz);
+ _(FBIOGATTR, WRITE, struct_fbgattr_sz);
+ _(FBIOSVIDEO, READ, sizeof(int));
+ _(FBIOGVIDEO, WRITE, sizeof(int));
+ _(FBIOSCURSOR, READ, struct_fbcursor_sz);
+ _(FBIOGCURSOR, READWRITE, struct_fbcursor_sz);
+ _(FBIOSCURPOS, READ, struct_fbcurpos_sz);
+ _(FBIOGCURPOS, READ, struct_fbcurpos_sz);
+ _(FBIOGCURMAX, WRITE, struct_fbcurpos_sz);
+ /* Entries from file: dev/sun/kbio.h */
+ _(KIOCTRANS, READ, sizeof(int));
+ _(KIOCSETKEY, READWRITE, struct_okiockey_sz);
+ _(KIOCGETKEY, READWRITE, struct_okiockey_sz);
+ _(KIOCGTRANS, WRITE, sizeof(int));
+ _(KIOCCMD, READ, sizeof(int));
+ _(KIOCTYPE, WRITE, sizeof(int));
+ _(KIOCSDIRECT, READ, sizeof(int));
+ _(KIOCSKEY, READ, struct_kiockeymap_sz);
+ _(KIOCGKEY, READWRITE, struct_kiockeymap_sz);
+ _(KIOCSLED, READ, sizeof(char));
+ _(KIOCGLED, WRITE, sizeof(char));
+ _(KIOCLAYOUT, WRITE, sizeof(int));
+ /* Entries from file: dev/sun/vuid_event.h */
+ _(VUIDSFORMAT, READ, sizeof(int));
+ _(VUIDGFORMAT, WRITE, sizeof(int));
+ /* Entries from file: dev/tc/sticio.h */
+ _(STICIO_GXINFO, WRITE, struct_stic_xinfo_sz);
+ _(STICIO_RESET, NONE, 0);
+ _(STICIO_STARTQ, NONE, 0);
+ _(STICIO_STOPQ, NONE, 0);
+ /* Entries from file: dev/usb/ukyopon.h */
+ _(UKYOPON_IDENTIFY, WRITE, struct_ukyopon_identify_sz);
+ /* Entries from file: dev/usb/urio.h */
+ _(URIO_SEND_COMMAND, READWRITE, struct_urio_command_sz);
+ _(URIO_RECV_COMMAND, READWRITE, struct_urio_command_sz);
+ /* Entries from file: dev/usb/usb.h */
+ _(USB_REQUEST, READWRITE, struct_usb_ctl_request_sz);
+ _(USB_SETDEBUG, READ, sizeof(int));
+ _(USB_DISCOVER, NONE, 0);
+ _(USB_DEVICEINFO, READWRITE, struct_usb_device_info_sz);
+ _(USB_DEVICEINFO_OLD, READWRITE, struct_usb_device_info_old_sz);
+ _(USB_DEVICESTATS, WRITE, struct_usb_device_stats_sz);
+ _(USB_GET_REPORT_DESC, WRITE, struct_usb_ctl_report_desc_sz);
+ _(USB_SET_IMMED, READ, sizeof(int));
+ _(USB_GET_REPORT, READWRITE, struct_usb_ctl_report_sz);
+ _(USB_SET_REPORT, READ, struct_usb_ctl_report_sz);
+ _(USB_GET_REPORT_ID, WRITE, sizeof(int));
+ _(USB_GET_CONFIG, WRITE, sizeof(int));
+ _(USB_SET_CONFIG, READ, sizeof(int));
+ _(USB_GET_ALTINTERFACE, READWRITE, struct_usb_alt_interface_sz);
+ _(USB_SET_ALTINTERFACE, READWRITE, struct_usb_alt_interface_sz);
+ _(USB_GET_NO_ALT, READWRITE, struct_usb_alt_interface_sz);
+ _(USB_GET_DEVICE_DESC, WRITE, struct_usb_device_descriptor_sz);
+ _(USB_GET_CONFIG_DESC, READWRITE, struct_usb_config_desc_sz);
+ _(USB_GET_INTERFACE_DESC, READWRITE, struct_usb_interface_desc_sz);
+ _(USB_GET_ENDPOINT_DESC, READWRITE, struct_usb_endpoint_desc_sz);
+ _(USB_GET_FULL_DESC, READWRITE, struct_usb_full_desc_sz);
+ _(USB_GET_STRING_DESC, READWRITE, struct_usb_string_desc_sz);
+ _(USB_DO_REQUEST, READWRITE, struct_usb_ctl_request_sz);
+ _(USB_GET_DEVICEINFO, WRITE, struct_usb_device_info_sz);
+ _(USB_GET_DEVICEINFO_OLD, WRITE, struct_usb_device_info_old_sz);
+ _(USB_SET_SHORT_XFER, READ, sizeof(int));
+ _(USB_SET_TIMEOUT, READ, sizeof(int));
+ _(USB_SET_BULK_RA, READ, sizeof(int));
+ _(USB_SET_BULK_WB, READ, sizeof(int));
+ _(USB_SET_BULK_RA_OPT, READ, struct_usb_bulk_ra_wb_opt_sz);
+ _(USB_SET_BULK_WB_OPT, READ, struct_usb_bulk_ra_wb_opt_sz);
+ _(USB_GET_CM_OVER_DATA, WRITE, sizeof(int));
+ _(USB_SET_CM_OVER_DATA, READ, sizeof(int));
+ /* Entries from file: dev/usb/utoppy.h */
+ _(UTOPPYIOTURBO, READ, sizeof(int));
+ _(UTOPPYIOREBOOT, NONE, 0);
+ _(UTOPPYIOSTATS, WRITE, struct_utoppy_stats_sz);
+ _(UTOPPYIORENAME, READ, struct_utoppy_rename_sz);
+ _(UTOPPYIOMKDIR, READ, sizeof(uptr));
+ _(UTOPPYIODELETE, READ, sizeof(uptr));
+ _(UTOPPYIOREADDIR, READ, sizeof(uptr));
+ _(UTOPPYIOREADFILE, READ, struct_utoppy_readfile_sz);
+ _(UTOPPYIOWRITEFILE, READ, struct_utoppy_writefile_sz);
+ /* Entries from file: dev/vme/xio.h */
+ _(DIOSXDCMD, READWRITE, struct_xd_iocmd_sz);
+ /* Entries from file: dev/wscons/wsdisplay_usl_io.h */
+ _(VT_OPENQRY, WRITE, sizeof(int));
+ _(VT_SETMODE, READ, struct_vt_mode_sz);
+ _(VT_GETMODE, WRITE, struct_vt_mode_sz);
+ _(VT_RELDISP, NONE, 0);
+ _(VT_ACTIVATE, NONE, 0);
+ _(VT_WAITACTIVE, NONE, 0);
+ _(VT_GETACTIVE, WRITE, sizeof(int));
+ _(VT_GETSTATE, WRITE, struct_vt_stat_sz);
+ _(KDGETKBENT, READWRITE, struct_kbentry_sz);
+ _(KDGKBMODE, WRITE, sizeof(int));
+ _(KDSKBMODE, NONE, 0);
+ _(KDMKTONE, NONE, 0);
+ _(KDSETMODE, NONE, 0);
+ _(KDENABIO, NONE, 0);
+ _(KDDISABIO, NONE, 0);
+ _(KDGKBTYPE, WRITE, sizeof(char));
+ _(KDGETLED, WRITE, sizeof(int));
+ _(KDSETLED, NONE, 0);
+ _(KDSETRAD, NONE, 0);
+ _(VGAPCVTID, READWRITE, struct_pcvtid_sz);
+ _(CONS_GETVERS, WRITE, sizeof(int));
+ /* Entries from file: dev/wscons/wsconsio.h */
+ _(WSKBDIO_GTYPE, WRITE, sizeof(unsigned int));
+ _(WSKBDIO_BELL, NONE, 0);
+ _(WSKBDIO_COMPLEXBELL, READ, struct_wskbd_bell_data_sz);
+ _(WSKBDIO_SETBELL, READ, struct_wskbd_bell_data_sz);
+ _(WSKBDIO_GETBELL, WRITE, struct_wskbd_bell_data_sz);
+ _(WSKBDIO_SETDEFAULTBELL, READ, struct_wskbd_bell_data_sz);
+ _(WSKBDIO_GETDEFAULTBELL, WRITE, struct_wskbd_bell_data_sz);
+ _(WSKBDIO_SETKEYREPEAT, READ, struct_wskbd_keyrepeat_data_sz);
+ _(WSKBDIO_GETKEYREPEAT, WRITE, struct_wskbd_keyrepeat_data_sz);
+ _(WSKBDIO_SETDEFAULTKEYREPEAT, READ, struct_wskbd_keyrepeat_data_sz);
+ _(WSKBDIO_GETDEFAULTKEYREPEAT, WRITE, struct_wskbd_keyrepeat_data_sz);
+ _(WSKBDIO_SETLEDS, READ, sizeof(int));
+ _(WSKBDIO_GETLEDS, WRITE, sizeof(int));
+ _(WSKBDIO_GETMAP, READWRITE, struct_wskbd_map_data_sz);
+ _(WSKBDIO_SETMAP, READ, struct_wskbd_map_data_sz);
+ _(WSKBDIO_GETENCODING, WRITE, sizeof(int));
+ _(WSKBDIO_SETENCODING, READ, sizeof(int));
+ _(WSKBDIO_SETMODE, READ, sizeof(int));
+ _(WSKBDIO_GETMODE, WRITE, sizeof(int));
+ _(WSKBDIO_SETKEYCLICK, READ, sizeof(int));
+ _(WSKBDIO_GETKEYCLICK, WRITE, sizeof(int));
+ _(WSKBDIO_GETSCROLL, WRITE, struct_wskbd_scroll_data_sz);
+ _(WSKBDIO_SETSCROLL, READ, struct_wskbd_scroll_data_sz);
+ _(WSKBDIO_SETVERSION, READ, sizeof(int));
+ _(WSMOUSEIO_GTYPE, WRITE, sizeof(unsigned int));
+ _(WSMOUSEIO_SRES, READ, sizeof(unsigned int));
+ _(WSMOUSEIO_SSCALE, READ, sizeof(unsigned int));
+ _(WSMOUSEIO_SRATE, READ, sizeof(unsigned int));
+ _(WSMOUSEIO_SCALIBCOORDS, READ, struct_wsmouse_calibcoords_sz);
+ _(WSMOUSEIO_GCALIBCOORDS, WRITE, struct_wsmouse_calibcoords_sz);
+ _(WSMOUSEIO_GETID, READWRITE, struct_wsmouse_id_sz);
+ _(WSMOUSEIO_GETREPEAT, WRITE, struct_wsmouse_repeat_sz);
+ _(WSMOUSEIO_SETREPEAT, READ, struct_wsmouse_repeat_sz);
+ _(WSMOUSEIO_SETVERSION, READ, sizeof(int));
+ _(WSDISPLAYIO_GTYPE, WRITE, sizeof(unsigned int));
+ _(WSDISPLAYIO_GINFO, WRITE, struct_wsdisplay_fbinfo_sz);
+ _(WSDISPLAYIO_GETCMAP, READ, struct_wsdisplay_cmap_sz);
+ _(WSDISPLAYIO_PUTCMAP, READ, struct_wsdisplay_cmap_sz);
+ _(WSDISPLAYIO_GVIDEO, WRITE, sizeof(unsigned int));
+ _(WSDISPLAYIO_SVIDEO, READ, sizeof(unsigned int));
+ _(WSDISPLAYIO_GCURPOS, WRITE, struct_wsdisplay_curpos_sz);
+ _(WSDISPLAYIO_SCURPOS, READ, struct_wsdisplay_curpos_sz);
+ _(WSDISPLAYIO_GCURMAX, WRITE, struct_wsdisplay_curpos_sz);
+ _(WSDISPLAYIO_GCURSOR, READWRITE, struct_wsdisplay_cursor_sz);
+ _(WSDISPLAYIO_SCURSOR, READ, struct_wsdisplay_cursor_sz);
+ _(WSDISPLAYIO_GMODE, WRITE, sizeof(unsigned int));
+ _(WSDISPLAYIO_SMODE, READ, sizeof(unsigned int));
+ _(WSDISPLAYIO_LDFONT, READ, struct_wsdisplay_font_sz);
+ _(WSDISPLAYIO_ADDSCREEN, READ, struct_wsdisplay_addscreendata_sz);
+ _(WSDISPLAYIO_DELSCREEN, READ, struct_wsdisplay_delscreendata_sz);
+ _(WSDISPLAYIO_SFONT, READ, struct_wsdisplay_usefontdata_sz);
+ _(_O_WSDISPLAYIO_SETKEYBOARD, READWRITE, struct_wsdisplay_kbddata_sz);
+ _(WSDISPLAYIO_GETPARAM, READWRITE, struct_wsdisplay_param_sz);
+ _(WSDISPLAYIO_SETPARAM, READWRITE, struct_wsdisplay_param_sz);
+ _(WSDISPLAYIO_GETACTIVESCREEN, WRITE, sizeof(int));
+ _(WSDISPLAYIO_GETWSCHAR, READWRITE, struct_wsdisplay_char_sz);
+ _(WSDISPLAYIO_PUTWSCHAR, READWRITE, struct_wsdisplay_char_sz);
+ _(WSDISPLAYIO_DGSCROLL, WRITE, struct_wsdisplay_scroll_data_sz);
+ _(WSDISPLAYIO_DSSCROLL, READ, struct_wsdisplay_scroll_data_sz);
+ _(WSDISPLAYIO_GMSGATTRS, WRITE, struct_wsdisplay_msgattrs_sz);
+ _(WSDISPLAYIO_SMSGATTRS, READ, struct_wsdisplay_msgattrs_sz);
+ _(WSDISPLAYIO_GBORDER, WRITE, sizeof(int));
+ _(WSDISPLAYIO_SBORDER, READ, sizeof(int));
+ _(WSDISPLAYIO_SSPLASH, READ, sizeof(int));
+ _(WSDISPLAYIO_SPROGRESS, READ, sizeof(int));
+ _(WSDISPLAYIO_LINEBYTES, WRITE, sizeof(unsigned int));
+ _(WSDISPLAYIO_SETVERSION, READ, sizeof(int));
+ _(WSMUXIO_ADD_DEVICE, READ, struct_wsmux_device_sz);
+ _(WSMUXIO_REMOVE_DEVICE, READ, struct_wsmux_device_sz);
+ _(WSMUXIO_LIST_DEVICES, READWRITE, struct_wsmux_device_list_sz);
+ _(WSMUXIO_INJECTEVENT, READ, struct_wscons_event_sz);
+ _(WSDISPLAYIO_GET_BUSID, WRITE, struct_wsdisplayio_bus_id_sz);
+ _(WSDISPLAYIO_GET_EDID, READWRITE, struct_wsdisplayio_edid_info_sz);
+ _(WSDISPLAYIO_SET_POLLING, READ, sizeof(int));
+ _(WSDISPLAYIO_GET_FBINFO, READWRITE, struct_wsdisplayio_fbinfo_sz);
+ _(WSDISPLAYIO_DOBLIT, READWRITE, struct_wsdisplayio_blit_sz);
+ _(WSDISPLAYIO_WAITBLIT, READWRITE, struct_wsdisplayio_blit_sz);
+ /* Entries from file: dev/biovar.h */
+ _(BIOCLOCATE, READWRITE, struct_bio_locate_sz);
+ _(BIOCINQ, READWRITE, struct_bioc_inq_sz);
+ _(BIOCDISK_NOVOL, READWRITE, struct_bioc_disk_sz);
+ _(BIOCDISK, READWRITE, struct_bioc_disk_sz);
+ _(BIOCVOL, READWRITE, struct_bioc_vol_sz);
+ _(BIOCALARM, READWRITE, struct_bioc_alarm_sz);
+ _(BIOCBLINK, READWRITE, struct_bioc_blink_sz);
+ _(BIOCSETSTATE, READWRITE, struct_bioc_setstate_sz);
+ _(BIOCVOLOPS, READWRITE, struct_bioc_volops_sz);
+ /* Entries from file: dev/md.h */
+ _(MD_GETCONF, WRITE, struct_md_conf_sz);
+ _(MD_SETCONF, READ, struct_md_conf_sz);
+ /* Entries from file: dev/ccdvar.h */
+ _(CCDIOCSET, READWRITE, struct_ccd_ioctl_sz);
+ _(CCDIOCCLR, READ, struct_ccd_ioctl_sz);
+ /* Entries from file: dev/cgdvar.h */
+ _(CGDIOCSET, READWRITE, struct_cgd_ioctl_sz);
+ _(CGDIOCCLR, READ, struct_cgd_ioctl_sz);
+ _(CGDIOCGET, READWRITE, struct_cgd_user_sz);
+ /* Entries from file: dev/fssvar.h */
+ _(FSSIOCSET, READ, struct_fss_set_sz);
+ _(FSSIOCGET, WRITE, struct_fss_get_sz);
+ _(FSSIOCCLR, NONE, 0);
+ _(FSSIOFSET, READ, sizeof(int));
+ _(FSSIOFGET, WRITE, sizeof(int));
+ /* Entries from file: dev/bluetooth/btdev.h */
+ _(BTDEV_ATTACH, READ, struct_plistref_sz);
+ _(BTDEV_DETACH, READ, struct_plistref_sz);
+ /* Entries from file: dev/bluetooth/btsco.h */
+ _(BTSCO_GETINFO, WRITE, struct_btsco_info_sz);
+ /* Entries from file: dev/kttcpio.h */
+ _(KTTCP_IO_SEND, READWRITE, struct_kttcp_io_args_sz);
+ _(KTTCP_IO_RECV, READWRITE, struct_kttcp_io_args_sz);
+ /* Entries from file: dev/lockstat.h */
+ _(IOC_LOCKSTAT_GVERSION, WRITE, sizeof(int));
+ _(IOC_LOCKSTAT_ENABLE, READ, struct_lsenable_sz);
+ _(IOC_LOCKSTAT_DISABLE, WRITE, struct_lsdisable_sz);
+ /* Entries from file: dev/vndvar.h */
+ _(VNDIOCSET, READWRITE, struct_vnd_ioctl_sz);
+ _(VNDIOCCLR, READ, struct_vnd_ioctl_sz);
+ _(VNDIOCGET, READWRITE, struct_vnd_user_sz);
+ /* Entries from file: dev/spkrio.h */
+ _(SPKRTONE, READ, struct_tone_sz);
+ _(SPKRTUNE, NONE, 0);
+ _(SPKRGETVOL, WRITE, sizeof(unsigned int));
+ _(SPKRSETVOL, READ, sizeof(unsigned int));
+#if defined(__x86_64__)
+ /* Entries from file: dev/nvmm/nvmm_ioctl.h */
+ _(NVMM_IOC_CAPABILITY, WRITE, struct_nvmm_ioc_capability_sz);
+ _(NVMM_IOC_MACHINE_CREATE, READWRITE, struct_nvmm_ioc_machine_create_sz);
+ _(NVMM_IOC_MACHINE_DESTROY, READ, struct_nvmm_ioc_machine_destroy_sz);
+ _(NVMM_IOC_MACHINE_CONFIGURE, READ, struct_nvmm_ioc_machine_configure_sz);
+ _(NVMM_IOC_VCPU_CREATE, READ, struct_nvmm_ioc_vcpu_create_sz);
+ _(NVMM_IOC_VCPU_DESTROY, READ, struct_nvmm_ioc_vcpu_destroy_sz);
+ _(NVMM_IOC_VCPU_SETSTATE, READ, struct_nvmm_ioc_vcpu_setstate_sz);
+ _(NVMM_IOC_VCPU_GETSTATE, READ, struct_nvmm_ioc_vcpu_getstate_sz);
+ _(NVMM_IOC_VCPU_INJECT, READ, struct_nvmm_ioc_vcpu_inject_sz);
+ _(NVMM_IOC_VCPU_RUN, READWRITE, struct_nvmm_ioc_vcpu_run_sz);
+ _(NVMM_IOC_GPA_MAP, READ, struct_nvmm_ioc_gpa_map_sz);
+ _(NVMM_IOC_GPA_UNMAP, READ, struct_nvmm_ioc_gpa_unmap_sz);
+ _(NVMM_IOC_HVA_MAP, READ, struct_nvmm_ioc_hva_map_sz);
+ _(NVMM_IOC_HVA_UNMAP, READ, struct_nvmm_ioc_hva_unmap_sz);
+ _(NVMM_IOC_CTL, READ, struct_nvmm_ioc_ctl_sz);
+#endif
+ /* Entries from file: dev/spi/spi_io.h */
+ _(SPI_IOCTL_CONFIGURE, READ, struct_spi_ioctl_configure_sz);
+ _(SPI_IOCTL_TRANSFER, READ, struct_spi_ioctl_transfer_sz);
+ /* Entries from file: fs/autofs/autofs_ioctl.h */
+ _(AUTOFSREQUEST, WRITE, struct_autofs_daemon_request_sz);
+ _(AUTOFSDONE, READ, struct_autofs_daemon_done_sz);
+ /* Entries from file: net/bpf.h */
+ _(BIOCGBLEN, WRITE, sizeof(unsigned int));
+ _(BIOCSBLEN, READWRITE, sizeof(unsigned int));
+ _(BIOCSETF, READ, struct_bpf_program_sz);
+ _(BIOCFLUSH, NONE, 0);
+ _(BIOCPROMISC, NONE, 0);
+ _(BIOCGDLT, WRITE, sizeof(unsigned int));
+ _(BIOCGETIF, WRITE, struct_ifreq_sz);
+ _(BIOCSETIF, READ, struct_ifreq_sz);
+ _(BIOCGSTATS, WRITE, struct_bpf_stat_sz);
+ _(BIOCGSTATSOLD, WRITE, struct_bpf_stat_old_sz);
+ _(BIOCIMMEDIATE, READ, sizeof(unsigned int));
+ _(BIOCVERSION, WRITE, struct_bpf_version_sz);
+ _(BIOCSTCPF, READ, struct_bpf_program_sz);
+ _(BIOCSUDPF, READ, struct_bpf_program_sz);
+ _(BIOCGHDRCMPLT, WRITE, sizeof(unsigned int));
+ _(BIOCSHDRCMPLT, READ, sizeof(unsigned int));
+ _(BIOCSDLT, READ, sizeof(unsigned int));
+ _(BIOCGDLTLIST, READWRITE, struct_bpf_dltlist_sz);
+ _(BIOCGDIRECTION, WRITE, sizeof(unsigned int));
+ _(BIOCSDIRECTION, READ, sizeof(unsigned int));
+ _(BIOCSRTIMEOUT, READ, struct_timeval_sz);
+ _(BIOCGRTIMEOUT, WRITE, struct_timeval_sz);
+ _(BIOCGFEEDBACK, WRITE, sizeof(unsigned int));
+ _(BIOCSFEEDBACK, READ, sizeof(unsigned int));
+ /* Entries from file: net/if_gre.h */
+ _(GRESADDRS, READ, struct_ifreq_sz);
+ _(GRESADDRD, READ, struct_ifreq_sz);
+ _(GREGADDRS, READWRITE, struct_ifreq_sz);
+ _(GREGADDRD, READWRITE, struct_ifreq_sz);
+ _(GRESPROTO, READ, struct_ifreq_sz);
+ _(GREGPROTO, READWRITE, struct_ifreq_sz);
+ _(GRESSOCK, READ, struct_ifreq_sz);
+ _(GREDSOCK, READ, struct_ifreq_sz);
+ /* Entries from file: net/if_ppp.h */
+ _(PPPIOCGRAWIN, WRITE, struct_ppp_rawin_sz);
+ _(PPPIOCGFLAGS, WRITE, sizeof(int));
+ _(PPPIOCSFLAGS, READ, sizeof(int));
+ _(PPPIOCGASYNCMAP, WRITE, sizeof(int));
+ _(PPPIOCSASYNCMAP, READ, sizeof(int));
+ _(PPPIOCGUNIT, WRITE, sizeof(int));
+ _(PPPIOCGRASYNCMAP, WRITE, sizeof(int));
+ _(PPPIOCSRASYNCMAP, READ, sizeof(int));
+ _(PPPIOCGMRU, WRITE, sizeof(int));
+ _(PPPIOCSMRU, READ, sizeof(int));
+ _(PPPIOCSMAXCID, READ, sizeof(int));
+ _(PPPIOCGXASYNCMAP, WRITE, (8 * sizeof(u32)));
+ _(PPPIOCSXASYNCMAP, READ, (8 * sizeof(u32)));
+ _(PPPIOCXFERUNIT, NONE, 0);
+ _(PPPIOCSCOMPRESS, READ, struct_ppp_option_data_sz);
+ _(PPPIOCGNPMODE, READWRITE, struct_npioctl_sz);
+ _(PPPIOCSNPMODE, READ, struct_npioctl_sz);
+ _(PPPIOCGIDLE, WRITE, struct_ppp_idle_sz);
+ _(PPPIOCGMTU, WRITE, sizeof(int));
+ _(PPPIOCSMTU, READ, sizeof(int));
+ _(SIOCGPPPSTATS, READWRITE, struct_ifpppstatsreq_sz);
+ _(SIOCGPPPCSTATS, READWRITE, struct_ifpppcstatsreq_sz);
+ /* Entries from file: net/npf.h */
+ _(IOC_NPF_VERSION, WRITE, sizeof(int));
+ _(IOC_NPF_SWITCH, READ, sizeof(int));
+ _(IOC_NPF_LOAD, READWRITE, struct_nvlist_ref_sz);
+ _(IOC_NPF_TABLE, READ, struct_npf_ioctl_table_sz);
+ _(IOC_NPF_STATS, READ, sizeof(uptr));
+ _(IOC_NPF_SAVE, WRITE, struct_nvlist_ref_sz);
+ _(IOC_NPF_RULE, READWRITE, struct_nvlist_ref_sz);
+ _(IOC_NPF_CONN_LOOKUP, READWRITE, struct_nvlist_ref_sz);
+ /* Entries from file: net/if_pppoe.h */
+ _(PPPOESETPARMS, READ, struct_pppoediscparms_sz);
+ _(PPPOEGETPARMS, READWRITE, struct_pppoediscparms_sz);
+ _(PPPOEGETSESSION, READWRITE, struct_pppoeconnectionstate_sz);
+ /* Entries from file: net/if_sppp.h */
+ _(SPPPGETAUTHCFG, READWRITE, struct_spppauthcfg_sz);
+ _(SPPPSETAUTHCFG, READ, struct_spppauthcfg_sz);
+ _(SPPPGETLCPCFG, READWRITE, struct_sppplcpcfg_sz);
+ _(SPPPSETLCPCFG, READ, struct_sppplcpcfg_sz);
+ _(SPPPGETSTATUS, READWRITE, struct_spppstatus_sz);
+ _(SPPPGETSTATUSNCP, READWRITE, struct_spppstatusncp_sz);
+ _(SPPPGETIDLETO, READWRITE, struct_spppidletimeout_sz);
+ _(SPPPSETIDLETO, READ, struct_spppidletimeout_sz);
+ _(SPPPGETAUTHFAILURES, READWRITE, struct_spppauthfailurestats_sz);
+ _(SPPPSETAUTHFAILURE, READ, struct_spppauthfailuresettings_sz);
+ _(SPPPSETDNSOPTS, READ, struct_spppdnssettings_sz);
+ _(SPPPGETDNSOPTS, READWRITE, struct_spppdnssettings_sz);
+ _(SPPPGETDNSADDRS, READWRITE, struct_spppdnsaddrs_sz);
+ _(SPPPSETKEEPALIVE, READ, struct_spppkeepalivesettings_sz);
+ _(SPPPGETKEEPALIVE, READWRITE, struct_spppkeepalivesettings_sz);
+ /* Entries from file: net/if_srt.h */
+ _(SRT_GETNRT, WRITE, sizeof(unsigned int));
+ _(SRT_GETRT, READWRITE, struct_srt_rt_sz);
+ _(SRT_SETRT, READ, struct_srt_rt_sz);
+ _(SRT_DELRT, READ, sizeof(unsigned int));
+ _(SRT_SFLAGS, READ, sizeof(unsigned int));
+ _(SRT_GFLAGS, WRITE, sizeof(unsigned int));
+ _(SRT_SGFLAGS, READWRITE, sizeof(unsigned int));
+ _(SRT_DEBUG, READ, sizeof(uptr));
+ /* Entries from file: net/if_tap.h */
+ _(TAPGIFNAME, WRITE, struct_ifreq_sz);
+ /* Entries from file: net/if_tun.h */
+ _(TUNSDEBUG, READ, sizeof(int));
+ _(TUNGDEBUG, WRITE, sizeof(int));
+ _(TUNSIFMODE, READ, sizeof(int));
+ _(TUNSIFHEAD, READ, sizeof(int));
+ _(TUNGIFHEAD, WRITE, sizeof(int));
+ /* Entries from file: net/pfvar.h */
+ _(DIOCSTART, NONE, 0);
+ _(DIOCSTOP, NONE, 0);
+ _(DIOCADDRULE, READWRITE, struct_pfioc_rule_sz);
+ _(DIOCGETRULES, READWRITE, struct_pfioc_rule_sz);
+ _(DIOCGETRULE, READWRITE, struct_pfioc_rule_sz);
+ _(DIOCSETLCK, READWRITE, sizeof(u32));
+ _(DIOCCLRSTATES, READWRITE, struct_pfioc_state_kill_sz);
+ _(DIOCGETSTATE, READWRITE, struct_pfioc_state_sz);
+ _(DIOCSETSTATUSIF, READWRITE, struct_pfioc_if_sz);
+ _(DIOCGETSTATUS, READWRITE, struct_pf_status_sz);
+ _(DIOCCLRSTATUS, NONE, 0);
+ _(DIOCNATLOOK, READWRITE, struct_pfioc_natlook_sz);
+ _(DIOCSETDEBUG, READWRITE, sizeof(u32));
+ _(DIOCGETSTATES, READWRITE, struct_pfioc_states_sz);
+ _(DIOCCHANGERULE, READWRITE, struct_pfioc_rule_sz);
+ _(DIOCSETTIMEOUT, READWRITE, struct_pfioc_tm_sz);
+ _(DIOCGETTIMEOUT, READWRITE, struct_pfioc_tm_sz);
+ _(DIOCADDSTATE, READWRITE, struct_pfioc_state_sz);
+ _(DIOCCLRRULECTRS, NONE, 0);
+ _(DIOCGETLIMIT, READWRITE, struct_pfioc_limit_sz);
+ _(DIOCSETLIMIT, READWRITE, struct_pfioc_limit_sz);
+ _(DIOCKILLSTATES, READWRITE, struct_pfioc_state_kill_sz);
+ _(DIOCSTARTALTQ, NONE, 0);
+ _(DIOCSTOPALTQ, NONE, 0);
+ _(DIOCADDALTQ, READWRITE, struct_pfioc_altq_sz);
+ _(DIOCGETALTQS, READWRITE, struct_pfioc_altq_sz);
+ _(DIOCGETALTQ, READWRITE, struct_pfioc_altq_sz);
+ _(DIOCCHANGEALTQ, READWRITE, struct_pfioc_altq_sz);
+ _(DIOCGETQSTATS, READWRITE, struct_pfioc_qstats_sz);
+ _(DIOCBEGINADDRS, READWRITE, struct_pfioc_pooladdr_sz);
+ _(DIOCADDADDR, READWRITE, struct_pfioc_pooladdr_sz);
+ _(DIOCGETADDRS, READWRITE, struct_pfioc_pooladdr_sz);
+ _(DIOCGETADDR, READWRITE, struct_pfioc_pooladdr_sz);
+ _(DIOCCHANGEADDR, READWRITE, struct_pfioc_pooladdr_sz);
+ _(DIOCADDSTATES, READWRITE, struct_pfioc_states_sz);
+ _(DIOCGETRULESETS, READWRITE, struct_pfioc_ruleset_sz);
+ _(DIOCGETRULESET, READWRITE, struct_pfioc_ruleset_sz);
+ _(DIOCRCLRTABLES, READWRITE, struct_pfioc_table_sz);
+ _(DIOCRADDTABLES, READWRITE, struct_pfioc_table_sz);
+ _(DIOCRDELTABLES, READWRITE, struct_pfioc_table_sz);
+ _(DIOCRGETTABLES, READWRITE, struct_pfioc_table_sz);
+ _(DIOCRGETTSTATS, READWRITE, struct_pfioc_table_sz);
+ _(DIOCRCLRTSTATS, READWRITE, struct_pfioc_table_sz);
+ _(DIOCRCLRADDRS, READWRITE, struct_pfioc_table_sz);
+ _(DIOCRADDADDRS, READWRITE, struct_pfioc_table_sz);
+ _(DIOCRDELADDRS, READWRITE, struct_pfioc_table_sz);
+ _(DIOCRSETADDRS, READWRITE, struct_pfioc_table_sz);
+ _(DIOCRGETADDRS, READWRITE, struct_pfioc_table_sz);
+ _(DIOCRGETASTATS, READWRITE, struct_pfioc_table_sz);
+ _(DIOCRCLRASTATS, READWRITE, struct_pfioc_table_sz);
+ _(DIOCRTSTADDRS, READWRITE, struct_pfioc_table_sz);
+ _(DIOCRSETTFLAGS, READWRITE, struct_pfioc_table_sz);
+ _(DIOCRINADEFINE, READWRITE, struct_pfioc_table_sz);
+ _(DIOCOSFPFLUSH, NONE, 0);
+ _(DIOCOSFPADD, READWRITE, struct_pf_osfp_ioctl_sz);
+ _(DIOCOSFPGET, READWRITE, struct_pf_osfp_ioctl_sz);
+ _(DIOCXBEGIN, READWRITE, struct_pfioc_trans_sz);
+ _(DIOCXCOMMIT, READWRITE, struct_pfioc_trans_sz);
+ _(DIOCXROLLBACK, READWRITE, struct_pfioc_trans_sz);
+ _(DIOCGETSRCNODES, READWRITE, struct_pfioc_src_nodes_sz);
+ _(DIOCCLRSRCNODES, NONE, 0);
+ _(DIOCSETHOSTID, READWRITE, sizeof(u32));
+ _(DIOCIGETIFACES, READWRITE, struct_pfioc_iface_sz);
+ _(DIOCSETIFFLAG, READWRITE, struct_pfioc_iface_sz);
+ _(DIOCCLRIFFLAG, READWRITE, struct_pfioc_iface_sz);
+ _(DIOCKILLSRCNODES, READWRITE, struct_pfioc_src_node_kill_sz);
+ /* Entries from file: netbt/hci.h */
+ _(SIOCGBTINFO, READWRITE, struct_btreq_sz);
+ _(SIOCGBTINFOA, READWRITE, struct_btreq_sz);
+ _(SIOCNBTINFO, READWRITE, struct_btreq_sz);
+ _(SIOCSBTFLAGS, READWRITE, struct_btreq_sz);
+ _(SIOCSBTPOLICY, READWRITE, struct_btreq_sz);
+ _(SIOCSBTPTYPE, READWRITE, struct_btreq_sz);
+ _(SIOCGBTSTATS, READWRITE, struct_btreq_sz);
+ _(SIOCZBTSTATS, READWRITE, struct_btreq_sz);
+ _(SIOCBTDUMP, READ, struct_btreq_sz);
+ _(SIOCSBTSCOMTU, READWRITE, struct_btreq_sz);
+ _(SIOCGBTFEAT, READWRITE, struct_btreq_sz);
+ /* Entries from file: netinet/ip_nat.h */
+ _(SIOCADNAT, READ, struct_ipfobj_sz);
+ _(SIOCRMNAT, READ, struct_ipfobj_sz);
+ _(SIOCGNATS, READWRITE, struct_ipfobj_sz);
+ _(SIOCGNATL, READWRITE, struct_ipfobj_sz);
+ _(SIOCPURGENAT, READWRITE, struct_ipfobj_sz);
+ /* Entries from file: netinet/sctp_uio.h */
+ _(SIOCCONNECTX, READWRITE, struct_sctp_connectx_addrs_sz);
+ _(SIOCCONNECTXDEL, READWRITE, struct_sctp_connectx_addrs_sz);
+ /* Entries from file: netinet6/in6_var.h */
+ _(SIOCSIFINFO_FLAGS, READWRITE, struct_in6_ndireq_sz);
+ _(SIOCAADDRCTL_POLICY, READ, struct_in6_addrpolicy_sz);
+ _(SIOCDADDRCTL_POLICY, READ, struct_in6_addrpolicy_sz);
+ /* Entries from file: netsmb/smb_dev.h */
+ _(SMBIOC_OPENSESSION, READ, struct_smbioc_ossn_sz);
+ _(SMBIOC_OPENSHARE, READ, struct_smbioc_oshare_sz);
+ _(SMBIOC_REQUEST, READWRITE, struct_smbioc_rq_sz);
+ _(SMBIOC_SETFLAGS, READ, struct_smbioc_flags_sz);
+ _(SMBIOC_LOOKUP, READ, struct_smbioc_lookup_sz);
+ _(SMBIOC_READ, READWRITE, struct_smbioc_rw_sz);
+ _(SMBIOC_WRITE, READWRITE, struct_smbioc_rw_sz);
+ /* Entries from file: sys/agpio.h */
+ _(AGPIOC_INFO, WRITE, struct__agp_info_sz);
+ _(AGPIOC_ACQUIRE, NONE, 0);
+ _(AGPIOC_RELEASE, NONE, 0);
+ _(AGPIOC_SETUP, READ, struct__agp_setup_sz);
+ _(AGPIOC_ALLOCATE, READWRITE, struct__agp_allocate_sz);
+ _(AGPIOC_DEALLOCATE, READ, sizeof(int));
+ _(AGPIOC_BIND, READ, struct__agp_bind_sz);
+ _(AGPIOC_UNBIND, READ, struct__agp_unbind_sz);
+ /* Entries from file: sys/audioio.h */
+ _(AUDIO_GETINFO, WRITE, struct_audio_info_sz);
+ _(AUDIO_SETINFO, READWRITE, struct_audio_info_sz);
+ _(AUDIO_DRAIN, NONE, 0);
+ _(AUDIO_FLUSH, NONE, 0);
+ _(AUDIO_WSEEK, WRITE, sizeof(unsigned long));
+ _(AUDIO_RERROR, WRITE, sizeof(int));
+ _(AUDIO_GETDEV, WRITE, struct_audio_device_sz);
+ _(AUDIO_GETENC, READWRITE, struct_audio_encoding_sz);
+ _(AUDIO_GETFD, WRITE, sizeof(int));
+ _(AUDIO_SETFD, READWRITE, sizeof(int));
+ _(AUDIO_PERROR, WRITE, sizeof(int));
+ _(AUDIO_GETIOFFS, WRITE, struct_audio_offset_sz);
+ _(AUDIO_GETOOFFS, WRITE, struct_audio_offset_sz);
+ _(AUDIO_GETPROPS, WRITE, sizeof(int));
+ _(AUDIO_GETBUFINFO, WRITE, struct_audio_info_sz);
+ _(AUDIO_SETCHAN, READ, sizeof(int));
+ _(AUDIO_GETCHAN, WRITE, sizeof(int));
+ _(AUDIO_QUERYFORMAT, READWRITE, struct_audio_format_query_sz);
+ _(AUDIO_GETFORMAT, WRITE, struct_audio_info_sz);
+ _(AUDIO_SETFORMAT, READ, struct_audio_info_sz);
+ _(AUDIO_MIXER_READ, READWRITE, struct_mixer_ctrl_sz);
+ _(AUDIO_MIXER_WRITE, READWRITE, struct_mixer_ctrl_sz);
+ _(AUDIO_MIXER_DEVINFO, READWRITE, struct_mixer_devinfo_sz);
+ /* Entries from file: sys/ataio.h */
+ _(ATAIOCCOMMAND, READWRITE, struct_atareq_sz);
+ _(ATABUSIOSCAN, READ, struct_atabusioscan_args_sz);
+ _(ATABUSIORESET, NONE, 0);
+ _(ATABUSIODETACH, READ, struct_atabusiodetach_args_sz);
+ /* Entries from file: sys/cdio.h */
+ _(CDIOCPLAYTRACKS, READ, struct_ioc_play_track_sz);
+ _(CDIOCPLAYBLOCKS, READ, struct_ioc_play_blocks_sz);
+ _(CDIOCREADSUBCHANNEL, READWRITE, struct_ioc_read_subchannel_sz);
+ _(CDIOREADTOCHEADER, WRITE, struct_ioc_toc_header_sz);
+ _(CDIOREADTOCENTRIES, READWRITE, struct_ioc_read_toc_entry_sz);
+ _(CDIOREADMSADDR, READWRITE, sizeof(int));
+ _(CDIOCSETPATCH, READ, struct_ioc_patch_sz);
+ _(CDIOCGETVOL, WRITE, struct_ioc_vol_sz);
+ _(CDIOCSETVOL, READ, struct_ioc_vol_sz);
+ _(CDIOCSETMONO, NONE, 0);
+ _(CDIOCSETSTEREO, NONE, 0);
+ _(CDIOCSETMUTE, NONE, 0);
+ _(CDIOCSETLEFT, NONE, 0);
+ _(CDIOCSETRIGHT, NONE, 0);
+ _(CDIOCSETDEBUG, NONE, 0);
+ _(CDIOCCLRDEBUG, NONE, 0);
+ _(CDIOCPAUSE, NONE, 0);
+ _(CDIOCRESUME, NONE, 0);
+ _(CDIOCRESET, NONE, 0);
+ _(CDIOCSTART, NONE, 0);
+ _(CDIOCSTOP, NONE, 0);
+ _(CDIOCEJECT, NONE, 0);
+ _(CDIOCALLOW, NONE, 0);
+ _(CDIOCPREVENT, NONE, 0);
+ _(CDIOCCLOSE, NONE, 0);
+ _(CDIOCPLAYMSF, READ, struct_ioc_play_msf_sz);
+ _(CDIOCLOADUNLOAD, READ, struct_ioc_load_unload_sz);
+ /* Entries from file: sys/chio.h */
+ _(CHIOMOVE, READ, struct_changer_move_request_sz);
+ _(CHIOEXCHANGE, READ, struct_changer_exchange_request_sz);
+ _(CHIOPOSITION, READ, struct_changer_position_request_sz);
+ _(CHIOSPICKER, READ, sizeof(int));
+ _(CHIOGPARAMS, WRITE, struct_changer_params_sz);
+ _(CHIOIELEM, NONE, 0);
+ _(OCHIOGSTATUS, READ, struct_ochanger_element_status_request_sz);
+ _(CHIOGSTATUS, READ, struct_changer_element_status_request_sz);
+ _(CHIOSVOLTAG, READ, struct_changer_set_voltag_request_sz);
+ /* Entries from file: sys/clockctl.h */
+ _(CLOCKCTL_SETTIMEOFDAY, READ, struct_clockctl_settimeofday_sz);
+ _(CLOCKCTL_ADJTIME, READWRITE, struct_clockctl_adjtime_sz);
+ _(CLOCKCTL_CLOCK_SETTIME, READ, struct_clockctl_clock_settime_sz);
+ _(CLOCKCTL_NTP_ADJTIME, READWRITE, struct_clockctl_ntp_adjtime_sz);
+ /* Entries from file: sys/cpuio.h */
+ _(IOC_CPU_SETSTATE, READ, struct_cpustate_sz);
+ _(IOC_CPU_GETSTATE, READWRITE, struct_cpustate_sz);
+ _(IOC_CPU_GETCOUNT, WRITE, sizeof(int));
+ _(IOC_CPU_MAPID, READWRITE, sizeof(int));
+ _(IOC_CPU_UCODE_GET_VERSION, READWRITE, struct_cpu_ucode_version_sz);
+ _(IOC_CPU_UCODE_APPLY, READ, struct_cpu_ucode_sz);
+ /* Entries from file: sys/dkio.h */
+ _(DIOCGDINFO, WRITE, struct_disklabel_sz);
+ _(DIOCSDINFO, READ, struct_disklabel_sz);
+ _(DIOCWDINFO, READ, 0);
+ _(DIOCRFORMAT, READWRITE, struct_format_op_sz);
+ _(DIOCWFORMAT, READWRITE, struct_format_op_sz);
+ _(DIOCSSTEP, READ, sizeof(int));
+ _(DIOCSRETRIES, READ, sizeof(int));
+ _(DIOCKLABEL, READ, sizeof(int));
+ _(DIOCWLABEL, READ, sizeof(int));
+ _(DIOCSBAD, READ, struct_dkbad_sz);
+ _(DIOCEJECT, READ, sizeof(int));
+ _(ODIOCEJECT, NONE, 0);
+ _(DIOCLOCK, READ, sizeof(int));
+ _(DIOCGDEFLABEL, WRITE, struct_disklabel_sz);
+ _(DIOCCLRLABEL, NONE, 0);
+ _(DIOCGCACHE, WRITE, sizeof(int));
+ _(DIOCSCACHE, READ, sizeof(int));
+ _(DIOCCACHESYNC, READ, sizeof(int));
+ _(DIOCBSLIST, READWRITE, struct_disk_badsecinfo_sz);
+ _(DIOCBSFLUSH, NONE, 0);
+ _(DIOCAWEDGE, READWRITE, struct_dkwedge_info_sz);
+ _(DIOCGWEDGEINFO, WRITE, struct_dkwedge_info_sz);
+ _(DIOCDWEDGE, READ, struct_dkwedge_info_sz);
+ _(DIOCLWEDGES, READWRITE, struct_dkwedge_list_sz);
+ _(DIOCGSTRATEGY, WRITE, struct_disk_strategy_sz);
+ _(DIOCSSTRATEGY, READ, struct_disk_strategy_sz);
+ _(DIOCGDISKINFO, WRITE, struct_plistref_sz);
+ _(DIOCTUR, WRITE, sizeof(int));
+ _(DIOCMWEDGES, WRITE, sizeof(int));
+ _(DIOCGSECTORSIZE, WRITE, sizeof(unsigned int));
+ _(DIOCGMEDIASIZE, WRITE, sizeof(uptr));
+ _(DIOCRMWEDGES, WRITE, sizeof(int));
+ /* Entries from file: sys/drvctlio.h */
+ _(DRVDETACHDEV, READ, struct_devdetachargs_sz);
+ _(DRVRESCANBUS, READ, struct_devrescanargs_sz);
+ _(DRVCTLCOMMAND, READWRITE, struct_plistref_sz);
+ _(DRVRESUMEDEV, READ, struct_devpmargs_sz);
+ _(DRVLISTDEV, READWRITE, struct_devlistargs_sz);
+ _(DRVGETEVENT, WRITE, struct_plistref_sz);
+ _(DRVSUSPENDDEV, READ, struct_devpmargs_sz);
+ /* Entries from file: sys/dvdio.h */
+ _(DVD_READ_STRUCT, READWRITE, union_dvd_struct_sz);
+ _(DVD_WRITE_STRUCT, READWRITE, union_dvd_struct_sz);
+ _(DVD_AUTH, READWRITE, union_dvd_authinfo_sz);
+ /* Entries from file: sys/envsys.h */
+ _(ENVSYS_GETDICTIONARY, READWRITE, struct_plistref_sz);
+ _(ENVSYS_SETDICTIONARY, READWRITE, struct_plistref_sz);
+ _(ENVSYS_REMOVEPROPS, READWRITE, struct_plistref_sz);
+ _(ENVSYS_GTREDATA, READWRITE, struct_envsys_tre_data_sz);
+ _(ENVSYS_GTREINFO, READWRITE, struct_envsys_basic_info_sz);
+ /* Entries from file: sys/event.h */
+ _(KFILTER_BYFILTER, READWRITE, struct_kfilter_mapping_sz);
+ _(KFILTER_BYNAME, READWRITE, struct_kfilter_mapping_sz);
+ /* Entries from file: sys/fdio.h */
+ _(FDIOCGETOPTS, WRITE, 0);
+ _(FDIOCSETOPTS, READ, sizeof(int));
+ _(FDIOCSETFORMAT, READ, struct_fdformat_parms_sz);
+ _(FDIOCGETFORMAT, WRITE, struct_fdformat_parms_sz);
+ _(FDIOCFORMAT_TRACK, READ, struct_fdformat_cmd_sz);
+ /* Entries from file: sys/filio.h */
+ _(FIOCLEX, NONE, 0);
+ _(FIONCLEX, NONE, 0);
+ _(FIOSEEKDATA, READWRITE, sizeof(uptr));
+ _(FIOSEEKHOLE, READWRITE, sizeof(uptr));
+ _(FIONREAD, WRITE, sizeof(int));
+ _(FIONBIO, READ, sizeof(int));
+ _(FIOASYNC, READ, sizeof(int));
+ _(FIOSETOWN, READ, sizeof(int));
+ _(FIOGETOWN, WRITE, sizeof(int));
+ _(OFIOGETBMAP, READWRITE, sizeof(u32));
+ _(FIOGETBMAP, READWRITE, sizeof(u64));
+ _(FIONWRITE, WRITE, sizeof(int));
+ _(FIONSPACE, WRITE, sizeof(int));
+ /* Entries from file: sys/gpio.h */
+ _(GPIOINFO, WRITE, struct_gpio_info_sz);
+ _(GPIOSET, READWRITE, struct_gpio_set_sz);
+ _(GPIOUNSET, READWRITE, struct_gpio_set_sz);
+ _(GPIOREAD, READWRITE, struct_gpio_req_sz);
+ _(GPIOWRITE, READWRITE, struct_gpio_req_sz);
+ _(GPIOTOGGLE, READWRITE, struct_gpio_req_sz);
+ _(GPIOATTACH, READWRITE, struct_gpio_attach_sz);
+ /* Entries from file: sys/ioctl.h */
+ _(PTIOCNETBSD, READ, struct_ioctl_pt_sz);
+ _(PTIOCSUNOS, READ, struct_ioctl_pt_sz);
+ _(PTIOCLINUX, READ, struct_ioctl_pt_sz);
+ _(PTIOCFREEBSD, READ, struct_ioctl_pt_sz);
+ _(PTIOCULTRIX, READ, struct_ioctl_pt_sz);
+ /* Entries from file: sys/ioctl_compat.h */
+ _(TIOCHPCL, NONE, 0);
+ _(TIOCGETP, WRITE, struct_sgttyb_sz);
+ _(TIOCSETP, READ, struct_sgttyb_sz);
+ _(TIOCSETN, READ, 0);
+ _(TIOCSETC, READ, struct_tchars_sz);
+ _(TIOCGETC, WRITE, struct_tchars_sz);
+ _(TIOCLBIS, READ, sizeof(int));
+ _(TIOCLBIC, READ, sizeof(int));
+ _(TIOCLSET, READ, sizeof(int));
+ _(TIOCLGET, WRITE, sizeof(int));
+ _(TIOCSLTC, READ, struct_ltchars_sz);
+ _(TIOCGLTC, WRITE, struct_ltchars_sz);
+ _(OTIOCCONS, NONE, 0);
+ /* Entries from file: sys/joystick.h */
+ _(JOY_SETTIMEOUT, READ, sizeof(int));
+ _(JOY_GETTIMEOUT, WRITE, sizeof(int));
+ _(JOY_SET_X_OFFSET, READ, sizeof(int));
+ _(JOY_SET_Y_OFFSET, READ, sizeof(int));
+ _(JOY_GET_Y_OFFSET, WRITE, sizeof(int));
+ /* Entries from file: sys/ksyms.h */
+ _(OKIOCGSYMBOL, READ, struct_ksyms_ogsymbol_sz);
+ _(OKIOCGVALUE, READ, struct_ksyms_ogsymbol_sz);
+ _(KIOCGSIZE, WRITE, sizeof(int));
+ _(KIOCGVALUE, READWRITE, struct_ksyms_gvalue_sz);
+ _(KIOCGSYMBOL, READWRITE, struct_ksyms_gsymbol_sz);
+ /* Entries from file: sys/lua.h */
+ _(LUAINFO, READWRITE, struct_lua_info_sz);
+ _(LUACREATE, READWRITE, struct_lua_create_sz);
+ _(LUADESTROY, READWRITE, struct_lua_create_sz);
+ _(LUAREQUIRE, READWRITE, struct_lua_require_sz);
+ _(LUALOAD, READWRITE, struct_lua_load_sz);
+ /* Entries from file: sys/midiio.h */
+ _(MIDI_PRETIME, READWRITE, sizeof(int));
+ _(MIDI_MPUMODE, READWRITE, sizeof(int));
+ _(MIDI_MPUCMD, READWRITE, struct_mpu_command_rec_sz);
+ _(SEQUENCER_RESET, NONE, 0);
+ _(SEQUENCER_SYNC, NONE, 0);
+ _(SEQUENCER_INFO, READWRITE, struct_synth_info_sz);
+ _(SEQUENCER_CTRLRATE, READWRITE, sizeof(int));
+ _(SEQUENCER_GETOUTCOUNT, WRITE, sizeof(int));
+ _(SEQUENCER_GETINCOUNT, WRITE, sizeof(int));
+ _(SEQUENCER_RESETSAMPLES, READ, sizeof(int));
+ _(SEQUENCER_NRSYNTHS, WRITE, sizeof(int));
+ _(SEQUENCER_NRMIDIS, WRITE, sizeof(int));
+ _(SEQUENCER_THRESHOLD, READ, sizeof(int));
+ _(SEQUENCER_MEMAVL, READWRITE, sizeof(int));
+ _(SEQUENCER_PANIC, NONE, 0);
+ _(SEQUENCER_OUTOFBAND, READ, struct_seq_event_rec_sz);
+ _(SEQUENCER_GETTIME, WRITE, sizeof(int));
+ _(SEQUENCER_TMR_TIMEBASE, READWRITE, sizeof(int));
+ _(SEQUENCER_TMR_START, NONE, 0);
+ _(SEQUENCER_TMR_STOP, NONE, 0);
+ _(SEQUENCER_TMR_CONTINUE, NONE, 0);
+ _(SEQUENCER_TMR_TEMPO, READWRITE, sizeof(int));
+ _(SEQUENCER_TMR_SOURCE, READWRITE, sizeof(int));
+ _(SEQUENCER_TMR_METRONOME, READ, sizeof(int));
+ _(SEQUENCER_TMR_SELECT, READ, sizeof(int));
+ /* Entries from file: sys/mtio.h */
+ _(MTIOCTOP, READ, struct_mtop_sz);
+ _(MTIOCGET, WRITE, struct_mtget_sz);
+ _(MTIOCIEOT, NONE, 0);
+ _(MTIOCEEOT, NONE, 0);
+ _(MTIOCRDSPOS, WRITE, sizeof(u32));
+ _(MTIOCRDHPOS, WRITE, sizeof(u32));
+ _(MTIOCSLOCATE, READ, sizeof(u32));
+ _(MTIOCHLOCATE, READ, sizeof(u32));
+ /* Entries from file: sys/power.h */
+ _(POWER_EVENT_RECVDICT, READWRITE, struct_plistref_sz);
+ _(POWER_IOC_GET_TYPE, WRITE, struct_power_type_sz);
+ /* Entries from file: sys/radioio.h */
+ _(RIOCGINFO, WRITE, struct_radio_info_sz);
+ _(RIOCSINFO, READWRITE, struct_radio_info_sz);
+ _(RIOCSSRCH, READ, sizeof(int));
+ /* Entries from file: sys/rndio.h */
+ _(RNDGETENTCNT, WRITE, sizeof(u32));
+ _(RNDGETSRCNUM, READWRITE, struct_rndstat_sz);
+ _(RNDGETSRCNAME, READWRITE, struct_rndstat_name_sz);
+ _(RNDCTL, READ, struct_rndctl_sz);
+ _(RNDADDDATA, READ, struct_rnddata_sz);
+ _(RNDGETPOOLSTAT, WRITE, struct_rndpoolstat_sz);
+ _(RNDGETESTNUM, READWRITE, struct_rndstat_est_sz);
+ _(RNDGETESTNAME, READWRITE, struct_rndstat_est_name_sz);
+ /* Entries from file: sys/scanio.h */
+ _(SCIOCGET, WRITE, struct_scan_io_sz);
+ _(SCIOCSET, READ, struct_scan_io_sz);
+ _(SCIOCRESTART, NONE, 0);
+ /* Entries from file: sys/scsiio.h */
+ _(SCIOCCOMMAND, READWRITE, struct_scsireq_sz);
+ _(SCIOCDEBUG, READ, sizeof(int));
+ _(SCIOCIDENTIFY, WRITE, struct_scsi_addr_sz);
+ _(OSCIOCIDENTIFY, WRITE, struct_oscsi_addr_sz);
+ _(SCIOCDECONFIG, NONE, 0);
+ _(SCIOCRECONFIG, NONE, 0);
+ _(SCIOCRESET, NONE, 0);
+ _(SCBUSIOSCAN, READ, struct_scbusioscan_args_sz);
+ _(SCBUSIORESET, NONE, 0);
+ _(SCBUSIODETACH, READ, struct_scbusiodetach_args_sz);
+ _(SCBUSACCEL, READ, struct_scbusaccel_args_sz);
+ /* Entries from file: sys/sockio.h */
+ _(SIOCSHIWAT, READ, sizeof(int));
+ _(SIOCGHIWAT, WRITE, sizeof(int));
+ _(SIOCSLOWAT, READ, sizeof(int));
+ _(SIOCGLOWAT, WRITE, sizeof(int));
+ _(SIOCATMARK, WRITE, sizeof(int));
+ _(SIOCSPGRP, READ, sizeof(int));
+ _(SIOCGPGRP, WRITE, sizeof(int));
+ _(SIOCPEELOFF, READWRITE, sizeof(int));
+ _(SIOCADDRT, READ, struct_ortentry_sz);
+ _(SIOCDELRT, READ, struct_ortentry_sz);
+ _(SIOCSIFADDR, READ, struct_ifreq_sz);
+ _(SIOCGIFADDR, READWRITE, struct_ifreq_sz);
+ _(SIOCSIFDSTADDR, READ, struct_ifreq_sz);
+ _(SIOCGIFDSTADDR, READWRITE, struct_ifreq_sz);
+ _(SIOCSIFFLAGS, READ, struct_ifreq_sz);
+ _(SIOCGIFFLAGS, READWRITE, struct_ifreq_sz);
+ _(SIOCGIFBRDADDR, READWRITE, struct_ifreq_sz);
+ _(SIOCSIFBRDADDR, READ, struct_ifreq_sz);
+ _(SIOCGIFCONF, READWRITE, struct_ifconf_sz);
+ _(SIOCGIFNETMASK, READWRITE, struct_ifreq_sz);
+ _(SIOCSIFNETMASK, READ, struct_ifreq_sz);
+ _(SIOCGIFMETRIC, READWRITE, struct_ifreq_sz);
+ _(SIOCSIFMETRIC, READ, struct_ifreq_sz);
+ _(SIOCDIFADDR, READ, struct_ifreq_sz);
+ _(SIOCAIFADDR, READ, struct_ifaliasreq_sz);
+ _(SIOCGIFALIAS, READWRITE, struct_ifaliasreq_sz);
+ _(SIOCGIFAFLAG_IN, READWRITE, struct_ifreq_sz);
+ _(SIOCALIFADDR, READ, struct_if_laddrreq_sz);
+ _(SIOCGLIFADDR, READWRITE, struct_if_laddrreq_sz);
+ _(SIOCDLIFADDR, READ, struct_if_laddrreq_sz);
+ _(SIOCSIFADDRPREF, READ, struct_if_addrprefreq_sz);
+ _(SIOCGIFADDRPREF, READWRITE, struct_if_addrprefreq_sz);
+ _(SIOCADDMULTI, READ, struct_ifreq_sz);
+ _(SIOCDELMULTI, READ, struct_ifreq_sz);
+ _(SIOCGETVIFCNT, READWRITE, struct_sioc_vif_req_sz);
+ _(SIOCGETSGCNT, READWRITE, struct_sioc_sg_req_sz);
+ _(SIOCSIFMEDIA, READWRITE, struct_ifreq_sz);
+ _(SIOCGIFMEDIA, READWRITE, struct_ifmediareq_sz);
+ _(SIOCSIFGENERIC, READ, struct_ifreq_sz);
+ _(SIOCGIFGENERIC, READWRITE, struct_ifreq_sz);
+ _(SIOCSIFPHYADDR, READ, struct_ifaliasreq_sz);
+ _(SIOCGIFPSRCADDR, READWRITE, struct_ifreq_sz);
+ _(SIOCGIFPDSTADDR, READWRITE, struct_ifreq_sz);
+ _(SIOCDIFPHYADDR, READ, struct_ifreq_sz);
+ _(SIOCSLIFPHYADDR, READ, struct_if_laddrreq_sz);
+ _(SIOCGLIFPHYADDR, READWRITE, struct_if_laddrreq_sz);
+ _(SIOCSIFMTU, READ, struct_ifreq_sz);
+ _(SIOCGIFMTU, READWRITE, struct_ifreq_sz);
+ _(SIOCSDRVSPEC, READ, struct_ifdrv_sz);
+ _(SIOCGDRVSPEC, READWRITE, struct_ifdrv_sz);
+ _(SIOCIFCREATE, READ, struct_ifreq_sz);
+ _(SIOCIFDESTROY, READ, struct_ifreq_sz);
+ _(SIOCIFGCLONERS, READWRITE, struct_if_clonereq_sz);
+ _(SIOCGIFDLT, READWRITE, struct_ifreq_sz);
+ _(SIOCGIFCAP, READWRITE, struct_ifcapreq_sz);
+ _(SIOCSIFCAP, READ, struct_ifcapreq_sz);
+ _(SIOCSVH, READWRITE, struct_ifreq_sz);
+ _(SIOCGVH, READWRITE, struct_ifreq_sz);
+ _(SIOCINITIFADDR, READWRITE, struct_ifaddr_sz);
+ _(SIOCGIFDATA, READWRITE, struct_ifdatareq_sz);
+ _(SIOCZIFDATA, READWRITE, struct_ifdatareq_sz);
+ _(SIOCGLINKSTR, READWRITE, struct_ifdrv_sz);
+ _(SIOCSLINKSTR, READ, struct_ifdrv_sz);
+ _(SIOCGETHERCAP, READWRITE, struct_eccapreq_sz);
+ _(SIOCGIFINDEX, READWRITE, struct_ifreq_sz);
+ _(SIOCSETHERCAP, READ, struct_eccapreq_sz);
+ _(SIOCSIFDESCR, READ, struct_ifreq_sz);
+ _(SIOCGIFDESCR, READWRITE, struct_ifreq_sz);
+ _(SIOCGUMBINFO, READWRITE, struct_ifreq_sz);
+ _(SIOCSUMBPARAM, READ, struct_ifreq_sz);
+ _(SIOCGUMBPARAM, READWRITE, struct_ifreq_sz);
+ _(SIOCSETPFSYNC, READ, struct_ifreq_sz);
+ _(SIOCGETPFSYNC, READWRITE, struct_ifreq_sz);
+ /* Entries from file: sys/timepps.h */
+ _(PPS_IOC_CREATE, NONE, 0);
+ _(PPS_IOC_DESTROY, NONE, 0);
+ _(PPS_IOC_SETPARAMS, READ, struct_pps_params_sz);
+ _(PPS_IOC_GETPARAMS, WRITE, struct_pps_params_sz);
+ _(PPS_IOC_GETCAP, WRITE, sizeof(int));
+ _(PPS_IOC_FETCH, READWRITE, struct_pps_info_sz);
+ _(PPS_IOC_KCBIND, READ, sizeof(int));
+ /* Entries from file: sys/ttycom.h */
+ _(TIOCEXCL, NONE, 0);
+ _(TIOCNXCL, NONE, 0);
+ _(TIOCFLUSH, READ, sizeof(int));
+ _(TIOCGETA, WRITE, struct_termios_sz);
+ _(TIOCSETA, READ, struct_termios_sz);
+  _(TIOCSETAW, READ, struct_termios_sz);
+  _(TIOCSETAF, READ, struct_termios_sz);
+ _(TIOCGETD, WRITE, sizeof(int));
+ _(TIOCSETD, READ, sizeof(int));
+ _(TIOCGLINED, WRITE, (32 * sizeof(char)));
+ _(TIOCSLINED, READ, (32 * sizeof(char)));
+ _(TIOCSBRK, NONE, 0);
+ _(TIOCCBRK, NONE, 0);
+ _(TIOCSDTR, NONE, 0);
+ _(TIOCCDTR, NONE, 0);
+ _(TIOCGPGRP, WRITE, sizeof(int));
+ _(TIOCSPGRP, READ, sizeof(int));
+ _(TIOCOUTQ, WRITE, sizeof(int));
+ _(TIOCSTI, READ, sizeof(char));
+ _(TIOCNOTTY, NONE, 0);
+ _(TIOCPKT, READ, sizeof(int));
+ _(TIOCSTOP, NONE, 0);
+ _(TIOCSTART, NONE, 0);
+ _(TIOCMSET, READ, sizeof(int));
+ _(TIOCMBIS, READ, sizeof(int));
+ _(TIOCMBIC, READ, sizeof(int));
+ _(TIOCMGET, WRITE, sizeof(int));
+ _(TIOCREMOTE, READ, sizeof(int));
+ _(TIOCGWINSZ, WRITE, struct_winsize_sz);
+ _(TIOCSWINSZ, READ, struct_winsize_sz);
+ _(TIOCUCNTL, READ, sizeof(int));
+ _(TIOCSTAT, READ, sizeof(int));
+ _(TIOCGSID, WRITE, sizeof(int));
+ _(TIOCCONS, READ, sizeof(int));
+ _(TIOCSCTTY, NONE, 0);
+ _(TIOCEXT, READ, sizeof(int));
+ _(TIOCSIG, NONE, 0);
+ _(TIOCDRAIN, NONE, 0);
+ _(TIOCGFLAGS, WRITE, sizeof(int));
+ _(TIOCSFLAGS, READ, sizeof(int));
+ _(TIOCDCDTIMESTAMP, WRITE, struct_timeval_sz);
+ _(TIOCRCVFRAME, READ, sizeof(uptr));
+ _(TIOCXMTFRAME, READ, sizeof(uptr));
+ _(TIOCPTMGET, WRITE, struct_ptmget_sz);
+ _(TIOCGRANTPT, NONE, 0);
+ _(TIOCPTSNAME, WRITE, struct_ptmget_sz);
+ _(TIOCSQSIZE, READ, sizeof(int));
+ _(TIOCGQSIZE, WRITE, sizeof(int));
+ /* Entries from file: sys/verified_exec.h */
+ _(VERIEXEC_LOAD, READ, struct_plistref_sz);
+ _(VERIEXEC_TABLESIZE, READ, struct_plistref_sz);
+ _(VERIEXEC_DELETE, READ, struct_plistref_sz);
+ _(VERIEXEC_QUERY, READWRITE, struct_plistref_sz);
+ _(VERIEXEC_DUMP, WRITE, struct_plistref_sz);
+ _(VERIEXEC_FLUSH, NONE, 0);
+ /* Entries from file: sys/videoio.h */
+ _(VIDIOC_QUERYCAP, WRITE, struct_v4l2_capability_sz);
+ _(VIDIOC_RESERVED, NONE, 0);
+ _(VIDIOC_ENUM_FMT, READWRITE, struct_v4l2_fmtdesc_sz);
+ _(VIDIOC_G_FMT, READWRITE, struct_v4l2_format_sz);
+ _(VIDIOC_S_FMT, READWRITE, struct_v4l2_format_sz);
+ _(VIDIOC_REQBUFS, READWRITE, struct_v4l2_requestbuffers_sz);
+ _(VIDIOC_QUERYBUF, READWRITE, struct_v4l2_buffer_sz);
+ _(VIDIOC_G_FBUF, WRITE, struct_v4l2_framebuffer_sz);
+ _(VIDIOC_S_FBUF, READ, struct_v4l2_framebuffer_sz);
+ _(VIDIOC_OVERLAY, READ, sizeof(int));
+ _(VIDIOC_QBUF, READWRITE, struct_v4l2_buffer_sz);
+ _(VIDIOC_DQBUF, READWRITE, struct_v4l2_buffer_sz);
+ _(VIDIOC_STREAMON, READ, sizeof(int));
+ _(VIDIOC_STREAMOFF, READ, sizeof(int));
+ _(VIDIOC_G_PARM, READWRITE, struct_v4l2_streamparm_sz);
+ _(VIDIOC_S_PARM, READWRITE, struct_v4l2_streamparm_sz);
+ _(VIDIOC_G_STD, WRITE, sizeof(u64));
+ _(VIDIOC_S_STD, READ, sizeof(u64));
+ _(VIDIOC_ENUMSTD, READWRITE, struct_v4l2_standard_sz);
+ _(VIDIOC_ENUMINPUT, READWRITE, struct_v4l2_input_sz);
+ _(VIDIOC_G_CTRL, READWRITE, struct_v4l2_control_sz);
+ _(VIDIOC_S_CTRL, READWRITE, struct_v4l2_control_sz);
+ _(VIDIOC_G_TUNER, READWRITE, struct_v4l2_tuner_sz);
+ _(VIDIOC_S_TUNER, READ, struct_v4l2_tuner_sz);
+ _(VIDIOC_G_AUDIO, WRITE, struct_v4l2_audio_sz);
+ _(VIDIOC_S_AUDIO, READ, struct_v4l2_audio_sz);
+ _(VIDIOC_QUERYCTRL, READWRITE, struct_v4l2_queryctrl_sz);
+ _(VIDIOC_QUERYMENU, READWRITE, struct_v4l2_querymenu_sz);
+ _(VIDIOC_G_INPUT, WRITE, sizeof(int));
+ _(VIDIOC_S_INPUT, READWRITE, sizeof(int));
+ _(VIDIOC_G_OUTPUT, WRITE, sizeof(int));
+ _(VIDIOC_S_OUTPUT, READWRITE, sizeof(int));
+ _(VIDIOC_ENUMOUTPUT, READWRITE, struct_v4l2_output_sz);
+ _(VIDIOC_G_AUDOUT, WRITE, struct_v4l2_audioout_sz);
+ _(VIDIOC_S_AUDOUT, READ, struct_v4l2_audioout_sz);
+ _(VIDIOC_G_MODULATOR, READWRITE, struct_v4l2_modulator_sz);
+ _(VIDIOC_S_MODULATOR, READ, struct_v4l2_modulator_sz);
+ _(VIDIOC_G_FREQUENCY, READWRITE, struct_v4l2_frequency_sz);
+ _(VIDIOC_S_FREQUENCY, READ, struct_v4l2_frequency_sz);
+ _(VIDIOC_CROPCAP, READWRITE, struct_v4l2_cropcap_sz);
+ _(VIDIOC_G_CROP, READWRITE, struct_v4l2_crop_sz);
+ _(VIDIOC_S_CROP, READ, struct_v4l2_crop_sz);
+ _(VIDIOC_G_JPEGCOMP, WRITE, struct_v4l2_jpegcompression_sz);
+ _(VIDIOC_S_JPEGCOMP, READ, struct_v4l2_jpegcompression_sz);
+ _(VIDIOC_QUERYSTD, WRITE, sizeof(u64));
+ _(VIDIOC_TRY_FMT, READWRITE, struct_v4l2_format_sz);
+ _(VIDIOC_ENUMAUDIO, READWRITE, struct_v4l2_audio_sz);
+ _(VIDIOC_ENUMAUDOUT, READWRITE, struct_v4l2_audioout_sz);
+ _(VIDIOC_G_PRIORITY, WRITE, enum_v4l2_priority_sz);
+ _(VIDIOC_S_PRIORITY, READ, enum_v4l2_priority_sz);
+ _(VIDIOC_ENUM_FRAMESIZES, READWRITE, struct_v4l2_frmsizeenum_sz);
+ _(VIDIOC_ENUM_FRAMEINTERVALS, READWRITE, struct_v4l2_frmivalenum_sz);
+ /* Entries from file: sys/wdog.h */
+ _(WDOGIOC_GMODE, READWRITE, struct_wdog_mode_sz);
+ _(WDOGIOC_SMODE, READ, struct_wdog_mode_sz);
+ _(WDOGIOC_WHICH, WRITE, struct_wdog_mode_sz);
+ _(WDOGIOC_TICKLE, NONE, 0);
+ _(WDOGIOC_GTICKLER, WRITE, sizeof(int));
+ _(WDOGIOC_GWDOGS, READWRITE, struct_wdog_conf_sz);
+ /* Entries from file: sys/kcov.h */
+ _(KCOV_IOC_SETBUFSIZE, READ, sizeof(u64));
+ _(KCOV_IOC_ENABLE, READ, sizeof(int));
+ _(KCOV_IOC_DISABLE, NONE, 0);
+ /* Entries from file: sys/ipmi.h */
+ _(IPMICTL_RECEIVE_MSG_TRUNC, READWRITE, struct_ipmi_recv_sz);
+ _(IPMICTL_RECEIVE_MSG, READWRITE, struct_ipmi_recv_sz);
+ _(IPMICTL_SEND_COMMAND, READ, struct_ipmi_req_sz);
+ _(IPMICTL_REGISTER_FOR_CMD, READ, struct_ipmi_cmdspec_sz);
+ _(IPMICTL_UNREGISTER_FOR_CMD, READ, struct_ipmi_cmdspec_sz);
+ _(IPMICTL_SET_GETS_EVENTS_CMD, READ, sizeof(int));
+ _(IPMICTL_SET_MY_ADDRESS_CMD, READ, sizeof(unsigned int));
+ _(IPMICTL_GET_MY_ADDRESS_CMD, WRITE, sizeof(unsigned int));
+ _(IPMICTL_SET_MY_LUN_CMD, READ, sizeof(unsigned int));
+ _(IPMICTL_GET_MY_LUN_CMD, WRITE, sizeof(unsigned int));
+ /* Entries from file: soundcard.h */
+ _(SNDCTL_DSP_RESET, NONE, 0);
+ _(SNDCTL_DSP_SYNC, NONE, 0);
+ _(SNDCTL_DSP_SPEED, READWRITE, sizeof(int));
+ _(SOUND_PCM_READ_RATE, WRITE, sizeof(int));
+ _(SNDCTL_DSP_STEREO, READWRITE, sizeof(int));
+ _(SNDCTL_DSP_GETBLKSIZE, READWRITE, sizeof(int));
+ _(SNDCTL_DSP_SETFMT, READWRITE, sizeof(int));
+ _(SOUND_PCM_READ_BITS, WRITE, sizeof(int));
+ _(SNDCTL_DSP_CHANNELS, READWRITE, sizeof(int));
+ _(SOUND_PCM_READ_CHANNELS, WRITE, sizeof(int));
+ _(SOUND_PCM_WRITE_FILTER, READWRITE, sizeof(int));
+ _(SOUND_PCM_READ_FILTER, WRITE, sizeof(int));
+ _(SNDCTL_DSP_POST, NONE, 0);
+ _(SNDCTL_DSP_SUBDIVIDE, READWRITE, sizeof(int));
+ _(SNDCTL_DSP_SETFRAGMENT, READWRITE, sizeof(int));
+ _(SNDCTL_DSP_GETFMTS, WRITE, sizeof(int));
+ _(SNDCTL_DSP_GETOSPACE, WRITE, struct_audio_buf_info_sz);
+ _(SNDCTL_DSP_GETISPACE, WRITE, struct_audio_buf_info_sz);
+ _(SNDCTL_DSP_NONBLOCK, NONE, 0);
+ _(SNDCTL_DSP_GETCAPS, WRITE, sizeof(int));
+ _(SNDCTL_DSP_GETTRIGGER, WRITE, sizeof(int));
+ _(SNDCTL_DSP_SETTRIGGER, READ, sizeof(int));
+ _(SNDCTL_DSP_GETIPTR, WRITE, struct_count_info_sz);
+ _(SNDCTL_DSP_GETOPTR, WRITE, struct_count_info_sz);
+ _(SNDCTL_DSP_MAPINBUF, WRITE, struct_buffmem_desc_sz);
+ _(SNDCTL_DSP_MAPOUTBUF, WRITE, struct_buffmem_desc_sz);
+ _(SNDCTL_DSP_SETSYNCRO, NONE, 0);
+ _(SNDCTL_DSP_SETDUPLEX, NONE, 0);
+ _(SNDCTL_DSP_PROFILE, READ, sizeof(int));
+ _(SNDCTL_DSP_GETODELAY, WRITE, sizeof(int));
+ _(SOUND_MIXER_INFO, WRITE, struct_mixer_info_sz);
+ _(SOUND_OLD_MIXER_INFO, WRITE, struct__old_mixer_info_sz);
+ _(OSS_GETVERSION, WRITE, sizeof(int));
+ _(SNDCTL_SYSINFO, WRITE, struct_oss_sysinfo_sz);
+ _(SNDCTL_AUDIOINFO, READWRITE, struct_oss_audioinfo_sz);
+ _(SNDCTL_ENGINEINFO, READWRITE, struct_oss_audioinfo_sz);
+ _(SNDCTL_DSP_GETPLAYVOL, WRITE, sizeof(unsigned int));
+ _(SNDCTL_DSP_SETPLAYVOL, READ, sizeof(unsigned int));
+ _(SNDCTL_DSP_GETRECVOL, WRITE, sizeof(unsigned int));
+ _(SNDCTL_DSP_SETRECVOL, READ, sizeof(unsigned int));
+ _(SNDCTL_DSP_SKIP, NONE, 0);
+ _(SNDCTL_DSP_SILENCE, NONE, 0);
+#undef _
+}
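+
+// Each _(NAME, TYPE, SIZE) line above registers one table entry: the ioctl
+// request IOCTL_NAME, whether its argument buffer is READ (copied in by the
+// kernel), WRITE (copied out to userland), READWRITE, or NONE, and how many
+// bytes the sanitizer should check. SIZE is 0 only for NONE entries and for
+// requests whose access size is encoded in the request word itself (see
+// ioctl_lookup() below).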
+
+static bool ioctl_initialized = false;
+
+struct ioctl_desc_compare {
+ bool operator()(const ioctl_desc &left, const ioctl_desc &right) const {
+ return left.req < right.req;
+ }
+};
+
+static void ioctl_init() {
+ ioctl_table_fill();
+ Sort(ioctl_table, ioctl_table_size, ioctl_desc_compare());
+
+ bool bad = false;
+ for (unsigned i = 0; i < ioctl_table_size - 1; ++i) {
+ if (ioctl_table[i].req >= ioctl_table[i + 1].req) {
+ Printf("Duplicate or unsorted ioctl request id %x >= %x (%s vs %s)\n",
+ ioctl_table[i].req, ioctl_table[i + 1].req, ioctl_table[i].name,
+ ioctl_table[i + 1].name);
+ bad = true;
+ }
+ }
+
+ if (bad)
+ Die();
+
+ ioctl_initialized = true;
+}
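+
+// A minimal usage sketch (illustrative only, not part of this file): callers
+// are expected to lazily initialize the table once and then consult it from
+// the ioctl interceptor, roughly:
+//
+//   if (!ioctl_initialized)
+//     ioctl_init();
+//   if (const ioctl_desc *desc = ioctl_lookup(request))
+//     ioctl_common_pre(ctx, desc, d, request, arg);
+//
+// The duplicate/unsorted check above guarantees that the binary search in
+// ioctl_table_lookup() below sees a strictly ascending, duplicate-free table.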
+
+static const ioctl_desc *ioctl_table_lookup(unsigned req) {
+ int left = 0;
+ int right = ioctl_table_size;
+ while (left < right) {
+ int mid = (left + right) / 2;
+ if (ioctl_table[mid].req < req)
+ left = mid + 1;
+ else
+ right = mid;
+ }
+  // left == right after the loop; left can equal ioctl_table_size when req is
+  // greater than every entry, so bound-check before dereferencing.
+  if (left < (int)ioctl_table_size && ioctl_table[left].req == req)
+ return ioctl_table + left;
+ else
+ return nullptr;
+}
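+
+// Worked example of the lower-bound search above (illustrative table): with
+// .req values {0x10, 0x20, 0x30}, looking up 0x20 converges to
+// left == right == 1 and returns &ioctl_table[1]; looking up 0x25 converges
+// to index 2, where .req == 0x30 != 0x25, so nullptr is returned; looking up
+// 0x40 converges to index 3 == ioctl_table_size and also returns nullptr.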
+
+static bool ioctl_decode(unsigned req, ioctl_desc *desc) {
+ CHECK(desc);
+ desc->req = req;
+ desc->name = "<DECODED_IOCTL>";
+ desc->size = IOC_SIZE(req);
+ // Sanity check.
+ if (desc->size > 0xFFFF)
+ return false;
+ unsigned dir = IOC_DIR(req);
+ switch (dir) {
+ case IOC_NONE:
+ desc->type = ioctl_desc::NONE;
+ break;
+ case IOC_READ | IOC_WRITE:
+ desc->type = ioctl_desc::READWRITE;
+ break;
+ case IOC_READ:
+ desc->type = ioctl_desc::WRITE;
+ break;
+ case IOC_WRITE:
+ desc->type = ioctl_desc::READ;
+ break;
+ default:
+ return false;
+ }
+ // Size can be 0 iff type is NONE.
+  if ((desc->type == ioctl_desc::NONE) != (desc->size == 0))
+ return false;
+ // Sanity check.
+ if (IOC_TYPE(req) == 0)
+ return false;
+ return true;
+}
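+
+// Note on the direction mapping above: IOC_READ/IOC_WRITE name the transfer
+// from the kernel's point of view, while ioctl_desc::READ/WRITE describe what
+// happens to the user buffer. A request defined with _IOR (the kernel writes
+// its answer into userland memory) therefore decodes to ioctl_desc::WRITE,
+// and _IOW decodes to ioctl_desc::READ. A hypothetical example:
+//
+//   // #define MYDEV_GETCOUNT _IOR('M', 1, int)
+//   // ioctl_decode(MYDEV_GETCOUNT, &desc) sets desc.type == WRITE and
+//   // desc.size == sizeof(int): *arg is checked as written, not read.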
+
+static const ioctl_desc *ioctl_lookup(unsigned req) {
+ const ioctl_desc *desc = ioctl_table_lookup(req);
+ if (desc)
+ return desc;
+
+ // Try stripping access size from the request id.
+ desc = ioctl_table_lookup(req & ~(IOC_SIZEMASK << IOC_SIZESHIFT));
+  // Sanity check: requests that encode an access size are either read or
+  // write and have a size of 0 in the table.
+ if (desc && desc->size == 0 &&
+ (desc->type == ioctl_desc::READWRITE || desc->type == ioctl_desc::WRITE ||
+ desc->type == ioctl_desc::READ))
+ return desc;
+ return nullptr;
+}
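+
+// Sketch of the fallback path above (MYDEV_VARBUF_BASE is hypothetical): a
+// request family that encodes a caller-chosen length in its IOC_SIZE bits
+// gets one table entry registered with size 0 and the size bits cleared, so
+//
+//   unsigned req = MYDEV_VARBUF_BASE | (len << IOC_SIZESHIFT);
+//
+// misses the exact lookup, matches after masking off IOC_SIZEMASK, and the
+// real length is recovered later via IOC_SIZE(request) in ioctl_common_pre()
+// and ioctl_common_post().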
+
+static void ioctl_common_pre(void *ctx, const ioctl_desc *desc, int d,
+ unsigned request, void *arg) {
+ if (desc->type == ioctl_desc::READ || desc->type == ioctl_desc::READWRITE) {
+ unsigned size = desc->size ? desc->size : IOC_SIZE(request);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, arg, size);
+ }
+ if (desc->type != ioctl_desc::CUSTOM)
+ return;
+ if (request == IOCTL_SIOCGIFCONF) {
+ struct __sanitizer_ifconf *ifc = (__sanitizer_ifconf *)arg;
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, (char *)&ifc->ifc_len,
+ sizeof(ifc->ifc_len));
+ }
+}
+
+static void ioctl_common_post(void *ctx, const ioctl_desc *desc, int res, int d,
+ unsigned request, void *arg) {
+ if (desc->type == ioctl_desc::WRITE || desc->type == ioctl_desc::READWRITE) {
+ // FIXME: add verbose output
+ unsigned size = desc->size ? desc->size : IOC_SIZE(request);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, arg, size);
+ }
+ if (desc->type != ioctl_desc::CUSTOM)
+ return;
+ if (request == IOCTL_SIOCGIFCONF) {
+ struct __sanitizer_ifconf *ifc = (__sanitizer_ifconf *)arg;
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ifc->ifc_ifcu.ifcu_req, ifc->ifc_len);
+ }
+}
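+
+// How the two hooks above are meant to bracket the real syscall (a sketch of
+// the usual interceptor shape; the actual wrapper lives in
+// sanitizer_common_interceptors.inc, not in this file):
+//
+//   const ioctl_desc *desc = ioctl_lookup(request);
+//   ioctl_desc decoded;
+//   if (!desc && ioctl_decode(request, &decoded))
+//     desc = &decoded;
+//   if (desc) ioctl_common_pre(ctx, desc, d, request, (void *)arg);
+//   int res = REAL(ioctl)(d, request, arg);
+//   if (desc && res != -1)
+//     ioctl_common_post(ctx, desc, res, d, request, (void *)arg);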
+
+#endif // SANITIZER_NETBSD
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_interceptors_ioctl_netbsd.inc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_platform_limits_netbsd.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_platform_limits_netbsd.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_platform_limits_netbsd.cc (revision 351984)
@@ -0,0 +1,2395 @@
+//===-- sanitizer_platform_limits_netbsd.cc -------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of Sanitizer common code.
+//
+// Sizes and layouts of platform-specific NetBSD data structures.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+
+#if SANITIZER_NETBSD
+
+#define _KMEMUSER
+#define RAY_DO_SIGLEV
+
+// clang-format off
+#include <sys/param.h>
+#include <sys/types.h>
+#include <sys/sysctl.h>
+#include <sys/disk.h>
+#include <sys/disklabel.h>
+#include <sys/mount.h>
+#include <sys/agpio.h>
+#include <sys/ataio.h>
+#include <sys/audioio.h>
+#include <sys/cdbr.h>
+#include <sys/cdio.h>
+#include <sys/chio.h>
+#include <sys/clockctl.h>
+#include <sys/cpuio.h>
+#include <sys/dkio.h>
+#include <sys/drvctlio.h>
+#include <sys/dvdio.h>
+#include <sys/envsys.h>
+#include <sys/event.h>
+#include <sys/fdio.h>
+#include <sys/filio.h>
+#include <sys/gpio.h>
+#include <sys/ioctl.h>
+#include <sys/ioctl_compat.h>
+#include <sys/joystick.h>
+#include <sys/ksyms.h>
+#include <sys/lua.h>
+#include <sys/midiio.h>
+#include <sys/mtio.h>
+#include <sys/power.h>
+#include <sys/radioio.h>
+#include <sys/rndio.h>
+#include <sys/scanio.h>
+#include <sys/scsiio.h>
+#include <sys/sockio.h>
+#include <sys/timepps.h>
+#include <sys/ttycom.h>
+#include <sys/verified_exec.h>
+#include <sys/videoio.h>
+#include <sys/wdog.h>
+#include <sys/ipc.h>
+#include <sys/ipmi.h>
+#include <sys/kcov.h>
+#include <sys/mman.h>
+#include <sys/module.h>
+#include <sys/mqueue.h>
+#include <sys/msg.h>
+#include <sys/ptrace.h>
+#include <sys/resource.h>
+#include <sys/sem.h>
+#include <sys/sha1.h>
+#include <sys/sha2.h>
+#include <sys/shm.h>
+#include <sys/signal.h>
+#include <sys/socket.h>
+#include <sys/soundcard.h>
+#include <sys/stat.h>
+#include <sys/statvfs.h>
+#include <sys/time.h>
+#include <sys/timeb.h>
+#include <sys/times.h>
+#include <sys/timespec.h>
+#include <sys/timex.h>
+#include <sys/ucontext.h>
+#include <sys/utsname.h>
+#include <altq/altq.h>
+#include <altq/altq_afmap.h>
+#include <altq/altq_blue.h>
+#include <altq/altq_cbq.h>
+#include <altq/altq_cdnr.h>
+#include <altq/altq_fifoq.h>
+#include <altq/altq_hfsc.h>
+#include <altq/altq_jobs.h>
+#include <altq/altq_priq.h>
+#include <altq/altq_red.h>
+#include <altq/altq_rio.h>
+#include <altq/altq_wfq.h>
+#include <arpa/inet.h>
+#include <crypto/cryptodev.h>
+#include <dev/apm/apmio.h>
+#include <dev/dm/netbsd-dm.h>
+#include <dev/dmover/dmover_io.h>
+#include <dev/dtv/dtvio_demux.h>
+#include <dev/dtv/dtvio_frontend.h>
+#include <dev/filemon/filemon.h>
+#include <dev/hdaudio/hdaudioio.h>
+#include <dev/hdmicec/hdmicecio.h>
+#include <dev/hpc/hpcfbio.h>
+#include <dev/i2o/iopio.h>
+#include <dev/ic/athioctl.h>
+#include <dev/ic/bt8xx.h>
+#include <dev/ic/icp_ioctl.h>
+#include <dev/ic/isp_ioctl.h>
+#include <dev/ic/mlxio.h>
+#include <dev/ic/qemufwcfgio.h>
+#include <dev/ic/nvmeio.h>
+#include <dev/ir/irdaio.h>
+#include <dev/isa/isvio.h>
+#include <dev/isa/wtreg.h>
+#include <dev/iscsi/iscsi_ioctl.h>
+#include <dev/ofw/openfirmio.h>
+#include <dev/pci/amrio.h>
+#include <dev/pci/mlyreg.h>
+#include <dev/pci/mlyio.h>
+#include <dev/pci/pciio.h>
+#include <dev/pci/tweio.h>
+#include <dev/pcmcia/if_cnwioctl.h>
+#include <net/bpf.h>
+#include <net/if_gre.h>
+#include <net/ppp_defs.h>
+#include <net/if_ppp.h>
+#include <net/if_pppoe.h>
+#include <net/if_sppp.h>
+#include <net/if_srt.h>
+#include <net/if_tap.h>
+#include <net/if_tun.h>
+#include <net/npf.h>
+#include <net/pfvar.h>
+#include <net/slip.h>
+#include <netbt/hci.h>
+#include <netinet/ip_compat.h>
+#include <netinet/ip_fil.h>
+#include <netinet/ip_nat.h>
+#include <netinet/ip_proxy.h>
+#include <netinet6/in6_var.h>
+#include <netinet6/nd6.h>
+#include <netsmb/smb_dev.h>
+#include <dev/biovar.h>
+#include <dev/bluetooth/btdev.h>
+#include <dev/bluetooth/btsco.h>
+#include <dev/ccdvar.h>
+#include <dev/cgdvar.h>
+#include <dev/fssvar.h>
+#include <dev/kttcpio.h>
+#include <dev/lockstat.h>
+#include <dev/md.h>
+#include <net/if_ether.h>
+#include <dev/pcmcia/if_rayreg.h>
+#include <stdio.h>
+#include <dev/raidframe/raidframeio.h>
+#include <dev/sbus/mbppio.h>
+#include <dev/scsipi/ses.h>
+#include <dev/spi/spi_io.h>
+#include <dev/spkrio.h>
+#include <dev/sun/disklabel.h>
+#include <dev/sun/fbio.h>
+#include <dev/sun/kbio.h>
+#include <dev/sun/vuid_event.h>
+#include <dev/tc/sticio.h>
+#include <dev/usb/ukyopon.h>
+#include <dev/usb/urio.h>
+#include <dev/usb/usb.h>
+#include <dev/usb/utoppy.h>
+#include <dev/vme/xio.h>
+#include <dev/vndvar.h>
+#include <dev/wscons/wsconsio.h>
+#include <dev/wscons/wsdisplay_usl_io.h>
+#include <fs/autofs/autofs_ioctl.h>
+#include <dirent.h>
+#include <glob.h>
+#include <grp.h>
+#include <ifaddrs.h>
+#include <limits.h>
+#include <link_elf.h>
+#include <net/if.h>
+#include <net/route.h>
+#include <netdb.h>
+#include <netinet/in.h>
+#include <netinet/ip_mroute.h>
+#include <netinet/sctp_uio.h>
+#include <poll.h>
+#include <pthread.h>
+#include <pwd.h>
+#include <semaphore.h>
+#include <signal.h>
+#include <stddef.h>
+#include <md2.h>
+#include <md4.h>
+#include <md5.h>
+#include <rmd160.h>
+#include <soundcard.h>
+#include <term.h>
+#include <termios.h>
+#include <time.h>
+#include <ttyent.h>
+#include <utime.h>
+#include <utmp.h>
+#include <utmpx.h>
+#include <vis.h>
+#include <wchar.h>
+#include <wordexp.h>
+#include <fts.h>
+#include <regex.h>
+#include <fstab.h>
+#include <stringlist.h>
+
+#if defined(__x86_64__)
+#include <nvmm.h>
+#endif
+// clang-format on
+
+// Include these after system headers to avoid name clashes and ambiguities.
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_platform_limits_netbsd.h"
+
+namespace __sanitizer {
+unsigned struct_utsname_sz = sizeof(struct utsname);
+unsigned struct_stat_sz = sizeof(struct stat);
+unsigned struct_rusage_sz = sizeof(struct rusage);
+unsigned struct_tm_sz = sizeof(struct tm);
+unsigned struct_passwd_sz = sizeof(struct passwd);
+unsigned struct_group_sz = sizeof(struct group);
+unsigned siginfo_t_sz = sizeof(siginfo_t);
+unsigned struct_sigaction_sz = sizeof(struct sigaction);
+unsigned struct_itimerval_sz = sizeof(struct itimerval);
+unsigned pthread_t_sz = sizeof(pthread_t);
+unsigned pthread_mutex_t_sz = sizeof(pthread_mutex_t);
+unsigned pthread_cond_t_sz = sizeof(pthread_cond_t);
+unsigned pid_t_sz = sizeof(pid_t);
+unsigned timeval_sz = sizeof(timeval);
+unsigned uid_t_sz = sizeof(uid_t);
+unsigned gid_t_sz = sizeof(gid_t);
+unsigned mbstate_t_sz = sizeof(mbstate_t);
+unsigned sigset_t_sz = sizeof(sigset_t);
+unsigned struct_timezone_sz = sizeof(struct timezone);
+unsigned struct_tms_sz = sizeof(struct tms);
+unsigned struct_sigevent_sz = sizeof(struct sigevent);
+unsigned struct_sched_param_sz = sizeof(struct sched_param);
+unsigned struct_sockaddr_sz = sizeof(struct sockaddr);
+unsigned ucontext_t_sz = sizeof(ucontext_t);
+unsigned struct_rlimit_sz = sizeof(struct rlimit);
+unsigned struct_timespec_sz = sizeof(struct timespec);
+unsigned struct_sembuf_sz = sizeof(struct sembuf);
+unsigned struct_kevent_sz = sizeof(struct kevent);
+unsigned struct_FTS_sz = sizeof(FTS);
+unsigned struct_FTSENT_sz = sizeof(FTSENT);
+unsigned struct_regex_sz = sizeof(regex_t);
+unsigned struct_regmatch_sz = sizeof(regmatch_t);
+unsigned struct_fstab_sz = sizeof(struct fstab);
+unsigned struct_utimbuf_sz = sizeof(struct utimbuf);
+unsigned struct_itimerspec_sz = sizeof(struct itimerspec);
+unsigned struct_timex_sz = sizeof(struct timex);
+unsigned struct_msqid_ds_sz = sizeof(struct msqid_ds);
+unsigned struct_mq_attr_sz = sizeof(struct mq_attr);
+unsigned struct_statvfs_sz = sizeof(struct statvfs);
+unsigned struct_sigaltstack_sz = sizeof(stack_t);
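+
+// These sizes are deliberately exported as plain globals rather than as
+// constants in sanitizer_platform_limits_netbsd.h, so sanitizer code can use
+// NetBSD structure layouts without dragging system headers into every
+// translation unit. The header side is (sketch) just a matching extern:
+//
+//   extern unsigned struct_stat_sz;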
+
+const uptr sig_ign = (uptr)SIG_IGN;
+const uptr sig_dfl = (uptr)SIG_DFL;
+const uptr sig_err = (uptr)SIG_ERR;
+const uptr sa_siginfo = (uptr)SA_SIGINFO;
+
+const unsigned long __sanitizer_bufsiz = BUFSIZ;
+
+int ptrace_pt_io = PT_IO;
+int ptrace_pt_lwpinfo = PT_LWPINFO;
+int ptrace_pt_set_event_mask = PT_SET_EVENT_MASK;
+int ptrace_pt_get_event_mask = PT_GET_EVENT_MASK;
+int ptrace_pt_get_process_state = PT_GET_PROCESS_STATE;
+int ptrace_pt_set_siginfo = PT_SET_SIGINFO;
+int ptrace_pt_get_siginfo = PT_GET_SIGINFO;
+int ptrace_piod_read_d = PIOD_READ_D;
+int ptrace_piod_write_d = PIOD_WRITE_D;
+int ptrace_piod_read_i = PIOD_READ_I;
+int ptrace_piod_write_i = PIOD_WRITE_I;
+int ptrace_piod_read_auxv = PIOD_READ_AUXV;
+
+#if defined(PT_SETREGS) && defined(PT_GETREGS)
+int ptrace_pt_setregs = PT_SETREGS;
+int ptrace_pt_getregs = PT_GETREGS;
+#else
+int ptrace_pt_setregs = -1;
+int ptrace_pt_getregs = -1;
+#endif
+
+#if defined(PT_SETFPREGS) && defined(PT_GETFPREGS)
+int ptrace_pt_setfpregs = PT_SETFPREGS;
+int ptrace_pt_getfpregs = PT_GETFPREGS;
+#else
+int ptrace_pt_setfpregs = -1;
+int ptrace_pt_getfpregs = -1;
+#endif
+
+#if defined(PT_SETDBREGS) && defined(PT_GETDBREGS)
+int ptrace_pt_setdbregs = PT_SETDBREGS;
+int ptrace_pt_getdbregs = PT_GETDBREGS;
+#else
+int ptrace_pt_setdbregs = -1;
+int ptrace_pt_getdbregs = -1;
+#endif
+
+unsigned struct_ptrace_ptrace_io_desc_struct_sz = sizeof(struct ptrace_io_desc);
+unsigned struct_ptrace_ptrace_lwpinfo_struct_sz = sizeof(struct ptrace_lwpinfo);
+unsigned struct_ptrace_ptrace_event_struct_sz = sizeof(ptrace_event_t);
+unsigned struct_ptrace_ptrace_siginfo_struct_sz = sizeof(ptrace_siginfo_t);
+
+#if defined(PT_SETREGS)
+unsigned struct_ptrace_reg_struct_sz = sizeof(struct reg);
+#else
+unsigned struct_ptrace_reg_struct_sz = -1;
+#endif
+
+#if defined(PT_SETFPREGS)
+unsigned struct_ptrace_fpreg_struct_sz = sizeof(struct fpreg);
+#else
+unsigned struct_ptrace_fpreg_struct_sz = -1;
+#endif
+
+#if defined(PT_SETDBREGS)
+unsigned struct_ptrace_dbreg_struct_sz = sizeof(struct dbreg);
+#else
+unsigned struct_ptrace_dbreg_struct_sz = -1;
+#endif
+
+int shmctl_ipc_stat = (int)IPC_STAT;
+
+unsigned struct_utmp_sz = sizeof(struct utmp);
+unsigned struct_utmpx_sz = sizeof(struct utmpx);
+
+int map_fixed = MAP_FIXED;
+
+int af_inet = (int)AF_INET;
+int af_inet6 = (int)AF_INET6;
+
+uptr __sanitizer_in_addr_sz(int af) {
+ if (af == AF_INET)
+ return sizeof(struct in_addr);
+ else if (af == AF_INET6)
+ return sizeof(struct in6_addr);
+ else
+ return 0;
+}
+
+unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
+
+int glob_nomatch = GLOB_NOMATCH;
+int glob_altdirfunc = GLOB_ALTDIRFUNC;
+
+unsigned path_max = PATH_MAX;
+
+int struct_ttyent_sz = sizeof(struct ttyent);
+
+struct __sanitizer_nvlist_ref_t {
+ void *buf;
+ uptr len;
+ int flags;
+};
+
+typedef __sanitizer_nvlist_ref_t nvlist_ref_t;
+
+// ioctl arguments
+unsigned struct_altqreq_sz = sizeof(altqreq);
+unsigned struct_amr_user_ioctl_sz = sizeof(amr_user_ioctl);
+unsigned struct_ap_control_sz = sizeof(ap_control);
+unsigned struct_apm_ctl_sz = sizeof(apm_ctl);
+unsigned struct_apm_event_info_sz = sizeof(apm_event_info);
+unsigned struct_apm_power_info_sz = sizeof(apm_power_info);
+unsigned struct_atabusiodetach_args_sz = sizeof(atabusiodetach_args);
+unsigned struct_atabusioscan_args_sz = sizeof(atabusioscan_args);
+unsigned struct_ath_diag_sz = sizeof(ath_diag);
+unsigned struct_atm_flowmap_sz = sizeof(atm_flowmap);
+unsigned struct_audio_buf_info_sz = sizeof(audio_buf_info);
+unsigned struct_audio_device_sz = sizeof(audio_device);
+unsigned struct_audio_encoding_sz = sizeof(audio_encoding);
+unsigned struct_audio_info_sz = sizeof(audio_info);
+unsigned struct_audio_offset_sz = sizeof(audio_offset);
+unsigned struct_bio_locate_sz = sizeof(bio_locate);
+unsigned struct_bioc_alarm_sz = sizeof(bioc_alarm);
+unsigned struct_bioc_blink_sz = sizeof(bioc_blink);
+unsigned struct_bioc_disk_sz = sizeof(bioc_disk);
+unsigned struct_bioc_inq_sz = sizeof(bioc_inq);
+unsigned struct_bioc_setstate_sz = sizeof(bioc_setstate);
+unsigned struct_bioc_vol_sz = sizeof(bioc_vol);
+unsigned struct_bioc_volops_sz = sizeof(bioc_volops);
+unsigned struct_bktr_chnlset_sz = sizeof(bktr_chnlset);
+unsigned struct_bktr_remote_sz = sizeof(bktr_remote);
+unsigned struct_blue_conf_sz = sizeof(blue_conf);
+unsigned struct_blue_interface_sz = sizeof(blue_interface);
+unsigned struct_blue_stats_sz = sizeof(blue_stats);
+unsigned struct_bpf_dltlist_sz = sizeof(bpf_dltlist);
+unsigned struct_bpf_program_sz = sizeof(bpf_program);
+unsigned struct_bpf_stat_old_sz = sizeof(bpf_stat_old);
+unsigned struct_bpf_stat_sz = sizeof(bpf_stat);
+unsigned struct_bpf_version_sz = sizeof(bpf_version);
+unsigned struct_btreq_sz = sizeof(btreq);
+unsigned struct_btsco_info_sz = sizeof(btsco_info);
+unsigned struct_buffmem_desc_sz = sizeof(buffmem_desc);
+unsigned struct_cbq_add_class_sz = sizeof(cbq_add_class);
+unsigned struct_cbq_add_filter_sz = sizeof(cbq_add_filter);
+unsigned struct_cbq_delete_class_sz = sizeof(cbq_delete_class);
+unsigned struct_cbq_delete_filter_sz = sizeof(cbq_delete_filter);
+unsigned struct_cbq_getstats_sz = sizeof(cbq_getstats);
+unsigned struct_cbq_interface_sz = sizeof(cbq_interface);
+unsigned struct_cbq_modify_class_sz = sizeof(cbq_modify_class);
+unsigned struct_ccd_ioctl_sz = sizeof(ccd_ioctl);
+unsigned struct_cdnr_add_element_sz = sizeof(cdnr_add_element);
+unsigned struct_cdnr_add_filter_sz = sizeof(cdnr_add_filter);
+unsigned struct_cdnr_add_tbmeter_sz = sizeof(cdnr_add_tbmeter);
+unsigned struct_cdnr_add_trtcm_sz = sizeof(cdnr_add_trtcm);
+unsigned struct_cdnr_add_tswtcm_sz = sizeof(cdnr_add_tswtcm);
+unsigned struct_cdnr_delete_element_sz = sizeof(cdnr_delete_element);
+unsigned struct_cdnr_delete_filter_sz = sizeof(cdnr_delete_filter);
+unsigned struct_cdnr_get_stats_sz = sizeof(cdnr_get_stats);
+unsigned struct_cdnr_interface_sz = sizeof(cdnr_interface);
+unsigned struct_cdnr_modify_tbmeter_sz = sizeof(cdnr_modify_tbmeter);
+unsigned struct_cdnr_modify_trtcm_sz = sizeof(cdnr_modify_trtcm);
+unsigned struct_cdnr_modify_tswtcm_sz = sizeof(cdnr_modify_tswtcm);
+unsigned struct_cdnr_tbmeter_stats_sz = sizeof(cdnr_tbmeter_stats);
+unsigned struct_cdnr_tcm_stats_sz = sizeof(cdnr_tcm_stats);
+unsigned struct_cgd_ioctl_sz = sizeof(cgd_ioctl);
+unsigned struct_cgd_user_sz = sizeof(cgd_user);
+unsigned struct_changer_element_status_request_sz =
+ sizeof(changer_element_status_request);
+unsigned struct_changer_exchange_request_sz = sizeof(changer_exchange_request);
+unsigned struct_changer_move_request_sz = sizeof(changer_move_request);
+unsigned struct_changer_params_sz = sizeof(changer_params);
+unsigned struct_changer_position_request_sz = sizeof(changer_position_request);
+unsigned struct_changer_set_voltag_request_sz =
+ sizeof(changer_set_voltag_request);
+unsigned struct_clockctl_adjtime_sz = sizeof(clockctl_adjtime);
+unsigned struct_clockctl_clock_settime_sz = sizeof(clockctl_clock_settime);
+unsigned struct_clockctl_ntp_adjtime_sz = sizeof(clockctl_ntp_adjtime);
+unsigned struct_clockctl_settimeofday_sz = sizeof(clockctl_settimeofday);
+unsigned struct_cnwistats_sz = sizeof(cnwistats);
+unsigned struct_cnwitrail_sz = sizeof(cnwitrail);
+unsigned struct_cnwstatus_sz = sizeof(cnwstatus);
+unsigned struct_count_info_sz = sizeof(count_info);
+unsigned struct_cpu_ucode_sz = sizeof(cpu_ucode);
+unsigned struct_cpu_ucode_version_sz = sizeof(cpu_ucode_version);
+unsigned struct_crypt_kop_sz = sizeof(crypt_kop);
+unsigned struct_crypt_mkop_sz = sizeof(crypt_mkop);
+unsigned struct_crypt_mop_sz = sizeof(crypt_mop);
+unsigned struct_crypt_op_sz = sizeof(crypt_op);
+unsigned struct_crypt_result_sz = sizeof(crypt_result);
+unsigned struct_crypt_sfop_sz = sizeof(crypt_sfop);
+unsigned struct_crypt_sgop_sz = sizeof(crypt_sgop);
+unsigned struct_cryptret_sz = sizeof(cryptret);
+unsigned struct_devdetachargs_sz = sizeof(devdetachargs);
+unsigned struct_devlistargs_sz = sizeof(devlistargs);
+unsigned struct_devpmargs_sz = sizeof(devpmargs);
+unsigned struct_devrescanargs_sz = sizeof(devrescanargs);
+unsigned struct_disk_badsecinfo_sz = sizeof(disk_badsecinfo);
+unsigned struct_disk_strategy_sz = sizeof(disk_strategy);
+unsigned struct_disklabel_sz = sizeof(disklabel);
+unsigned struct_dkbad_sz = sizeof(dkbad);
+unsigned struct_dkwedge_info_sz = sizeof(dkwedge_info);
+unsigned struct_dkwedge_list_sz = sizeof(dkwedge_list);
+unsigned struct_dmio_setfunc_sz = sizeof(dmio_setfunc);
+unsigned struct_dmx_pes_filter_params_sz = sizeof(dmx_pes_filter_params);
+unsigned struct_dmx_sct_filter_params_sz = sizeof(dmx_sct_filter_params);
+unsigned struct_dmx_stc_sz = sizeof(dmx_stc);
+unsigned struct_dvb_diseqc_master_cmd_sz = sizeof(dvb_diseqc_master_cmd);
+unsigned struct_dvb_diseqc_slave_reply_sz = sizeof(dvb_diseqc_slave_reply);
+unsigned struct_dvb_frontend_event_sz = sizeof(dvb_frontend_event);
+unsigned struct_dvb_frontend_info_sz = sizeof(dvb_frontend_info);
+unsigned struct_dvb_frontend_parameters_sz = sizeof(dvb_frontend_parameters);
+unsigned struct_eccapreq_sz = sizeof(eccapreq);
+unsigned struct_fbcmap_sz = sizeof(fbcmap);
+unsigned struct_fbcurpos_sz = sizeof(fbcurpos);
+unsigned struct_fbcursor_sz = sizeof(fbcursor);
+unsigned struct_fbgattr_sz = sizeof(fbgattr);
+unsigned struct_fbsattr_sz = sizeof(fbsattr);
+unsigned struct_fbtype_sz = sizeof(fbtype);
+unsigned struct_fdformat_cmd_sz = sizeof(fdformat_cmd);
+unsigned struct_fdformat_parms_sz = sizeof(fdformat_parms);
+unsigned struct_fifoq_conf_sz = sizeof(fifoq_conf);
+unsigned struct_fifoq_getstats_sz = sizeof(fifoq_getstats);
+unsigned struct_fifoq_interface_sz = sizeof(fifoq_interface);
+unsigned struct_format_op_sz = sizeof(format_op);
+unsigned struct_fss_get_sz = sizeof(fss_get);
+unsigned struct_fss_set_sz = sizeof(fss_set);
+unsigned struct_gpio_attach_sz = sizeof(gpio_attach);
+unsigned struct_gpio_info_sz = sizeof(gpio_info);
+unsigned struct_gpio_req_sz = sizeof(gpio_req);
+unsigned struct_gpio_set_sz = sizeof(gpio_set);
+unsigned struct_hfsc_add_class_sz = sizeof(hfsc_add_class);
+unsigned struct_hfsc_add_filter_sz = sizeof(hfsc_add_filter);
+unsigned struct_hfsc_attach_sz = sizeof(hfsc_attach);
+unsigned struct_hfsc_class_stats_sz = sizeof(hfsc_class_stats);
+unsigned struct_hfsc_delete_class_sz = sizeof(hfsc_delete_class);
+unsigned struct_hfsc_delete_filter_sz = sizeof(hfsc_delete_filter);
+unsigned struct_hfsc_interface_sz = sizeof(hfsc_interface);
+unsigned struct_hfsc_modify_class_sz = sizeof(hfsc_modify_class);
+unsigned struct_hpcfb_dsp_op_sz = sizeof(hpcfb_dsp_op);
+unsigned struct_hpcfb_dspconf_sz = sizeof(hpcfb_dspconf);
+unsigned struct_hpcfb_fbconf_sz = sizeof(hpcfb_fbconf);
+unsigned struct_if_addrprefreq_sz = sizeof(if_addrprefreq);
+unsigned struct_if_clonereq_sz = sizeof(if_clonereq);
+unsigned struct_if_laddrreq_sz = sizeof(if_laddrreq);
+unsigned struct_ifaddr_sz = sizeof(ifaddr);
+unsigned struct_ifaliasreq_sz = sizeof(ifaliasreq);
+unsigned struct_ifcapreq_sz = sizeof(ifcapreq);
+unsigned struct_ifconf_sz = sizeof(ifconf);
+unsigned struct_ifdatareq_sz = sizeof(ifdatareq);
+unsigned struct_ifdrv_sz = sizeof(ifdrv);
+unsigned struct_ifmediareq_sz = sizeof(ifmediareq);
+unsigned struct_ifpppcstatsreq_sz = sizeof(ifpppcstatsreq);
+unsigned struct_ifpppstatsreq_sz = sizeof(ifpppstatsreq);
+unsigned struct_ifreq_sz = sizeof(ifreq);
+unsigned struct_in6_addrpolicy_sz = sizeof(in6_addrpolicy);
+unsigned struct_in6_ndireq_sz = sizeof(in6_ndireq);
+unsigned struct_ioc_load_unload_sz = sizeof(ioc_load_unload);
+unsigned struct_ioc_patch_sz = sizeof(ioc_patch);
+unsigned struct_ioc_play_blocks_sz = sizeof(ioc_play_blocks);
+unsigned struct_ioc_play_msf_sz = sizeof(ioc_play_msf);
+unsigned struct_ioc_play_track_sz = sizeof(ioc_play_track);
+unsigned struct_ioc_read_subchannel_sz = sizeof(ioc_read_subchannel);
+unsigned struct_ioc_read_toc_entry_sz = sizeof(ioc_read_toc_entry);
+unsigned struct_ioc_toc_header_sz = sizeof(ioc_toc_header);
+unsigned struct_ioc_vol_sz = sizeof(ioc_vol);
+unsigned struct_ioctl_pt_sz = sizeof(ioctl_pt);
+unsigned struct_ioppt_sz = sizeof(ioppt);
+unsigned struct_iovec_sz = sizeof(iovec);
+unsigned struct_ipfobj_sz = sizeof(ipfobj);
+unsigned struct_irda_params_sz = sizeof(irda_params);
+unsigned struct_isp_fc_device_sz = sizeof(isp_fc_device);
+unsigned struct_isp_fc_tsk_mgmt_sz = sizeof(isp_fc_tsk_mgmt);
+unsigned struct_isp_hba_device_sz = sizeof(isp_hba_device);
+unsigned struct_isv_cmd_sz = sizeof(isv_cmd);
+unsigned struct_jobs_add_class_sz = sizeof(jobs_add_class);
+unsigned struct_jobs_add_filter_sz = sizeof(jobs_add_filter);
+unsigned struct_jobs_attach_sz = sizeof(jobs_attach);
+unsigned struct_jobs_class_stats_sz = sizeof(jobs_class_stats);
+unsigned struct_jobs_delete_class_sz = sizeof(jobs_delete_class);
+unsigned struct_jobs_delete_filter_sz = sizeof(jobs_delete_filter);
+unsigned struct_jobs_interface_sz = sizeof(jobs_interface);
+unsigned struct_jobs_modify_class_sz = sizeof(jobs_modify_class);
+unsigned struct_kbentry_sz = sizeof(kbentry);
+unsigned struct_kfilter_mapping_sz = sizeof(kfilter_mapping);
+unsigned struct_kiockeymap_sz = sizeof(kiockeymap);
+unsigned struct_ksyms_gsymbol_sz = sizeof(ksyms_gsymbol);
+unsigned struct_ksyms_gvalue_sz = sizeof(ksyms_gvalue);
+unsigned struct_ksyms_ogsymbol_sz = sizeof(ksyms_ogsymbol);
+unsigned struct_kttcp_io_args_sz = sizeof(kttcp_io_args);
+unsigned struct_ltchars_sz = sizeof(ltchars);
+unsigned struct_lua_create_sz = sizeof(struct lua_create);
+unsigned struct_lua_info_sz = sizeof(struct lua_info);
+unsigned struct_lua_load_sz = sizeof(struct lua_load);
+unsigned struct_lua_require_sz = sizeof(lua_require);
+unsigned struct_mbpp_param_sz = sizeof(mbpp_param);
+unsigned struct_md_conf_sz = sizeof(md_conf);
+unsigned struct_meteor_capframe_sz = sizeof(meteor_capframe);
+unsigned struct_meteor_counts_sz = sizeof(meteor_counts);
+unsigned struct_meteor_geomet_sz = sizeof(meteor_geomet);
+unsigned struct_meteor_pixfmt_sz = sizeof(meteor_pixfmt);
+unsigned struct_meteor_video_sz = sizeof(meteor_video);
+unsigned struct_mlx_cinfo_sz = sizeof(mlx_cinfo);
+unsigned struct_mlx_pause_sz = sizeof(mlx_pause);
+unsigned struct_mlx_rebuild_request_sz = sizeof(mlx_rebuild_request);
+unsigned struct_mlx_rebuild_status_sz = sizeof(mlx_rebuild_status);
+unsigned struct_mlx_usercommand_sz = sizeof(mlx_usercommand);
+unsigned struct_mly_user_command_sz = sizeof(mly_user_command);
+unsigned struct_mly_user_health_sz = sizeof(mly_user_health);
+unsigned struct_mtget_sz = sizeof(mtget);
+unsigned struct_mtop_sz = sizeof(mtop);
+unsigned struct_npf_ioctl_table_sz = sizeof(npf_ioctl_table);
+unsigned struct_npioctl_sz = sizeof(npioctl);
+unsigned struct_nvme_pt_command_sz = sizeof(nvme_pt_command);
+unsigned struct_ochanger_element_status_request_sz =
+ sizeof(ochanger_element_status_request);
+unsigned struct_ofiocdesc_sz = sizeof(ofiocdesc);
+unsigned struct_okiockey_sz = sizeof(okiockey);
+unsigned struct_ortentry_sz = sizeof(ortentry);
+unsigned struct_oscsi_addr_sz = sizeof(oscsi_addr);
+unsigned struct_oss_audioinfo_sz = sizeof(oss_audioinfo);
+unsigned struct_oss_sysinfo_sz = sizeof(oss_sysinfo);
+unsigned struct_pciio_bdf_cfgreg_sz = sizeof(pciio_bdf_cfgreg);
+unsigned struct_pciio_businfo_sz = sizeof(pciio_businfo);
+unsigned struct_pciio_cfgreg_sz = sizeof(pciio_cfgreg);
+unsigned struct_pciio_drvname_sz = sizeof(pciio_drvname);
+unsigned struct_pciio_drvnameonbus_sz = sizeof(pciio_drvnameonbus);
+unsigned struct_pcvtid_sz = sizeof(pcvtid);
+unsigned struct_pf_osfp_ioctl_sz = sizeof(pf_osfp_ioctl);
+unsigned struct_pf_status_sz = sizeof(pf_status);
+unsigned struct_pfioc_altq_sz = sizeof(pfioc_altq);
+unsigned struct_pfioc_if_sz = sizeof(pfioc_if);
+unsigned struct_pfioc_iface_sz = sizeof(pfioc_iface);
+unsigned struct_pfioc_limit_sz = sizeof(pfioc_limit);
+unsigned struct_pfioc_natlook_sz = sizeof(pfioc_natlook);
+unsigned struct_pfioc_pooladdr_sz = sizeof(pfioc_pooladdr);
+unsigned struct_pfioc_qstats_sz = sizeof(pfioc_qstats);
+unsigned struct_pfioc_rule_sz = sizeof(pfioc_rule);
+unsigned struct_pfioc_ruleset_sz = sizeof(pfioc_ruleset);
+unsigned struct_pfioc_src_node_kill_sz = sizeof(pfioc_src_node_kill);
+unsigned struct_pfioc_src_nodes_sz = sizeof(pfioc_src_nodes);
+unsigned struct_pfioc_state_kill_sz = sizeof(pfioc_state_kill);
+unsigned struct_pfioc_state_sz = sizeof(pfioc_state);
+unsigned struct_pfioc_states_sz = sizeof(pfioc_states);
+unsigned struct_pfioc_table_sz = sizeof(pfioc_table);
+unsigned struct_pfioc_tm_sz = sizeof(pfioc_tm);
+unsigned struct_pfioc_trans_sz = sizeof(pfioc_trans);
+unsigned struct_plistref_sz = sizeof(plistref);
+unsigned struct_power_type_sz = sizeof(power_type);
+unsigned struct_ppp_idle_sz = sizeof(ppp_idle);
+unsigned struct_ppp_option_data_sz = sizeof(ppp_option_data);
+unsigned struct_ppp_rawin_sz = sizeof(ppp_rawin);
+unsigned struct_pppoeconnectionstate_sz = sizeof(pppoeconnectionstate);
+unsigned struct_pppoediscparms_sz = sizeof(pppoediscparms);
+unsigned struct_priq_add_class_sz = sizeof(priq_add_class);
+unsigned struct_priq_add_filter_sz = sizeof(priq_add_filter);
+unsigned struct_priq_class_stats_sz = sizeof(priq_class_stats);
+unsigned struct_priq_delete_class_sz = sizeof(priq_delete_class);
+unsigned struct_priq_delete_filter_sz = sizeof(priq_delete_filter);
+unsigned struct_priq_interface_sz = sizeof(priq_interface);
+unsigned struct_priq_modify_class_sz = sizeof(priq_modify_class);
+unsigned struct_ptmget_sz = sizeof(ptmget);
+unsigned struct_radio_info_sz = sizeof(radio_info);
+unsigned struct_red_conf_sz = sizeof(red_conf);
+unsigned struct_red_interface_sz = sizeof(red_interface);
+unsigned struct_red_stats_sz = sizeof(red_stats);
+unsigned struct_redparams_sz = sizeof(redparams);
+unsigned struct_rf_pmparams_sz = sizeof(rf_pmparams);
+unsigned struct_rf_pmstat_sz = sizeof(rf_pmstat);
+unsigned struct_rf_recon_req_sz = sizeof(rf_recon_req);
+unsigned struct_rio_conf_sz = sizeof(rio_conf);
+unsigned struct_rio_interface_sz = sizeof(rio_interface);
+unsigned struct_rio_stats_sz = sizeof(rio_stats);
+unsigned struct_scan_io_sz = sizeof(scan_io);
+unsigned struct_scbusaccel_args_sz = sizeof(scbusaccel_args);
+unsigned struct_scbusiodetach_args_sz = sizeof(scbusiodetach_args);
+unsigned struct_scbusioscan_args_sz = sizeof(scbusioscan_args);
+unsigned struct_scsi_addr_sz = sizeof(scsi_addr);
+unsigned struct_seq_event_rec_sz = sizeof(seq_event_rec);
+unsigned struct_session_op_sz = sizeof(session_op);
+unsigned struct_sgttyb_sz = sizeof(sgttyb);
+unsigned struct_sioc_sg_req_sz = sizeof(sioc_sg_req);
+unsigned struct_sioc_vif_req_sz = sizeof(sioc_vif_req);
+unsigned struct_smbioc_flags_sz = sizeof(smbioc_flags);
+unsigned struct_smbioc_lookup_sz = sizeof(smbioc_lookup);
+unsigned struct_smbioc_oshare_sz = sizeof(smbioc_oshare);
+unsigned struct_smbioc_ossn_sz = sizeof(smbioc_ossn);
+unsigned struct_smbioc_rq_sz = sizeof(smbioc_rq);
+unsigned struct_smbioc_rw_sz = sizeof(smbioc_rw);
+unsigned struct_spppauthcfg_sz = sizeof(spppauthcfg);
+unsigned struct_spppauthfailuresettings_sz = sizeof(spppauthfailuresettings);
+unsigned struct_spppauthfailurestats_sz = sizeof(spppauthfailurestats);
+unsigned struct_spppdnsaddrs_sz = sizeof(spppdnsaddrs);
+unsigned struct_spppdnssettings_sz = sizeof(spppdnssettings);
+unsigned struct_spppidletimeout_sz = sizeof(spppidletimeout);
+unsigned struct_spppkeepalivesettings_sz = sizeof(spppkeepalivesettings);
+unsigned struct_sppplcpcfg_sz = sizeof(sppplcpcfg);
+unsigned struct_spppstatus_sz = sizeof(spppstatus);
+unsigned struct_spppstatusncp_sz = sizeof(spppstatusncp);
+unsigned struct_srt_rt_sz = sizeof(srt_rt);
+unsigned struct_stic_xinfo_sz = sizeof(stic_xinfo);
+unsigned struct_sun_dkctlr_sz = sizeof(sun_dkctlr);
+unsigned struct_sun_dkgeom_sz = sizeof(sun_dkgeom);
+unsigned struct_sun_dkpart_sz = sizeof(sun_dkpart);
+unsigned struct_synth_info_sz = sizeof(synth_info);
+unsigned struct_tbrreq_sz = sizeof(tbrreq);
+unsigned struct_tchars_sz = sizeof(tchars);
+unsigned struct_termios_sz = sizeof(termios);
+unsigned struct_timeval_sz = sizeof(timeval);
+unsigned struct_twe_drivecommand_sz = sizeof(twe_drivecommand);
+unsigned struct_twe_paramcommand_sz = sizeof(twe_paramcommand);
+unsigned struct_twe_usercommand_sz = sizeof(twe_usercommand);
+unsigned struct_ukyopon_identify_sz = sizeof(ukyopon_identify);
+unsigned struct_urio_command_sz = sizeof(urio_command);
+unsigned struct_usb_alt_interface_sz = sizeof(usb_alt_interface);
+unsigned struct_usb_bulk_ra_wb_opt_sz = sizeof(usb_bulk_ra_wb_opt);
+unsigned struct_usb_config_desc_sz = sizeof(usb_config_desc);
+unsigned struct_usb_ctl_report_desc_sz = sizeof(usb_ctl_report_desc);
+unsigned struct_usb_ctl_report_sz = sizeof(usb_ctl_report);
+unsigned struct_usb_ctl_request_sz = sizeof(usb_ctl_request);
+#if defined(__x86_64__)
+unsigned struct_nvmm_ioc_capability_sz = sizeof(nvmm_ioc_capability);
+unsigned struct_nvmm_ioc_machine_create_sz = sizeof(nvmm_ioc_machine_create);
+unsigned struct_nvmm_ioc_machine_destroy_sz = sizeof(nvmm_ioc_machine_destroy);
+unsigned struct_nvmm_ioc_machine_configure_sz =
+ sizeof(nvmm_ioc_machine_configure);
+unsigned struct_nvmm_ioc_vcpu_create_sz = sizeof(nvmm_ioc_vcpu_create);
+unsigned struct_nvmm_ioc_vcpu_destroy_sz = sizeof(nvmm_ioc_vcpu_destroy);
+unsigned struct_nvmm_ioc_vcpu_setstate_sz = sizeof(nvmm_ioc_vcpu_setstate);
+unsigned struct_nvmm_ioc_vcpu_getstate_sz = sizeof(nvmm_ioc_vcpu_getstate);
+unsigned struct_nvmm_ioc_vcpu_inject_sz = sizeof(nvmm_ioc_vcpu_inject);
+unsigned struct_nvmm_ioc_vcpu_run_sz = sizeof(nvmm_ioc_vcpu_run);
+unsigned struct_nvmm_ioc_gpa_map_sz = sizeof(nvmm_ioc_gpa_map);
+unsigned struct_nvmm_ioc_gpa_unmap_sz = sizeof(nvmm_ioc_gpa_unmap);
+unsigned struct_nvmm_ioc_hva_map_sz = sizeof(nvmm_ioc_hva_map);
+unsigned struct_nvmm_ioc_hva_unmap_sz = sizeof(nvmm_ioc_hva_unmap);
+unsigned struct_nvmm_ioc_ctl_sz = sizeof(nvmm_ioc_ctl);
+#endif
+unsigned struct_spi_ioctl_configure_sz = sizeof(spi_ioctl_configure);
+unsigned struct_spi_ioctl_transfer_sz = sizeof(spi_ioctl_transfer);
+unsigned struct_autofs_daemon_request_sz = sizeof(autofs_daemon_request);
+unsigned struct_autofs_daemon_done_sz = sizeof(autofs_daemon_done);
+unsigned struct_sctp_connectx_addrs_sz = sizeof(sctp_connectx_addrs);
+unsigned struct_usb_device_info_old_sz = sizeof(usb_device_info_old);
+unsigned struct_usb_device_info_sz = sizeof(usb_device_info);
+unsigned struct_usb_device_stats_sz = sizeof(usb_device_stats);
+unsigned struct_usb_endpoint_desc_sz = sizeof(usb_endpoint_desc);
+unsigned struct_usb_full_desc_sz = sizeof(usb_full_desc);
+unsigned struct_usb_interface_desc_sz = sizeof(usb_interface_desc);
+unsigned struct_usb_string_desc_sz = sizeof(usb_string_desc);
+unsigned struct_utoppy_readfile_sz = sizeof(utoppy_readfile);
+unsigned struct_utoppy_rename_sz = sizeof(utoppy_rename);
+unsigned struct_utoppy_stats_sz = sizeof(utoppy_stats);
+unsigned struct_utoppy_writefile_sz = sizeof(utoppy_writefile);
+unsigned struct_v4l2_audio_sz = sizeof(v4l2_audio);
+unsigned struct_v4l2_audioout_sz = sizeof(v4l2_audioout);
+unsigned struct_v4l2_buffer_sz = sizeof(v4l2_buffer);
+unsigned struct_v4l2_capability_sz = sizeof(v4l2_capability);
+unsigned struct_v4l2_control_sz = sizeof(v4l2_control);
+unsigned struct_v4l2_crop_sz = sizeof(v4l2_crop);
+unsigned struct_v4l2_cropcap_sz = sizeof(v4l2_cropcap);
+unsigned struct_v4l2_fmtdesc_sz = sizeof(v4l2_fmtdesc);
+unsigned struct_v4l2_format_sz = sizeof(v4l2_format);
+unsigned struct_v4l2_framebuffer_sz = sizeof(v4l2_framebuffer);
+unsigned struct_v4l2_frequency_sz = sizeof(v4l2_frequency);
+unsigned struct_v4l2_frmivalenum_sz = sizeof(v4l2_frmivalenum);
+unsigned struct_v4l2_frmsizeenum_sz = sizeof(v4l2_frmsizeenum);
+unsigned struct_v4l2_input_sz = sizeof(v4l2_input);
+unsigned struct_v4l2_jpegcompression_sz = sizeof(v4l2_jpegcompression);
+unsigned struct_v4l2_modulator_sz = sizeof(v4l2_modulator);
+unsigned struct_v4l2_output_sz = sizeof(v4l2_output);
+unsigned struct_v4l2_queryctrl_sz = sizeof(v4l2_queryctrl);
+unsigned struct_v4l2_querymenu_sz = sizeof(v4l2_querymenu);
+unsigned struct_v4l2_requestbuffers_sz = sizeof(v4l2_requestbuffers);
+unsigned struct_v4l2_standard_sz = sizeof(v4l2_standard);
+unsigned struct_v4l2_streamparm_sz = sizeof(v4l2_streamparm);
+unsigned struct_v4l2_tuner_sz = sizeof(v4l2_tuner);
+unsigned struct_vnd_ioctl_sz = sizeof(vnd_ioctl);
+unsigned struct_vnd_user_sz = sizeof(vnd_user);
+unsigned struct_vt_stat_sz = sizeof(vt_stat);
+unsigned struct_wdog_conf_sz = sizeof(wdog_conf);
+unsigned struct_wdog_mode_sz = sizeof(wdog_mode);
+unsigned struct_ipmi_recv_sz = sizeof(ipmi_recv);
+unsigned struct_ipmi_req_sz = sizeof(ipmi_req);
+unsigned struct_ipmi_cmdspec_sz = sizeof(ipmi_cmdspec);
+unsigned struct_wfq_conf_sz = sizeof(wfq_conf);
+unsigned struct_wfq_getqid_sz = sizeof(wfq_getqid);
+unsigned struct_wfq_getstats_sz = sizeof(wfq_getstats);
+unsigned struct_wfq_interface_sz = sizeof(wfq_interface);
+unsigned struct_wfq_setweight_sz = sizeof(wfq_setweight);
+unsigned struct_winsize_sz = sizeof(winsize);
+unsigned struct_wscons_event_sz = sizeof(wscons_event);
+unsigned struct_wsdisplay_addscreendata_sz = sizeof(wsdisplay_addscreendata);
+unsigned struct_wsdisplay_char_sz = sizeof(wsdisplay_char);
+unsigned struct_wsdisplay_cmap_sz = sizeof(wsdisplay_cmap);
+unsigned struct_wsdisplay_curpos_sz = sizeof(wsdisplay_curpos);
+unsigned struct_wsdisplay_cursor_sz = sizeof(wsdisplay_cursor);
+unsigned struct_wsdisplay_delscreendata_sz = sizeof(wsdisplay_delscreendata);
+unsigned struct_wsdisplay_fbinfo_sz = sizeof(wsdisplay_fbinfo);
+unsigned struct_wsdisplay_font_sz = sizeof(wsdisplay_font);
+unsigned struct_wsdisplay_kbddata_sz = sizeof(wsdisplay_kbddata);
+unsigned struct_wsdisplay_msgattrs_sz = sizeof(wsdisplay_msgattrs);
+unsigned struct_wsdisplay_param_sz = sizeof(wsdisplay_param);
+unsigned struct_wsdisplay_scroll_data_sz = sizeof(wsdisplay_scroll_data);
+unsigned struct_wsdisplay_usefontdata_sz = sizeof(wsdisplay_usefontdata);
+unsigned struct_wsdisplayio_blit_sz = sizeof(wsdisplayio_blit);
+unsigned struct_wsdisplayio_bus_id_sz = sizeof(wsdisplayio_bus_id);
+unsigned struct_wsdisplayio_edid_info_sz = sizeof(wsdisplayio_edid_info);
+unsigned struct_wsdisplayio_fbinfo_sz = sizeof(wsdisplayio_fbinfo);
+unsigned struct_wskbd_bell_data_sz = sizeof(wskbd_bell_data);
+unsigned struct_wskbd_keyrepeat_data_sz = sizeof(wskbd_keyrepeat_data);
+unsigned struct_wskbd_map_data_sz = sizeof(wskbd_map_data);
+unsigned struct_wskbd_scroll_data_sz = sizeof(wskbd_scroll_data);
+unsigned struct_wsmouse_calibcoords_sz = sizeof(wsmouse_calibcoords);
+unsigned struct_wsmouse_id_sz = sizeof(wsmouse_id);
+unsigned struct_wsmouse_repeat_sz = sizeof(wsmouse_repeat);
+unsigned struct_wsmux_device_list_sz = sizeof(wsmux_device_list);
+unsigned struct_wsmux_device_sz = sizeof(wsmux_device);
+unsigned struct_xd_iocmd_sz = sizeof(xd_iocmd);
+
+unsigned struct_scsireq_sz = sizeof(struct scsireq);
+unsigned struct_tone_sz = sizeof(tone_t);
+unsigned union_twe_statrequest_sz = sizeof(union twe_statrequest);
+unsigned struct_usb_device_descriptor_sz = sizeof(usb_device_descriptor_t);
+unsigned struct_vt_mode_sz = sizeof(struct vt_mode);
+unsigned struct__old_mixer_info_sz = sizeof(struct _old_mixer_info);
+unsigned struct__agp_allocate_sz = sizeof(struct _agp_allocate);
+unsigned struct__agp_bind_sz = sizeof(struct _agp_bind);
+unsigned struct__agp_info_sz = sizeof(struct _agp_info);
+unsigned struct__agp_setup_sz = sizeof(struct _agp_setup);
+unsigned struct__agp_unbind_sz = sizeof(struct _agp_unbind);
+unsigned struct_atareq_sz = sizeof(struct atareq);
+unsigned struct_cpustate_sz = sizeof(struct cpustate);
+unsigned struct_dmx_caps_sz = sizeof(struct dmx_caps);
+unsigned enum_dmx_source_sz = sizeof(dmx_source_t);
+unsigned union_dvd_authinfo_sz = sizeof(dvd_authinfo);
+unsigned union_dvd_struct_sz = sizeof(dvd_struct);
+unsigned enum_v4l2_priority_sz = sizeof(enum v4l2_priority);
+unsigned struct_envsys_basic_info_sz = sizeof(struct envsys_basic_info);
+unsigned struct_envsys_tre_data_sz = sizeof(struct envsys_tre_data);
+unsigned enum_fe_sec_mini_cmd_sz = sizeof(enum fe_sec_mini_cmd);
+unsigned enum_fe_sec_tone_mode_sz = sizeof(enum fe_sec_tone_mode);
+unsigned enum_fe_sec_voltage_sz = sizeof(enum fe_sec_voltage);
+unsigned enum_fe_status_sz = sizeof(enum fe_status);
+unsigned struct_gdt_ctrt_sz = sizeof(struct gdt_ctrt);
+unsigned struct_gdt_event_sz = sizeof(struct gdt_event);
+unsigned struct_gdt_osv_sz = sizeof(struct gdt_osv);
+unsigned struct_gdt_rescan_sz = sizeof(struct gdt_rescan);
+unsigned struct_gdt_statist_sz = sizeof(struct gdt_statist);
+unsigned struct_gdt_ucmd_sz = sizeof(struct gdt_ucmd);
+unsigned struct_iscsi_conn_status_parameters_sz =
+ sizeof(iscsi_conn_status_parameters_t);
+unsigned struct_iscsi_get_version_parameters_sz =
+ sizeof(iscsi_get_version_parameters_t);
+unsigned struct_iscsi_iocommand_parameters_sz =
+ sizeof(iscsi_iocommand_parameters_t);
+unsigned struct_iscsi_login_parameters_sz = sizeof(iscsi_login_parameters_t);
+unsigned struct_iscsi_logout_parameters_sz = sizeof(iscsi_logout_parameters_t);
+unsigned struct_iscsi_register_event_parameters_sz =
+ sizeof(iscsi_register_event_parameters_t);
+unsigned struct_iscsi_remove_parameters_sz = sizeof(iscsi_remove_parameters_t);
+unsigned struct_iscsi_send_targets_parameters_sz =
+ sizeof(iscsi_send_targets_parameters_t);
+unsigned struct_iscsi_set_node_name_parameters_sz =
+ sizeof(iscsi_set_node_name_parameters_t);
+unsigned struct_iscsi_wait_event_parameters_sz =
+ sizeof(iscsi_wait_event_parameters_t);
+unsigned struct_isp_stats_sz = sizeof(isp_stats_t);
+unsigned struct_lsenable_sz = sizeof(struct lsenable);
+unsigned struct_lsdisable_sz = sizeof(struct lsdisable);
+unsigned struct_audio_format_query_sz = sizeof(audio_format_query);
+unsigned struct_mixer_ctrl_sz = sizeof(struct mixer_ctrl);
+unsigned struct_mixer_devinfo_sz = sizeof(struct mixer_devinfo);
+unsigned struct_mpu_command_rec_sz = sizeof(mpu_command_rec);
+unsigned struct_rndstat_sz = sizeof(rndstat_t);
+unsigned struct_rndstat_name_sz = sizeof(rndstat_name_t);
+unsigned struct_rndctl_sz = sizeof(rndctl_t);
+unsigned struct_rnddata_sz = sizeof(rnddata_t);
+unsigned struct_rndpoolstat_sz = sizeof(rndpoolstat_t);
+unsigned struct_rndstat_est_sz = sizeof(rndstat_est_t);
+unsigned struct_rndstat_est_name_sz = sizeof(rndstat_est_name_t);
+unsigned struct_pps_params_sz = sizeof(pps_params_t);
+unsigned struct_pps_info_sz = sizeof(pps_info_t);
+unsigned struct_mixer_info_sz = sizeof(struct mixer_info);
+unsigned struct_RF_SparetWait_sz = sizeof(RF_SparetWait_t);
+unsigned struct_RF_ComponentLabel_sz = sizeof(RF_ComponentLabel_t);
+unsigned struct_RF_SingleComponent_sz = sizeof(RF_SingleComponent_t);
+unsigned struct_RF_ProgressInfo_sz = sizeof(RF_ProgressInfo_t);
+unsigned struct_nvlist_ref_sz = sizeof(struct __sanitizer_nvlist_ref_t);
+unsigned struct_StringList_sz = sizeof(StringList);
+
+const unsigned IOCTL_NOT_PRESENT = 0;
+
+unsigned IOCTL_AFM_ADDFMAP = AFM_ADDFMAP;
+unsigned IOCTL_AFM_DELFMAP = AFM_DELFMAP;
+unsigned IOCTL_AFM_CLEANFMAP = AFM_CLEANFMAP;
+unsigned IOCTL_AFM_GETFMAP = AFM_GETFMAP;
+unsigned IOCTL_ALTQGTYPE = ALTQGTYPE;
+unsigned IOCTL_ALTQTBRSET = ALTQTBRSET;
+unsigned IOCTL_ALTQTBRGET = ALTQTBRGET;
+unsigned IOCTL_BLUE_IF_ATTACH = BLUE_IF_ATTACH;
+unsigned IOCTL_BLUE_IF_DETACH = BLUE_IF_DETACH;
+unsigned IOCTL_BLUE_ENABLE = BLUE_ENABLE;
+unsigned IOCTL_BLUE_DISABLE = BLUE_DISABLE;
+unsigned IOCTL_BLUE_CONFIG = BLUE_CONFIG;
+unsigned IOCTL_BLUE_GETSTATS = BLUE_GETSTATS;
+unsigned IOCTL_CBQ_IF_ATTACH = CBQ_IF_ATTACH;
+unsigned IOCTL_CBQ_IF_DETACH = CBQ_IF_DETACH;
+unsigned IOCTL_CBQ_ENABLE = CBQ_ENABLE;
+unsigned IOCTL_CBQ_DISABLE = CBQ_DISABLE;
+unsigned IOCTL_CBQ_CLEAR_HIERARCHY = CBQ_CLEAR_HIERARCHY;
+unsigned IOCTL_CBQ_ADD_CLASS = CBQ_ADD_CLASS;
+unsigned IOCTL_CBQ_DEL_CLASS = CBQ_DEL_CLASS;
+unsigned IOCTL_CBQ_MODIFY_CLASS = CBQ_MODIFY_CLASS;
+unsigned IOCTL_CBQ_ADD_FILTER = CBQ_ADD_FILTER;
+unsigned IOCTL_CBQ_DEL_FILTER = CBQ_DEL_FILTER;
+unsigned IOCTL_CBQ_GETSTATS = CBQ_GETSTATS;
+unsigned IOCTL_CDNR_IF_ATTACH = CDNR_IF_ATTACH;
+unsigned IOCTL_CDNR_IF_DETACH = CDNR_IF_DETACH;
+unsigned IOCTL_CDNR_ENABLE = CDNR_ENABLE;
+unsigned IOCTL_CDNR_DISABLE = CDNR_DISABLE;
+unsigned IOCTL_CDNR_ADD_FILTER = CDNR_ADD_FILTER;
+unsigned IOCTL_CDNR_DEL_FILTER = CDNR_DEL_FILTER;
+unsigned IOCTL_CDNR_GETSTATS = CDNR_GETSTATS;
+unsigned IOCTL_CDNR_ADD_ELEM = CDNR_ADD_ELEM;
+unsigned IOCTL_CDNR_DEL_ELEM = CDNR_DEL_ELEM;
+unsigned IOCTL_CDNR_ADD_TBM = CDNR_ADD_TBM;
+unsigned IOCTL_CDNR_MOD_TBM = CDNR_MOD_TBM;
+unsigned IOCTL_CDNR_TBM_STATS = CDNR_TBM_STATS;
+unsigned IOCTL_CDNR_ADD_TCM = CDNR_ADD_TCM;
+unsigned IOCTL_CDNR_MOD_TCM = CDNR_MOD_TCM;
+unsigned IOCTL_CDNR_TCM_STATS = CDNR_TCM_STATS;
+unsigned IOCTL_CDNR_ADD_TSW = CDNR_ADD_TSW;
+unsigned IOCTL_CDNR_MOD_TSW = CDNR_MOD_TSW;
+unsigned IOCTL_FIFOQ_IF_ATTACH = FIFOQ_IF_ATTACH;
+unsigned IOCTL_FIFOQ_IF_DETACH = FIFOQ_IF_DETACH;
+unsigned IOCTL_FIFOQ_ENABLE = FIFOQ_ENABLE;
+unsigned IOCTL_FIFOQ_DISABLE = FIFOQ_DISABLE;
+unsigned IOCTL_FIFOQ_CONFIG = FIFOQ_CONFIG;
+unsigned IOCTL_FIFOQ_GETSTATS = FIFOQ_GETSTATS;
+unsigned IOCTL_HFSC_IF_ATTACH = HFSC_IF_ATTACH;
+unsigned IOCTL_HFSC_IF_DETACH = HFSC_IF_DETACH;
+unsigned IOCTL_HFSC_ENABLE = HFSC_ENABLE;
+unsigned IOCTL_HFSC_DISABLE = HFSC_DISABLE;
+unsigned IOCTL_HFSC_CLEAR_HIERARCHY = HFSC_CLEAR_HIERARCHY;
+unsigned IOCTL_HFSC_ADD_CLASS = HFSC_ADD_CLASS;
+unsigned IOCTL_HFSC_DEL_CLASS = HFSC_DEL_CLASS;
+unsigned IOCTL_HFSC_MOD_CLASS = HFSC_MOD_CLASS;
+unsigned IOCTL_HFSC_ADD_FILTER = HFSC_ADD_FILTER;
+unsigned IOCTL_HFSC_DEL_FILTER = HFSC_DEL_FILTER;
+unsigned IOCTL_HFSC_GETSTATS = HFSC_GETSTATS;
+unsigned IOCTL_JOBS_IF_ATTACH = JOBS_IF_ATTACH;
+unsigned IOCTL_JOBS_IF_DETACH = JOBS_IF_DETACH;
+unsigned IOCTL_JOBS_ENABLE = JOBS_ENABLE;
+unsigned IOCTL_JOBS_DISABLE = JOBS_DISABLE;
+unsigned IOCTL_JOBS_CLEAR = JOBS_CLEAR;
+unsigned IOCTL_JOBS_ADD_CLASS = JOBS_ADD_CLASS;
+unsigned IOCTL_JOBS_DEL_CLASS = JOBS_DEL_CLASS;
+unsigned IOCTL_JOBS_MOD_CLASS = JOBS_MOD_CLASS;
+unsigned IOCTL_JOBS_ADD_FILTER = JOBS_ADD_FILTER;
+unsigned IOCTL_JOBS_DEL_FILTER = JOBS_DEL_FILTER;
+unsigned IOCTL_JOBS_GETSTATS = JOBS_GETSTATS;
+unsigned IOCTL_PRIQ_IF_ATTACH = PRIQ_IF_ATTACH;
+unsigned IOCTL_PRIQ_IF_DETACH = PRIQ_IF_DETACH;
+unsigned IOCTL_PRIQ_ENABLE = PRIQ_ENABLE;
+unsigned IOCTL_PRIQ_DISABLE = PRIQ_DISABLE;
+unsigned IOCTL_PRIQ_CLEAR = PRIQ_CLEAR;
+unsigned IOCTL_PRIQ_ADD_CLASS = PRIQ_ADD_CLASS;
+unsigned IOCTL_PRIQ_DEL_CLASS = PRIQ_DEL_CLASS;
+unsigned IOCTL_PRIQ_MOD_CLASS = PRIQ_MOD_CLASS;
+unsigned IOCTL_PRIQ_ADD_FILTER = PRIQ_ADD_FILTER;
+unsigned IOCTL_PRIQ_DEL_FILTER = PRIQ_DEL_FILTER;
+unsigned IOCTL_PRIQ_GETSTATS = PRIQ_GETSTATS;
+unsigned IOCTL_RED_IF_ATTACH = RED_IF_ATTACH;
+unsigned IOCTL_RED_IF_DETACH = RED_IF_DETACH;
+unsigned IOCTL_RED_ENABLE = RED_ENABLE;
+unsigned IOCTL_RED_DISABLE = RED_DISABLE;
+unsigned IOCTL_RED_CONFIG = RED_CONFIG;
+unsigned IOCTL_RED_GETSTATS = RED_GETSTATS;
+unsigned IOCTL_RED_SETDEFAULTS = RED_SETDEFAULTS;
+unsigned IOCTL_RIO_IF_ATTACH = RIO_IF_ATTACH;
+unsigned IOCTL_RIO_IF_DETACH = RIO_IF_DETACH;
+unsigned IOCTL_RIO_ENABLE = RIO_ENABLE;
+unsigned IOCTL_RIO_DISABLE = RIO_DISABLE;
+unsigned IOCTL_RIO_CONFIG = RIO_CONFIG;
+unsigned IOCTL_RIO_GETSTATS = RIO_GETSTATS;
+unsigned IOCTL_RIO_SETDEFAULTS = RIO_SETDEFAULTS;
+unsigned IOCTL_WFQ_IF_ATTACH = WFQ_IF_ATTACH;
+unsigned IOCTL_WFQ_IF_DETACH = WFQ_IF_DETACH;
+unsigned IOCTL_WFQ_ENABLE = WFQ_ENABLE;
+unsigned IOCTL_WFQ_DISABLE = WFQ_DISABLE;
+unsigned IOCTL_WFQ_CONFIG = WFQ_CONFIG;
+unsigned IOCTL_WFQ_GET_STATS = WFQ_GET_STATS;
+unsigned IOCTL_WFQ_GET_QID = WFQ_GET_QID;
+unsigned IOCTL_WFQ_SET_WEIGHT = WFQ_SET_WEIGHT;
+unsigned IOCTL_CRIOGET = CRIOGET;
+unsigned IOCTL_CIOCFSESSION = CIOCFSESSION;
+unsigned IOCTL_CIOCKEY = CIOCKEY;
+unsigned IOCTL_CIOCNFKEYM = CIOCNFKEYM;
+unsigned IOCTL_CIOCNFSESSION = CIOCNFSESSION;
+unsigned IOCTL_CIOCNCRYPTRETM = CIOCNCRYPTRETM;
+unsigned IOCTL_CIOCNCRYPTRET = CIOCNCRYPTRET;
+unsigned IOCTL_CIOCGSESSION = CIOCGSESSION;
+unsigned IOCTL_CIOCNGSESSION = CIOCNGSESSION;
+unsigned IOCTL_CIOCCRYPT = CIOCCRYPT;
+unsigned IOCTL_CIOCNCRYPTM = CIOCNCRYPTM;
+unsigned IOCTL_CIOCASYMFEAT = CIOCASYMFEAT;
+unsigned IOCTL_APM_IOC_REJECT = APM_IOC_REJECT;
+unsigned IOCTL_APM_IOC_STANDBY = APM_IOC_STANDBY;
+unsigned IOCTL_APM_IOC_SUSPEND = APM_IOC_SUSPEND;
+unsigned IOCTL_OAPM_IOC_GETPOWER = OAPM_IOC_GETPOWER;
+unsigned IOCTL_APM_IOC_GETPOWER = APM_IOC_GETPOWER;
+unsigned IOCTL_APM_IOC_NEXTEVENT = APM_IOC_NEXTEVENT;
+unsigned IOCTL_APM_IOC_DEV_CTL = APM_IOC_DEV_CTL;
+unsigned IOCTL_NETBSD_DM_IOCTL = NETBSD_DM_IOCTL;
+unsigned IOCTL_DMIO_SETFUNC = DMIO_SETFUNC;
+unsigned IOCTL_DMX_START = DMX_START;
+unsigned IOCTL_DMX_STOP = DMX_STOP;
+unsigned IOCTL_DMX_SET_FILTER = DMX_SET_FILTER;
+unsigned IOCTL_DMX_SET_PES_FILTER = DMX_SET_PES_FILTER;
+unsigned IOCTL_DMX_SET_BUFFER_SIZE = DMX_SET_BUFFER_SIZE;
+unsigned IOCTL_DMX_GET_STC = DMX_GET_STC;
+unsigned IOCTL_DMX_ADD_PID = DMX_ADD_PID;
+unsigned IOCTL_DMX_REMOVE_PID = DMX_REMOVE_PID;
+unsigned IOCTL_DMX_GET_CAPS = DMX_GET_CAPS;
+unsigned IOCTL_DMX_SET_SOURCE = DMX_SET_SOURCE;
+unsigned IOCTL_FE_READ_STATUS = FE_READ_STATUS;
+unsigned IOCTL_FE_READ_BER = FE_READ_BER;
+unsigned IOCTL_FE_READ_SNR = FE_READ_SNR;
+unsigned IOCTL_FE_READ_SIGNAL_STRENGTH = FE_READ_SIGNAL_STRENGTH;
+unsigned IOCTL_FE_READ_UNCORRECTED_BLOCKS = FE_READ_UNCORRECTED_BLOCKS;
+unsigned IOCTL_FE_SET_FRONTEND = FE_SET_FRONTEND;
+unsigned IOCTL_FE_GET_FRONTEND = FE_GET_FRONTEND;
+unsigned IOCTL_FE_GET_EVENT = FE_GET_EVENT;
+unsigned IOCTL_FE_GET_INFO = FE_GET_INFO;
+unsigned IOCTL_FE_DISEQC_RESET_OVERLOAD = FE_DISEQC_RESET_OVERLOAD;
+unsigned IOCTL_FE_DISEQC_SEND_MASTER_CMD = FE_DISEQC_SEND_MASTER_CMD;
+unsigned IOCTL_FE_DISEQC_RECV_SLAVE_REPLY = FE_DISEQC_RECV_SLAVE_REPLY;
+unsigned IOCTL_FE_DISEQC_SEND_BURST = FE_DISEQC_SEND_BURST;
+unsigned IOCTL_FE_SET_TONE = FE_SET_TONE;
+unsigned IOCTL_FE_SET_VOLTAGE = FE_SET_VOLTAGE;
+unsigned IOCTL_FE_ENABLE_HIGH_LNB_VOLTAGE = FE_ENABLE_HIGH_LNB_VOLTAGE;
+unsigned IOCTL_FE_SET_FRONTEND_TUNE_MODE = FE_SET_FRONTEND_TUNE_MODE;
+unsigned IOCTL_FE_DISHNETWORK_SEND_LEGACY_CMD = FE_DISHNETWORK_SEND_LEGACY_CMD;
+unsigned IOCTL_FILEMON_SET_FD = FILEMON_SET_FD;
+unsigned IOCTL_FILEMON_SET_PID = FILEMON_SET_PID;
+unsigned IOCTL_HDAUDIO_FGRP_INFO = HDAUDIO_FGRP_INFO;
+unsigned IOCTL_HDAUDIO_FGRP_GETCONFIG = HDAUDIO_FGRP_GETCONFIG;
+unsigned IOCTL_HDAUDIO_FGRP_SETCONFIG = HDAUDIO_FGRP_SETCONFIG;
+unsigned IOCTL_HDAUDIO_FGRP_WIDGET_INFO = HDAUDIO_FGRP_WIDGET_INFO;
+unsigned IOCTL_HDAUDIO_FGRP_CODEC_INFO = HDAUDIO_FGRP_CODEC_INFO;
+unsigned IOCTL_HDAUDIO_AFG_WIDGET_INFO = HDAUDIO_AFG_WIDGET_INFO;
+unsigned IOCTL_HDAUDIO_AFG_CODEC_INFO = HDAUDIO_AFG_CODEC_INFO;
+unsigned IOCTL_CEC_GET_PHYS_ADDR = CEC_GET_PHYS_ADDR;
+unsigned IOCTL_CEC_GET_LOG_ADDRS = CEC_GET_LOG_ADDRS;
+unsigned IOCTL_CEC_SET_LOG_ADDRS = CEC_SET_LOG_ADDRS;
+unsigned IOCTL_CEC_GET_VENDOR_ID = CEC_GET_VENDOR_ID;
+unsigned IOCTL_HPCFBIO_GCONF = HPCFBIO_GCONF;
+unsigned IOCTL_HPCFBIO_SCONF = HPCFBIO_SCONF;
+unsigned IOCTL_HPCFBIO_GDSPCONF = HPCFBIO_GDSPCONF;
+unsigned IOCTL_HPCFBIO_SDSPCONF = HPCFBIO_SDSPCONF;
+unsigned IOCTL_HPCFBIO_GOP = HPCFBIO_GOP;
+unsigned IOCTL_HPCFBIO_SOP = HPCFBIO_SOP;
+unsigned IOCTL_IOPIOCPT = IOPIOCPT;
+unsigned IOCTL_IOPIOCGLCT = IOPIOCGLCT;
+unsigned IOCTL_IOPIOCGSTATUS = IOPIOCGSTATUS;
+unsigned IOCTL_IOPIOCRECONFIG = IOPIOCRECONFIG;
+unsigned IOCTL_IOPIOCGTIDMAP = IOPIOCGTIDMAP;
+unsigned IOCTL_SIOCGATHSTATS = SIOCGATHSTATS;
+unsigned IOCTL_SIOCGATHDIAG = SIOCGATHDIAG;
+unsigned IOCTL_METEORCAPTUR = METEORCAPTUR;
+unsigned IOCTL_METEORCAPFRM = METEORCAPFRM;
+unsigned IOCTL_METEORSETGEO = METEORSETGEO;
+unsigned IOCTL_METEORGETGEO = METEORGETGEO;
+unsigned IOCTL_METEORSTATUS = METEORSTATUS;
+unsigned IOCTL_METEORSHUE = METEORSHUE;
+unsigned IOCTL_METEORGHUE = METEORGHUE;
+unsigned IOCTL_METEORSFMT = METEORSFMT;
+unsigned IOCTL_METEORGFMT = METEORGFMT;
+unsigned IOCTL_METEORSINPUT = METEORSINPUT;
+unsigned IOCTL_METEORGINPUT = METEORGINPUT;
+unsigned IOCTL_METEORSCHCV = METEORSCHCV;
+unsigned IOCTL_METEORGCHCV = METEORGCHCV;
+unsigned IOCTL_METEORSCOUNT = METEORSCOUNT;
+unsigned IOCTL_METEORGCOUNT = METEORGCOUNT;
+unsigned IOCTL_METEORSFPS = METEORSFPS;
+unsigned IOCTL_METEORGFPS = METEORGFPS;
+unsigned IOCTL_METEORSSIGNAL = METEORSSIGNAL;
+unsigned IOCTL_METEORGSIGNAL = METEORGSIGNAL;
+unsigned IOCTL_METEORSVIDEO = METEORSVIDEO;
+unsigned IOCTL_METEORGVIDEO = METEORGVIDEO;
+unsigned IOCTL_METEORSBRIG = METEORSBRIG;
+unsigned IOCTL_METEORGBRIG = METEORGBRIG;
+unsigned IOCTL_METEORSCSAT = METEORSCSAT;
+unsigned IOCTL_METEORGCSAT = METEORGCSAT;
+unsigned IOCTL_METEORSCONT = METEORSCONT;
+unsigned IOCTL_METEORGCONT = METEORGCONT;
+unsigned IOCTL_METEORSHWS = METEORSHWS;
+unsigned IOCTL_METEORGHWS = METEORGHWS;
+unsigned IOCTL_METEORSVWS = METEORSVWS;
+unsigned IOCTL_METEORGVWS = METEORGVWS;
+unsigned IOCTL_METEORSTS = METEORSTS;
+unsigned IOCTL_METEORGTS = METEORGTS;
+unsigned IOCTL_TVTUNER_SETCHNL = TVTUNER_SETCHNL;
+unsigned IOCTL_TVTUNER_GETCHNL = TVTUNER_GETCHNL;
+unsigned IOCTL_TVTUNER_SETTYPE = TVTUNER_SETTYPE;
+unsigned IOCTL_TVTUNER_GETTYPE = TVTUNER_GETTYPE;
+unsigned IOCTL_TVTUNER_GETSTATUS = TVTUNER_GETSTATUS;
+unsigned IOCTL_TVTUNER_SETFREQ = TVTUNER_SETFREQ;
+unsigned IOCTL_TVTUNER_GETFREQ = TVTUNER_GETFREQ;
+unsigned IOCTL_TVTUNER_SETAFC = TVTUNER_SETAFC;
+unsigned IOCTL_TVTUNER_GETAFC = TVTUNER_GETAFC;
+unsigned IOCTL_RADIO_SETMODE = RADIO_SETMODE;
+unsigned IOCTL_RADIO_GETMODE = RADIO_GETMODE;
+unsigned IOCTL_RADIO_SETFREQ = RADIO_SETFREQ;
+unsigned IOCTL_RADIO_GETFREQ = RADIO_GETFREQ;
+unsigned IOCTL_METEORSACTPIXFMT = METEORSACTPIXFMT;
+unsigned IOCTL_METEORGACTPIXFMT = METEORGACTPIXFMT;
+unsigned IOCTL_METEORGSUPPIXFMT = METEORGSUPPIXFMT;
+unsigned IOCTL_TVTUNER_GETCHNLSET = TVTUNER_GETCHNLSET;
+unsigned IOCTL_REMOTE_GETKEY = REMOTE_GETKEY;
+unsigned IOCTL_GDT_IOCTL_GENERAL = GDT_IOCTL_GENERAL;
+unsigned IOCTL_GDT_IOCTL_DRVERS = GDT_IOCTL_DRVERS;
+unsigned IOCTL_GDT_IOCTL_CTRTYPE = GDT_IOCTL_CTRTYPE;
+unsigned IOCTL_GDT_IOCTL_OSVERS = GDT_IOCTL_OSVERS;
+unsigned IOCTL_GDT_IOCTL_CTRCNT = GDT_IOCTL_CTRCNT;
+unsigned IOCTL_GDT_IOCTL_EVENT = GDT_IOCTL_EVENT;
+unsigned IOCTL_GDT_IOCTL_STATIST = GDT_IOCTL_STATIST;
+unsigned IOCTL_GDT_IOCTL_RESCAN = GDT_IOCTL_RESCAN;
+unsigned IOCTL_ISP_SDBLEV = ISP_SDBLEV;
+unsigned IOCTL_ISP_RESETHBA = ISP_RESETHBA;
+unsigned IOCTL_ISP_RESCAN = ISP_RESCAN;
+unsigned IOCTL_ISP_SETROLE = ISP_SETROLE;
+unsigned IOCTL_ISP_GETROLE = ISP_GETROLE;
+unsigned IOCTL_ISP_GET_STATS = ISP_GET_STATS;
+unsigned IOCTL_ISP_CLR_STATS = ISP_CLR_STATS;
+unsigned IOCTL_ISP_FC_LIP = ISP_FC_LIP;
+unsigned IOCTL_ISP_FC_GETDINFO = ISP_FC_GETDINFO;
+unsigned IOCTL_ISP_GET_FW_CRASH_DUMP = ISP_GET_FW_CRASH_DUMP;
+unsigned IOCTL_ISP_FORCE_CRASH_DUMP = ISP_FORCE_CRASH_DUMP;
+unsigned IOCTL_ISP_FC_GETHINFO = ISP_FC_GETHINFO;
+unsigned IOCTL_ISP_TSK_MGMT = ISP_TSK_MGMT;
+unsigned IOCTL_ISP_FC_GETDLIST = ISP_FC_GETDLIST;
+unsigned IOCTL_MLXD_STATUS = MLXD_STATUS;
+unsigned IOCTL_MLXD_CHECKASYNC = MLXD_CHECKASYNC;
+unsigned IOCTL_MLXD_DETACH = MLXD_DETACH;
+unsigned IOCTL_MLX_RESCAN_DRIVES = MLX_RESCAN_DRIVES;
+unsigned IOCTL_MLX_PAUSE_CHANNEL = MLX_PAUSE_CHANNEL;
+unsigned IOCTL_MLX_COMMAND = MLX_COMMAND;
+unsigned IOCTL_MLX_REBUILDASYNC = MLX_REBUILDASYNC;
+unsigned IOCTL_MLX_REBUILDSTAT = MLX_REBUILDSTAT;
+unsigned IOCTL_MLX_GET_SYSDRIVE = MLX_GET_SYSDRIVE;
+unsigned IOCTL_MLX_GET_CINFO = MLX_GET_CINFO;
+unsigned IOCTL_NVME_PASSTHROUGH_CMD = NVME_PASSTHROUGH_CMD;
+unsigned IOCTL_FWCFGIO_SET_INDEX = FWCFGIO_SET_INDEX;
+unsigned IOCTL_IRDA_RESET_PARAMS = IRDA_RESET_PARAMS;
+unsigned IOCTL_IRDA_SET_PARAMS = IRDA_SET_PARAMS;
+unsigned IOCTL_IRDA_GET_SPEEDMASK = IRDA_GET_SPEEDMASK;
+unsigned IOCTL_IRDA_GET_TURNAROUNDMASK = IRDA_GET_TURNAROUNDMASK;
+unsigned IOCTL_IRFRAMETTY_GET_DEVICE = IRFRAMETTY_GET_DEVICE;
+unsigned IOCTL_IRFRAMETTY_GET_DONGLE = IRFRAMETTY_GET_DONGLE;
+unsigned IOCTL_IRFRAMETTY_SET_DONGLE = IRFRAMETTY_SET_DONGLE;
+unsigned IOCTL_ISV_CMD = ISV_CMD;
+unsigned IOCTL_WTQICMD = WTQICMD;
+unsigned IOCTL_ISCSI_GET_VERSION = ISCSI_GET_VERSION;
+unsigned IOCTL_ISCSI_LOGIN = ISCSI_LOGIN;
+unsigned IOCTL_ISCSI_LOGOUT = ISCSI_LOGOUT;
+unsigned IOCTL_ISCSI_ADD_CONNECTION = ISCSI_ADD_CONNECTION;
+unsigned IOCTL_ISCSI_RESTORE_CONNECTION = ISCSI_RESTORE_CONNECTION;
+unsigned IOCTL_ISCSI_REMOVE_CONNECTION = ISCSI_REMOVE_CONNECTION;
+unsigned IOCTL_ISCSI_CONNECTION_STATUS = ISCSI_CONNECTION_STATUS;
+unsigned IOCTL_ISCSI_SEND_TARGETS = ISCSI_SEND_TARGETS;
+unsigned IOCTL_ISCSI_SET_NODE_NAME = ISCSI_SET_NODE_NAME;
+unsigned IOCTL_ISCSI_IO_COMMAND = ISCSI_IO_COMMAND;
+unsigned IOCTL_ISCSI_REGISTER_EVENT = ISCSI_REGISTER_EVENT;
+unsigned IOCTL_ISCSI_DEREGISTER_EVENT = ISCSI_DEREGISTER_EVENT;
+unsigned IOCTL_ISCSI_WAIT_EVENT = ISCSI_WAIT_EVENT;
+unsigned IOCTL_ISCSI_POLL_EVENT = ISCSI_POLL_EVENT;
+unsigned IOCTL_OFIOCGET = OFIOCGET;
+unsigned IOCTL_OFIOCSET = OFIOCSET;
+unsigned IOCTL_OFIOCNEXTPROP = OFIOCNEXTPROP;
+unsigned IOCTL_OFIOCGETOPTNODE = OFIOCGETOPTNODE;
+unsigned IOCTL_OFIOCGETNEXT = OFIOCGETNEXT;
+unsigned IOCTL_OFIOCGETCHILD = OFIOCGETCHILD;
+unsigned IOCTL_OFIOCFINDDEVICE = OFIOCFINDDEVICE;
+unsigned IOCTL_AMR_IO_VERSION = AMR_IO_VERSION;
+unsigned IOCTL_AMR_IO_COMMAND = AMR_IO_COMMAND;
+unsigned IOCTL_MLYIO_COMMAND = MLYIO_COMMAND;
+unsigned IOCTL_MLYIO_HEALTH = MLYIO_HEALTH;
+unsigned IOCTL_PCI_IOC_CFGREAD = PCI_IOC_CFGREAD;
+unsigned IOCTL_PCI_IOC_CFGWRITE = PCI_IOC_CFGWRITE;
+unsigned IOCTL_PCI_IOC_BDF_CFGREAD = PCI_IOC_BDF_CFGREAD;
+unsigned IOCTL_PCI_IOC_BDF_CFGWRITE = PCI_IOC_BDF_CFGWRITE;
+unsigned IOCTL_PCI_IOC_BUSINFO = PCI_IOC_BUSINFO;
+unsigned IOCTL_PCI_IOC_DRVNAME = PCI_IOC_DRVNAME;
+unsigned IOCTL_PCI_IOC_DRVNAMEONBUS = PCI_IOC_DRVNAMEONBUS;
+unsigned IOCTL_TWEIO_COMMAND = TWEIO_COMMAND;
+unsigned IOCTL_TWEIO_STATS = TWEIO_STATS;
+unsigned IOCTL_TWEIO_AEN_POLL = TWEIO_AEN_POLL;
+unsigned IOCTL_TWEIO_AEN_WAIT = TWEIO_AEN_WAIT;
+unsigned IOCTL_TWEIO_SET_PARAM = TWEIO_SET_PARAM;
+unsigned IOCTL_TWEIO_GET_PARAM = TWEIO_GET_PARAM;
+unsigned IOCTL_TWEIO_RESET = TWEIO_RESET;
+unsigned IOCTL_TWEIO_ADD_UNIT = TWEIO_ADD_UNIT;
+unsigned IOCTL_TWEIO_DEL_UNIT = TWEIO_DEL_UNIT;
+unsigned IOCTL_SIOCSCNWDOMAIN = SIOCSCNWDOMAIN;
+unsigned IOCTL_SIOCGCNWDOMAIN = SIOCGCNWDOMAIN;
+unsigned IOCTL_SIOCSCNWKEY = SIOCSCNWKEY;
+unsigned IOCTL_SIOCGCNWSTATUS = SIOCGCNWSTATUS;
+unsigned IOCTL_SIOCGCNWSTATS = SIOCGCNWSTATS;
+unsigned IOCTL_SIOCGCNWTRAIL = SIOCGCNWTRAIL;
+unsigned IOCTL_SIOCGRAYSIGLEV = SIOCGRAYSIGLEV;
+unsigned IOCTL_RAIDFRAME_SHUTDOWN = RAIDFRAME_SHUTDOWN;
+unsigned IOCTL_RAIDFRAME_TUR = RAIDFRAME_TUR;
+unsigned IOCTL_RAIDFRAME_FAIL_DISK = RAIDFRAME_FAIL_DISK;
+unsigned IOCTL_RAIDFRAME_CHECK_RECON_STATUS = RAIDFRAME_CHECK_RECON_STATUS;
+unsigned IOCTL_RAIDFRAME_REWRITEPARITY = RAIDFRAME_REWRITEPARITY;
+unsigned IOCTL_RAIDFRAME_COPYBACK = RAIDFRAME_COPYBACK;
+unsigned IOCTL_RAIDFRAME_SPARET_WAIT = RAIDFRAME_SPARET_WAIT;
+unsigned IOCTL_RAIDFRAME_SEND_SPARET = RAIDFRAME_SEND_SPARET;
+unsigned IOCTL_RAIDFRAME_ABORT_SPARET_WAIT = RAIDFRAME_ABORT_SPARET_WAIT;
+unsigned IOCTL_RAIDFRAME_START_ATRACE = RAIDFRAME_START_ATRACE;
+unsigned IOCTL_RAIDFRAME_STOP_ATRACE = RAIDFRAME_STOP_ATRACE;
+unsigned IOCTL_RAIDFRAME_GET_SIZE = RAIDFRAME_GET_SIZE;
+unsigned IOCTL_RAIDFRAME_RESET_ACCTOTALS = RAIDFRAME_RESET_ACCTOTALS;
+unsigned IOCTL_RAIDFRAME_KEEP_ACCTOTALS = RAIDFRAME_KEEP_ACCTOTALS;
+unsigned IOCTL_RAIDFRAME_GET_COMPONENT_LABEL = RAIDFRAME_GET_COMPONENT_LABEL;
+unsigned IOCTL_RAIDFRAME_SET_COMPONENT_LABEL = RAIDFRAME_SET_COMPONENT_LABEL;
+unsigned IOCTL_RAIDFRAME_INIT_LABELS = RAIDFRAME_INIT_LABELS;
+unsigned IOCTL_RAIDFRAME_ADD_HOT_SPARE = RAIDFRAME_ADD_HOT_SPARE;
+unsigned IOCTL_RAIDFRAME_REMOVE_HOT_SPARE = RAIDFRAME_REMOVE_HOT_SPARE;
+unsigned IOCTL_RAIDFRAME_REBUILD_IN_PLACE = RAIDFRAME_REBUILD_IN_PLACE;
+unsigned IOCTL_RAIDFRAME_CHECK_PARITY = RAIDFRAME_CHECK_PARITY;
+unsigned IOCTL_RAIDFRAME_CHECK_PARITYREWRITE_STATUS =
+ RAIDFRAME_CHECK_PARITYREWRITE_STATUS;
+unsigned IOCTL_RAIDFRAME_CHECK_COPYBACK_STATUS =
+ RAIDFRAME_CHECK_COPYBACK_STATUS;
+unsigned IOCTL_RAIDFRAME_SET_AUTOCONFIG = RAIDFRAME_SET_AUTOCONFIG;
+unsigned IOCTL_RAIDFRAME_SET_ROOT = RAIDFRAME_SET_ROOT;
+unsigned IOCTL_RAIDFRAME_DELETE_COMPONENT = RAIDFRAME_DELETE_COMPONENT;
+unsigned IOCTL_RAIDFRAME_INCORPORATE_HOT_SPARE =
+ RAIDFRAME_INCORPORATE_HOT_SPARE;
+unsigned IOCTL_RAIDFRAME_CHECK_RECON_STATUS_EXT =
+ RAIDFRAME_CHECK_RECON_STATUS_EXT;
+unsigned IOCTL_RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT =
+ RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT;
+unsigned IOCTL_RAIDFRAME_CHECK_COPYBACK_STATUS_EXT =
+ RAIDFRAME_CHECK_COPYBACK_STATUS_EXT;
+unsigned IOCTL_RAIDFRAME_CONFIGURE = RAIDFRAME_CONFIGURE;
+unsigned IOCTL_RAIDFRAME_GET_INFO = RAIDFRAME_GET_INFO;
+unsigned IOCTL_RAIDFRAME_PARITYMAP_STATUS = RAIDFRAME_PARITYMAP_STATUS;
+unsigned IOCTL_RAIDFRAME_PARITYMAP_GET_DISABLE =
+ RAIDFRAME_PARITYMAP_GET_DISABLE;
+unsigned IOCTL_RAIDFRAME_PARITYMAP_SET_DISABLE =
+ RAIDFRAME_PARITYMAP_SET_DISABLE;
+unsigned IOCTL_RAIDFRAME_PARITYMAP_SET_PARAMS = RAIDFRAME_PARITYMAP_SET_PARAMS;
+unsigned IOCTL_RAIDFRAME_SET_LAST_UNIT = RAIDFRAME_SET_LAST_UNIT;
+unsigned IOCTL_MBPPIOCSPARAM = MBPPIOCSPARAM;
+unsigned IOCTL_MBPPIOCGPARAM = MBPPIOCGPARAM;
+unsigned IOCTL_MBPPIOCGSTAT = MBPPIOCGSTAT;
+unsigned IOCTL_SESIOC_GETNOBJ = SESIOC_GETNOBJ;
+unsigned IOCTL_SESIOC_GETOBJMAP = SESIOC_GETOBJMAP;
+unsigned IOCTL_SESIOC_GETENCSTAT = SESIOC_GETENCSTAT;
+unsigned IOCTL_SESIOC_SETENCSTAT = SESIOC_SETENCSTAT;
+unsigned IOCTL_SESIOC_GETOBJSTAT = SESIOC_GETOBJSTAT;
+unsigned IOCTL_SESIOC_SETOBJSTAT = SESIOC_SETOBJSTAT;
+unsigned IOCTL_SESIOC_GETTEXT = SESIOC_GETTEXT;
+unsigned IOCTL_SESIOC_INIT = SESIOC_INIT;
+unsigned IOCTL_SUN_DKIOCGGEOM = SUN_DKIOCGGEOM;
+unsigned IOCTL_SUN_DKIOCINFO = SUN_DKIOCINFO;
+unsigned IOCTL_SUN_DKIOCGPART = SUN_DKIOCGPART;
+unsigned IOCTL_FBIOGTYPE = FBIOGTYPE;
+unsigned IOCTL_FBIOPUTCMAP = FBIOPUTCMAP;
+unsigned IOCTL_FBIOGETCMAP = FBIOGETCMAP;
+unsigned IOCTL_FBIOGATTR = FBIOGATTR;
+unsigned IOCTL_FBIOSVIDEO = FBIOSVIDEO;
+unsigned IOCTL_FBIOGVIDEO = FBIOGVIDEO;
+unsigned IOCTL_FBIOSCURSOR = FBIOSCURSOR;
+unsigned IOCTL_FBIOGCURSOR = FBIOGCURSOR;
+unsigned IOCTL_FBIOSCURPOS = FBIOSCURPOS;
+unsigned IOCTL_FBIOGCURPOS = FBIOGCURPOS;
+unsigned IOCTL_FBIOGCURMAX = FBIOGCURMAX;
+unsigned IOCTL_KIOCTRANS = KIOCTRANS;
+unsigned IOCTL_KIOCSETKEY = KIOCSETKEY;
+unsigned IOCTL_KIOCGETKEY = KIOCGETKEY;
+unsigned IOCTL_KIOCGTRANS = KIOCGTRANS;
+unsigned IOCTL_KIOCCMD = KIOCCMD;
+unsigned IOCTL_KIOCTYPE = KIOCTYPE;
+unsigned IOCTL_KIOCSDIRECT = KIOCSDIRECT;
+unsigned IOCTL_KIOCSKEY = KIOCSKEY;
+unsigned IOCTL_KIOCGKEY = KIOCGKEY;
+unsigned IOCTL_KIOCSLED = KIOCSLED;
+unsigned IOCTL_KIOCGLED = KIOCGLED;
+unsigned IOCTL_KIOCLAYOUT = KIOCLAYOUT;
+unsigned IOCTL_VUIDSFORMAT = VUIDSFORMAT;
+unsigned IOCTL_VUIDGFORMAT = VUIDGFORMAT;
+unsigned IOCTL_STICIO_GXINFO = STICIO_GXINFO;
+unsigned IOCTL_STICIO_RESET = STICIO_RESET;
+unsigned IOCTL_STICIO_STARTQ = STICIO_STARTQ;
+unsigned IOCTL_STICIO_STOPQ = STICIO_STOPQ;
+unsigned IOCTL_UKYOPON_IDENTIFY = UKYOPON_IDENTIFY;
+unsigned IOCTL_URIO_SEND_COMMAND = URIO_SEND_COMMAND;
+unsigned IOCTL_URIO_RECV_COMMAND = URIO_RECV_COMMAND;
+unsigned IOCTL_USB_REQUEST = USB_REQUEST;
+unsigned IOCTL_USB_SETDEBUG = USB_SETDEBUG;
+unsigned IOCTL_USB_DISCOVER = USB_DISCOVER;
+unsigned IOCTL_USB_DEVICEINFO = USB_DEVICEINFO;
+unsigned IOCTL_USB_DEVICEINFO_OLD = USB_DEVICEINFO_OLD;
+unsigned IOCTL_USB_DEVICESTATS = USB_DEVICESTATS;
+unsigned IOCTL_USB_GET_REPORT_DESC = USB_GET_REPORT_DESC;
+unsigned IOCTL_USB_SET_IMMED = USB_SET_IMMED;
+unsigned IOCTL_USB_GET_REPORT = USB_GET_REPORT;
+unsigned IOCTL_USB_SET_REPORT = USB_SET_REPORT;
+unsigned IOCTL_USB_GET_REPORT_ID = USB_GET_REPORT_ID;
+unsigned IOCTL_USB_GET_CONFIG = USB_GET_CONFIG;
+unsigned IOCTL_USB_SET_CONFIG = USB_SET_CONFIG;
+unsigned IOCTL_USB_GET_ALTINTERFACE = USB_GET_ALTINTERFACE;
+unsigned IOCTL_USB_SET_ALTINTERFACE = USB_SET_ALTINTERFACE;
+unsigned IOCTL_USB_GET_NO_ALT = USB_GET_NO_ALT;
+unsigned IOCTL_USB_GET_DEVICE_DESC = USB_GET_DEVICE_DESC;
+unsigned IOCTL_USB_GET_CONFIG_DESC = USB_GET_CONFIG_DESC;
+unsigned IOCTL_USB_GET_INTERFACE_DESC = USB_GET_INTERFACE_DESC;
+unsigned IOCTL_USB_GET_ENDPOINT_DESC = USB_GET_ENDPOINT_DESC;
+unsigned IOCTL_USB_GET_FULL_DESC = USB_GET_FULL_DESC;
+unsigned IOCTL_USB_GET_STRING_DESC = USB_GET_STRING_DESC;
+unsigned IOCTL_USB_DO_REQUEST = USB_DO_REQUEST;
+unsigned IOCTL_USB_GET_DEVICEINFO = USB_GET_DEVICEINFO;
+unsigned IOCTL_USB_GET_DEVICEINFO_OLD = USB_GET_DEVICEINFO_OLD;
+unsigned IOCTL_USB_SET_SHORT_XFER = USB_SET_SHORT_XFER;
+unsigned IOCTL_USB_SET_TIMEOUT = USB_SET_TIMEOUT;
+unsigned IOCTL_USB_SET_BULK_RA = USB_SET_BULK_RA;
+unsigned IOCTL_USB_SET_BULK_WB = USB_SET_BULK_WB;
+unsigned IOCTL_USB_SET_BULK_RA_OPT = USB_SET_BULK_RA_OPT;
+unsigned IOCTL_USB_SET_BULK_WB_OPT = USB_SET_BULK_WB_OPT;
+unsigned IOCTL_USB_GET_CM_OVER_DATA = USB_GET_CM_OVER_DATA;
+unsigned IOCTL_USB_SET_CM_OVER_DATA = USB_SET_CM_OVER_DATA;
+unsigned IOCTL_UTOPPYIOTURBO = UTOPPYIOTURBO;
+unsigned IOCTL_UTOPPYIOCANCEL = UTOPPYIOCANCEL;
+unsigned IOCTL_UTOPPYIOREBOOT = UTOPPYIOREBOOT;
+unsigned IOCTL_UTOPPYIOSTATS = UTOPPYIOSTATS;
+unsigned IOCTL_UTOPPYIORENAME = UTOPPYIORENAME;
+unsigned IOCTL_UTOPPYIOMKDIR = UTOPPYIOMKDIR;
+unsigned IOCTL_UTOPPYIODELETE = UTOPPYIODELETE;
+unsigned IOCTL_UTOPPYIOREADDIR = UTOPPYIOREADDIR;
+unsigned IOCTL_UTOPPYIOREADFILE = UTOPPYIOREADFILE;
+unsigned IOCTL_UTOPPYIOWRITEFILE = UTOPPYIOWRITEFILE;
+unsigned IOCTL_DIOSXDCMD = DIOSXDCMD;
+unsigned IOCTL_VT_OPENQRY = VT_OPENQRY;
+unsigned IOCTL_VT_SETMODE = VT_SETMODE;
+unsigned IOCTL_VT_GETMODE = VT_GETMODE;
+unsigned IOCTL_VT_RELDISP = VT_RELDISP;
+unsigned IOCTL_VT_ACTIVATE = VT_ACTIVATE;
+unsigned IOCTL_VT_WAITACTIVE = VT_WAITACTIVE;
+unsigned IOCTL_VT_GETACTIVE = VT_GETACTIVE;
+unsigned IOCTL_VT_GETSTATE = VT_GETSTATE;
+unsigned IOCTL_KDGETKBENT = KDGETKBENT;
+unsigned IOCTL_KDGKBMODE = KDGKBMODE;
+unsigned IOCTL_KDSKBMODE = KDSKBMODE;
+unsigned IOCTL_KDMKTONE = KDMKTONE;
+unsigned IOCTL_KDSETMODE = KDSETMODE;
+unsigned IOCTL_KDENABIO = KDENABIO;
+unsigned IOCTL_KDDISABIO = KDDISABIO;
+unsigned IOCTL_KDGKBTYPE = KDGKBTYPE;
+unsigned IOCTL_KDGETLED = KDGETLED;
+unsigned IOCTL_KDSETLED = KDSETLED;
+unsigned IOCTL_KDSETRAD = KDSETRAD;
+unsigned IOCTL_VGAPCVTID = VGAPCVTID;
+unsigned IOCTL_CONS_GETVERS = CONS_GETVERS;
+unsigned IOCTL_WSKBDIO_GTYPE = WSKBDIO_GTYPE;
+unsigned IOCTL_WSKBDIO_BELL = WSKBDIO_BELL;
+unsigned IOCTL_WSKBDIO_COMPLEXBELL = WSKBDIO_COMPLEXBELL;
+unsigned IOCTL_WSKBDIO_SETBELL = WSKBDIO_SETBELL;
+unsigned IOCTL_WSKBDIO_GETBELL = WSKBDIO_GETBELL;
+unsigned IOCTL_WSKBDIO_SETDEFAULTBELL = WSKBDIO_SETDEFAULTBELL;
+unsigned IOCTL_WSKBDIO_GETDEFAULTBELL = WSKBDIO_GETDEFAULTBELL;
+unsigned IOCTL_WSKBDIO_SETKEYREPEAT = WSKBDIO_SETKEYREPEAT;
+unsigned IOCTL_WSKBDIO_GETKEYREPEAT = WSKBDIO_GETKEYREPEAT;
+unsigned IOCTL_WSKBDIO_SETDEFAULTKEYREPEAT = WSKBDIO_SETDEFAULTKEYREPEAT;
+unsigned IOCTL_WSKBDIO_GETDEFAULTKEYREPEAT = WSKBDIO_GETDEFAULTKEYREPEAT;
+unsigned IOCTL_WSKBDIO_SETLEDS = WSKBDIO_SETLEDS;
+unsigned IOCTL_WSKBDIO_GETLEDS = WSKBDIO_GETLEDS;
+unsigned IOCTL_WSKBDIO_GETMAP = WSKBDIO_GETMAP;
+unsigned IOCTL_WSKBDIO_SETMAP = WSKBDIO_SETMAP;
+unsigned IOCTL_WSKBDIO_GETENCODING = WSKBDIO_GETENCODING;
+unsigned IOCTL_WSKBDIO_SETENCODING = WSKBDIO_SETENCODING;
+unsigned IOCTL_WSKBDIO_SETMODE = WSKBDIO_SETMODE;
+unsigned IOCTL_WSKBDIO_GETMODE = WSKBDIO_GETMODE;
+unsigned IOCTL_WSKBDIO_SETKEYCLICK = WSKBDIO_SETKEYCLICK;
+unsigned IOCTL_WSKBDIO_GETKEYCLICK = WSKBDIO_GETKEYCLICK;
+unsigned IOCTL_WSKBDIO_GETSCROLL = WSKBDIO_GETSCROLL;
+unsigned IOCTL_WSKBDIO_SETSCROLL = WSKBDIO_SETSCROLL;
+unsigned IOCTL_WSKBDIO_SETVERSION = WSKBDIO_SETVERSION;
+unsigned IOCTL_WSMOUSEIO_GTYPE = WSMOUSEIO_GTYPE;
+unsigned IOCTL_WSMOUSEIO_SRES = WSMOUSEIO_SRES;
+unsigned IOCTL_WSMOUSEIO_SSCALE = WSMOUSEIO_SSCALE;
+unsigned IOCTL_WSMOUSEIO_SRATE = WSMOUSEIO_SRATE;
+unsigned IOCTL_WSMOUSEIO_SCALIBCOORDS = WSMOUSEIO_SCALIBCOORDS;
+unsigned IOCTL_WSMOUSEIO_GCALIBCOORDS = WSMOUSEIO_GCALIBCOORDS;
+unsigned IOCTL_WSMOUSEIO_GETID = WSMOUSEIO_GETID;
+unsigned IOCTL_WSMOUSEIO_GETREPEAT = WSMOUSEIO_GETREPEAT;
+unsigned IOCTL_WSMOUSEIO_SETREPEAT = WSMOUSEIO_SETREPEAT;
+unsigned IOCTL_WSMOUSEIO_SETVERSION = WSMOUSEIO_SETVERSION;
+unsigned IOCTL_WSDISPLAYIO_GTYPE = WSDISPLAYIO_GTYPE;
+unsigned IOCTL_WSDISPLAYIO_GINFO = WSDISPLAYIO_GINFO;
+unsigned IOCTL_WSDISPLAYIO_GETCMAP = WSDISPLAYIO_GETCMAP;
+unsigned IOCTL_WSDISPLAYIO_PUTCMAP = WSDISPLAYIO_PUTCMAP;
+unsigned IOCTL_WSDISPLAYIO_GVIDEO = WSDISPLAYIO_GVIDEO;
+unsigned IOCTL_WSDISPLAYIO_SVIDEO = WSDISPLAYIO_SVIDEO;
+unsigned IOCTL_WSDISPLAYIO_GCURPOS = WSDISPLAYIO_GCURPOS;
+unsigned IOCTL_WSDISPLAYIO_SCURPOS = WSDISPLAYIO_SCURPOS;
+unsigned IOCTL_WSDISPLAYIO_GCURMAX = WSDISPLAYIO_GCURMAX;
+unsigned IOCTL_WSDISPLAYIO_GCURSOR = WSDISPLAYIO_GCURSOR;
+unsigned IOCTL_WSDISPLAYIO_SCURSOR = WSDISPLAYIO_SCURSOR;
+unsigned IOCTL_WSDISPLAYIO_GMODE = WSDISPLAYIO_GMODE;
+unsigned IOCTL_WSDISPLAYIO_SMODE = WSDISPLAYIO_SMODE;
+unsigned IOCTL_WSDISPLAYIO_LDFONT = WSDISPLAYIO_LDFONT;
+unsigned IOCTL_WSDISPLAYIO_ADDSCREEN = WSDISPLAYIO_ADDSCREEN;
+unsigned IOCTL_WSDISPLAYIO_DELSCREEN = WSDISPLAYIO_DELSCREEN;
+unsigned IOCTL_WSDISPLAYIO_SFONT = WSDISPLAYIO_SFONT;
+unsigned IOCTL__O_WSDISPLAYIO_SETKEYBOARD = _O_WSDISPLAYIO_SETKEYBOARD;
+unsigned IOCTL_WSDISPLAYIO_GETPARAM = WSDISPLAYIO_GETPARAM;
+unsigned IOCTL_WSDISPLAYIO_SETPARAM = WSDISPLAYIO_SETPARAM;
+unsigned IOCTL_WSDISPLAYIO_GETACTIVESCREEN = WSDISPLAYIO_GETACTIVESCREEN;
+unsigned IOCTL_WSDISPLAYIO_GETWSCHAR = WSDISPLAYIO_GETWSCHAR;
+unsigned IOCTL_WSDISPLAYIO_PUTWSCHAR = WSDISPLAYIO_PUTWSCHAR;
+unsigned IOCTL_WSDISPLAYIO_DGSCROLL = WSDISPLAYIO_DGSCROLL;
+unsigned IOCTL_WSDISPLAYIO_DSSCROLL = WSDISPLAYIO_DSSCROLL;
+unsigned IOCTL_WSDISPLAYIO_GMSGATTRS = WSDISPLAYIO_GMSGATTRS;
+unsigned IOCTL_WSDISPLAYIO_SMSGATTRS = WSDISPLAYIO_SMSGATTRS;
+unsigned IOCTL_WSDISPLAYIO_GBORDER = WSDISPLAYIO_GBORDER;
+unsigned IOCTL_WSDISPLAYIO_SBORDER = WSDISPLAYIO_SBORDER;
+unsigned IOCTL_WSDISPLAYIO_SSPLASH = WSDISPLAYIO_SSPLASH;
+unsigned IOCTL_WSDISPLAYIO_SPROGRESS = WSDISPLAYIO_SPROGRESS;
+unsigned IOCTL_WSDISPLAYIO_LINEBYTES = WSDISPLAYIO_LINEBYTES;
+unsigned IOCTL_WSDISPLAYIO_SETVERSION = WSDISPLAYIO_SETVERSION;
+unsigned IOCTL_WSMUXIO_ADD_DEVICE = WSMUXIO_ADD_DEVICE;
+unsigned IOCTL_WSMUXIO_REMOVE_DEVICE = WSMUXIO_REMOVE_DEVICE;
+unsigned IOCTL_WSMUXIO_LIST_DEVICES = WSMUXIO_LIST_DEVICES;
+unsigned IOCTL_WSMUXIO_INJECTEVENT = WSMUXIO_INJECTEVENT;
+unsigned IOCTL_WSDISPLAYIO_GET_BUSID = WSDISPLAYIO_GET_BUSID;
+unsigned IOCTL_WSDISPLAYIO_GET_EDID = WSDISPLAYIO_GET_EDID;
+unsigned IOCTL_WSDISPLAYIO_SET_POLLING = WSDISPLAYIO_SET_POLLING;
+unsigned IOCTL_WSDISPLAYIO_GET_FBINFO = WSDISPLAYIO_GET_FBINFO;
+unsigned IOCTL_WSDISPLAYIO_DOBLIT = WSDISPLAYIO_DOBLIT;
+unsigned IOCTL_WSDISPLAYIO_WAITBLIT = WSDISPLAYIO_WAITBLIT;
+unsigned IOCTL_BIOCLOCATE = BIOCLOCATE;
+unsigned IOCTL_BIOCINQ = BIOCINQ;
+unsigned IOCTL_BIOCDISK_NOVOL = BIOCDISK_NOVOL;
+unsigned IOCTL_BIOCDISK = BIOCDISK;
+unsigned IOCTL_BIOCVOL = BIOCVOL;
+unsigned IOCTL_BIOCALARM = BIOCALARM;
+unsigned IOCTL_BIOCBLINK = BIOCBLINK;
+unsigned IOCTL_BIOCSETSTATE = BIOCSETSTATE;
+unsigned IOCTL_BIOCVOLOPS = BIOCVOLOPS;
+unsigned IOCTL_MD_GETCONF = MD_GETCONF;
+unsigned IOCTL_MD_SETCONF = MD_SETCONF;
+unsigned IOCTL_CCDIOCSET = CCDIOCSET;
+unsigned IOCTL_CCDIOCCLR = CCDIOCCLR;
+unsigned IOCTL_CGDIOCSET = CGDIOCSET;
+unsigned IOCTL_CGDIOCCLR = CGDIOCCLR;
+unsigned IOCTL_CGDIOCGET = CGDIOCGET;
+unsigned IOCTL_FSSIOCSET = FSSIOCSET;
+unsigned IOCTL_FSSIOCGET = FSSIOCGET;
+unsigned IOCTL_FSSIOCCLR = FSSIOCCLR;
+unsigned IOCTL_FSSIOFSET = FSSIOFSET;
+unsigned IOCTL_FSSIOFGET = FSSIOFGET;
+unsigned IOCTL_BTDEV_ATTACH = BTDEV_ATTACH;
+unsigned IOCTL_BTDEV_DETACH = BTDEV_DETACH;
+unsigned IOCTL_BTSCO_GETINFO = BTSCO_GETINFO;
+unsigned IOCTL_KTTCP_IO_SEND = KTTCP_IO_SEND;
+unsigned IOCTL_KTTCP_IO_RECV = KTTCP_IO_RECV;
+unsigned IOCTL_IOC_LOCKSTAT_GVERSION = IOC_LOCKSTAT_GVERSION;
+unsigned IOCTL_IOC_LOCKSTAT_ENABLE = IOC_LOCKSTAT_ENABLE;
+unsigned IOCTL_IOC_LOCKSTAT_DISABLE = IOC_LOCKSTAT_DISABLE;
+unsigned IOCTL_VNDIOCSET = VNDIOCSET;
+unsigned IOCTL_VNDIOCCLR = VNDIOCCLR;
+unsigned IOCTL_VNDIOCGET = VNDIOCGET;
+unsigned IOCTL_SPKRTONE = SPKRTONE;
+unsigned IOCTL_SPKRTUNE = SPKRTUNE;
+unsigned IOCTL_SPKRGETVOL = SPKRGETVOL;
+unsigned IOCTL_SPKRSETVOL = SPKRSETVOL;
+#if defined(__x86_64__)
+unsigned IOCTL_NVMM_IOC_CAPABILITY = NVMM_IOC_CAPABILITY;
+unsigned IOCTL_NVMM_IOC_MACHINE_CREATE = NVMM_IOC_MACHINE_CREATE;
+unsigned IOCTL_NVMM_IOC_MACHINE_DESTROY = NVMM_IOC_MACHINE_DESTROY;
+unsigned IOCTL_NVMM_IOC_MACHINE_CONFIGURE = NVMM_IOC_MACHINE_CONFIGURE;
+unsigned IOCTL_NVMM_IOC_VCPU_CREATE = NVMM_IOC_VCPU_CREATE;
+unsigned IOCTL_NVMM_IOC_VCPU_DESTROY = NVMM_IOC_VCPU_DESTROY;
+unsigned IOCTL_NVMM_IOC_VCPU_SETSTATE = NVMM_IOC_VCPU_SETSTATE;
+unsigned IOCTL_NVMM_IOC_VCPU_GETSTATE = NVMM_IOC_VCPU_GETSTATE;
+unsigned IOCTL_NVMM_IOC_VCPU_INJECT = NVMM_IOC_VCPU_INJECT;
+unsigned IOCTL_NVMM_IOC_VCPU_RUN = NVMM_IOC_VCPU_RUN;
+unsigned IOCTL_NVMM_IOC_GPA_MAP = NVMM_IOC_GPA_MAP;
+unsigned IOCTL_NVMM_IOC_GPA_UNMAP = NVMM_IOC_GPA_UNMAP;
+unsigned IOCTL_NVMM_IOC_HVA_MAP = NVMM_IOC_HVA_MAP;
+unsigned IOCTL_NVMM_IOC_HVA_UNMAP = NVMM_IOC_HVA_UNMAP;
+unsigned IOCTL_NVMM_IOC_CTL = NVMM_IOC_CTL;
+#endif
+unsigned IOCTL_SPI_IOCTL_CONFIGURE = SPI_IOCTL_CONFIGURE;
+unsigned IOCTL_SPI_IOCTL_TRANSFER = SPI_IOCTL_TRANSFER;
+unsigned IOCTL_AUTOFSREQUEST = AUTOFSREQUEST;
+unsigned IOCTL_AUTOFSDONE = AUTOFSDONE;
+unsigned IOCTL_BIOCGBLEN = BIOCGBLEN;
+unsigned IOCTL_BIOCSBLEN = BIOCSBLEN;
+unsigned IOCTL_BIOCSETF = BIOCSETF;
+unsigned IOCTL_BIOCFLUSH = BIOCFLUSH;
+unsigned IOCTL_BIOCPROMISC = BIOCPROMISC;
+unsigned IOCTL_BIOCGDLT = BIOCGDLT;
+unsigned IOCTL_BIOCGETIF = BIOCGETIF;
+unsigned IOCTL_BIOCSETIF = BIOCSETIF;
+unsigned IOCTL_BIOCGSTATS = BIOCGSTATS;
+unsigned IOCTL_BIOCGSTATSOLD = BIOCGSTATSOLD;
+unsigned IOCTL_BIOCIMMEDIATE = BIOCIMMEDIATE;
+unsigned IOCTL_BIOCVERSION = BIOCVERSION;
+unsigned IOCTL_BIOCSTCPF = BIOCSTCPF;
+unsigned IOCTL_BIOCSUDPF = BIOCSUDPF;
+unsigned IOCTL_BIOCGHDRCMPLT = BIOCGHDRCMPLT;
+unsigned IOCTL_BIOCSHDRCMPLT = BIOCSHDRCMPLT;
+unsigned IOCTL_BIOCSDLT = BIOCSDLT;
+unsigned IOCTL_BIOCGDLTLIST = BIOCGDLTLIST;
+unsigned IOCTL_BIOCGDIRECTION = BIOCGDIRECTION;
+unsigned IOCTL_BIOCSDIRECTION = BIOCSDIRECTION;
+unsigned IOCTL_BIOCSRTIMEOUT = BIOCSRTIMEOUT;
+unsigned IOCTL_BIOCGRTIMEOUT = BIOCGRTIMEOUT;
+unsigned IOCTL_BIOCGFEEDBACK = BIOCGFEEDBACK;
+unsigned IOCTL_BIOCSFEEDBACK = BIOCSFEEDBACK;
+unsigned IOCTL_GRESADDRS = GRESADDRS;
+unsigned IOCTL_GRESADDRD = GRESADDRD;
+unsigned IOCTL_GREGADDRS = GREGADDRS;
+unsigned IOCTL_GREGADDRD = GREGADDRD;
+unsigned IOCTL_GRESPROTO = GRESPROTO;
+unsigned IOCTL_GREGPROTO = GREGPROTO;
+unsigned IOCTL_GRESSOCK = GRESSOCK;
+unsigned IOCTL_GREDSOCK = GREDSOCK;
+unsigned IOCTL_PPPIOCGRAWIN = PPPIOCGRAWIN;
+unsigned IOCTL_PPPIOCGFLAGS = PPPIOCGFLAGS;
+unsigned IOCTL_PPPIOCSFLAGS = PPPIOCSFLAGS;
+unsigned IOCTL_PPPIOCGASYNCMAP = PPPIOCGASYNCMAP;
+unsigned IOCTL_PPPIOCSASYNCMAP = PPPIOCSASYNCMAP;
+unsigned IOCTL_PPPIOCGUNIT = PPPIOCGUNIT;
+unsigned IOCTL_PPPIOCGRASYNCMAP = PPPIOCGRASYNCMAP;
+unsigned IOCTL_PPPIOCSRASYNCMAP = PPPIOCSRASYNCMAP;
+unsigned IOCTL_PPPIOCGMRU = PPPIOCGMRU;
+unsigned IOCTL_PPPIOCSMRU = PPPIOCSMRU;
+unsigned IOCTL_PPPIOCSMAXCID = PPPIOCSMAXCID;
+unsigned IOCTL_PPPIOCGXASYNCMAP = PPPIOCGXASYNCMAP;
+unsigned IOCTL_PPPIOCSXASYNCMAP = PPPIOCSXASYNCMAP;
+unsigned IOCTL_PPPIOCXFERUNIT = PPPIOCXFERUNIT;
+unsigned IOCTL_PPPIOCSCOMPRESS = PPPIOCSCOMPRESS;
+unsigned IOCTL_PPPIOCGNPMODE = PPPIOCGNPMODE;
+unsigned IOCTL_PPPIOCSNPMODE = PPPIOCSNPMODE;
+unsigned IOCTL_PPPIOCGIDLE = PPPIOCGIDLE;
+unsigned IOCTL_PPPIOCGMTU = PPPIOCGMTU;
+unsigned IOCTL_PPPIOCSMTU = PPPIOCSMTU;
+unsigned IOCTL_SIOCGPPPSTATS = SIOCGPPPSTATS;
+unsigned IOCTL_SIOCGPPPCSTATS = SIOCGPPPCSTATS;
+unsigned IOCTL_IOC_NPF_VERSION = IOC_NPF_VERSION;
+unsigned IOCTL_IOC_NPF_SWITCH = IOC_NPF_SWITCH;
+unsigned IOCTL_IOC_NPF_LOAD = IOC_NPF_LOAD;
+unsigned IOCTL_IOC_NPF_TABLE = IOC_NPF_TABLE;
+unsigned IOCTL_IOC_NPF_STATS = IOC_NPF_STATS;
+unsigned IOCTL_IOC_NPF_SAVE = IOC_NPF_SAVE;
+unsigned IOCTL_IOC_NPF_RULE = IOC_NPF_RULE;
+unsigned IOCTL_IOC_NPF_CONN_LOOKUP = IOC_NPF_CONN_LOOKUP;
+unsigned IOCTL_PPPOESETPARMS = PPPOESETPARMS;
+unsigned IOCTL_PPPOEGETPARMS = PPPOEGETPARMS;
+unsigned IOCTL_PPPOEGETSESSION = PPPOEGETSESSION;
+unsigned IOCTL_SPPPGETAUTHCFG = SPPPGETAUTHCFG;
+unsigned IOCTL_SPPPSETAUTHCFG = SPPPSETAUTHCFG;
+unsigned IOCTL_SPPPGETLCPCFG = SPPPGETLCPCFG;
+unsigned IOCTL_SPPPSETLCPCFG = SPPPSETLCPCFG;
+unsigned IOCTL_SPPPGETSTATUS = SPPPGETSTATUS;
+unsigned IOCTL_SPPPGETSTATUSNCP = SPPPGETSTATUSNCP;
+unsigned IOCTL_SPPPGETIDLETO = SPPPGETIDLETO;
+unsigned IOCTL_SPPPSETIDLETO = SPPPSETIDLETO;
+unsigned IOCTL_SPPPGETAUTHFAILURES = SPPPGETAUTHFAILURES;
+unsigned IOCTL_SPPPSETAUTHFAILURE = SPPPSETAUTHFAILURE;
+unsigned IOCTL_SPPPSETDNSOPTS = SPPPSETDNSOPTS;
+unsigned IOCTL_SPPPGETDNSOPTS = SPPPGETDNSOPTS;
+unsigned IOCTL_SPPPGETDNSADDRS = SPPPGETDNSADDRS;
+unsigned IOCTL_SPPPSETKEEPALIVE = SPPPSETKEEPALIVE;
+unsigned IOCTL_SPPPGETKEEPALIVE = SPPPGETKEEPALIVE;
+unsigned IOCTL_SRT_GETNRT = SRT_GETNRT;
+unsigned IOCTL_SRT_GETRT = SRT_GETRT;
+unsigned IOCTL_SRT_SETRT = SRT_SETRT;
+unsigned IOCTL_SRT_DELRT = SRT_DELRT;
+unsigned IOCTL_SRT_SFLAGS = SRT_SFLAGS;
+unsigned IOCTL_SRT_GFLAGS = SRT_GFLAGS;
+unsigned IOCTL_SRT_SGFLAGS = SRT_SGFLAGS;
+unsigned IOCTL_SRT_DEBUG = SRT_DEBUG;
+unsigned IOCTL_TAPGIFNAME = TAPGIFNAME;
+unsigned IOCTL_TUNSDEBUG = TUNSDEBUG;
+unsigned IOCTL_TUNGDEBUG = TUNGDEBUG;
+unsigned IOCTL_TUNSIFMODE = TUNSIFMODE;
+unsigned IOCTL_TUNSLMODE = TUNSLMODE;
+unsigned IOCTL_TUNSIFHEAD = TUNSIFHEAD;
+unsigned IOCTL_TUNGIFHEAD = TUNGIFHEAD;
+unsigned IOCTL_DIOCSTART = DIOCSTART;
+unsigned IOCTL_DIOCSTOP = DIOCSTOP;
+unsigned IOCTL_DIOCADDRULE = DIOCADDRULE;
+unsigned IOCTL_DIOCGETRULES = DIOCGETRULES;
+unsigned IOCTL_DIOCGETRULE = DIOCGETRULE;
+unsigned IOCTL_DIOCSETLCK = DIOCSETLCK;
+unsigned IOCTL_DIOCCLRSTATES = DIOCCLRSTATES;
+unsigned IOCTL_DIOCGETSTATE = DIOCGETSTATE;
+unsigned IOCTL_DIOCSETSTATUSIF = DIOCSETSTATUSIF;
+unsigned IOCTL_DIOCGETSTATUS = DIOCGETSTATUS;
+unsigned IOCTL_DIOCCLRSTATUS = DIOCCLRSTATUS;
+unsigned IOCTL_DIOCNATLOOK = DIOCNATLOOK;
+unsigned IOCTL_DIOCSETDEBUG = DIOCSETDEBUG;
+unsigned IOCTL_DIOCGETSTATES = DIOCGETSTATES;
+unsigned IOCTL_DIOCCHANGERULE = DIOCCHANGERULE;
+unsigned IOCTL_DIOCSETTIMEOUT = DIOCSETTIMEOUT;
+unsigned IOCTL_DIOCGETTIMEOUT = DIOCGETTIMEOUT;
+unsigned IOCTL_DIOCADDSTATE = DIOCADDSTATE;
+unsigned IOCTL_DIOCCLRRULECTRS = DIOCCLRRULECTRS;
+unsigned IOCTL_DIOCGETLIMIT = DIOCGETLIMIT;
+unsigned IOCTL_DIOCSETLIMIT = DIOCSETLIMIT;
+unsigned IOCTL_DIOCKILLSTATES = DIOCKILLSTATES;
+unsigned IOCTL_DIOCSTARTALTQ = DIOCSTARTALTQ;
+unsigned IOCTL_DIOCSTOPALTQ = DIOCSTOPALTQ;
+unsigned IOCTL_DIOCADDALTQ = DIOCADDALTQ;
+unsigned IOCTL_DIOCGETALTQS = DIOCGETALTQS;
+unsigned IOCTL_DIOCGETALTQ = DIOCGETALTQ;
+unsigned IOCTL_DIOCCHANGEALTQ = DIOCCHANGEALTQ;
+unsigned IOCTL_DIOCGETQSTATS = DIOCGETQSTATS;
+unsigned IOCTL_DIOCBEGINADDRS = DIOCBEGINADDRS;
+unsigned IOCTL_DIOCADDADDR = DIOCADDADDR;
+unsigned IOCTL_DIOCGETADDRS = DIOCGETADDRS;
+unsigned IOCTL_DIOCGETADDR = DIOCGETADDR;
+unsigned IOCTL_DIOCCHANGEADDR = DIOCCHANGEADDR;
+unsigned IOCTL_DIOCADDSTATES = DIOCADDSTATES;
+unsigned IOCTL_DIOCGETRULESETS = DIOCGETRULESETS;
+unsigned IOCTL_DIOCGETRULESET = DIOCGETRULESET;
+unsigned IOCTL_DIOCRCLRTABLES = DIOCRCLRTABLES;
+unsigned IOCTL_DIOCRADDTABLES = DIOCRADDTABLES;
+unsigned IOCTL_DIOCRDELTABLES = DIOCRDELTABLES;
+unsigned IOCTL_DIOCRGETTABLES = DIOCRGETTABLES;
+unsigned IOCTL_DIOCRGETTSTATS = DIOCRGETTSTATS;
+unsigned IOCTL_DIOCRCLRTSTATS = DIOCRCLRTSTATS;
+unsigned IOCTL_DIOCRCLRADDRS = DIOCRCLRADDRS;
+unsigned IOCTL_DIOCRADDADDRS = DIOCRADDADDRS;
+unsigned IOCTL_DIOCRDELADDRS = DIOCRDELADDRS;
+unsigned IOCTL_DIOCRSETADDRS = DIOCRSETADDRS;
+unsigned IOCTL_DIOCRGETADDRS = DIOCRGETADDRS;
+unsigned IOCTL_DIOCRGETASTATS = DIOCRGETASTATS;
+unsigned IOCTL_DIOCRCLRASTATS = DIOCRCLRASTATS;
+unsigned IOCTL_DIOCRTSTADDRS = DIOCRTSTADDRS;
+unsigned IOCTL_DIOCRSETTFLAGS = DIOCRSETTFLAGS;
+unsigned IOCTL_DIOCRINADEFINE = DIOCRINADEFINE;
+unsigned IOCTL_DIOCOSFPFLUSH = DIOCOSFPFLUSH;
+unsigned IOCTL_DIOCOSFPADD = DIOCOSFPADD;
+unsigned IOCTL_DIOCOSFPGET = DIOCOSFPGET;
+unsigned IOCTL_DIOCXBEGIN = DIOCXBEGIN;
+unsigned IOCTL_DIOCXCOMMIT = DIOCXCOMMIT;
+unsigned IOCTL_DIOCXROLLBACK = DIOCXROLLBACK;
+unsigned IOCTL_DIOCGETSRCNODES = DIOCGETSRCNODES;
+unsigned IOCTL_DIOCCLRSRCNODES = DIOCCLRSRCNODES;
+unsigned IOCTL_DIOCSETHOSTID = DIOCSETHOSTID;
+unsigned IOCTL_DIOCIGETIFACES = DIOCIGETIFACES;
+unsigned IOCTL_DIOCSETIFFLAG = DIOCSETIFFLAG;
+unsigned IOCTL_DIOCCLRIFFLAG = DIOCCLRIFFLAG;
+unsigned IOCTL_DIOCKILLSRCNODES = DIOCKILLSRCNODES;
+unsigned IOCTL_SLIOCGUNIT = SLIOCGUNIT;
+unsigned IOCTL_SIOCGBTINFO = SIOCGBTINFO;
+unsigned IOCTL_SIOCGBTINFOA = SIOCGBTINFOA;
+unsigned IOCTL_SIOCNBTINFO = SIOCNBTINFO;
+unsigned IOCTL_SIOCSBTFLAGS = SIOCSBTFLAGS;
+unsigned IOCTL_SIOCSBTPOLICY = SIOCSBTPOLICY;
+unsigned IOCTL_SIOCSBTPTYPE = SIOCSBTPTYPE;
+unsigned IOCTL_SIOCGBTSTATS = SIOCGBTSTATS;
+unsigned IOCTL_SIOCZBTSTATS = SIOCZBTSTATS;
+unsigned IOCTL_SIOCBTDUMP = SIOCBTDUMP;
+unsigned IOCTL_SIOCSBTSCOMTU = SIOCSBTSCOMTU;
+unsigned IOCTL_SIOCGBTFEAT = SIOCGBTFEAT;
+unsigned IOCTL_SIOCADNAT = SIOCADNAT;
+unsigned IOCTL_SIOCRMNAT = SIOCRMNAT;
+unsigned IOCTL_SIOCGNATS = SIOCGNATS;
+unsigned IOCTL_SIOCGNATL = SIOCGNATL;
+unsigned IOCTL_SIOCPURGENAT = SIOCPURGENAT;
+unsigned IOCTL_SIOCCONNECTX = SIOCCONNECTX;
+unsigned IOCTL_SIOCCONNECTXDEL = SIOCCONNECTXDEL;
+unsigned IOCTL_SIOCSIFINFO_FLAGS = SIOCSIFINFO_FLAGS;
+unsigned IOCTL_SIOCAADDRCTL_POLICY = SIOCAADDRCTL_POLICY;
+unsigned IOCTL_SIOCDADDRCTL_POLICY = SIOCDADDRCTL_POLICY;
+unsigned IOCTL_SMBIOC_OPENSESSION = SMBIOC_OPENSESSION;
+unsigned IOCTL_SMBIOC_OPENSHARE = SMBIOC_OPENSHARE;
+unsigned IOCTL_SMBIOC_REQUEST = SMBIOC_REQUEST;
+unsigned IOCTL_SMBIOC_SETFLAGS = SMBIOC_SETFLAGS;
+unsigned IOCTL_SMBIOC_LOOKUP = SMBIOC_LOOKUP;
+unsigned IOCTL_SMBIOC_READ = SMBIOC_READ;
+unsigned IOCTL_SMBIOC_WRITE = SMBIOC_WRITE;
+unsigned IOCTL_AGPIOC_INFO = AGPIOC_INFO;
+unsigned IOCTL_AGPIOC_ACQUIRE = AGPIOC_ACQUIRE;
+unsigned IOCTL_AGPIOC_RELEASE = AGPIOC_RELEASE;
+unsigned IOCTL_AGPIOC_SETUP = AGPIOC_SETUP;
+unsigned IOCTL_AGPIOC_ALLOCATE = AGPIOC_ALLOCATE;
+unsigned IOCTL_AGPIOC_DEALLOCATE = AGPIOC_DEALLOCATE;
+unsigned IOCTL_AGPIOC_BIND = AGPIOC_BIND;
+unsigned IOCTL_AGPIOC_UNBIND = AGPIOC_UNBIND;
+unsigned IOCTL_AUDIO_GETINFO = AUDIO_GETINFO;
+unsigned IOCTL_AUDIO_SETINFO = AUDIO_SETINFO;
+unsigned IOCTL_AUDIO_DRAIN = AUDIO_DRAIN;
+unsigned IOCTL_AUDIO_FLUSH = AUDIO_FLUSH;
+unsigned IOCTL_AUDIO_WSEEK = AUDIO_WSEEK;
+unsigned IOCTL_AUDIO_RERROR = AUDIO_RERROR;
+unsigned IOCTL_AUDIO_GETDEV = AUDIO_GETDEV;
+unsigned IOCTL_AUDIO_GETENC = AUDIO_GETENC;
+unsigned IOCTL_AUDIO_GETFD = AUDIO_GETFD;
+unsigned IOCTL_AUDIO_SETFD = AUDIO_SETFD;
+unsigned IOCTL_AUDIO_PERROR = AUDIO_PERROR;
+unsigned IOCTL_AUDIO_GETIOFFS = AUDIO_GETIOFFS;
+unsigned IOCTL_AUDIO_GETOOFFS = AUDIO_GETOOFFS;
+unsigned IOCTL_AUDIO_GETPROPS = AUDIO_GETPROPS;
+unsigned IOCTL_AUDIO_GETBUFINFO = AUDIO_GETBUFINFO;
+unsigned IOCTL_AUDIO_SETCHAN = AUDIO_SETCHAN;
+unsigned IOCTL_AUDIO_GETCHAN = AUDIO_GETCHAN;
+unsigned IOCTL_AUDIO_QUERYFORMAT = AUDIO_QUERYFORMAT;
+unsigned IOCTL_AUDIO_GETFORMAT = AUDIO_GETFORMAT;
+unsigned IOCTL_AUDIO_SETFORMAT = AUDIO_SETFORMAT;
+unsigned IOCTL_AUDIO_MIXER_READ = AUDIO_MIXER_READ;
+unsigned IOCTL_AUDIO_MIXER_WRITE = AUDIO_MIXER_WRITE;
+unsigned IOCTL_AUDIO_MIXER_DEVINFO = AUDIO_MIXER_DEVINFO;
+unsigned IOCTL_ATAIOCCOMMAND = ATAIOCCOMMAND;
+unsigned IOCTL_ATABUSIOSCAN = ATABUSIOSCAN;
+unsigned IOCTL_ATABUSIORESET = ATABUSIORESET;
+unsigned IOCTL_ATABUSIODETACH = ATABUSIODETACH;
+unsigned IOCTL_CDIOCPLAYTRACKS = CDIOCPLAYTRACKS;
+unsigned IOCTL_CDIOCPLAYBLOCKS = CDIOCPLAYBLOCKS;
+unsigned IOCTL_CDIOCREADSUBCHANNEL = CDIOCREADSUBCHANNEL;
+unsigned IOCTL_CDIOREADTOCHEADER = CDIOREADTOCHEADER;
+unsigned IOCTL_CDIOREADTOCENTRIES = CDIOREADTOCENTRIES;
+unsigned IOCTL_CDIOREADMSADDR = CDIOREADMSADDR;
+unsigned IOCTL_CDIOCSETPATCH = CDIOCSETPATCH;
+unsigned IOCTL_CDIOCGETVOL = CDIOCGETVOL;
+unsigned IOCTL_CDIOCSETVOL = CDIOCSETVOL;
+unsigned IOCTL_CDIOCSETMONO = CDIOCSETMONO;
+unsigned IOCTL_CDIOCSETSTEREO = CDIOCSETSTEREO;
+unsigned IOCTL_CDIOCSETMUTE = CDIOCSETMUTE;
+unsigned IOCTL_CDIOCSETLEFT = CDIOCSETLEFT;
+unsigned IOCTL_CDIOCSETRIGHT = CDIOCSETRIGHT;
+unsigned IOCTL_CDIOCSETDEBUG = CDIOCSETDEBUG;
+unsigned IOCTL_CDIOCCLRDEBUG = CDIOCCLRDEBUG;
+unsigned IOCTL_CDIOCPAUSE = CDIOCPAUSE;
+unsigned IOCTL_CDIOCRESUME = CDIOCRESUME;
+unsigned IOCTL_CDIOCRESET = CDIOCRESET;
+unsigned IOCTL_CDIOCSTART = CDIOCSTART;
+unsigned IOCTL_CDIOCSTOP = CDIOCSTOP;
+unsigned IOCTL_CDIOCEJECT = CDIOCEJECT;
+unsigned IOCTL_CDIOCALLOW = CDIOCALLOW;
+unsigned IOCTL_CDIOCPREVENT = CDIOCPREVENT;
+unsigned IOCTL_CDIOCCLOSE = CDIOCCLOSE;
+unsigned IOCTL_CDIOCPLAYMSF = CDIOCPLAYMSF;
+unsigned IOCTL_CDIOCLOADUNLOAD = CDIOCLOADUNLOAD;
+unsigned IOCTL_CHIOMOVE = CHIOMOVE;
+unsigned IOCTL_CHIOEXCHANGE = CHIOEXCHANGE;
+unsigned IOCTL_CHIOPOSITION = CHIOPOSITION;
+unsigned IOCTL_CHIOGPICKER = CHIOGPICKER;
+unsigned IOCTL_CHIOSPICKER = CHIOSPICKER;
+unsigned IOCTL_CHIOGPARAMS = CHIOGPARAMS;
+unsigned IOCTL_CHIOIELEM = CHIOIELEM;
+unsigned IOCTL_OCHIOGSTATUS = OCHIOGSTATUS;
+unsigned IOCTL_CHIOGSTATUS = CHIOGSTATUS;
+unsigned IOCTL_CHIOSVOLTAG = CHIOSVOLTAG;
+unsigned IOCTL_CLOCKCTL_SETTIMEOFDAY = CLOCKCTL_SETTIMEOFDAY;
+unsigned IOCTL_CLOCKCTL_ADJTIME = CLOCKCTL_ADJTIME;
+unsigned IOCTL_CLOCKCTL_CLOCK_SETTIME = CLOCKCTL_CLOCK_SETTIME;
+unsigned IOCTL_CLOCKCTL_NTP_ADJTIME = CLOCKCTL_NTP_ADJTIME;
+unsigned IOCTL_IOC_CPU_SETSTATE = IOC_CPU_SETSTATE;
+unsigned IOCTL_IOC_CPU_GETSTATE = IOC_CPU_GETSTATE;
+unsigned IOCTL_IOC_CPU_GETCOUNT = IOC_CPU_GETCOUNT;
+unsigned IOCTL_IOC_CPU_MAPID = IOC_CPU_MAPID;
+unsigned IOCTL_IOC_CPU_UCODE_GET_VERSION = IOC_CPU_UCODE_GET_VERSION;
+unsigned IOCTL_IOC_CPU_UCODE_APPLY = IOC_CPU_UCODE_APPLY;
+unsigned IOCTL_DIOCGDINFO = DIOCGDINFO;
+unsigned IOCTL_DIOCSDINFO = DIOCSDINFO;
+unsigned IOCTL_DIOCWDINFO = DIOCWDINFO;
+unsigned IOCTL_DIOCRFORMAT = DIOCRFORMAT;
+unsigned IOCTL_DIOCWFORMAT = DIOCWFORMAT;
+unsigned IOCTL_DIOCSSTEP = DIOCSSTEP;
+unsigned IOCTL_DIOCSRETRIES = DIOCSRETRIES;
+unsigned IOCTL_DIOCKLABEL = DIOCKLABEL;
+unsigned IOCTL_DIOCWLABEL = DIOCWLABEL;
+unsigned IOCTL_DIOCSBAD = DIOCSBAD;
+unsigned IOCTL_DIOCEJECT = DIOCEJECT;
+unsigned IOCTL_ODIOCEJECT = ODIOCEJECT;
+unsigned IOCTL_DIOCLOCK = DIOCLOCK;
+unsigned IOCTL_DIOCGDEFLABEL = DIOCGDEFLABEL;
+unsigned IOCTL_DIOCCLRLABEL = DIOCCLRLABEL;
+unsigned IOCTL_DIOCGCACHE = DIOCGCACHE;
+unsigned IOCTL_DIOCSCACHE = DIOCSCACHE;
+unsigned IOCTL_DIOCCACHESYNC = DIOCCACHESYNC;
+unsigned IOCTL_DIOCBSLIST = DIOCBSLIST;
+unsigned IOCTL_DIOCBSFLUSH = DIOCBSFLUSH;
+unsigned IOCTL_DIOCAWEDGE = DIOCAWEDGE;
+unsigned IOCTL_DIOCGWEDGEINFO = DIOCGWEDGEINFO;
+unsigned IOCTL_DIOCDWEDGE = DIOCDWEDGE;
+unsigned IOCTL_DIOCLWEDGES = DIOCLWEDGES;
+unsigned IOCTL_DIOCGSTRATEGY = DIOCGSTRATEGY;
+unsigned IOCTL_DIOCSSTRATEGY = DIOCSSTRATEGY;
+unsigned IOCTL_DIOCGDISKINFO = DIOCGDISKINFO;
+unsigned IOCTL_DIOCTUR = DIOCTUR;
+unsigned IOCTL_DIOCMWEDGES = DIOCMWEDGES;
+unsigned IOCTL_DIOCGSECTORSIZE = DIOCGSECTORSIZE;
+unsigned IOCTL_DIOCGMEDIASIZE = DIOCGMEDIASIZE;
+unsigned IOCTL_DIOCRMWEDGES = DIOCRMWEDGES;
+unsigned IOCTL_DRVDETACHDEV = DRVDETACHDEV;
+unsigned IOCTL_DRVRESCANBUS = DRVRESCANBUS;
+unsigned IOCTL_DRVCTLCOMMAND = DRVCTLCOMMAND;
+unsigned IOCTL_DRVRESUMEDEV = DRVRESUMEDEV;
+unsigned IOCTL_DRVLISTDEV = DRVLISTDEV;
+unsigned IOCTL_DRVGETEVENT = DRVGETEVENT;
+unsigned IOCTL_DRVSUSPENDDEV = DRVSUSPENDDEV;
+unsigned IOCTL_DVD_READ_STRUCT = DVD_READ_STRUCT;
+unsigned IOCTL_DVD_WRITE_STRUCT = DVD_WRITE_STRUCT;
+unsigned IOCTL_DVD_AUTH = DVD_AUTH;
+unsigned IOCTL_ENVSYS_GETDICTIONARY = ENVSYS_GETDICTIONARY;
+unsigned IOCTL_ENVSYS_SETDICTIONARY = ENVSYS_SETDICTIONARY;
+unsigned IOCTL_ENVSYS_REMOVEPROPS = ENVSYS_REMOVEPROPS;
+unsigned IOCTL_ENVSYS_GTREDATA = ENVSYS_GTREDATA;
+unsigned IOCTL_ENVSYS_GTREINFO = ENVSYS_GTREINFO;
+unsigned IOCTL_KFILTER_BYFILTER = KFILTER_BYFILTER;
+unsigned IOCTL_KFILTER_BYNAME = KFILTER_BYNAME;
+unsigned IOCTL_FDIOCGETOPTS = FDIOCGETOPTS;
+unsigned IOCTL_FDIOCSETOPTS = FDIOCSETOPTS;
+unsigned IOCTL_FDIOCSETFORMAT = FDIOCSETFORMAT;
+unsigned IOCTL_FDIOCGETFORMAT = FDIOCGETFORMAT;
+unsigned IOCTL_FDIOCFORMAT_TRACK = FDIOCFORMAT_TRACK;
+unsigned IOCTL_FIOCLEX = FIOCLEX;
+unsigned IOCTL_FIONCLEX = FIONCLEX;
+unsigned IOCTL_FIOSEEKDATA = FIOSEEKDATA;
+unsigned IOCTL_FIOSEEKHOLE = FIOSEEKHOLE;
+unsigned IOCTL_FIONREAD = FIONREAD;
+unsigned IOCTL_FIONBIO = FIONBIO;
+unsigned IOCTL_FIOASYNC = FIOASYNC;
+unsigned IOCTL_FIOSETOWN = FIOSETOWN;
+unsigned IOCTL_FIOGETOWN = FIOGETOWN;
+unsigned IOCTL_OFIOGETBMAP = OFIOGETBMAP;
+unsigned IOCTL_FIOGETBMAP = FIOGETBMAP;
+unsigned IOCTL_FIONWRITE = FIONWRITE;
+unsigned IOCTL_FIONSPACE = FIONSPACE;
+unsigned IOCTL_GPIOINFO = GPIOINFO;
+unsigned IOCTL_GPIOSET = GPIOSET;
+unsigned IOCTL_GPIOUNSET = GPIOUNSET;
+unsigned IOCTL_GPIOREAD = GPIOREAD;
+unsigned IOCTL_GPIOWRITE = GPIOWRITE;
+unsigned IOCTL_GPIOTOGGLE = GPIOTOGGLE;
+unsigned IOCTL_GPIOATTACH = GPIOATTACH;
+unsigned IOCTL_PTIOCNETBSD = PTIOCNETBSD;
+unsigned IOCTL_PTIOCSUNOS = PTIOCSUNOS;
+unsigned IOCTL_PTIOCLINUX = PTIOCLINUX;
+unsigned IOCTL_PTIOCFREEBSD = PTIOCFREEBSD;
+unsigned IOCTL_PTIOCULTRIX = PTIOCULTRIX;
+unsigned IOCTL_TIOCHPCL = TIOCHPCL;
+unsigned IOCTL_TIOCGETP = TIOCGETP;
+unsigned IOCTL_TIOCSETP = TIOCSETP;
+unsigned IOCTL_TIOCSETN = TIOCSETN;
+unsigned IOCTL_TIOCSETC = TIOCSETC;
+unsigned IOCTL_TIOCGETC = TIOCGETC;
+unsigned IOCTL_TIOCLBIS = TIOCLBIS;
+unsigned IOCTL_TIOCLBIC = TIOCLBIC;
+unsigned IOCTL_TIOCLSET = TIOCLSET;
+unsigned IOCTL_TIOCLGET = TIOCLGET;
+unsigned IOCTL_TIOCSLTC = TIOCSLTC;
+unsigned IOCTL_TIOCGLTC = TIOCGLTC;
+unsigned IOCTL_OTIOCCONS = OTIOCCONS;
+unsigned IOCTL_JOY_SETTIMEOUT = JOY_SETTIMEOUT;
+unsigned IOCTL_JOY_GETTIMEOUT = JOY_GETTIMEOUT;
+unsigned IOCTL_JOY_SET_X_OFFSET = JOY_SET_X_OFFSET;
+unsigned IOCTL_JOY_SET_Y_OFFSET = JOY_SET_Y_OFFSET;
+unsigned IOCTL_JOY_GET_X_OFFSET = JOY_GET_X_OFFSET;
+unsigned IOCTL_JOY_GET_Y_OFFSET = JOY_GET_Y_OFFSET;
+unsigned IOCTL_OKIOCGSYMBOL = OKIOCGSYMBOL;
+unsigned IOCTL_OKIOCGVALUE = OKIOCGVALUE;
+unsigned IOCTL_KIOCGSIZE = KIOCGSIZE;
+unsigned IOCTL_KIOCGVALUE = KIOCGVALUE;
+unsigned IOCTL_KIOCGSYMBOL = KIOCGSYMBOL;
+unsigned IOCTL_LUAINFO = LUAINFO;
+unsigned IOCTL_LUACREATE = LUACREATE;
+unsigned IOCTL_LUADESTROY = LUADESTROY;
+unsigned IOCTL_LUAREQUIRE = LUAREQUIRE;
+unsigned IOCTL_LUALOAD = LUALOAD;
+unsigned IOCTL_MIDI_PRETIME = MIDI_PRETIME;
+unsigned IOCTL_MIDI_MPUMODE = MIDI_MPUMODE;
+unsigned IOCTL_MIDI_MPUCMD = MIDI_MPUCMD;
+unsigned IOCTL_SEQUENCER_RESET = SEQUENCER_RESET;
+unsigned IOCTL_SEQUENCER_SYNC = SEQUENCER_SYNC;
+unsigned IOCTL_SEQUENCER_INFO = SEQUENCER_INFO;
+unsigned IOCTL_SEQUENCER_CTRLRATE = SEQUENCER_CTRLRATE;
+unsigned IOCTL_SEQUENCER_GETOUTCOUNT = SEQUENCER_GETOUTCOUNT;
+unsigned IOCTL_SEQUENCER_GETINCOUNT = SEQUENCER_GETINCOUNT;
+unsigned IOCTL_SEQUENCER_RESETSAMPLES = SEQUENCER_RESETSAMPLES;
+unsigned IOCTL_SEQUENCER_NRSYNTHS = SEQUENCER_NRSYNTHS;
+unsigned IOCTL_SEQUENCER_NRMIDIS = SEQUENCER_NRMIDIS;
+unsigned IOCTL_SEQUENCER_THRESHOLD = SEQUENCER_THRESHOLD;
+unsigned IOCTL_SEQUENCER_MEMAVL = SEQUENCER_MEMAVL;
+unsigned IOCTL_SEQUENCER_PANIC = SEQUENCER_PANIC;
+unsigned IOCTL_SEQUENCER_OUTOFBAND = SEQUENCER_OUTOFBAND;
+unsigned IOCTL_SEQUENCER_GETTIME = SEQUENCER_GETTIME;
+unsigned IOCTL_SEQUENCER_TMR_TIMEBASE = SEQUENCER_TMR_TIMEBASE;
+unsigned IOCTL_SEQUENCER_TMR_START = SEQUENCER_TMR_START;
+unsigned IOCTL_SEQUENCER_TMR_STOP = SEQUENCER_TMR_STOP;
+unsigned IOCTL_SEQUENCER_TMR_CONTINUE = SEQUENCER_TMR_CONTINUE;
+unsigned IOCTL_SEQUENCER_TMR_TEMPO = SEQUENCER_TMR_TEMPO;
+unsigned IOCTL_SEQUENCER_TMR_SOURCE = SEQUENCER_TMR_SOURCE;
+unsigned IOCTL_SEQUENCER_TMR_METRONOME = SEQUENCER_TMR_METRONOME;
+unsigned IOCTL_SEQUENCER_TMR_SELECT = SEQUENCER_TMR_SELECT;
+unsigned IOCTL_MTIOCTOP = MTIOCTOP;
+unsigned IOCTL_MTIOCGET = MTIOCGET;
+unsigned IOCTL_MTIOCIEOT = MTIOCIEOT;
+unsigned IOCTL_MTIOCEEOT = MTIOCEEOT;
+unsigned IOCTL_MTIOCRDSPOS = MTIOCRDSPOS;
+unsigned IOCTL_MTIOCRDHPOS = MTIOCRDHPOS;
+unsigned IOCTL_MTIOCSLOCATE = MTIOCSLOCATE;
+unsigned IOCTL_MTIOCHLOCATE = MTIOCHLOCATE;
+unsigned IOCTL_POWER_EVENT_RECVDICT = POWER_EVENT_RECVDICT;
+unsigned IOCTL_POWER_IOC_GET_TYPE = POWER_IOC_GET_TYPE;
+unsigned IOCTL_RIOCGINFO = RIOCGINFO;
+unsigned IOCTL_RIOCSINFO = RIOCSINFO;
+unsigned IOCTL_RIOCSSRCH = RIOCSSRCH;
+unsigned IOCTL_RNDGETENTCNT = RNDGETENTCNT;
+unsigned IOCTL_RNDGETSRCNUM = RNDGETSRCNUM;
+unsigned IOCTL_RNDGETSRCNAME = RNDGETSRCNAME;
+unsigned IOCTL_RNDCTL = RNDCTL;
+unsigned IOCTL_RNDADDDATA = RNDADDDATA;
+unsigned IOCTL_RNDGETPOOLSTAT = RNDGETPOOLSTAT;
+unsigned IOCTL_RNDGETESTNUM = RNDGETESTNUM;
+unsigned IOCTL_RNDGETESTNAME = RNDGETESTNAME;
+unsigned IOCTL_SCIOCGET = SCIOCGET;
+unsigned IOCTL_SCIOCSET = SCIOCSET;
+unsigned IOCTL_SCIOCRESTART = SCIOCRESTART;
+unsigned IOCTL_SCIOC_USE_ADF = SCIOC_USE_ADF;
+unsigned IOCTL_SCIOCCOMMAND = SCIOCCOMMAND;
+unsigned IOCTL_SCIOCDEBUG = SCIOCDEBUG;
+unsigned IOCTL_SCIOCIDENTIFY = SCIOCIDENTIFY;
+unsigned IOCTL_OSCIOCIDENTIFY = OSCIOCIDENTIFY;
+unsigned IOCTL_SCIOCDECONFIG = SCIOCDECONFIG;
+unsigned IOCTL_SCIOCRECONFIG = SCIOCRECONFIG;
+unsigned IOCTL_SCIOCRESET = SCIOCRESET;
+unsigned IOCTL_SCBUSIOSCAN = SCBUSIOSCAN;
+unsigned IOCTL_SCBUSIORESET = SCBUSIORESET;
+unsigned IOCTL_SCBUSIODETACH = SCBUSIODETACH;
+unsigned IOCTL_SCBUSACCEL = SCBUSACCEL;
+unsigned IOCTL_SCBUSIOLLSCAN = SCBUSIOLLSCAN;
+unsigned IOCTL_SIOCSHIWAT = SIOCSHIWAT;
+unsigned IOCTL_SIOCGHIWAT = SIOCGHIWAT;
+unsigned IOCTL_SIOCSLOWAT = SIOCSLOWAT;
+unsigned IOCTL_SIOCGLOWAT = SIOCGLOWAT;
+unsigned IOCTL_SIOCATMARK = SIOCATMARK;
+unsigned IOCTL_SIOCSPGRP = SIOCSPGRP;
+unsigned IOCTL_SIOCGPGRP = SIOCGPGRP;
+unsigned IOCTL_SIOCPEELOFF = SIOCPEELOFF;
+unsigned IOCTL_SIOCADDRT = SIOCADDRT;
+unsigned IOCTL_SIOCDELRT = SIOCDELRT;
+unsigned IOCTL_SIOCSIFADDR = SIOCSIFADDR;
+unsigned IOCTL_SIOCGIFADDR = SIOCGIFADDR;
+unsigned IOCTL_SIOCSIFDSTADDR = SIOCSIFDSTADDR;
+unsigned IOCTL_SIOCGIFDSTADDR = SIOCGIFDSTADDR;
+unsigned IOCTL_SIOCSIFFLAGS = SIOCSIFFLAGS;
+unsigned IOCTL_SIOCGIFFLAGS = SIOCGIFFLAGS;
+unsigned IOCTL_SIOCGIFBRDADDR = SIOCGIFBRDADDR;
+unsigned IOCTL_SIOCSIFBRDADDR = SIOCSIFBRDADDR;
+unsigned IOCTL_SIOCGIFCONF = SIOCGIFCONF;
+unsigned IOCTL_SIOCGIFNETMASK = SIOCGIFNETMASK;
+unsigned IOCTL_SIOCSIFNETMASK = SIOCSIFNETMASK;
+unsigned IOCTL_SIOCGIFMETRIC = SIOCGIFMETRIC;
+unsigned IOCTL_SIOCSIFMETRIC = SIOCSIFMETRIC;
+unsigned IOCTL_SIOCDIFADDR = SIOCDIFADDR;
+unsigned IOCTL_SIOCAIFADDR = SIOCAIFADDR;
+unsigned IOCTL_SIOCGIFALIAS = SIOCGIFALIAS;
+unsigned IOCTL_SIOCGIFAFLAG_IN = SIOCGIFAFLAG_IN;
+unsigned IOCTL_SIOCALIFADDR = SIOCALIFADDR;
+unsigned IOCTL_SIOCGLIFADDR = SIOCGLIFADDR;
+unsigned IOCTL_SIOCDLIFADDR = SIOCDLIFADDR;
+unsigned IOCTL_SIOCSIFADDRPREF = SIOCSIFADDRPREF;
+unsigned IOCTL_SIOCGIFADDRPREF = SIOCGIFADDRPREF;
+unsigned IOCTL_SIOCADDMULTI = SIOCADDMULTI;
+unsigned IOCTL_SIOCDELMULTI = SIOCDELMULTI;
+unsigned IOCTL_SIOCGETVIFCNT = SIOCGETVIFCNT;
+unsigned IOCTL_SIOCGETSGCNT = SIOCGETSGCNT;
+unsigned IOCTL_SIOCSIFMEDIA = SIOCSIFMEDIA;
+unsigned IOCTL_SIOCGIFMEDIA = SIOCGIFMEDIA;
+unsigned IOCTL_SIOCSIFGENERIC = SIOCSIFGENERIC;
+unsigned IOCTL_SIOCGIFGENERIC = SIOCGIFGENERIC;
+unsigned IOCTL_SIOCSIFPHYADDR = SIOCSIFPHYADDR;
+unsigned IOCTL_SIOCGIFPSRCADDR = SIOCGIFPSRCADDR;
+unsigned IOCTL_SIOCGIFPDSTADDR = SIOCGIFPDSTADDR;
+unsigned IOCTL_SIOCDIFPHYADDR = SIOCDIFPHYADDR;
+unsigned IOCTL_SIOCSLIFPHYADDR = SIOCSLIFPHYADDR;
+unsigned IOCTL_SIOCGLIFPHYADDR = SIOCGLIFPHYADDR;
+unsigned IOCTL_SIOCSIFMTU = SIOCSIFMTU;
+unsigned IOCTL_SIOCGIFMTU = SIOCGIFMTU;
+unsigned IOCTL_SIOCSDRVSPEC = SIOCSDRVSPEC;
+unsigned IOCTL_SIOCGDRVSPEC = SIOCGDRVSPEC;
+unsigned IOCTL_SIOCIFCREATE = SIOCIFCREATE;
+unsigned IOCTL_SIOCIFDESTROY = SIOCIFDESTROY;
+unsigned IOCTL_SIOCIFGCLONERS = SIOCIFGCLONERS;
+unsigned IOCTL_SIOCGIFDLT = SIOCGIFDLT;
+unsigned IOCTL_SIOCGIFCAP = SIOCGIFCAP;
+unsigned IOCTL_SIOCSIFCAP = SIOCSIFCAP;
+unsigned IOCTL_SIOCSVH = SIOCSVH;
+unsigned IOCTL_SIOCGVH = SIOCGVH;
+unsigned IOCTL_SIOCINITIFADDR = SIOCINITIFADDR;
+unsigned IOCTL_SIOCGIFDATA = SIOCGIFDATA;
+unsigned IOCTL_SIOCZIFDATA = SIOCZIFDATA;
+unsigned IOCTL_SIOCGLINKSTR = SIOCGLINKSTR;
+unsigned IOCTL_SIOCSLINKSTR = SIOCSLINKSTR;
+unsigned IOCTL_SIOCGETHERCAP = SIOCGETHERCAP;
+unsigned IOCTL_SIOCGIFINDEX = SIOCGIFINDEX;
+unsigned IOCTL_SIOCSETHERCAP = SIOCSETHERCAP;
+unsigned IOCTL_SIOCSIFDESCR = SIOCSIFDESCR;
+unsigned IOCTL_SIOCGIFDESCR = SIOCGIFDESCR;
+unsigned IOCTL_SIOCGUMBINFO = SIOCGUMBINFO;
+unsigned IOCTL_SIOCSUMBPARAM = SIOCSUMBPARAM;
+unsigned IOCTL_SIOCGUMBPARAM = SIOCGUMBPARAM;
+unsigned IOCTL_SIOCSETPFSYNC = SIOCSETPFSYNC;
+unsigned IOCTL_SIOCGETPFSYNC = SIOCGETPFSYNC;
+unsigned IOCTL_PPS_IOC_CREATE = PPS_IOC_CREATE;
+unsigned IOCTL_PPS_IOC_DESTROY = PPS_IOC_DESTROY;
+unsigned IOCTL_PPS_IOC_SETPARAMS = PPS_IOC_SETPARAMS;
+unsigned IOCTL_PPS_IOC_GETPARAMS = PPS_IOC_GETPARAMS;
+unsigned IOCTL_PPS_IOC_GETCAP = PPS_IOC_GETCAP;
+unsigned IOCTL_PPS_IOC_FETCH = PPS_IOC_FETCH;
+unsigned IOCTL_PPS_IOC_KCBIND = PPS_IOC_KCBIND;
+unsigned IOCTL_TIOCEXCL = TIOCEXCL;
+unsigned IOCTL_TIOCNXCL = TIOCNXCL;
+unsigned IOCTL_TIOCFLUSH = TIOCFLUSH;
+unsigned IOCTL_TIOCGETA = TIOCGETA;
+unsigned IOCTL_TIOCSETA = TIOCSETA;
+unsigned IOCTL_TIOCSETAW = TIOCSETAW;
+unsigned IOCTL_TIOCSETAF = TIOCSETAF;
+unsigned IOCTL_TIOCGETD = TIOCGETD;
+unsigned IOCTL_TIOCSETD = TIOCSETD;
+unsigned IOCTL_TIOCGLINED = TIOCGLINED;
+unsigned IOCTL_TIOCSLINED = TIOCSLINED;
+unsigned IOCTL_TIOCSBRK = TIOCSBRK;
+unsigned IOCTL_TIOCCBRK = TIOCCBRK;
+unsigned IOCTL_TIOCSDTR = TIOCSDTR;
+unsigned IOCTL_TIOCCDTR = TIOCCDTR;
+unsigned IOCTL_TIOCGPGRP = TIOCGPGRP;
+unsigned IOCTL_TIOCSPGRP = TIOCSPGRP;
+unsigned IOCTL_TIOCOUTQ = TIOCOUTQ;
+unsigned IOCTL_TIOCSTI = TIOCSTI;
+unsigned IOCTL_TIOCNOTTY = TIOCNOTTY;
+unsigned IOCTL_TIOCPKT = TIOCPKT;
+unsigned IOCTL_TIOCSTOP = TIOCSTOP;
+unsigned IOCTL_TIOCSTART = TIOCSTART;
+unsigned IOCTL_TIOCMSET = TIOCMSET;
+unsigned IOCTL_TIOCMBIS = TIOCMBIS;
+unsigned IOCTL_TIOCMBIC = TIOCMBIC;
+unsigned IOCTL_TIOCMGET = TIOCMGET;
+unsigned IOCTL_TIOCREMOTE = TIOCREMOTE;
+unsigned IOCTL_TIOCGWINSZ = TIOCGWINSZ;
+unsigned IOCTL_TIOCSWINSZ = TIOCSWINSZ;
+unsigned IOCTL_TIOCUCNTL = TIOCUCNTL;
+unsigned IOCTL_TIOCSTAT = TIOCSTAT;
+unsigned IOCTL_TIOCGSID = TIOCGSID;
+unsigned IOCTL_TIOCCONS = TIOCCONS;
+unsigned IOCTL_TIOCSCTTY = TIOCSCTTY;
+unsigned IOCTL_TIOCEXT = TIOCEXT;
+unsigned IOCTL_TIOCSIG = TIOCSIG;
+unsigned IOCTL_TIOCDRAIN = TIOCDRAIN;
+unsigned IOCTL_TIOCGFLAGS = TIOCGFLAGS;
+unsigned IOCTL_TIOCSFLAGS = TIOCSFLAGS;
+unsigned IOCTL_TIOCDCDTIMESTAMP = TIOCDCDTIMESTAMP;
+unsigned IOCTL_TIOCRCVFRAME = TIOCRCVFRAME;
+unsigned IOCTL_TIOCXMTFRAME = TIOCXMTFRAME;
+unsigned IOCTL_TIOCPTMGET = TIOCPTMGET;
+unsigned IOCTL_TIOCGRANTPT = TIOCGRANTPT;
+unsigned IOCTL_TIOCPTSNAME = TIOCPTSNAME;
+unsigned IOCTL_TIOCSQSIZE = TIOCSQSIZE;
+unsigned IOCTL_TIOCGQSIZE = TIOCGQSIZE;
+unsigned IOCTL_VERIEXEC_LOAD = VERIEXEC_LOAD;
+unsigned IOCTL_VERIEXEC_TABLESIZE = VERIEXEC_TABLESIZE;
+unsigned IOCTL_VERIEXEC_DELETE = VERIEXEC_DELETE;
+unsigned IOCTL_VERIEXEC_QUERY = VERIEXEC_QUERY;
+unsigned IOCTL_VERIEXEC_DUMP = VERIEXEC_DUMP;
+unsigned IOCTL_VERIEXEC_FLUSH = VERIEXEC_FLUSH;
+unsigned IOCTL_VIDIOC_QUERYCAP = VIDIOC_QUERYCAP;
+unsigned IOCTL_VIDIOC_RESERVED = VIDIOC_RESERVED;
+unsigned IOCTL_VIDIOC_ENUM_FMT = VIDIOC_ENUM_FMT;
+unsigned IOCTL_VIDIOC_G_FMT = VIDIOC_G_FMT;
+unsigned IOCTL_VIDIOC_S_FMT = VIDIOC_S_FMT;
+unsigned IOCTL_VIDIOC_REQBUFS = VIDIOC_REQBUFS;
+unsigned IOCTL_VIDIOC_QUERYBUF = VIDIOC_QUERYBUF;
+unsigned IOCTL_VIDIOC_G_FBUF = VIDIOC_G_FBUF;
+unsigned IOCTL_VIDIOC_S_FBUF = VIDIOC_S_FBUF;
+unsigned IOCTL_VIDIOC_OVERLAY = VIDIOC_OVERLAY;
+unsigned IOCTL_VIDIOC_QBUF = VIDIOC_QBUF;
+unsigned IOCTL_VIDIOC_DQBUF = VIDIOC_DQBUF;
+unsigned IOCTL_VIDIOC_STREAMON = VIDIOC_STREAMON;
+unsigned IOCTL_VIDIOC_STREAMOFF = VIDIOC_STREAMOFF;
+unsigned IOCTL_VIDIOC_G_PARM = VIDIOC_G_PARM;
+unsigned IOCTL_VIDIOC_S_PARM = VIDIOC_S_PARM;
+unsigned IOCTL_VIDIOC_G_STD = VIDIOC_G_STD;
+unsigned IOCTL_VIDIOC_S_STD = VIDIOC_S_STD;
+unsigned IOCTL_VIDIOC_ENUMSTD = VIDIOC_ENUMSTD;
+unsigned IOCTL_VIDIOC_ENUMINPUT = VIDIOC_ENUMINPUT;
+unsigned IOCTL_VIDIOC_G_CTRL = VIDIOC_G_CTRL;
+unsigned IOCTL_VIDIOC_S_CTRL = VIDIOC_S_CTRL;
+unsigned IOCTL_VIDIOC_G_TUNER = VIDIOC_G_TUNER;
+unsigned IOCTL_VIDIOC_S_TUNER = VIDIOC_S_TUNER;
+unsigned IOCTL_VIDIOC_G_AUDIO = VIDIOC_G_AUDIO;
+unsigned IOCTL_VIDIOC_S_AUDIO = VIDIOC_S_AUDIO;
+unsigned IOCTL_VIDIOC_QUERYCTRL = VIDIOC_QUERYCTRL;
+unsigned IOCTL_VIDIOC_QUERYMENU = VIDIOC_QUERYMENU;
+unsigned IOCTL_VIDIOC_G_INPUT = VIDIOC_G_INPUT;
+unsigned IOCTL_VIDIOC_S_INPUT = VIDIOC_S_INPUT;
+unsigned IOCTL_VIDIOC_G_OUTPUT = VIDIOC_G_OUTPUT;
+unsigned IOCTL_VIDIOC_S_OUTPUT = VIDIOC_S_OUTPUT;
+unsigned IOCTL_VIDIOC_ENUMOUTPUT = VIDIOC_ENUMOUTPUT;
+unsigned IOCTL_VIDIOC_G_AUDOUT = VIDIOC_G_AUDOUT;
+unsigned IOCTL_VIDIOC_S_AUDOUT = VIDIOC_S_AUDOUT;
+unsigned IOCTL_VIDIOC_G_MODULATOR = VIDIOC_G_MODULATOR;
+unsigned IOCTL_VIDIOC_S_MODULATOR = VIDIOC_S_MODULATOR;
+unsigned IOCTL_VIDIOC_G_FREQUENCY = VIDIOC_G_FREQUENCY;
+unsigned IOCTL_VIDIOC_S_FREQUENCY = VIDIOC_S_FREQUENCY;
+unsigned IOCTL_VIDIOC_CROPCAP = VIDIOC_CROPCAP;
+unsigned IOCTL_VIDIOC_G_CROP = VIDIOC_G_CROP;
+unsigned IOCTL_VIDIOC_S_CROP = VIDIOC_S_CROP;
+unsigned IOCTL_VIDIOC_G_JPEGCOMP = VIDIOC_G_JPEGCOMP;
+unsigned IOCTL_VIDIOC_S_JPEGCOMP = VIDIOC_S_JPEGCOMP;
+unsigned IOCTL_VIDIOC_QUERYSTD = VIDIOC_QUERYSTD;
+unsigned IOCTL_VIDIOC_TRY_FMT = VIDIOC_TRY_FMT;
+unsigned IOCTL_VIDIOC_ENUMAUDIO = VIDIOC_ENUMAUDIO;
+unsigned IOCTL_VIDIOC_ENUMAUDOUT = VIDIOC_ENUMAUDOUT;
+unsigned IOCTL_VIDIOC_G_PRIORITY = VIDIOC_G_PRIORITY;
+unsigned IOCTL_VIDIOC_S_PRIORITY = VIDIOC_S_PRIORITY;
+unsigned IOCTL_VIDIOC_ENUM_FRAMESIZES = VIDIOC_ENUM_FRAMESIZES;
+unsigned IOCTL_VIDIOC_ENUM_FRAMEINTERVALS = VIDIOC_ENUM_FRAMEINTERVALS;
+unsigned IOCTL_WDOGIOC_GMODE = WDOGIOC_GMODE;
+unsigned IOCTL_WDOGIOC_SMODE = WDOGIOC_SMODE;
+unsigned IOCTL_WDOGIOC_WHICH = WDOGIOC_WHICH;
+unsigned IOCTL_WDOGIOC_TICKLE = WDOGIOC_TICKLE;
+unsigned IOCTL_WDOGIOC_GTICKLER = WDOGIOC_GTICKLER;
+unsigned IOCTL_WDOGIOC_GWDOGS = WDOGIOC_GWDOGS;
+unsigned IOCTL_KCOV_IOC_SETBUFSIZE = KCOV_IOC_SETBUFSIZE;
+unsigned IOCTL_KCOV_IOC_ENABLE = KCOV_IOC_ENABLE;
+unsigned IOCTL_KCOV_IOC_DISABLE = KCOV_IOC_DISABLE;
+unsigned IOCTL_IPMICTL_RECEIVE_MSG_TRUNC = IPMICTL_RECEIVE_MSG_TRUNC;
+unsigned IOCTL_IPMICTL_RECEIVE_MSG = IPMICTL_RECEIVE_MSG;
+unsigned IOCTL_IPMICTL_SEND_COMMAND = IPMICTL_SEND_COMMAND;
+unsigned IOCTL_IPMICTL_REGISTER_FOR_CMD = IPMICTL_REGISTER_FOR_CMD;
+unsigned IOCTL_IPMICTL_UNREGISTER_FOR_CMD = IPMICTL_UNREGISTER_FOR_CMD;
+unsigned IOCTL_IPMICTL_SET_GETS_EVENTS_CMD = IPMICTL_SET_GETS_EVENTS_CMD;
+unsigned IOCTL_IPMICTL_SET_MY_ADDRESS_CMD = IPMICTL_SET_MY_ADDRESS_CMD;
+unsigned IOCTL_IPMICTL_GET_MY_ADDRESS_CMD = IPMICTL_GET_MY_ADDRESS_CMD;
+unsigned IOCTL_IPMICTL_SET_MY_LUN_CMD = IPMICTL_SET_MY_LUN_CMD;
+unsigned IOCTL_IPMICTL_GET_MY_LUN_CMD = IPMICTL_GET_MY_LUN_CMD;
+unsigned IOCTL_SNDCTL_DSP_RESET = SNDCTL_DSP_RESET;
+unsigned IOCTL_SNDCTL_DSP_SYNC = SNDCTL_DSP_SYNC;
+unsigned IOCTL_SNDCTL_DSP_SPEED = SNDCTL_DSP_SPEED;
+unsigned IOCTL_SOUND_PCM_READ_RATE = SOUND_PCM_READ_RATE;
+unsigned IOCTL_SNDCTL_DSP_STEREO = SNDCTL_DSP_STEREO;
+unsigned IOCTL_SNDCTL_DSP_GETBLKSIZE = SNDCTL_DSP_GETBLKSIZE;
+unsigned IOCTL_SNDCTL_DSP_SETFMT = SNDCTL_DSP_SETFMT;
+unsigned IOCTL_SOUND_PCM_READ_BITS = SOUND_PCM_READ_BITS;
+unsigned IOCTL_SNDCTL_DSP_CHANNELS = SNDCTL_DSP_CHANNELS;
+unsigned IOCTL_SOUND_PCM_READ_CHANNELS = SOUND_PCM_READ_CHANNELS;
+unsigned IOCTL_SOUND_PCM_WRITE_FILTER = SOUND_PCM_WRITE_FILTER;
+unsigned IOCTL_SOUND_PCM_READ_FILTER = SOUND_PCM_READ_FILTER;
+unsigned IOCTL_SNDCTL_DSP_POST = SNDCTL_DSP_POST;
+unsigned IOCTL_SNDCTL_DSP_SUBDIVIDE = SNDCTL_DSP_SUBDIVIDE;
+unsigned IOCTL_SNDCTL_DSP_SETFRAGMENT = SNDCTL_DSP_SETFRAGMENT;
+unsigned IOCTL_SNDCTL_DSP_GETFMTS = SNDCTL_DSP_GETFMTS;
+unsigned IOCTL_SNDCTL_DSP_GETOSPACE = SNDCTL_DSP_GETOSPACE;
+unsigned IOCTL_SNDCTL_DSP_GETISPACE = SNDCTL_DSP_GETISPACE;
+unsigned IOCTL_SNDCTL_DSP_NONBLOCK = SNDCTL_DSP_NONBLOCK;
+unsigned IOCTL_SNDCTL_DSP_GETCAPS = SNDCTL_DSP_GETCAPS;
+unsigned IOCTL_SNDCTL_DSP_GETTRIGGER = SNDCTL_DSP_GETTRIGGER;
+unsigned IOCTL_SNDCTL_DSP_SETTRIGGER = SNDCTL_DSP_SETTRIGGER;
+unsigned IOCTL_SNDCTL_DSP_GETIPTR = SNDCTL_DSP_GETIPTR;
+unsigned IOCTL_SNDCTL_DSP_GETOPTR = SNDCTL_DSP_GETOPTR;
+unsigned IOCTL_SNDCTL_DSP_MAPINBUF = SNDCTL_DSP_MAPINBUF;
+unsigned IOCTL_SNDCTL_DSP_MAPOUTBUF = SNDCTL_DSP_MAPOUTBUF;
+unsigned IOCTL_SNDCTL_DSP_SETSYNCRO = SNDCTL_DSP_SETSYNCRO;
+unsigned IOCTL_SNDCTL_DSP_SETDUPLEX = SNDCTL_DSP_SETDUPLEX;
+unsigned IOCTL_SNDCTL_DSP_PROFILE = SNDCTL_DSP_PROFILE;
+unsigned IOCTL_SNDCTL_DSP_GETODELAY = SNDCTL_DSP_GETODELAY;
+unsigned IOCTL_SOUND_MIXER_INFO = SOUND_MIXER_INFO;
+unsigned IOCTL_SOUND_OLD_MIXER_INFO = SOUND_OLD_MIXER_INFO;
+unsigned IOCTL_OSS_GETVERSION = OSS_GETVERSION;
+unsigned IOCTL_SNDCTL_SYSINFO = SNDCTL_SYSINFO;
+unsigned IOCTL_SNDCTL_AUDIOINFO = SNDCTL_AUDIOINFO;
+unsigned IOCTL_SNDCTL_ENGINEINFO = SNDCTL_ENGINEINFO;
+unsigned IOCTL_SNDCTL_DSP_GETPLAYVOL = SNDCTL_DSP_GETPLAYVOL;
+unsigned IOCTL_SNDCTL_DSP_SETPLAYVOL = SNDCTL_DSP_SETPLAYVOL;
+unsigned IOCTL_SNDCTL_DSP_GETRECVOL = SNDCTL_DSP_GETRECVOL;
+unsigned IOCTL_SNDCTL_DSP_SETRECVOL = SNDCTL_DSP_SETRECVOL;
+unsigned IOCTL_SNDCTL_DSP_SKIP = SNDCTL_DSP_SKIP;
+unsigned IOCTL_SNDCTL_DSP_SILENCE = SNDCTL_DSP_SILENCE;
+
+const int si_SEGV_MAPERR = SEGV_MAPERR;
+const int si_SEGV_ACCERR = SEGV_ACCERR;
+
+const int modctl_load = MODCTL_LOAD;
+const int modctl_unload = MODCTL_UNLOAD;
+const int modctl_stat = MODCTL_STAT;
+const int modctl_exists = MODCTL_EXISTS;
+
+const unsigned SHA1_CTX_sz = sizeof(SHA1_CTX);
+const unsigned SHA1_return_length = SHA1_DIGEST_STRING_LENGTH;
+
+const unsigned MD4_CTX_sz = sizeof(MD4_CTX);
+const unsigned MD4_return_length = MD4_DIGEST_STRING_LENGTH;
+
+const unsigned RMD160_CTX_sz = sizeof(RMD160_CTX);
+const unsigned RMD160_return_length = RMD160_DIGEST_STRING_LENGTH;
+
+const unsigned MD5_CTX_sz = sizeof(MD5_CTX);
+const unsigned MD5_return_length = MD5_DIGEST_STRING_LENGTH;
+
+const unsigned fpos_t_sz = sizeof(fpos_t);
+
+const unsigned MD2_CTX_sz = sizeof(MD2_CTX);
+const unsigned MD2_return_length = MD2_DIGEST_STRING_LENGTH;
+
+#define SHA2_CONST(LEN) \
+ const unsigned SHA##LEN##_CTX_sz = sizeof(SHA##LEN##_CTX); \
+ const unsigned SHA##LEN##_return_length = SHA##LEN##_DIGEST_STRING_LENGTH; \
+ const unsigned SHA##LEN##_block_length = SHA##LEN##_BLOCK_LENGTH; \
+ const unsigned SHA##LEN##_digest_length = SHA##LEN##_DIGEST_LENGTH
+
+SHA2_CONST(224);
+SHA2_CONST(256);
+SHA2_CONST(384);
+SHA2_CONST(512);
+
+#undef SHA2_CONST
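+// For reference, SHA2_CONST(256) above expands mechanically to:
+//   const unsigned SHA256_CTX_sz = sizeof(SHA256_CTX);
+//   const unsigned SHA256_return_length = SHA256_DIGEST_STRING_LENGTH;
+//   const unsigned SHA256_block_length = SHA256_BLOCK_LENGTH;
+//   const unsigned SHA256_digest_length = SHA256_DIGEST_LENGTH;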
+
+const int unvis_valid = UNVIS_VALID;
+const int unvis_validpush = UNVIS_VALIDPUSH;
+} // namespace __sanitizer
+
+using namespace __sanitizer;
+
+COMPILER_CHECK(sizeof(__sanitizer_pthread_attr_t) >= sizeof(pthread_attr_t));
+
+COMPILER_CHECK(sizeof(socklen_t) == sizeof(unsigned));
+CHECK_TYPE_SIZE(pthread_key_t);
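+// The checks in this block are compile-time layout assertions: the build
+// fails if a __sanitizer_* shadow type drifts out of sync with the real
+// system type. A minimal sketch of what the two helper macros assert
+// (the actual definitions live in the sanitizer_common headers and may
+// differ in detail):
+//   #define CHECK_TYPE_SIZE(T) \
+//     COMPILER_CHECK(sizeof(__sanitizer_##T) == sizeof(T))
+//   #define CHECK_SIZE_AND_OFFSET(C, M)                        \
+//     COMPILER_CHECK(sizeof(((__sanitizer_##C *)NULL)->M) ==   \
+//                    sizeof(((C *)NULL)->M));                  \
+//     COMPILER_CHECK(offsetof(__sanitizer_##C, M) ==           \
+//                    offsetof(C, M))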
+
+// There are more undocumented fields in dl_phdr_info that we are not interested
+// in.
+COMPILER_CHECK(sizeof(__sanitizer_dl_phdr_info) <= sizeof(dl_phdr_info));
+CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_addr);
+CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_name);
+CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_phdr);
+CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_phnum);
+
+CHECK_TYPE_SIZE(glob_t);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_pathc);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_pathv);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_offs);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_flags);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_closedir);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_readdir);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_opendir);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_lstat);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_stat);
+
+CHECK_TYPE_SIZE(addrinfo);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_flags);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_family);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_socktype);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_protocol);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_addrlen);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_canonname);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_addr);
+
+CHECK_TYPE_SIZE(hostent);
+CHECK_SIZE_AND_OFFSET(hostent, h_name);
+CHECK_SIZE_AND_OFFSET(hostent, h_aliases);
+CHECK_SIZE_AND_OFFSET(hostent, h_addrtype);
+CHECK_SIZE_AND_OFFSET(hostent, h_length);
+CHECK_SIZE_AND_OFFSET(hostent, h_addr_list);
+
+CHECK_TYPE_SIZE(iovec);
+CHECK_SIZE_AND_OFFSET(iovec, iov_base);
+CHECK_SIZE_AND_OFFSET(iovec, iov_len);
+
+CHECK_TYPE_SIZE(msghdr);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_name);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_namelen);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_iov);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_iovlen);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_control);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_controllen);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_flags);
+
+CHECK_TYPE_SIZE(cmsghdr);
+CHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_len);
+CHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_level);
+CHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_type);
+
+COMPILER_CHECK(sizeof(__sanitizer_dirent) <= sizeof(dirent));
+CHECK_SIZE_AND_OFFSET(dirent, d_fileno);
+CHECK_SIZE_AND_OFFSET(dirent, d_reclen);
+
+CHECK_TYPE_SIZE(ifconf);
+CHECK_SIZE_AND_OFFSET(ifconf, ifc_len);
+CHECK_SIZE_AND_OFFSET(ifconf, ifc_ifcu);
+
+CHECK_TYPE_SIZE(pollfd);
+CHECK_SIZE_AND_OFFSET(pollfd, fd);
+CHECK_SIZE_AND_OFFSET(pollfd, events);
+CHECK_SIZE_AND_OFFSET(pollfd, revents);
+
+CHECK_TYPE_SIZE(nfds_t);
+
+CHECK_TYPE_SIZE(sigset_t);
+
+COMPILER_CHECK(sizeof(__sanitizer_sigaction) == sizeof(struct sigaction));
+// Can't write checks for sa_handler and sa_sigaction due to them being
+// preprocessor macros.
+CHECK_STRUCT_SIZE_AND_OFFSET(sigaction, sa_mask);
+
+CHECK_TYPE_SIZE(wordexp_t);
+CHECK_SIZE_AND_OFFSET(wordexp_t, we_wordc);
+CHECK_SIZE_AND_OFFSET(wordexp_t, we_wordv);
+CHECK_SIZE_AND_OFFSET(wordexp_t, we_offs);
+
+COMPILER_CHECK(sizeof(__sanitizer_FILE) <= sizeof(FILE));
+CHECK_SIZE_AND_OFFSET(FILE, _p);
+CHECK_SIZE_AND_OFFSET(FILE, _r);
+CHECK_SIZE_AND_OFFSET(FILE, _w);
+CHECK_SIZE_AND_OFFSET(FILE, _flags);
+CHECK_SIZE_AND_OFFSET(FILE, _file);
+CHECK_SIZE_AND_OFFSET(FILE, _bf);
+CHECK_SIZE_AND_OFFSET(FILE, _lbfsize);
+CHECK_SIZE_AND_OFFSET(FILE, _cookie);
+CHECK_SIZE_AND_OFFSET(FILE, _close);
+CHECK_SIZE_AND_OFFSET(FILE, _read);
+CHECK_SIZE_AND_OFFSET(FILE, _seek);
+CHECK_SIZE_AND_OFFSET(FILE, _write);
+CHECK_SIZE_AND_OFFSET(FILE, _ext);
+CHECK_SIZE_AND_OFFSET(FILE, _up);
+CHECK_SIZE_AND_OFFSET(FILE, _ur);
+CHECK_SIZE_AND_OFFSET(FILE, _ubuf);
+CHECK_SIZE_AND_OFFSET(FILE, _nbuf);
+CHECK_SIZE_AND_OFFSET(FILE, _flush);
+CHECK_SIZE_AND_OFFSET(FILE, _lb_unused);
+CHECK_SIZE_AND_OFFSET(FILE, _blksize);
+CHECK_SIZE_AND_OFFSET(FILE, _offset);
+
+CHECK_TYPE_SIZE(tm);
+CHECK_SIZE_AND_OFFSET(tm, tm_sec);
+CHECK_SIZE_AND_OFFSET(tm, tm_min);
+CHECK_SIZE_AND_OFFSET(tm, tm_hour);
+CHECK_SIZE_AND_OFFSET(tm, tm_mday);
+CHECK_SIZE_AND_OFFSET(tm, tm_mon);
+CHECK_SIZE_AND_OFFSET(tm, tm_year);
+CHECK_SIZE_AND_OFFSET(tm, tm_wday);
+CHECK_SIZE_AND_OFFSET(tm, tm_yday);
+CHECK_SIZE_AND_OFFSET(tm, tm_isdst);
+CHECK_SIZE_AND_OFFSET(tm, tm_gmtoff);
+CHECK_SIZE_AND_OFFSET(tm, tm_zone);
+
+CHECK_TYPE_SIZE(ether_addr);
+
+CHECK_TYPE_SIZE(ipc_perm);
+CHECK_SIZE_AND_OFFSET(ipc_perm, _key);
+CHECK_SIZE_AND_OFFSET(ipc_perm, _seq);
+CHECK_SIZE_AND_OFFSET(ipc_perm, uid);
+CHECK_SIZE_AND_OFFSET(ipc_perm, gid);
+CHECK_SIZE_AND_OFFSET(ipc_perm, cuid);
+CHECK_SIZE_AND_OFFSET(ipc_perm, cgid);
+CHECK_SIZE_AND_OFFSET(ipc_perm, mode);
+
+CHECK_TYPE_SIZE(shmid_ds);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_perm);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_segsz);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_atime);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_dtime);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_ctime);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_cpid);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_lpid);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_nattch);
+
+CHECK_TYPE_SIZE(clock_t);
+
+CHECK_TYPE_SIZE(ifaddrs);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_next);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_name);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_addr);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_netmask);
+// Compare against the union, because we can't reach into the union in a
+// compliant way.
+#ifdef ifa_dstaddr
+#undef ifa_dstaddr
+#endif
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_dstaddr);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_data);
+
+CHECK_TYPE_SIZE(timeb);
+CHECK_SIZE_AND_OFFSET(timeb, time);
+CHECK_SIZE_AND_OFFSET(timeb, millitm);
+CHECK_SIZE_AND_OFFSET(timeb, timezone);
+CHECK_SIZE_AND_OFFSET(timeb, dstflag);
+
+CHECK_TYPE_SIZE(passwd);
+CHECK_SIZE_AND_OFFSET(passwd, pw_name);
+CHECK_SIZE_AND_OFFSET(passwd, pw_passwd);
+CHECK_SIZE_AND_OFFSET(passwd, pw_uid);
+CHECK_SIZE_AND_OFFSET(passwd, pw_gid);
+CHECK_SIZE_AND_OFFSET(passwd, pw_dir);
+CHECK_SIZE_AND_OFFSET(passwd, pw_shell);
+
+CHECK_SIZE_AND_OFFSET(passwd, pw_gecos);
+
+CHECK_TYPE_SIZE(group);
+CHECK_SIZE_AND_OFFSET(group, gr_name);
+CHECK_SIZE_AND_OFFSET(group, gr_passwd);
+CHECK_SIZE_AND_OFFSET(group, gr_gid);
+CHECK_SIZE_AND_OFFSET(group, gr_mem);
+
+CHECK_TYPE_SIZE(modctl_load_t);
+CHECK_SIZE_AND_OFFSET(modctl_load_t, ml_filename);
+CHECK_SIZE_AND_OFFSET(modctl_load_t, ml_flags);
+CHECK_SIZE_AND_OFFSET(modctl_load_t, ml_props);
+CHECK_SIZE_AND_OFFSET(modctl_load_t, ml_propslen);
+
+#endif // SANITIZER_NETBSD
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_platform_limits_netbsd.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_platform_limits_netbsd.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_platform_limits_netbsd.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_platform_limits_netbsd.h (revision 351984)
@@ -0,0 +1,2405 @@
+//===-- sanitizer_platform_limits_netbsd.h --------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of Sanitizer common code.
+//
+// Sizes and layouts of platform-specific NetBSD data structures.
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_PLATFORM_LIMITS_NETBSD_H
+#define SANITIZER_PLATFORM_LIMITS_NETBSD_H
+
+#if SANITIZER_NETBSD
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_platform.h"
+
+#define _GET_LINK_MAP_BY_DLOPEN_HANDLE(handle, shift) \
+ ((link_map *)((handle) == nullptr ? nullptr : ((char *)(handle) + (shift))))
+
+#if defined(__x86_64__)
+#define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) \
+ _GET_LINK_MAP_BY_DLOPEN_HANDLE(handle, 264)
+#elif defined(__i386__)
+#define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) \
+ _GET_LINK_MAP_BY_DLOPEN_HANDLE(handle, 136)
+#endif
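+// Illustrative use of the macro above (a sketch, not code from this file):
+// on NetBSD, dlopen() returns a pointer to the dynamic linker's internal
+// object record, and the hard-coded shift (264 on x86_64, 136 on i386) is
+// taken to be the byte offset of the embedded link_map within that record.
+//   void *handle = dlopen("libm.so", RTLD_NOW);
+//   link_map *map = GET_LINK_MAP_BY_DLOPEN_HANDLE(handle);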
+
+namespace __sanitizer {
+extern unsigned struct_utsname_sz;
+extern unsigned struct_stat_sz;
+extern unsigned struct_rusage_sz;
+extern unsigned siginfo_t_sz;
+extern unsigned struct_itimerval_sz;
+extern unsigned pthread_t_sz;
+extern unsigned pthread_mutex_t_sz;
+extern unsigned pthread_cond_t_sz;
+extern unsigned pid_t_sz;
+extern unsigned timeval_sz;
+extern unsigned uid_t_sz;
+extern unsigned gid_t_sz;
+extern unsigned mbstate_t_sz;
+extern unsigned struct_timezone_sz;
+extern unsigned struct_tms_sz;
+extern unsigned struct_itimerspec_sz;
+extern unsigned struct_sigevent_sz;
+extern unsigned struct_sched_param_sz;
+extern unsigned struct_statfs_sz;
+extern unsigned struct_sockaddr_sz;
+extern unsigned ucontext_t_sz;
+
+extern unsigned struct_rlimit_sz;
+extern unsigned struct_utimbuf_sz;
+extern unsigned struct_timespec_sz;
+extern unsigned struct_sembuf_sz;
+
+extern unsigned struct_kevent_sz;
+extern unsigned struct_FTS_sz;
+extern unsigned struct_FTSENT_sz;
+
+extern unsigned struct_regex_sz;
+extern unsigned struct_regmatch_sz;
+
+extern unsigned struct_fstab_sz;
+
+struct __sanitizer_regmatch {
+ OFF_T rm_so;
+ OFF_T rm_eo;
+};
+
+typedef struct __sanitizer_modctl_load {
+ const char *ml_filename;
+ int ml_flags;
+ const char *ml_props;
+ uptr ml_propslen;
+} __sanitizer_modctl_load_t;
+extern const int modctl_load;
+extern const int modctl_unload;
+extern const int modctl_stat;
+extern const int modctl_exists;
+
+union __sanitizer_sigval {
+ int sival_int;
+ uptr sival_ptr;
+};
+
+struct __sanitizer_sigevent {
+ int sigev_notify;
+ int sigev_signo;
+ union __sanitizer_sigval sigev_value;
+ uptr sigev_notify_function;
+ uptr sigev_notify_attributes;
+};
+
+struct __sanitizer_aiocb {
+ u64 aio_offset;
+ uptr aio_buf;
+ uptr aio_nbytes;
+ int aio_fildes;
+ int aio_lio_opcode;
+ int aio_reqprio;
+ struct __sanitizer_sigevent aio_sigevent;
+ int _state;
+ int _errno;
+ long _retval;
+};
+
+struct __sanitizer_sem_t {
+ uptr data[5];
+};
+
+struct __sanitizer_ipc_perm {
+ u32 uid;
+ u32 gid;
+ u32 cuid;
+ u32 cgid;
+ u32 mode;
+ unsigned short _seq;
+ long _key;
+};
+
+struct __sanitizer_shmid_ds {
+ __sanitizer_ipc_perm shm_perm;
+ unsigned long shm_segsz;
+ u32 shm_lpid;
+ u32 shm_cpid;
+ unsigned int shm_nattch;
+ u64 shm_atime;
+ u64 shm_dtime;
+ u64 shm_ctime;
+ void *_shm_internal;
+};
+
+struct __sanitizer_protoent {
+ char *p_name;
+ char **p_aliases;
+ int p_proto;
+};
+
+struct __sanitizer_netent {
+ char *n_name;
+ char **n_aliases;
+ int n_addrtype;
+ u32 n_net;
+};
+
+extern unsigned struct_msqid_ds_sz;
+extern unsigned struct_mq_attr_sz;
+extern unsigned struct_timex_sz;
+extern unsigned struct_statvfs_sz;
+
+struct __sanitizer_iovec {
+ void *iov_base;
+ uptr iov_len;
+};
+
+struct __sanitizer_ifaddrs {
+ struct __sanitizer_ifaddrs *ifa_next;
+ char *ifa_name;
+ unsigned int ifa_flags;
+ void *ifa_addr; // (struct sockaddr *)
+ void *ifa_netmask; // (struct sockaddr *)
+ void *ifa_dstaddr; // (struct sockaddr *)
+ void *ifa_data;
+ unsigned int ifa_addrflags;
+};
+
+typedef unsigned int __sanitizer_socklen_t;
+
+typedef unsigned __sanitizer_pthread_key_t;
+
+typedef long long __sanitizer_time_t;
+typedef int __sanitizer_suseconds_t;
+
+struct __sanitizer_timeval {
+ __sanitizer_time_t tv_sec;
+ __sanitizer_suseconds_t tv_usec;
+};
+
+struct __sanitizer_itimerval {
+ struct __sanitizer_timeval it_interval;
+ struct __sanitizer_timeval it_value;
+};
+
+struct __sanitizer_timespec {
+ __sanitizer_time_t tv_sec;
+ long tv_nsec;
+};
+
+struct __sanitizer_passwd {
+ char *pw_name;
+ char *pw_passwd;
+ int pw_uid;
+ int pw_gid;
+ __sanitizer_time_t pw_change;
+ char *pw_class;
+ char *pw_gecos;
+ char *pw_dir;
+ char *pw_shell;
+ __sanitizer_time_t pw_expire;
+};
+
+struct __sanitizer_group {
+ char *gr_name;
+ char *gr_passwd;
+ int gr_gid;
+ char **gr_mem;
+};
+
+struct __sanitizer_timeb {
+ __sanitizer_time_t time;
+ unsigned short millitm;
+ short timezone;
+ short dstflag;
+};
+
+struct __sanitizer_ether_addr {
+ u8 octet[6];
+};
+
+struct __sanitizer_tm {
+ int tm_sec;
+ int tm_min;
+ int tm_hour;
+ int tm_mday;
+ int tm_mon;
+ int tm_year;
+ int tm_wday;
+ int tm_yday;
+ int tm_isdst;
+ long int tm_gmtoff;
+ const char *tm_zone;
+};
+
+struct __sanitizer_msghdr {
+ void *msg_name;
+ unsigned msg_namelen;
+ struct __sanitizer_iovec *msg_iov;
+ unsigned msg_iovlen;
+ void *msg_control;
+ unsigned msg_controllen;
+ int msg_flags;
+};
+
+struct __sanitizer_mmsghdr {
+ struct __sanitizer_msghdr msg_hdr;
+ unsigned int msg_len;
+};
+
+struct __sanitizer_cmsghdr {
+ unsigned cmsg_len;
+ int cmsg_level;
+ int cmsg_type;
+};
+
+struct __sanitizer_dirent {
+ u64 d_fileno;
+ u16 d_reclen;
+ // more fields that we don't care about
+};
+
+typedef int __sanitizer_clock_t;
+typedef int __sanitizer_clockid_t;
+
+typedef u32 __sanitizer___kernel_uid_t;
+typedef u32 __sanitizer___kernel_gid_t;
+typedef u64 __sanitizer___kernel_off_t;
+typedef struct {
+ u32 fds_bits[8];
+} __sanitizer___kernel_fd_set;
+
+typedef struct {
+ unsigned int pta_magic;
+ int pta_flags;
+ void *pta_private;
+} __sanitizer_pthread_attr_t;
+
+struct __sanitizer_sigset_t {
+ // uint32_t * 4
+ unsigned int __bits[4];
+};
+
+struct __sanitizer_siginfo {
+  // The size is taken from sizeof(siginfo_t) on Linux (128 bytes).
+ u64 opaque[128 / sizeof(u64)];
+};
+
+using __sanitizer_sighandler_ptr = void (*)(int sig);
+using __sanitizer_sigactionhandler_ptr = void (*)(int sig,
+ __sanitizer_siginfo *siginfo,
+ void *uctx);
+
+struct __sanitizer_sigaction {
+ union {
+ __sanitizer_sighandler_ptr handler;
+ __sanitizer_sigactionhandler_ptr sigaction;
+ };
+ __sanitizer_sigset_t sa_mask;
+ int sa_flags;
+};
+
+extern unsigned struct_sigaltstack_sz;
+
+typedef unsigned int __sanitizer_sigset13_t;
+
+struct __sanitizer_sigaction13 {
+ __sanitizer_sighandler_ptr osa_handler;
+ __sanitizer_sigset13_t osa_mask;
+ int osa_flags;
+};
+
+struct __sanitizer_sigaltstack {
+ void *ss_sp;
+ uptr ss_size;
+ int ss_flags;
+};
+
+typedef __sanitizer_sigset_t __sanitizer_kernel_sigset_t;
+
+struct __sanitizer_kernel_sigaction_t {
+ union {
+ void (*handler)(int signo);
+ void (*sigaction)(int signo, void *info, void *ctx);
+ };
+ unsigned long sa_flags;
+ void (*sa_restorer)(void);
+ __sanitizer_kernel_sigset_t sa_mask;
+};
+
+extern const uptr sig_ign;
+extern const uptr sig_dfl;
+extern const uptr sig_err;
+extern const uptr sa_siginfo;
+
+extern int af_inet;
+extern int af_inet6;
+uptr __sanitizer_in_addr_sz(int af);
+
+struct __sanitizer_dl_phdr_info {
+ uptr dlpi_addr;
+ const char *dlpi_name;
+ const void *dlpi_phdr;
+ short dlpi_phnum;
+};
+
+extern unsigned struct_ElfW_Phdr_sz;
+
+struct __sanitizer_addrinfo {
+ int ai_flags;
+ int ai_family;
+ int ai_socktype;
+ int ai_protocol;
+ unsigned ai_addrlen;
+ char *ai_canonname;
+ void *ai_addr;
+ struct __sanitizer_addrinfo *ai_next;
+};
+
+struct __sanitizer_hostent {
+ char *h_name;
+ char **h_aliases;
+ int h_addrtype;
+ int h_length;
+ char **h_addr_list;
+};
+
+struct __sanitizer_pollfd {
+ int fd;
+ short events;
+ short revents;
+};
+
+typedef unsigned __sanitizer_nfds_t;
+
+typedef int __sanitizer_lwpid_t;
+
+struct __sanitizer_glob_t {
+ uptr gl_pathc;
+ uptr gl_matchc;
+ uptr gl_offs;
+ int gl_flags;
+ char **gl_pathv;
+ int (*gl_errfunc)(const char *, int);
+ void (*gl_closedir)(void *dirp);
+ struct dirent *(*gl_readdir)(void *dirp);
+ void *(*gl_opendir)(const char *);
+ int (*gl_lstat)(const char *, void * /* struct stat* */);
+ int (*gl_stat)(const char *, void * /* struct stat* */);
+};
+
+extern int glob_nomatch;
+extern int glob_altdirfunc;
+
+extern unsigned path_max;
+
+extern int struct_ttyent_sz;
+
+extern int ptrace_pt_io;
+extern int ptrace_pt_lwpinfo;
+extern int ptrace_pt_set_event_mask;
+extern int ptrace_pt_get_event_mask;
+extern int ptrace_pt_get_process_state;
+extern int ptrace_pt_set_siginfo;
+extern int ptrace_pt_get_siginfo;
+extern int ptrace_piod_read_d;
+extern int ptrace_piod_write_d;
+extern int ptrace_piod_read_i;
+extern int ptrace_piod_write_i;
+extern int ptrace_piod_read_auxv;
+extern int ptrace_pt_setregs;
+extern int ptrace_pt_getregs;
+extern int ptrace_pt_setfpregs;
+extern int ptrace_pt_getfpregs;
+extern int ptrace_pt_setdbregs;
+extern int ptrace_pt_getdbregs;
+
+struct __sanitizer_ptrace_io_desc {
+ int piod_op;
+ void *piod_offs;
+ void *piod_addr;
+ uptr piod_len;
+};
+
+struct __sanitizer_ptrace_lwpinfo {
+ __sanitizer_lwpid_t pl_lwpid;
+ int pl_event;
+};
+
+extern unsigned struct_ptrace_ptrace_io_desc_struct_sz;
+extern unsigned struct_ptrace_ptrace_lwpinfo_struct_sz;
+extern unsigned struct_ptrace_ptrace_event_struct_sz;
+extern unsigned struct_ptrace_ptrace_siginfo_struct_sz;
+
+extern unsigned struct_ptrace_reg_struct_sz;
+extern unsigned struct_ptrace_fpreg_struct_sz;
+extern unsigned struct_ptrace_dbreg_struct_sz;
+
+struct __sanitizer_wordexp_t {
+ uptr we_wordc;
+ char **we_wordv;
+ uptr we_offs;
+ char *we_strings;
+ uptr we_nbytes;
+};
+
+struct __sanitizer_FILE {
+ unsigned char *_p;
+ int _r;
+ int _w;
+ unsigned short _flags;
+ short _file;
+ struct {
+ unsigned char *_base;
+ int _size;
+ } _bf;
+ int _lbfsize;
+ void *_cookie;
+ int (*_close)(void *ptr);
+ u64 (*_read)(void *, void *, uptr);
+ u64 (*_seek)(void *, u64, int);
+ uptr (*_write)(void *, const void *, uptr);
+ struct {
+ unsigned char *_base;
+ int _size;
+ } _ext;
+ unsigned char *_up;
+ int _ur;
+ unsigned char _ubuf[3];
+ unsigned char _nbuf[1];
+ int (*_flush)(void *ptr);
+ char _lb_unused[sizeof(uptr)];
+ int _blksize;
+ u64 _offset;
+};
+#define SANITIZER_HAS_STRUCT_FILE 1
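+// SANITIZER_HAS_STRUCT_FILE tells generic interceptor code that the
+// __sanitizer_FILE fields above mirror the platform's real FILE (NetBSD's
+// struct __sFILE); the CHECK_SIZE_AND_OFFSET(FILE, ...) assertions in the
+// companion .cc file verify that layout at build time.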
+
+extern int shmctl_ipc_stat;
+
+// This simplifies generic code.
+#define struct_shminfo_sz -1
+#define struct_shm_info_sz -1
+#define shmctl_shm_stat -1
+#define shmctl_ipc_info -1
+#define shmctl_shm_info -1
+
+extern unsigned struct_utmp_sz;
+extern unsigned struct_utmpx_sz;
+
+extern int map_fixed;
+
+// ioctl arguments
+struct __sanitizer_ifconf {
+ int ifc_len;
+ union {
+ void *ifcu_req;
+ } ifc_ifcu;
+};
+
+struct __sanitizer_ttyent {
+ char *ty_name;
+ char *ty_getty;
+ char *ty_type;
+ int ty_status;
+ char *ty_window;
+ char *ty_comment;
+ char *ty_class;
+};
+
+extern const unsigned long __sanitizer_bufsiz;
+
+#define IOC_NRBITS 8
+#define IOC_TYPEBITS 8
+#define IOC_SIZEBITS 14
+#define IOC_DIRBITS 2
+#define IOC_NONE 0U
+#define IOC_WRITE 1U
+#define IOC_READ 2U
+#define IOC_NRMASK ((1 << IOC_NRBITS) - 1)
+#define IOC_TYPEMASK ((1 << IOC_TYPEBITS) - 1)
+#define IOC_SIZEMASK ((1 << IOC_SIZEBITS) - 1)
+#undef IOC_DIRMASK
+#define IOC_DIRMASK ((1 << IOC_DIRBITS) - 1)
+#define IOC_NRSHIFT 0
+#define IOC_TYPESHIFT (IOC_NRSHIFT + IOC_NRBITS)
+#define IOC_SIZESHIFT (IOC_TYPESHIFT + IOC_TYPEBITS)
+#define IOC_DIRSHIFT (IOC_SIZESHIFT + IOC_SIZEBITS)
+#define EVIOC_EV_MAX 0x1f
+#define EVIOC_ABS_MAX 0x3f
+
+#define IOC_DIR(nr) (((nr) >> IOC_DIRSHIFT) & IOC_DIRMASK)
+#define IOC_TYPE(nr) (((nr) >> IOC_TYPESHIFT) & IOC_TYPEMASK)
+#define IOC_NR(nr) (((nr) >> IOC_NRSHIFT) & IOC_NRMASK)
+#define IOC_SIZE(nr) (((nr) >> IOC_SIZESHIFT) & IOC_SIZEMASK)
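+// These mirror NetBSD's ioctl command encoding: a request number packs
+// direction, argument size, group and command number into bit fields.
+// A hedged sketch of how a request can be decoded with the accessors
+// above (IOCTL_TIOCGWINSZ is one of the identifiers declared later in
+// this header):
+//   unsigned req = IOCTL_TIOCGWINSZ;
+//   if (IOC_DIR(req) & IOC_READ)   // kernel writes the argument back
+//     /* check IOC_SIZE(req) writable bytes */;
+//   if (IOC_DIR(req) & IOC_WRITE)  // kernel reads the argument
+//     /* check IOC_SIZE(req) readable bytes */;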
+
+// ioctl request identifiers
+
+extern unsigned struct_altqreq_sz;
+extern unsigned struct_amr_user_ioctl_sz;
+extern unsigned struct_ap_control_sz;
+extern unsigned struct_apm_ctl_sz;
+extern unsigned struct_apm_event_info_sz;
+extern unsigned struct_apm_power_info_sz;
+extern unsigned struct_atabusiodetach_args_sz;
+extern unsigned struct_atabusioscan_args_sz;
+extern unsigned struct_ath_diag_sz;
+extern unsigned struct_atm_flowmap_sz;
+extern unsigned struct_audio_buf_info_sz;
+extern unsigned struct_audio_device_sz;
+extern unsigned struct_audio_encoding_sz;
+extern unsigned struct_audio_info_sz;
+extern unsigned struct_audio_offset_sz;
+extern unsigned struct_bio_locate_sz;
+extern unsigned struct_bioc_alarm_sz;
+extern unsigned struct_bioc_blink_sz;
+extern unsigned struct_bioc_disk_sz;
+extern unsigned struct_bioc_inq_sz;
+extern unsigned struct_bioc_setstate_sz;
+extern unsigned struct_bioc_vol_sz;
+extern unsigned struct_bioc_volops_sz;
+extern unsigned struct_bktr_chnlset_sz;
+extern unsigned struct_bktr_remote_sz;
+extern unsigned struct_blue_conf_sz;
+extern unsigned struct_blue_interface_sz;
+extern unsigned struct_blue_stats_sz;
+extern unsigned struct_bpf_dltlist_sz;
+extern unsigned struct_bpf_program_sz;
+extern unsigned struct_bpf_stat_old_sz;
+extern unsigned struct_bpf_stat_sz;
+extern unsigned struct_bpf_version_sz;
+extern unsigned struct_btreq_sz;
+extern unsigned struct_btsco_info_sz;
+extern unsigned struct_buffmem_desc_sz;
+extern unsigned struct_cbq_add_class_sz;
+extern unsigned struct_cbq_add_filter_sz;
+extern unsigned struct_cbq_delete_class_sz;
+extern unsigned struct_cbq_delete_filter_sz;
+extern unsigned struct_cbq_getstats_sz;
+extern unsigned struct_cbq_interface_sz;
+extern unsigned struct_cbq_modify_class_sz;
+extern unsigned struct_ccd_ioctl_sz;
+extern unsigned struct_cdnr_add_element_sz;
+extern unsigned struct_cdnr_add_filter_sz;
+extern unsigned struct_cdnr_add_tbmeter_sz;
+extern unsigned struct_cdnr_add_trtcm_sz;
+extern unsigned struct_cdnr_add_tswtcm_sz;
+extern unsigned struct_cdnr_delete_element_sz;
+extern unsigned struct_cdnr_delete_filter_sz;
+extern unsigned struct_cdnr_get_stats_sz;
+extern unsigned struct_cdnr_interface_sz;
+extern unsigned struct_cdnr_modify_tbmeter_sz;
+extern unsigned struct_cdnr_modify_trtcm_sz;
+extern unsigned struct_cdnr_modify_tswtcm_sz;
+extern unsigned struct_cdnr_tbmeter_stats_sz;
+extern unsigned struct_cdnr_tcm_stats_sz;
+extern unsigned struct_cgd_ioctl_sz;
+extern unsigned struct_cgd_user_sz;
+extern unsigned struct_changer_element_status_request_sz;
+extern unsigned struct_changer_exchange_request_sz;
+extern unsigned struct_changer_move_request_sz;
+extern unsigned struct_changer_params_sz;
+extern unsigned struct_changer_position_request_sz;
+extern unsigned struct_changer_set_voltag_request_sz;
+extern unsigned struct_clockctl_adjtime_sz;
+extern unsigned struct_clockctl_clock_settime_sz;
+extern unsigned struct_clockctl_ntp_adjtime_sz;
+extern unsigned struct_clockctl_settimeofday_sz;
+extern unsigned struct_cnwistats_sz;
+extern unsigned struct_cnwitrail_sz;
+extern unsigned struct_cnwstatus_sz;
+extern unsigned struct_count_info_sz;
+extern unsigned struct_cpu_ucode_sz;
+extern unsigned struct_cpu_ucode_version_sz;
+extern unsigned struct_crypt_kop_sz;
+extern unsigned struct_crypt_mkop_sz;
+extern unsigned struct_crypt_mop_sz;
+extern unsigned struct_crypt_op_sz;
+extern unsigned struct_crypt_result_sz;
+extern unsigned struct_crypt_sfop_sz;
+extern unsigned struct_crypt_sgop_sz;
+extern unsigned struct_cryptret_sz;
+extern unsigned struct_devdetachargs_sz;
+extern unsigned struct_devlistargs_sz;
+extern unsigned struct_devpmargs_sz;
+extern unsigned struct_devrescanargs_sz;
+extern unsigned struct_disk_badsecinfo_sz;
+extern unsigned struct_disk_strategy_sz;
+extern unsigned struct_disklabel_sz;
+extern unsigned struct_dkbad_sz;
+extern unsigned struct_dkwedge_info_sz;
+extern unsigned struct_dkwedge_list_sz;
+extern unsigned struct_dmio_setfunc_sz;
+extern unsigned struct_dmx_pes_filter_params_sz;
+extern unsigned struct_dmx_sct_filter_params_sz;
+extern unsigned struct_dmx_stc_sz;
+extern unsigned struct_dvb_diseqc_master_cmd_sz;
+extern unsigned struct_dvb_diseqc_slave_reply_sz;
+extern unsigned struct_dvb_frontend_event_sz;
+extern unsigned struct_dvb_frontend_info_sz;
+extern unsigned struct_dvb_frontend_parameters_sz;
+extern unsigned struct_eccapreq_sz;
+extern unsigned struct_fbcmap_sz;
+extern unsigned struct_fbcurpos_sz;
+extern unsigned struct_fbcursor_sz;
+extern unsigned struct_fbgattr_sz;
+extern unsigned struct_fbsattr_sz;
+extern unsigned struct_fbtype_sz;
+extern unsigned struct_fdformat_cmd_sz;
+extern unsigned struct_fdformat_parms_sz;
+extern unsigned struct_fifoq_conf_sz;
+extern unsigned struct_fifoq_getstats_sz;
+extern unsigned struct_fifoq_interface_sz;
+extern unsigned struct_format_op_sz;
+extern unsigned struct_fss_get_sz;
+extern unsigned struct_fss_set_sz;
+extern unsigned struct_gpio_attach_sz;
+extern unsigned struct_gpio_info_sz;
+extern unsigned struct_gpio_req_sz;
+extern unsigned struct_gpio_set_sz;
+extern unsigned struct_hfsc_add_class_sz;
+extern unsigned struct_hfsc_add_filter_sz;
+extern unsigned struct_hfsc_attach_sz;
+extern unsigned struct_hfsc_class_stats_sz;
+extern unsigned struct_hfsc_delete_class_sz;
+extern unsigned struct_hfsc_delete_filter_sz;
+extern unsigned struct_hfsc_interface_sz;
+extern unsigned struct_hfsc_modify_class_sz;
+extern unsigned struct_hpcfb_dsp_op_sz;
+extern unsigned struct_hpcfb_dspconf_sz;
+extern unsigned struct_hpcfb_fbconf_sz;
+extern unsigned struct_if_addrprefreq_sz;
+extern unsigned struct_if_clonereq_sz;
+extern unsigned struct_if_laddrreq_sz;
+extern unsigned struct_ifaddr_sz;
+extern unsigned struct_ifaliasreq_sz;
+extern unsigned struct_ifcapreq_sz;
+extern unsigned struct_ifconf_sz;
+extern unsigned struct_ifdatareq_sz;
+extern unsigned struct_ifdrv_sz;
+extern unsigned struct_ifmediareq_sz;
+extern unsigned struct_ifpppcstatsreq_sz;
+extern unsigned struct_ifpppstatsreq_sz;
+extern unsigned struct_ifreq_sz;
+extern unsigned struct_in6_addrpolicy_sz;
+extern unsigned struct_in6_ndireq_sz;
+extern unsigned struct_ioc_load_unload_sz;
+extern unsigned struct_ioc_patch_sz;
+extern unsigned struct_ioc_play_blocks_sz;
+extern unsigned struct_ioc_play_msf_sz;
+extern unsigned struct_ioc_play_track_sz;
+extern unsigned struct_ioc_read_subchannel_sz;
+extern unsigned struct_ioc_read_toc_entry_sz;
+extern unsigned struct_ioc_toc_header_sz;
+extern unsigned struct_ioc_vol_sz;
+extern unsigned struct_ioctl_pt_sz;
+extern unsigned struct_ioppt_sz;
+extern unsigned struct_iovec_sz;
+extern unsigned struct_ipfobj_sz;
+extern unsigned struct_irda_params_sz;
+extern unsigned struct_isp_fc_device_sz;
+extern unsigned struct_isp_fc_tsk_mgmt_sz;
+extern unsigned struct_isp_hba_device_sz;
+extern unsigned struct_isv_cmd_sz;
+extern unsigned struct_jobs_add_class_sz;
+extern unsigned struct_jobs_add_filter_sz;
+extern unsigned struct_jobs_attach_sz;
+extern unsigned struct_jobs_class_stats_sz;
+extern unsigned struct_jobs_delete_class_sz;
+extern unsigned struct_jobs_delete_filter_sz;
+extern unsigned struct_jobs_interface_sz;
+extern unsigned struct_jobs_modify_class_sz;
+extern unsigned struct_kbentry_sz;
+extern unsigned struct_kfilter_mapping_sz;
+extern unsigned struct_kiockeymap_sz;
+extern unsigned struct_ksyms_gsymbol_sz;
+extern unsigned struct_ksyms_gvalue_sz;
+extern unsigned struct_ksyms_ogsymbol_sz;
+extern unsigned struct_kttcp_io_args_sz;
+extern unsigned struct_ltchars_sz;
+extern unsigned struct_lua_create_sz;
+extern unsigned struct_lua_info_sz;
+extern unsigned struct_lua_load_sz;
+extern unsigned struct_lua_require_sz;
+extern unsigned struct_mbpp_param_sz;
+extern unsigned struct_md_conf_sz;
+extern unsigned struct_meteor_capframe_sz;
+extern unsigned struct_meteor_counts_sz;
+extern unsigned struct_meteor_geomet_sz;
+extern unsigned struct_meteor_pixfmt_sz;
+extern unsigned struct_meteor_video_sz;
+extern unsigned struct_mlx_cinfo_sz;
+extern unsigned struct_mlx_pause_sz;
+extern unsigned struct_mlx_rebuild_request_sz;
+extern unsigned struct_mlx_rebuild_status_sz;
+extern unsigned struct_mlx_usercommand_sz;
+extern unsigned struct_mly_user_command_sz;
+extern unsigned struct_mly_user_health_sz;
+extern unsigned struct_mtget_sz;
+extern unsigned struct_mtop_sz;
+extern unsigned struct_npf_ioctl_table_sz;
+extern unsigned struct_npioctl_sz;
+extern unsigned struct_nvme_pt_command_sz;
+extern unsigned struct_ochanger_element_status_request_sz;
+extern unsigned struct_ofiocdesc_sz;
+extern unsigned struct_okiockey_sz;
+extern unsigned struct_ortentry_sz;
+extern unsigned struct_oscsi_addr_sz;
+extern unsigned struct_oss_audioinfo_sz;
+extern unsigned struct_oss_sysinfo_sz;
+extern unsigned struct_pciio_bdf_cfgreg_sz;
+extern unsigned struct_pciio_businfo_sz;
+extern unsigned struct_pciio_cfgreg_sz;
+extern unsigned struct_pciio_drvname_sz;
+extern unsigned struct_pciio_drvnameonbus_sz;
+extern unsigned struct_pcvtid_sz;
+extern unsigned struct_pf_osfp_ioctl_sz;
+extern unsigned struct_pf_status_sz;
+extern unsigned struct_pfioc_altq_sz;
+extern unsigned struct_pfioc_if_sz;
+extern unsigned struct_pfioc_iface_sz;
+extern unsigned struct_pfioc_limit_sz;
+extern unsigned struct_pfioc_natlook_sz;
+extern unsigned struct_pfioc_pooladdr_sz;
+extern unsigned struct_pfioc_qstats_sz;
+extern unsigned struct_pfioc_rule_sz;
+extern unsigned struct_pfioc_ruleset_sz;
+extern unsigned struct_pfioc_src_node_kill_sz;
+extern unsigned struct_pfioc_src_nodes_sz;
+extern unsigned struct_pfioc_state_kill_sz;
+extern unsigned struct_pfioc_state_sz;
+extern unsigned struct_pfioc_states_sz;
+extern unsigned struct_pfioc_table_sz;
+extern unsigned struct_pfioc_tm_sz;
+extern unsigned struct_pfioc_trans_sz;
+extern unsigned struct_plistref_sz;
+extern unsigned struct_power_type_sz;
+extern unsigned struct_ppp_idle_sz;
+extern unsigned struct_ppp_option_data_sz;
+extern unsigned struct_ppp_rawin_sz;
+extern unsigned struct_pppoeconnectionstate_sz;
+extern unsigned struct_pppoediscparms_sz;
+extern unsigned struct_priq_add_class_sz;
+extern unsigned struct_priq_add_filter_sz;
+extern unsigned struct_priq_class_stats_sz;
+extern unsigned struct_priq_delete_class_sz;
+extern unsigned struct_priq_delete_filter_sz;
+extern unsigned struct_priq_interface_sz;
+extern unsigned struct_priq_modify_class_sz;
+extern unsigned struct_ptmget_sz;
+extern unsigned struct_pvctxreq_sz;
+extern unsigned struct_radio_info_sz;
+extern unsigned struct_red_conf_sz;
+extern unsigned struct_red_interface_sz;
+extern unsigned struct_red_stats_sz;
+extern unsigned struct_redparams_sz;
+extern unsigned struct_rf_pmparams_sz;
+extern unsigned struct_rf_pmstat_sz;
+extern unsigned struct_rf_recon_req_sz;
+extern unsigned struct_rio_conf_sz;
+extern unsigned struct_rio_interface_sz;
+extern unsigned struct_rio_stats_sz;
+extern unsigned struct_scan_io_sz;
+extern unsigned struct_scbusaccel_args_sz;
+extern unsigned struct_scbusiodetach_args_sz;
+extern unsigned struct_scbusioscan_args_sz;
+extern unsigned struct_scsi_addr_sz;
+extern unsigned struct_seq_event_rec_sz;
+extern unsigned struct_session_op_sz;
+extern unsigned struct_sgttyb_sz;
+extern unsigned struct_sioc_sg_req_sz;
+extern unsigned struct_sioc_vif_req_sz;
+extern unsigned struct_smbioc_flags_sz;
+extern unsigned struct_smbioc_lookup_sz;
+extern unsigned struct_smbioc_oshare_sz;
+extern unsigned struct_smbioc_ossn_sz;
+extern unsigned struct_smbioc_rq_sz;
+extern unsigned struct_smbioc_rw_sz;
+extern unsigned struct_spppauthcfg_sz;
+extern unsigned struct_spppauthfailuresettings_sz;
+extern unsigned struct_spppauthfailurestats_sz;
+extern unsigned struct_spppdnsaddrs_sz;
+extern unsigned struct_spppdnssettings_sz;
+extern unsigned struct_spppidletimeout_sz;
+extern unsigned struct_spppkeepalivesettings_sz;
+extern unsigned struct_sppplcpcfg_sz;
+extern unsigned struct_spppstatus_sz;
+extern unsigned struct_spppstatusncp_sz;
+extern unsigned struct_srt_rt_sz;
+extern unsigned struct_stic_xinfo_sz;
+extern unsigned struct_sun_dkctlr_sz;
+extern unsigned struct_sun_dkgeom_sz;
+extern unsigned struct_sun_dkpart_sz;
+extern unsigned struct_synth_info_sz;
+extern unsigned struct_tbrreq_sz;
+extern unsigned struct_tchars_sz;
+extern unsigned struct_termios_sz;
+extern unsigned struct_timeval_sz;
+extern unsigned struct_twe_drivecommand_sz;
+extern unsigned struct_twe_paramcommand_sz;
+extern unsigned struct_twe_usercommand_sz;
+extern unsigned struct_ukyopon_identify_sz;
+extern unsigned struct_urio_command_sz;
+extern unsigned struct_usb_alt_interface_sz;
+extern unsigned struct_usb_bulk_ra_wb_opt_sz;
+extern unsigned struct_usb_config_desc_sz;
+extern unsigned struct_usb_ctl_report_desc_sz;
+extern unsigned struct_usb_ctl_report_sz;
+extern unsigned struct_usb_ctl_request_sz;
+#if defined(__x86_64__)
+extern unsigned struct_nvmm_ioc_capability_sz;
+extern unsigned struct_nvmm_ioc_machine_create_sz;
+extern unsigned struct_nvmm_ioc_machine_destroy_sz;
+extern unsigned struct_nvmm_ioc_machine_configure_sz;
+extern unsigned struct_nvmm_ioc_vcpu_create_sz;
+extern unsigned struct_nvmm_ioc_vcpu_destroy_sz;
+extern unsigned struct_nvmm_ioc_vcpu_setstate_sz;
+extern unsigned struct_nvmm_ioc_vcpu_getstate_sz;
+extern unsigned struct_nvmm_ioc_vcpu_inject_sz;
+extern unsigned struct_nvmm_ioc_vcpu_run_sz;
+extern unsigned struct_nvmm_ioc_gpa_map_sz;
+extern unsigned struct_nvmm_ioc_gpa_unmap_sz;
+extern unsigned struct_nvmm_ioc_hva_map_sz;
+extern unsigned struct_nvmm_ioc_hva_unmap_sz;
+extern unsigned struct_nvmm_ioc_ctl_sz;
+#endif
+extern unsigned struct_spi_ioctl_configure_sz;
+extern unsigned struct_spi_ioctl_transfer_sz;
+extern unsigned struct_autofs_daemon_request_sz;
+extern unsigned struct_autofs_daemon_done_sz;
+extern unsigned struct_sctp_connectx_addrs_sz;
+extern unsigned struct_usb_device_info_old_sz;
+extern unsigned struct_usb_device_info_sz;
+extern unsigned struct_usb_device_stats_sz;
+extern unsigned struct_usb_endpoint_desc_sz;
+extern unsigned struct_usb_full_desc_sz;
+extern unsigned struct_usb_interface_desc_sz;
+extern unsigned struct_usb_string_desc_sz;
+extern unsigned struct_utoppy_readfile_sz;
+extern unsigned struct_utoppy_rename_sz;
+extern unsigned struct_utoppy_stats_sz;
+extern unsigned struct_utoppy_writefile_sz;
+extern unsigned struct_v4l2_audio_sz;
+extern unsigned struct_v4l2_audioout_sz;
+extern unsigned struct_v4l2_buffer_sz;
+extern unsigned struct_v4l2_capability_sz;
+extern unsigned struct_v4l2_control_sz;
+extern unsigned struct_v4l2_crop_sz;
+extern unsigned struct_v4l2_cropcap_sz;
+extern unsigned struct_v4l2_fmtdesc_sz;
+extern unsigned struct_v4l2_format_sz;
+extern unsigned struct_v4l2_framebuffer_sz;
+extern unsigned struct_v4l2_frequency_sz;
+extern unsigned struct_v4l2_frmivalenum_sz;
+extern unsigned struct_v4l2_frmsizeenum_sz;
+extern unsigned struct_v4l2_input_sz;
+extern unsigned struct_v4l2_jpegcompression_sz;
+extern unsigned struct_v4l2_modulator_sz;
+extern unsigned struct_v4l2_output_sz;
+extern unsigned struct_v4l2_queryctrl_sz;
+extern unsigned struct_v4l2_querymenu_sz;
+extern unsigned struct_v4l2_requestbuffers_sz;
+extern unsigned struct_v4l2_standard_sz;
+extern unsigned struct_v4l2_streamparm_sz;
+extern unsigned struct_v4l2_tuner_sz;
+extern unsigned struct_vnd_ioctl_sz;
+extern unsigned struct_vnd_user_sz;
+extern unsigned struct_vt_stat_sz;
+extern unsigned struct_wdog_conf_sz;
+extern unsigned struct_wdog_mode_sz;
+extern unsigned struct_ipmi_recv_sz;
+extern unsigned struct_ipmi_req_sz;
+extern unsigned struct_ipmi_cmdspec_sz;
+extern unsigned struct_wfq_conf_sz;
+extern unsigned struct_wfq_getqid_sz;
+extern unsigned struct_wfq_getstats_sz;
+extern unsigned struct_wfq_interface_sz;
+extern unsigned struct_wfq_setweight_sz;
+extern unsigned struct_winsize_sz;
+extern unsigned struct_wscons_event_sz;
+extern unsigned struct_wsdisplay_addscreendata_sz;
+extern unsigned struct_wsdisplay_char_sz;
+extern unsigned struct_wsdisplay_cmap_sz;
+extern unsigned struct_wsdisplay_curpos_sz;
+extern unsigned struct_wsdisplay_cursor_sz;
+extern unsigned struct_wsdisplay_delscreendata_sz;
+extern unsigned struct_wsdisplay_fbinfo_sz;
+extern unsigned struct_wsdisplay_font_sz;
+extern unsigned struct_wsdisplay_kbddata_sz;
+extern unsigned struct_wsdisplay_msgattrs_sz;
+extern unsigned struct_wsdisplay_param_sz;
+extern unsigned struct_wsdisplay_scroll_data_sz;
+extern unsigned struct_wsdisplay_usefontdata_sz;
+extern unsigned struct_wsdisplayio_blit_sz;
+extern unsigned struct_wsdisplayio_bus_id_sz;
+extern unsigned struct_wsdisplayio_edid_info_sz;
+extern unsigned struct_wsdisplayio_fbinfo_sz;
+extern unsigned struct_wskbd_bell_data_sz;
+extern unsigned struct_wskbd_keyrepeat_data_sz;
+extern unsigned struct_wskbd_map_data_sz;
+extern unsigned struct_wskbd_scroll_data_sz;
+extern unsigned struct_wsmouse_calibcoords_sz;
+extern unsigned struct_wsmouse_id_sz;
+extern unsigned struct_wsmouse_repeat_sz;
+extern unsigned struct_wsmux_device_list_sz;
+extern unsigned struct_wsmux_device_sz;
+extern unsigned struct_xd_iocmd_sz;
+
+extern unsigned struct_scsireq_sz;
+extern unsigned struct_tone_sz;
+extern unsigned union_twe_statrequest_sz;
+extern unsigned struct_usb_device_descriptor_sz;
+extern unsigned struct_vt_mode_sz;
+extern unsigned struct__old_mixer_info_sz;
+extern unsigned struct__agp_allocate_sz;
+extern unsigned struct__agp_bind_sz;
+extern unsigned struct__agp_info_sz;
+extern unsigned struct__agp_setup_sz;
+extern unsigned struct__agp_unbind_sz;
+extern unsigned struct_atareq_sz;
+extern unsigned struct_cpustate_sz;
+extern unsigned struct_dmx_caps_sz;
+extern unsigned enum_dmx_source_sz;
+extern unsigned union_dvd_authinfo_sz;
+extern unsigned union_dvd_struct_sz;
+extern unsigned enum_v4l2_priority_sz;
+extern unsigned struct_envsys_basic_info_sz;
+extern unsigned struct_envsys_tre_data_sz;
+extern unsigned enum_fe_sec_mini_cmd_sz;
+extern unsigned enum_fe_sec_tone_mode_sz;
+extern unsigned enum_fe_sec_voltage_sz;
+extern unsigned enum_fe_status_sz;
+extern unsigned struct_gdt_ctrt_sz;
+extern unsigned struct_gdt_event_sz;
+extern unsigned struct_gdt_osv_sz;
+extern unsigned struct_gdt_rescan_sz;
+extern unsigned struct_gdt_statist_sz;
+extern unsigned struct_gdt_ucmd_sz;
+extern unsigned struct_iscsi_conn_status_parameters_sz;
+extern unsigned struct_iscsi_get_version_parameters_sz;
+extern unsigned struct_iscsi_iocommand_parameters_sz;
+extern unsigned struct_iscsi_login_parameters_sz;
+extern unsigned struct_iscsi_logout_parameters_sz;
+extern unsigned struct_iscsi_register_event_parameters_sz;
+extern unsigned struct_iscsi_remove_parameters_sz;
+extern unsigned struct_iscsi_send_targets_parameters_sz;
+extern unsigned struct_iscsi_set_node_name_parameters_sz;
+extern unsigned struct_iscsi_wait_event_parameters_sz;
+extern unsigned struct_isp_stats_sz;
+extern unsigned struct_lsenable_sz;
+extern unsigned struct_lsdisable_sz;
+extern unsigned struct_audio_format_query_sz;
+extern unsigned struct_mixer_ctrl_sz;
+extern unsigned struct_mixer_devinfo_sz;
+extern unsigned struct_mpu_command_rec_sz;
+extern unsigned struct_rndstat_sz;
+extern unsigned struct_rndstat_name_sz;
+extern unsigned struct_rndctl_sz;
+extern unsigned struct_rnddata_sz;
+extern unsigned struct_rndpoolstat_sz;
+extern unsigned struct_rndstat_est_sz;
+extern unsigned struct_rndstat_est_name_sz;
+extern unsigned struct_pps_params_sz;
+extern unsigned struct_pps_info_sz;
+extern unsigned struct_mixer_info_sz;
+extern unsigned struct_RF_SparetWait_sz;
+extern unsigned struct_RF_ComponentLabel_sz;
+extern unsigned struct_RF_SingleComponent_sz;
+extern unsigned struct_RF_ProgressInfo_sz;
+extern unsigned struct_nvlist_ref_sz;
+extern unsigned struct_StringList_sz;
+
+
+// A special value to mark ioctls that are not present on the target platform,
+// when this cannot be determined without including any system headers.
+extern const unsigned IOCTL_NOT_PRESENT;
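+// The intended use (a sketch of the idea, not the actual table code) is
+// that generic ioctl tables can list every IOCTL_* name unconditionally
+// and skip, at runtime, any entry whose value is IOCTL_NOT_PRESENT:
+//   if (IOCTL_FE_GET_INFO != IOCTL_NOT_PRESENT)
+//     /* register/describe the request */;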
+
+
+extern unsigned IOCTL_AFM_ADDFMAP;
+extern unsigned IOCTL_AFM_DELFMAP;
+extern unsigned IOCTL_AFM_CLEANFMAP;
+extern unsigned IOCTL_AFM_GETFMAP;
+extern unsigned IOCTL_ALTQGTYPE;
+extern unsigned IOCTL_ALTQTBRSET;
+extern unsigned IOCTL_ALTQTBRGET;
+extern unsigned IOCTL_BLUE_IF_ATTACH;
+extern unsigned IOCTL_BLUE_IF_DETACH;
+extern unsigned IOCTL_BLUE_ENABLE;
+extern unsigned IOCTL_BLUE_DISABLE;
+extern unsigned IOCTL_BLUE_CONFIG;
+extern unsigned IOCTL_BLUE_GETSTATS;
+extern unsigned IOCTL_CBQ_IF_ATTACH;
+extern unsigned IOCTL_CBQ_IF_DETACH;
+extern unsigned IOCTL_CBQ_ENABLE;
+extern unsigned IOCTL_CBQ_DISABLE;
+extern unsigned IOCTL_CBQ_CLEAR_HIERARCHY;
+extern unsigned IOCTL_CBQ_ADD_CLASS;
+extern unsigned IOCTL_CBQ_DEL_CLASS;
+extern unsigned IOCTL_CBQ_MODIFY_CLASS;
+extern unsigned IOCTL_CBQ_ADD_FILTER;
+extern unsigned IOCTL_CBQ_DEL_FILTER;
+extern unsigned IOCTL_CBQ_GETSTATS;
+extern unsigned IOCTL_CDNR_IF_ATTACH;
+extern unsigned IOCTL_CDNR_IF_DETACH;
+extern unsigned IOCTL_CDNR_ENABLE;
+extern unsigned IOCTL_CDNR_DISABLE;
+extern unsigned IOCTL_CDNR_ADD_FILTER;
+extern unsigned IOCTL_CDNR_DEL_FILTER;
+extern unsigned IOCTL_CDNR_GETSTATS;
+extern unsigned IOCTL_CDNR_ADD_ELEM;
+extern unsigned IOCTL_CDNR_DEL_ELEM;
+extern unsigned IOCTL_CDNR_ADD_TBM;
+extern unsigned IOCTL_CDNR_MOD_TBM;
+extern unsigned IOCTL_CDNR_TBM_STATS;
+extern unsigned IOCTL_CDNR_ADD_TCM;
+extern unsigned IOCTL_CDNR_MOD_TCM;
+extern unsigned IOCTL_CDNR_TCM_STATS;
+extern unsigned IOCTL_CDNR_ADD_TSW;
+extern unsigned IOCTL_CDNR_MOD_TSW;
+extern unsigned IOCTL_FIFOQ_IF_ATTACH;
+extern unsigned IOCTL_FIFOQ_IF_DETACH;
+extern unsigned IOCTL_FIFOQ_ENABLE;
+extern unsigned IOCTL_FIFOQ_DISABLE;
+extern unsigned IOCTL_FIFOQ_CONFIG;
+extern unsigned IOCTL_FIFOQ_GETSTATS;
+extern unsigned IOCTL_HFSC_IF_ATTACH;
+extern unsigned IOCTL_HFSC_IF_DETACH;
+extern unsigned IOCTL_HFSC_ENABLE;
+extern unsigned IOCTL_HFSC_DISABLE;
+extern unsigned IOCTL_HFSC_CLEAR_HIERARCHY;
+extern unsigned IOCTL_HFSC_ADD_CLASS;
+extern unsigned IOCTL_HFSC_DEL_CLASS;
+extern unsigned IOCTL_HFSC_MOD_CLASS;
+extern unsigned IOCTL_HFSC_ADD_FILTER;
+extern unsigned IOCTL_HFSC_DEL_FILTER;
+extern unsigned IOCTL_HFSC_GETSTATS;
+extern unsigned IOCTL_JOBS_IF_ATTACH;
+extern unsigned IOCTL_JOBS_IF_DETACH;
+extern unsigned IOCTL_JOBS_ENABLE;
+extern unsigned IOCTL_JOBS_DISABLE;
+extern unsigned IOCTL_JOBS_CLEAR;
+extern unsigned IOCTL_JOBS_ADD_CLASS;
+extern unsigned IOCTL_JOBS_DEL_CLASS;
+extern unsigned IOCTL_JOBS_MOD_CLASS;
+extern unsigned IOCTL_JOBS_ADD_FILTER;
+extern unsigned IOCTL_JOBS_DEL_FILTER;
+extern unsigned IOCTL_JOBS_GETSTATS;
+extern unsigned IOCTL_PRIQ_IF_ATTACH;
+extern unsigned IOCTL_PRIQ_IF_DETACH;
+extern unsigned IOCTL_PRIQ_ENABLE;
+extern unsigned IOCTL_PRIQ_DISABLE;
+extern unsigned IOCTL_PRIQ_CLEAR;
+extern unsigned IOCTL_PRIQ_ADD_CLASS;
+extern unsigned IOCTL_PRIQ_DEL_CLASS;
+extern unsigned IOCTL_PRIQ_MOD_CLASS;
+extern unsigned IOCTL_PRIQ_ADD_FILTER;
+extern unsigned IOCTL_PRIQ_DEL_FILTER;
+extern unsigned IOCTL_PRIQ_GETSTATS;
+extern unsigned IOCTL_RED_IF_ATTACH;
+extern unsigned IOCTL_RED_IF_DETACH;
+extern unsigned IOCTL_RED_ENABLE;
+extern unsigned IOCTL_RED_DISABLE;
+extern unsigned IOCTL_RED_CONFIG;
+extern unsigned IOCTL_RED_GETSTATS;
+extern unsigned IOCTL_RED_SETDEFAULTS;
+extern unsigned IOCTL_RIO_IF_ATTACH;
+extern unsigned IOCTL_RIO_IF_DETACH;
+extern unsigned IOCTL_RIO_ENABLE;
+extern unsigned IOCTL_RIO_DISABLE;
+extern unsigned IOCTL_RIO_CONFIG;
+extern unsigned IOCTL_RIO_GETSTATS;
+extern unsigned IOCTL_RIO_SETDEFAULTS;
+extern unsigned IOCTL_WFQ_IF_ATTACH;
+extern unsigned IOCTL_WFQ_IF_DETACH;
+extern unsigned IOCTL_WFQ_ENABLE;
+extern unsigned IOCTL_WFQ_DISABLE;
+extern unsigned IOCTL_WFQ_CONFIG;
+extern unsigned IOCTL_WFQ_GET_STATS;
+extern unsigned IOCTL_WFQ_GET_QID;
+extern unsigned IOCTL_WFQ_SET_WEIGHT;
+extern unsigned IOCTL_CRIOGET;
+extern unsigned IOCTL_CIOCFSESSION;
+extern unsigned IOCTL_CIOCKEY;
+extern unsigned IOCTL_CIOCNFKEYM;
+extern unsigned IOCTL_CIOCNFSESSION;
+extern unsigned IOCTL_CIOCNCRYPTRETM;
+extern unsigned IOCTL_CIOCNCRYPTRET;
+extern unsigned IOCTL_CIOCGSESSION;
+extern unsigned IOCTL_CIOCNGSESSION;
+extern unsigned IOCTL_CIOCCRYPT;
+extern unsigned IOCTL_CIOCNCRYPTM;
+extern unsigned IOCTL_CIOCASYMFEAT;
+extern unsigned IOCTL_APM_IOC_REJECT;
+extern unsigned IOCTL_APM_IOC_STANDBY;
+extern unsigned IOCTL_APM_IOC_SUSPEND;
+extern unsigned IOCTL_OAPM_IOC_GETPOWER;
+extern unsigned IOCTL_APM_IOC_GETPOWER;
+extern unsigned IOCTL_APM_IOC_NEXTEVENT;
+extern unsigned IOCTL_APM_IOC_DEV_CTL;
+extern unsigned IOCTL_NETBSD_DM_IOCTL;
+extern unsigned IOCTL_DMIO_SETFUNC;
+extern unsigned IOCTL_DMX_START;
+extern unsigned IOCTL_DMX_STOP;
+extern unsigned IOCTL_DMX_SET_FILTER;
+extern unsigned IOCTL_DMX_SET_PES_FILTER;
+extern unsigned IOCTL_DMX_SET_BUFFER_SIZE;
+extern unsigned IOCTL_DMX_GET_STC;
+extern unsigned IOCTL_DMX_ADD_PID;
+extern unsigned IOCTL_DMX_REMOVE_PID;
+extern unsigned IOCTL_DMX_GET_CAPS;
+extern unsigned IOCTL_DMX_SET_SOURCE;
+extern unsigned IOCTL_FE_READ_STATUS;
+extern unsigned IOCTL_FE_READ_BER;
+extern unsigned IOCTL_FE_READ_SNR;
+extern unsigned IOCTL_FE_READ_SIGNAL_STRENGTH;
+extern unsigned IOCTL_FE_READ_UNCORRECTED_BLOCKS;
+extern unsigned IOCTL_FE_SET_FRONTEND;
+extern unsigned IOCTL_FE_GET_FRONTEND;
+extern unsigned IOCTL_FE_GET_EVENT;
+extern unsigned IOCTL_FE_GET_INFO;
+extern unsigned IOCTL_FE_DISEQC_RESET_OVERLOAD;
+extern unsigned IOCTL_FE_DISEQC_SEND_MASTER_CMD;
+extern unsigned IOCTL_FE_DISEQC_RECV_SLAVE_REPLY;
+extern unsigned IOCTL_FE_DISEQC_SEND_BURST;
+extern unsigned IOCTL_FE_SET_TONE;
+extern unsigned IOCTL_FE_SET_VOLTAGE;
+extern unsigned IOCTL_FE_ENABLE_HIGH_LNB_VOLTAGE;
+extern unsigned IOCTL_FE_SET_FRONTEND_TUNE_MODE;
+extern unsigned IOCTL_FE_DISHNETWORK_SEND_LEGACY_CMD;
+extern unsigned IOCTL_FILEMON_SET_FD;
+extern unsigned IOCTL_FILEMON_SET_PID;
+extern unsigned IOCTL_HDAUDIO_FGRP_INFO;
+extern unsigned IOCTL_HDAUDIO_FGRP_GETCONFIG;
+extern unsigned IOCTL_HDAUDIO_FGRP_SETCONFIG;
+extern unsigned IOCTL_HDAUDIO_FGRP_WIDGET_INFO;
+extern unsigned IOCTL_HDAUDIO_FGRP_CODEC_INFO;
+extern unsigned IOCTL_HDAUDIO_AFG_WIDGET_INFO;
+extern unsigned IOCTL_HDAUDIO_AFG_CODEC_INFO;
+extern unsigned IOCTL_CEC_GET_PHYS_ADDR;
+extern unsigned IOCTL_CEC_GET_LOG_ADDRS;
+extern unsigned IOCTL_CEC_SET_LOG_ADDRS;
+extern unsigned IOCTL_CEC_GET_VENDOR_ID;
+extern unsigned IOCTL_HPCFBIO_GCONF;
+extern unsigned IOCTL_HPCFBIO_SCONF;
+extern unsigned IOCTL_HPCFBIO_GDSPCONF;
+extern unsigned IOCTL_HPCFBIO_SDSPCONF;
+extern unsigned IOCTL_HPCFBIO_GOP;
+extern unsigned IOCTL_HPCFBIO_SOP;
+extern unsigned IOCTL_IOPIOCPT;
+extern unsigned IOCTL_IOPIOCGLCT;
+extern unsigned IOCTL_IOPIOCGSTATUS;
+extern unsigned IOCTL_IOPIOCRECONFIG;
+extern unsigned IOCTL_IOPIOCGTIDMAP;
+extern unsigned IOCTL_SIOCGATHSTATS;
+extern unsigned IOCTL_SIOCGATHDIAG;
+extern unsigned IOCTL_METEORCAPTUR;
+extern unsigned IOCTL_METEORCAPFRM;
+extern unsigned IOCTL_METEORSETGEO;
+extern unsigned IOCTL_METEORGETGEO;
+extern unsigned IOCTL_METEORSTATUS;
+extern unsigned IOCTL_METEORSHUE;
+extern unsigned IOCTL_METEORGHUE;
+extern unsigned IOCTL_METEORSFMT;
+extern unsigned IOCTL_METEORGFMT;
+extern unsigned IOCTL_METEORSINPUT;
+extern unsigned IOCTL_METEORGINPUT;
+extern unsigned IOCTL_METEORSCHCV;
+extern unsigned IOCTL_METEORGCHCV;
+extern unsigned IOCTL_METEORSCOUNT;
+extern unsigned IOCTL_METEORGCOUNT;
+extern unsigned IOCTL_METEORSFPS;
+extern unsigned IOCTL_METEORGFPS;
+extern unsigned IOCTL_METEORSSIGNAL;
+extern unsigned IOCTL_METEORGSIGNAL;
+extern unsigned IOCTL_METEORSVIDEO;
+extern unsigned IOCTL_METEORGVIDEO;
+extern unsigned IOCTL_METEORSBRIG;
+extern unsigned IOCTL_METEORGBRIG;
+extern unsigned IOCTL_METEORSCSAT;
+extern unsigned IOCTL_METEORGCSAT;
+extern unsigned IOCTL_METEORSCONT;
+extern unsigned IOCTL_METEORGCONT;
+extern unsigned IOCTL_METEORSHWS;
+extern unsigned IOCTL_METEORGHWS;
+extern unsigned IOCTL_METEORSVWS;
+extern unsigned IOCTL_METEORGVWS;
+extern unsigned IOCTL_METEORSTS;
+extern unsigned IOCTL_METEORGTS;
+extern unsigned IOCTL_TVTUNER_SETCHNL;
+extern unsigned IOCTL_TVTUNER_GETCHNL;
+extern unsigned IOCTL_TVTUNER_SETTYPE;
+extern unsigned IOCTL_TVTUNER_GETTYPE;
+extern unsigned IOCTL_TVTUNER_GETSTATUS;
+extern unsigned IOCTL_TVTUNER_SETFREQ;
+extern unsigned IOCTL_TVTUNER_GETFREQ;
+extern unsigned IOCTL_TVTUNER_SETAFC;
+extern unsigned IOCTL_TVTUNER_GETAFC;
+extern unsigned IOCTL_RADIO_SETMODE;
+extern unsigned IOCTL_RADIO_GETMODE;
+extern unsigned IOCTL_RADIO_SETFREQ;
+extern unsigned IOCTL_RADIO_GETFREQ;
+extern unsigned IOCTL_METEORSACTPIXFMT;
+extern unsigned IOCTL_METEORGACTPIXFMT;
+extern unsigned IOCTL_METEORGSUPPIXFMT;
+extern unsigned IOCTL_TVTUNER_GETCHNLSET;
+extern unsigned IOCTL_REMOTE_GETKEY;
+extern unsigned IOCTL_GDT_IOCTL_GENERAL;
+extern unsigned IOCTL_GDT_IOCTL_DRVERS;
+extern unsigned IOCTL_GDT_IOCTL_CTRTYPE;
+extern unsigned IOCTL_GDT_IOCTL_OSVERS;
+extern unsigned IOCTL_GDT_IOCTL_CTRCNT;
+extern unsigned IOCTL_GDT_IOCTL_EVENT;
+extern unsigned IOCTL_GDT_IOCTL_STATIST;
+extern unsigned IOCTL_GDT_IOCTL_RESCAN;
+extern unsigned IOCTL_ISP_SDBLEV;
+extern unsigned IOCTL_ISP_RESETHBA;
+extern unsigned IOCTL_ISP_RESCAN;
+extern unsigned IOCTL_ISP_SETROLE;
+extern unsigned IOCTL_ISP_GETROLE;
+extern unsigned IOCTL_ISP_GET_STATS;
+extern unsigned IOCTL_ISP_CLR_STATS;
+extern unsigned IOCTL_ISP_FC_LIP;
+extern unsigned IOCTL_ISP_FC_GETDINFO;
+extern unsigned IOCTL_ISP_GET_FW_CRASH_DUMP;
+extern unsigned IOCTL_ISP_FORCE_CRASH_DUMP;
+extern unsigned IOCTL_ISP_FC_GETHINFO;
+extern unsigned IOCTL_ISP_TSK_MGMT;
+extern unsigned IOCTL_ISP_FC_GETDLIST;
+extern unsigned IOCTL_MLXD_STATUS;
+extern unsigned IOCTL_MLXD_CHECKASYNC;
+extern unsigned IOCTL_MLXD_DETACH;
+extern unsigned IOCTL_MLX_RESCAN_DRIVES;
+extern unsigned IOCTL_MLX_PAUSE_CHANNEL;
+extern unsigned IOCTL_MLX_COMMAND;
+extern unsigned IOCTL_MLX_REBUILDASYNC;
+extern unsigned IOCTL_MLX_REBUILDSTAT;
+extern unsigned IOCTL_MLX_GET_SYSDRIVE;
+extern unsigned IOCTL_MLX_GET_CINFO;
+extern unsigned IOCTL_NVME_PASSTHROUGH_CMD;
+extern unsigned IOCTL_FWCFGIO_SET_INDEX;
+extern unsigned IOCTL_IRDA_RESET_PARAMS;
+extern unsigned IOCTL_IRDA_SET_PARAMS;
+extern unsigned IOCTL_IRDA_GET_SPEEDMASK;
+extern unsigned IOCTL_IRDA_GET_TURNAROUNDMASK;
+extern unsigned IOCTL_IRFRAMETTY_GET_DEVICE;
+extern unsigned IOCTL_IRFRAMETTY_GET_DONGLE;
+extern unsigned IOCTL_IRFRAMETTY_SET_DONGLE;
+extern unsigned IOCTL_ISV_CMD;
+extern unsigned IOCTL_WTQICMD;
+extern unsigned IOCTL_ISCSI_GET_VERSION;
+extern unsigned IOCTL_ISCSI_LOGIN;
+extern unsigned IOCTL_ISCSI_LOGOUT;
+extern unsigned IOCTL_ISCSI_ADD_CONNECTION;
+extern unsigned IOCTL_ISCSI_RESTORE_CONNECTION;
+extern unsigned IOCTL_ISCSI_REMOVE_CONNECTION;
+extern unsigned IOCTL_ISCSI_CONNECTION_STATUS;
+extern unsigned IOCTL_ISCSI_SEND_TARGETS;
+extern unsigned IOCTL_ISCSI_SET_NODE_NAME;
+extern unsigned IOCTL_ISCSI_IO_COMMAND;
+extern unsigned IOCTL_ISCSI_REGISTER_EVENT;
+extern unsigned IOCTL_ISCSI_DEREGISTER_EVENT;
+extern unsigned IOCTL_ISCSI_WAIT_EVENT;
+extern unsigned IOCTL_ISCSI_POLL_EVENT;
+extern unsigned IOCTL_OFIOCGET;
+extern unsigned IOCTL_OFIOCSET;
+extern unsigned IOCTL_OFIOCNEXTPROP;
+extern unsigned IOCTL_OFIOCGETOPTNODE;
+extern unsigned IOCTL_OFIOCGETNEXT;
+extern unsigned IOCTL_OFIOCGETCHILD;
+extern unsigned IOCTL_OFIOCFINDDEVICE;
+extern unsigned IOCTL_AMR_IO_VERSION;
+extern unsigned IOCTL_AMR_IO_COMMAND;
+extern unsigned IOCTL_MLYIO_COMMAND;
+extern unsigned IOCTL_MLYIO_HEALTH;
+extern unsigned IOCTL_PCI_IOC_CFGREAD;
+extern unsigned IOCTL_PCI_IOC_CFGWRITE;
+extern unsigned IOCTL_PCI_IOC_BDF_CFGREAD;
+extern unsigned IOCTL_PCI_IOC_BDF_CFGWRITE;
+extern unsigned IOCTL_PCI_IOC_BUSINFO;
+extern unsigned IOCTL_PCI_IOC_DRVNAME;
+extern unsigned IOCTL_PCI_IOC_DRVNAMEONBUS;
+extern unsigned IOCTL_TWEIO_COMMAND;
+extern unsigned IOCTL_TWEIO_STATS;
+extern unsigned IOCTL_TWEIO_AEN_POLL;
+extern unsigned IOCTL_TWEIO_AEN_WAIT;
+extern unsigned IOCTL_TWEIO_SET_PARAM;
+extern unsigned IOCTL_TWEIO_GET_PARAM;
+extern unsigned IOCTL_TWEIO_RESET;
+extern unsigned IOCTL_TWEIO_ADD_UNIT;
+extern unsigned IOCTL_TWEIO_DEL_UNIT;
+extern unsigned IOCTL_SIOCSCNWDOMAIN;
+extern unsigned IOCTL_SIOCGCNWDOMAIN;
+extern unsigned IOCTL_SIOCSCNWKEY;
+extern unsigned IOCTL_SIOCGCNWSTATUS;
+extern unsigned IOCTL_SIOCGCNWSTATS;
+extern unsigned IOCTL_SIOCGCNWTRAIL;
+extern unsigned IOCTL_SIOCGRAYSIGLEV;
+extern unsigned IOCTL_RAIDFRAME_SHUTDOWN;
+extern unsigned IOCTL_RAIDFRAME_TUR;
+extern unsigned IOCTL_RAIDFRAME_FAIL_DISK;
+extern unsigned IOCTL_RAIDFRAME_CHECK_RECON_STATUS;
+extern unsigned IOCTL_RAIDFRAME_REWRITEPARITY;
+extern unsigned IOCTL_RAIDFRAME_COPYBACK;
+extern unsigned IOCTL_RAIDFRAME_SPARET_WAIT;
+extern unsigned IOCTL_RAIDFRAME_SEND_SPARET;
+extern unsigned IOCTL_RAIDFRAME_ABORT_SPARET_WAIT;
+extern unsigned IOCTL_RAIDFRAME_START_ATRACE;
+extern unsigned IOCTL_RAIDFRAME_STOP_ATRACE;
+extern unsigned IOCTL_RAIDFRAME_GET_SIZE;
+extern unsigned IOCTL_RAIDFRAME_RESET_ACCTOTALS;
+extern unsigned IOCTL_RAIDFRAME_KEEP_ACCTOTALS;
+extern unsigned IOCTL_RAIDFRAME_GET_COMPONENT_LABEL;
+extern unsigned IOCTL_RAIDFRAME_SET_COMPONENT_LABEL;
+extern unsigned IOCTL_RAIDFRAME_INIT_LABELS;
+extern unsigned IOCTL_RAIDFRAME_ADD_HOT_SPARE;
+extern unsigned IOCTL_RAIDFRAME_REMOVE_HOT_SPARE;
+extern unsigned IOCTL_RAIDFRAME_REBUILD_IN_PLACE;
+extern unsigned IOCTL_RAIDFRAME_CHECK_PARITY;
+extern unsigned IOCTL_RAIDFRAME_CHECK_PARITYREWRITE_STATUS;
+extern unsigned IOCTL_RAIDFRAME_CHECK_COPYBACK_STATUS;
+extern unsigned IOCTL_RAIDFRAME_SET_AUTOCONFIG;
+extern unsigned IOCTL_RAIDFRAME_SET_ROOT;
+extern unsigned IOCTL_RAIDFRAME_DELETE_COMPONENT;
+extern unsigned IOCTL_RAIDFRAME_INCORPORATE_HOT_SPARE;
+extern unsigned IOCTL_RAIDFRAME_CHECK_RECON_STATUS_EXT;
+extern unsigned IOCTL_RAIDFRAME_CHECK_PARITYREWRITE_STATUS_EXT;
+extern unsigned IOCTL_RAIDFRAME_CHECK_COPYBACK_STATUS_EXT;
+extern unsigned IOCTL_RAIDFRAME_CONFIGURE;
+extern unsigned IOCTL_RAIDFRAME_GET_INFO;
+extern unsigned IOCTL_RAIDFRAME_PARITYMAP_STATUS;
+extern unsigned IOCTL_RAIDFRAME_PARITYMAP_GET_DISABLE;
+extern unsigned IOCTL_RAIDFRAME_PARITYMAP_SET_DISABLE;
+extern unsigned IOCTL_RAIDFRAME_PARITYMAP_SET_PARAMS;
+extern unsigned IOCTL_RAIDFRAME_SET_LAST_UNIT;
+extern unsigned IOCTL_MBPPIOCSPARAM;
+extern unsigned IOCTL_MBPPIOCGPARAM;
+extern unsigned IOCTL_MBPPIOCGSTAT;
+extern unsigned IOCTL_SESIOC_GETNOBJ;
+extern unsigned IOCTL_SESIOC_GETOBJMAP;
+extern unsigned IOCTL_SESIOC_GETENCSTAT;
+extern unsigned IOCTL_SESIOC_SETENCSTAT;
+extern unsigned IOCTL_SESIOC_GETOBJSTAT;
+extern unsigned IOCTL_SESIOC_SETOBJSTAT;
+extern unsigned IOCTL_SESIOC_GETTEXT;
+extern unsigned IOCTL_SESIOC_INIT;
+extern unsigned IOCTL_SUN_DKIOCGGEOM;
+extern unsigned IOCTL_SUN_DKIOCINFO;
+extern unsigned IOCTL_SUN_DKIOCGPART;
+extern unsigned IOCTL_FBIOGTYPE;
+extern unsigned IOCTL_FBIOPUTCMAP;
+extern unsigned IOCTL_FBIOGETCMAP;
+extern unsigned IOCTL_FBIOGATTR;
+extern unsigned IOCTL_FBIOSVIDEO;
+extern unsigned IOCTL_FBIOGVIDEO;
+extern unsigned IOCTL_FBIOSCURSOR;
+extern unsigned IOCTL_FBIOGCURSOR;
+extern unsigned IOCTL_FBIOSCURPOS;
+extern unsigned IOCTL_FBIOGCURPOS;
+extern unsigned IOCTL_FBIOGCURMAX;
+extern unsigned IOCTL_KIOCTRANS;
+extern unsigned IOCTL_KIOCSETKEY;
+extern unsigned IOCTL_KIOCGETKEY;
+extern unsigned IOCTL_KIOCGTRANS;
+extern unsigned IOCTL_KIOCCMD;
+extern unsigned IOCTL_KIOCTYPE;
+extern unsigned IOCTL_KIOCSDIRECT;
+extern unsigned IOCTL_KIOCSKEY;
+extern unsigned IOCTL_KIOCGKEY;
+extern unsigned IOCTL_KIOCSLED;
+extern unsigned IOCTL_KIOCGLED;
+extern unsigned IOCTL_KIOCLAYOUT;
+extern unsigned IOCTL_VUIDSFORMAT;
+extern unsigned IOCTL_VUIDGFORMAT;
+extern unsigned IOCTL_STICIO_GXINFO;
+extern unsigned IOCTL_STICIO_RESET;
+extern unsigned IOCTL_STICIO_STARTQ;
+extern unsigned IOCTL_STICIO_STOPQ;
+extern unsigned IOCTL_UKYOPON_IDENTIFY;
+extern unsigned IOCTL_URIO_SEND_COMMAND;
+extern unsigned IOCTL_URIO_RECV_COMMAND;
+extern unsigned IOCTL_USB_REQUEST;
+extern unsigned IOCTL_USB_SETDEBUG;
+extern unsigned IOCTL_USB_DISCOVER;
+extern unsigned IOCTL_USB_DEVICEINFO;
+extern unsigned IOCTL_USB_DEVICEINFO_OLD;
+extern unsigned IOCTL_USB_DEVICESTATS;
+extern unsigned IOCTL_USB_GET_REPORT_DESC;
+extern unsigned IOCTL_USB_SET_IMMED;
+extern unsigned IOCTL_USB_GET_REPORT;
+extern unsigned IOCTL_USB_SET_REPORT;
+extern unsigned IOCTL_USB_GET_REPORT_ID;
+extern unsigned IOCTL_USB_GET_CONFIG;
+extern unsigned IOCTL_USB_SET_CONFIG;
+extern unsigned IOCTL_USB_GET_ALTINTERFACE;
+extern unsigned IOCTL_USB_SET_ALTINTERFACE;
+extern unsigned IOCTL_USB_GET_NO_ALT;
+extern unsigned IOCTL_USB_GET_DEVICE_DESC;
+extern unsigned IOCTL_USB_GET_CONFIG_DESC;
+extern unsigned IOCTL_USB_GET_INTERFACE_DESC;
+extern unsigned IOCTL_USB_GET_ENDPOINT_DESC;
+extern unsigned IOCTL_USB_GET_FULL_DESC;
+extern unsigned IOCTL_USB_GET_STRING_DESC;
+extern unsigned IOCTL_USB_DO_REQUEST;
+extern unsigned IOCTL_USB_GET_DEVICEINFO;
+extern unsigned IOCTL_USB_GET_DEVICEINFO_OLD;
+extern unsigned IOCTL_USB_SET_SHORT_XFER;
+extern unsigned IOCTL_USB_SET_TIMEOUT;
+extern unsigned IOCTL_USB_SET_BULK_RA;
+extern unsigned IOCTL_USB_SET_BULK_WB;
+extern unsigned IOCTL_USB_SET_BULK_RA_OPT;
+extern unsigned IOCTL_USB_SET_BULK_WB_OPT;
+extern unsigned IOCTL_USB_GET_CM_OVER_DATA;
+extern unsigned IOCTL_USB_SET_CM_OVER_DATA;
+extern unsigned IOCTL_UTOPPYIOTURBO;
+extern unsigned IOCTL_UTOPPYIOCANCEL;
+extern unsigned IOCTL_UTOPPYIOREBOOT;
+extern unsigned IOCTL_UTOPPYIOSTATS;
+extern unsigned IOCTL_UTOPPYIORENAME;
+extern unsigned IOCTL_UTOPPYIOMKDIR;
+extern unsigned IOCTL_UTOPPYIODELETE;
+extern unsigned IOCTL_UTOPPYIOREADDIR;
+extern unsigned IOCTL_UTOPPYIOREADFILE;
+extern unsigned IOCTL_UTOPPYIOWRITEFILE;
+extern unsigned IOCTL_DIOSXDCMD;
+extern unsigned IOCTL_VT_OPENQRY;
+extern unsigned IOCTL_VT_SETMODE;
+extern unsigned IOCTL_VT_GETMODE;
+extern unsigned IOCTL_VT_RELDISP;
+extern unsigned IOCTL_VT_ACTIVATE;
+extern unsigned IOCTL_VT_WAITACTIVE;
+extern unsigned IOCTL_VT_GETACTIVE;
+extern unsigned IOCTL_VT_GETSTATE;
+extern unsigned IOCTL_KDGETKBENT;
+extern unsigned IOCTL_KDGKBMODE;
+extern unsigned IOCTL_KDSKBMODE;
+extern unsigned IOCTL_KDMKTONE;
+extern unsigned IOCTL_KDSETMODE;
+extern unsigned IOCTL_KDENABIO;
+extern unsigned IOCTL_KDDISABIO;
+extern unsigned IOCTL_KDGKBTYPE;
+extern unsigned IOCTL_KDGETLED;
+extern unsigned IOCTL_KDSETLED;
+extern unsigned IOCTL_KDSETRAD;
+extern unsigned IOCTL_VGAPCVTID;
+extern unsigned IOCTL_CONS_GETVERS;
+extern unsigned IOCTL_WSKBDIO_GTYPE;
+extern unsigned IOCTL_WSKBDIO_BELL;
+extern unsigned IOCTL_WSKBDIO_COMPLEXBELL;
+extern unsigned IOCTL_WSKBDIO_SETBELL;
+extern unsigned IOCTL_WSKBDIO_GETBELL;
+extern unsigned IOCTL_WSKBDIO_SETDEFAULTBELL;
+extern unsigned IOCTL_WSKBDIO_GETDEFAULTBELL;
+extern unsigned IOCTL_WSKBDIO_SETKEYREPEAT;
+extern unsigned IOCTL_WSKBDIO_GETKEYREPEAT;
+extern unsigned IOCTL_WSKBDIO_SETDEFAULTKEYREPEAT;
+extern unsigned IOCTL_WSKBDIO_GETDEFAULTKEYREPEAT;
+extern unsigned IOCTL_WSKBDIO_SETLEDS;
+extern unsigned IOCTL_WSKBDIO_GETLEDS;
+extern unsigned IOCTL_WSKBDIO_GETMAP;
+extern unsigned IOCTL_WSKBDIO_SETMAP;
+extern unsigned IOCTL_WSKBDIO_GETENCODING;
+extern unsigned IOCTL_WSKBDIO_SETENCODING;
+extern unsigned IOCTL_WSKBDIO_SETMODE;
+extern unsigned IOCTL_WSKBDIO_GETMODE;
+extern unsigned IOCTL_WSKBDIO_SETKEYCLICK;
+extern unsigned IOCTL_WSKBDIO_GETKEYCLICK;
+extern unsigned IOCTL_WSKBDIO_GETSCROLL;
+extern unsigned IOCTL_WSKBDIO_SETSCROLL;
+extern unsigned IOCTL_WSKBDIO_SETVERSION;
+extern unsigned IOCTL_WSMOUSEIO_GTYPE;
+extern unsigned IOCTL_WSMOUSEIO_SRES;
+extern unsigned IOCTL_WSMOUSEIO_SSCALE;
+extern unsigned IOCTL_WSMOUSEIO_SRATE;
+extern unsigned IOCTL_WSMOUSEIO_SCALIBCOORDS;
+extern unsigned IOCTL_WSMOUSEIO_GCALIBCOORDS;
+extern unsigned IOCTL_WSMOUSEIO_GETID;
+extern unsigned IOCTL_WSMOUSEIO_GETREPEAT;
+extern unsigned IOCTL_WSMOUSEIO_SETREPEAT;
+extern unsigned IOCTL_WSMOUSEIO_SETVERSION;
+extern unsigned IOCTL_WSDISPLAYIO_GTYPE;
+extern unsigned IOCTL_WSDISPLAYIO_GINFO;
+extern unsigned IOCTL_WSDISPLAYIO_GETCMAP;
+extern unsigned IOCTL_WSDISPLAYIO_PUTCMAP;
+extern unsigned IOCTL_WSDISPLAYIO_GVIDEO;
+extern unsigned IOCTL_WSDISPLAYIO_SVIDEO;
+extern unsigned IOCTL_WSDISPLAYIO_GCURPOS;
+extern unsigned IOCTL_WSDISPLAYIO_SCURPOS;
+extern unsigned IOCTL_WSDISPLAYIO_GCURMAX;
+extern unsigned IOCTL_WSDISPLAYIO_GCURSOR;
+extern unsigned IOCTL_WSDISPLAYIO_SCURSOR;
+extern unsigned IOCTL_WSDISPLAYIO_GMODE;
+extern unsigned IOCTL_WSDISPLAYIO_SMODE;
+extern unsigned IOCTL_WSDISPLAYIO_LDFONT;
+extern unsigned IOCTL_WSDISPLAYIO_ADDSCREEN;
+extern unsigned IOCTL_WSDISPLAYIO_DELSCREEN;
+extern unsigned IOCTL_WSDISPLAYIO_SFONT;
+extern unsigned IOCTL__O_WSDISPLAYIO_SETKEYBOARD;
+extern unsigned IOCTL_WSDISPLAYIO_GETPARAM;
+extern unsigned IOCTL_WSDISPLAYIO_SETPARAM;
+extern unsigned IOCTL_WSDISPLAYIO_GETACTIVESCREEN;
+extern unsigned IOCTL_WSDISPLAYIO_GETWSCHAR;
+extern unsigned IOCTL_WSDISPLAYIO_PUTWSCHAR;
+extern unsigned IOCTL_WSDISPLAYIO_DGSCROLL;
+extern unsigned IOCTL_WSDISPLAYIO_DSSCROLL;
+extern unsigned IOCTL_WSDISPLAYIO_GMSGATTRS;
+extern unsigned IOCTL_WSDISPLAYIO_SMSGATTRS;
+extern unsigned IOCTL_WSDISPLAYIO_GBORDER;
+extern unsigned IOCTL_WSDISPLAYIO_SBORDER;
+extern unsigned IOCTL_WSDISPLAYIO_SSPLASH;
+extern unsigned IOCTL_WSDISPLAYIO_SPROGRESS;
+extern unsigned IOCTL_WSDISPLAYIO_LINEBYTES;
+extern unsigned IOCTL_WSDISPLAYIO_SETVERSION;
+extern unsigned IOCTL_WSMUXIO_ADD_DEVICE;
+extern unsigned IOCTL_WSMUXIO_REMOVE_DEVICE;
+extern unsigned IOCTL_WSMUXIO_LIST_DEVICES;
+extern unsigned IOCTL_WSMUXIO_INJECTEVENT;
+extern unsigned IOCTL_WSDISPLAYIO_GET_BUSID;
+extern unsigned IOCTL_WSDISPLAYIO_GET_EDID;
+extern unsigned IOCTL_WSDISPLAYIO_SET_POLLING;
+extern unsigned IOCTL_WSDISPLAYIO_GET_FBINFO;
+extern unsigned IOCTL_WSDISPLAYIO_DOBLIT;
+extern unsigned IOCTL_WSDISPLAYIO_WAITBLIT;
+extern unsigned IOCTL_BIOCLOCATE;
+extern unsigned IOCTL_BIOCINQ;
+extern unsigned IOCTL_BIOCDISK_NOVOL;
+extern unsigned IOCTL_BIOCDISK;
+extern unsigned IOCTL_BIOCVOL;
+extern unsigned IOCTL_BIOCALARM;
+extern unsigned IOCTL_BIOCBLINK;
+extern unsigned IOCTL_BIOCSETSTATE;
+extern unsigned IOCTL_BIOCVOLOPS;
+extern unsigned IOCTL_MD_GETCONF;
+extern unsigned IOCTL_MD_SETCONF;
+extern unsigned IOCTL_CCDIOCSET;
+extern unsigned IOCTL_CCDIOCCLR;
+extern unsigned IOCTL_CGDIOCSET;
+extern unsigned IOCTL_CGDIOCCLR;
+extern unsigned IOCTL_CGDIOCGET;
+extern unsigned IOCTL_FSSIOCSET;
+extern unsigned IOCTL_FSSIOCGET;
+extern unsigned IOCTL_FSSIOCCLR;
+extern unsigned IOCTL_FSSIOFSET;
+extern unsigned IOCTL_FSSIOFGET;
+extern unsigned IOCTL_BTDEV_ATTACH;
+extern unsigned IOCTL_BTDEV_DETACH;
+extern unsigned IOCTL_BTSCO_GETINFO;
+extern unsigned IOCTL_KTTCP_IO_SEND;
+extern unsigned IOCTL_KTTCP_IO_RECV;
+extern unsigned IOCTL_IOC_LOCKSTAT_GVERSION;
+extern unsigned IOCTL_IOC_LOCKSTAT_ENABLE;
+extern unsigned IOCTL_IOC_LOCKSTAT_DISABLE;
+extern unsigned IOCTL_VNDIOCSET;
+extern unsigned IOCTL_VNDIOCCLR;
+extern unsigned IOCTL_VNDIOCGET;
+extern unsigned IOCTL_SPKRTONE;
+extern unsigned IOCTL_SPKRTUNE;
+extern unsigned IOCTL_SPKRGETVOL;
+extern unsigned IOCTL_SPKRSETVOL;
+#if defined(__x86_64__)
+extern unsigned IOCTL_NVMM_IOC_CAPABILITY;
+extern unsigned IOCTL_NVMM_IOC_MACHINE_CREATE;
+extern unsigned IOCTL_NVMM_IOC_MACHINE_DESTROY;
+extern unsigned IOCTL_NVMM_IOC_MACHINE_CONFIGURE;
+extern unsigned IOCTL_NVMM_IOC_VCPU_CREATE;
+extern unsigned IOCTL_NVMM_IOC_VCPU_DESTROY;
+extern unsigned IOCTL_NVMM_IOC_VCPU_SETSTATE;
+extern unsigned IOCTL_NVMM_IOC_VCPU_GETSTATE;
+extern unsigned IOCTL_NVMM_IOC_VCPU_INJECT;
+extern unsigned IOCTL_NVMM_IOC_VCPU_RUN;
+extern unsigned IOCTL_NVMM_IOC_GPA_MAP;
+extern unsigned IOCTL_NVMM_IOC_GPA_UNMAP;
+extern unsigned IOCTL_NVMM_IOC_HVA_MAP;
+extern unsigned IOCTL_NVMM_IOC_HVA_UNMAP;
+extern unsigned IOCTL_NVMM_IOC_CTL;
+#endif
+extern unsigned IOCTL_AUTOFSREQUEST;
+extern unsigned IOCTL_AUTOFSDONE;
+extern unsigned IOCTL_BIOCGBLEN;
+extern unsigned IOCTL_BIOCSBLEN;
+extern unsigned IOCTL_BIOCSETF;
+extern unsigned IOCTL_BIOCFLUSH;
+extern unsigned IOCTL_BIOCPROMISC;
+extern unsigned IOCTL_BIOCGDLT;
+extern unsigned IOCTL_BIOCGETIF;
+extern unsigned IOCTL_BIOCSETIF;
+extern unsigned IOCTL_BIOCGSTATS;
+extern unsigned IOCTL_BIOCGSTATSOLD;
+extern unsigned IOCTL_BIOCIMMEDIATE;
+extern unsigned IOCTL_BIOCVERSION;
+extern unsigned IOCTL_BIOCSTCPF;
+extern unsigned IOCTL_BIOCSUDPF;
+extern unsigned IOCTL_BIOCGHDRCMPLT;
+extern unsigned IOCTL_BIOCSHDRCMPLT;
+extern unsigned IOCTL_BIOCSDLT;
+extern unsigned IOCTL_BIOCGDLTLIST;
+extern unsigned IOCTL_BIOCGDIRECTION;
+extern unsigned IOCTL_BIOCSDIRECTION;
+extern unsigned IOCTL_BIOCSRTIMEOUT;
+extern unsigned IOCTL_BIOCGRTIMEOUT;
+extern unsigned IOCTL_BIOCGFEEDBACK;
+extern unsigned IOCTL_BIOCSFEEDBACK;
+extern unsigned IOCTL_GRESADDRS;
+extern unsigned IOCTL_GRESADDRD;
+extern unsigned IOCTL_GREGADDRS;
+extern unsigned IOCTL_GREGADDRD;
+extern unsigned IOCTL_GRESPROTO;
+extern unsigned IOCTL_GREGPROTO;
+extern unsigned IOCTL_GRESSOCK;
+extern unsigned IOCTL_GREDSOCK;
+extern unsigned IOCTL_PPPIOCGRAWIN;
+extern unsigned IOCTL_PPPIOCGFLAGS;
+extern unsigned IOCTL_PPPIOCSFLAGS;
+extern unsigned IOCTL_PPPIOCGASYNCMAP;
+extern unsigned IOCTL_PPPIOCSASYNCMAP;
+extern unsigned IOCTL_PPPIOCGUNIT;
+extern unsigned IOCTL_PPPIOCGRASYNCMAP;
+extern unsigned IOCTL_PPPIOCSRASYNCMAP;
+extern unsigned IOCTL_PPPIOCGMRU;
+extern unsigned IOCTL_PPPIOCSMRU;
+extern unsigned IOCTL_PPPIOCSMAXCID;
+extern unsigned IOCTL_PPPIOCGXASYNCMAP;
+extern unsigned IOCTL_PPPIOCSXASYNCMAP;
+extern unsigned IOCTL_PPPIOCXFERUNIT;
+extern unsigned IOCTL_PPPIOCSCOMPRESS;
+extern unsigned IOCTL_PPPIOCGNPMODE;
+extern unsigned IOCTL_PPPIOCSNPMODE;
+extern unsigned IOCTL_PPPIOCGIDLE;
+extern unsigned IOCTL_PPPIOCGMTU;
+extern unsigned IOCTL_PPPIOCSMTU;
+extern unsigned IOCTL_SIOCGPPPSTATS;
+extern unsigned IOCTL_SIOCGPPPCSTATS;
+extern unsigned IOCTL_IOC_NPF_VERSION;
+extern unsigned IOCTL_IOC_NPF_SWITCH;
+extern unsigned IOCTL_IOC_NPF_LOAD;
+extern unsigned IOCTL_IOC_NPF_TABLE;
+extern unsigned IOCTL_IOC_NPF_STATS;
+extern unsigned IOCTL_IOC_NPF_SAVE;
+extern unsigned IOCTL_IOC_NPF_RULE;
+extern unsigned IOCTL_IOC_NPF_CONN_LOOKUP;
+extern unsigned IOCTL_PPPOESETPARMS;
+extern unsigned IOCTL_PPPOEGETPARMS;
+extern unsigned IOCTL_PPPOEGETSESSION;
+extern unsigned IOCTL_SPPPGETAUTHCFG;
+extern unsigned IOCTL_SPPPSETAUTHCFG;
+extern unsigned IOCTL_SPPPGETLCPCFG;
+extern unsigned IOCTL_SPPPSETLCPCFG;
+extern unsigned IOCTL_SPPPGETSTATUS;
+extern unsigned IOCTL_SPPPGETSTATUSNCP;
+extern unsigned IOCTL_SPPPGETIDLETO;
+extern unsigned IOCTL_SPPPSETIDLETO;
+extern unsigned IOCTL_SPPPGETAUTHFAILURES;
+extern unsigned IOCTL_SPPPSETAUTHFAILURE;
+extern unsigned IOCTL_SPPPSETDNSOPTS;
+extern unsigned IOCTL_SPPPGETDNSOPTS;
+extern unsigned IOCTL_SPPPGETDNSADDRS;
+extern unsigned IOCTL_SPPPSETKEEPALIVE;
+extern unsigned IOCTL_SPPPGETKEEPALIVE;
+extern unsigned IOCTL_SRT_GETNRT;
+extern unsigned IOCTL_SRT_GETRT;
+extern unsigned IOCTL_SRT_SETRT;
+extern unsigned IOCTL_SRT_DELRT;
+extern unsigned IOCTL_SRT_SFLAGS;
+extern unsigned IOCTL_SRT_GFLAGS;
+extern unsigned IOCTL_SRT_SGFLAGS;
+extern unsigned IOCTL_SRT_DEBUG;
+extern unsigned IOCTL_TAPGIFNAME;
+extern unsigned IOCTL_TUNSDEBUG;
+extern unsigned IOCTL_TUNGDEBUG;
+extern unsigned IOCTL_TUNSIFMODE;
+extern unsigned IOCTL_TUNSLMODE;
+extern unsigned IOCTL_TUNSIFHEAD;
+extern unsigned IOCTL_TUNGIFHEAD;
+extern unsigned IOCTL_DIOCSTART;
+extern unsigned IOCTL_DIOCSTOP;
+extern unsigned IOCTL_DIOCADDRULE;
+extern unsigned IOCTL_DIOCGETRULES;
+extern unsigned IOCTL_DIOCGETRULE;
+extern unsigned IOCTL_DIOCSETLCK;
+extern unsigned IOCTL_DIOCCLRSTATES;
+extern unsigned IOCTL_DIOCGETSTATE;
+extern unsigned IOCTL_DIOCSETSTATUSIF;
+extern unsigned IOCTL_DIOCGETSTATUS;
+extern unsigned IOCTL_DIOCCLRSTATUS;
+extern unsigned IOCTL_DIOCNATLOOK;
+extern unsigned IOCTL_DIOCSETDEBUG;
+extern unsigned IOCTL_DIOCGETSTATES;
+extern unsigned IOCTL_DIOCCHANGERULE;
+extern unsigned IOCTL_DIOCSETTIMEOUT;
+extern unsigned IOCTL_DIOCGETTIMEOUT;
+extern unsigned IOCTL_DIOCADDSTATE;
+extern unsigned IOCTL_DIOCCLRRULECTRS;
+extern unsigned IOCTL_DIOCGETLIMIT;
+extern unsigned IOCTL_DIOCSETLIMIT;
+extern unsigned IOCTL_DIOCKILLSTATES;
+extern unsigned IOCTL_DIOCSTARTALTQ;
+extern unsigned IOCTL_DIOCSTOPALTQ;
+extern unsigned IOCTL_DIOCADDALTQ;
+extern unsigned IOCTL_DIOCGETALTQS;
+extern unsigned IOCTL_DIOCGETALTQ;
+extern unsigned IOCTL_DIOCCHANGEALTQ;
+extern unsigned IOCTL_DIOCGETQSTATS;
+extern unsigned IOCTL_DIOCBEGINADDRS;
+extern unsigned IOCTL_DIOCADDADDR;
+extern unsigned IOCTL_DIOCGETADDRS;
+extern unsigned IOCTL_DIOCGETADDR;
+extern unsigned IOCTL_DIOCCHANGEADDR;
+extern unsigned IOCTL_DIOCADDSTATES;
+extern unsigned IOCTL_DIOCGETRULESETS;
+extern unsigned IOCTL_DIOCGETRULESET;
+extern unsigned IOCTL_DIOCRCLRTABLES;
+extern unsigned IOCTL_DIOCRADDTABLES;
+extern unsigned IOCTL_DIOCRDELTABLES;
+extern unsigned IOCTL_DIOCRGETTABLES;
+extern unsigned IOCTL_DIOCRGETTSTATS;
+extern unsigned IOCTL_DIOCRCLRTSTATS;
+extern unsigned IOCTL_DIOCRCLRADDRS;
+extern unsigned IOCTL_DIOCRADDADDRS;
+extern unsigned IOCTL_DIOCRDELADDRS;
+extern unsigned IOCTL_DIOCRSETADDRS;
+extern unsigned IOCTL_DIOCRGETADDRS;
+extern unsigned IOCTL_DIOCRGETASTATS;
+extern unsigned IOCTL_DIOCRCLRASTATS;
+extern unsigned IOCTL_DIOCRTSTADDRS;
+extern unsigned IOCTL_DIOCRSETTFLAGS;
+extern unsigned IOCTL_DIOCRINADEFINE;
+extern unsigned IOCTL_DIOCOSFPFLUSH;
+extern unsigned IOCTL_DIOCOSFPADD;
+extern unsigned IOCTL_DIOCOSFPGET;
+extern unsigned IOCTL_DIOCXBEGIN;
+extern unsigned IOCTL_DIOCXCOMMIT;
+extern unsigned IOCTL_DIOCXROLLBACK;
+extern unsigned IOCTL_DIOCGETSRCNODES;
+extern unsigned IOCTL_DIOCCLRSRCNODES;
+extern unsigned IOCTL_DIOCSETHOSTID;
+extern unsigned IOCTL_DIOCIGETIFACES;
+extern unsigned IOCTL_DIOCSETIFFLAG;
+extern unsigned IOCTL_DIOCCLRIFFLAG;
+extern unsigned IOCTL_DIOCKILLSRCNODES;
+extern unsigned IOCTL_SLIOCGUNIT;
+extern unsigned IOCTL_SIOCGBTINFO;
+extern unsigned IOCTL_SIOCGBTINFOA;
+extern unsigned IOCTL_SIOCNBTINFO;
+extern unsigned IOCTL_SIOCSBTFLAGS;
+extern unsigned IOCTL_SIOCSBTPOLICY;
+extern unsigned IOCTL_SIOCSBTPTYPE;
+extern unsigned IOCTL_SIOCGBTSTATS;
+extern unsigned IOCTL_SIOCZBTSTATS;
+extern unsigned IOCTL_SIOCBTDUMP;
+extern unsigned IOCTL_SIOCSBTSCOMTU;
+extern unsigned IOCTL_SIOCGBTFEAT;
+extern unsigned IOCTL_SIOCADNAT;
+extern unsigned IOCTL_SIOCRMNAT;
+extern unsigned IOCTL_SIOCGNATS;
+extern unsigned IOCTL_SIOCGNATL;
+extern unsigned IOCTL_SIOCPURGENAT;
+extern unsigned IOCTL_SIOCCONNECTX;
+extern unsigned IOCTL_SIOCCONNECTXDEL;
+extern unsigned IOCTL_SIOCSIFINFO_FLAGS;
+extern unsigned IOCTL_SIOCAADDRCTL_POLICY;
+extern unsigned IOCTL_SIOCDADDRCTL_POLICY;
+extern unsigned IOCTL_SMBIOC_OPENSESSION;
+extern unsigned IOCTL_SMBIOC_OPENSHARE;
+extern unsigned IOCTL_SMBIOC_REQUEST;
+extern unsigned IOCTL_SMBIOC_SETFLAGS;
+extern unsigned IOCTL_SMBIOC_LOOKUP;
+extern unsigned IOCTL_SMBIOC_READ;
+extern unsigned IOCTL_SMBIOC_WRITE;
+extern unsigned IOCTL_AGPIOC_INFO;
+extern unsigned IOCTL_AGPIOC_ACQUIRE;
+extern unsigned IOCTL_AGPIOC_RELEASE;
+extern unsigned IOCTL_AGPIOC_SETUP;
+extern unsigned IOCTL_AGPIOC_ALLOCATE;
+extern unsigned IOCTL_AGPIOC_DEALLOCATE;
+extern unsigned IOCTL_AGPIOC_BIND;
+extern unsigned IOCTL_AGPIOC_UNBIND;
+extern unsigned IOCTL_AUDIO_GETINFO;
+extern unsigned IOCTL_AUDIO_SETINFO;
+extern unsigned IOCTL_AUDIO_DRAIN;
+extern unsigned IOCTL_AUDIO_FLUSH;
+extern unsigned IOCTL_AUDIO_WSEEK;
+extern unsigned IOCTL_AUDIO_RERROR;
+extern unsigned IOCTL_AUDIO_GETDEV;
+extern unsigned IOCTL_AUDIO_GETENC;
+extern unsigned IOCTL_AUDIO_GETFD;
+extern unsigned IOCTL_AUDIO_SETFD;
+extern unsigned IOCTL_AUDIO_PERROR;
+extern unsigned IOCTL_AUDIO_GETIOFFS;
+extern unsigned IOCTL_AUDIO_GETOOFFS;
+extern unsigned IOCTL_AUDIO_GETPROPS;
+extern unsigned IOCTL_AUDIO_GETBUFINFO;
+extern unsigned IOCTL_AUDIO_SETCHAN;
+extern unsigned IOCTL_AUDIO_GETCHAN;
+extern unsigned IOCTL_AUDIO_QUERYFORMAT;
+extern unsigned IOCTL_AUDIO_GETFORMAT;
+extern unsigned IOCTL_AUDIO_SETFORMAT;
+extern unsigned IOCTL_AUDIO_MIXER_READ;
+extern unsigned IOCTL_AUDIO_MIXER_WRITE;
+extern unsigned IOCTL_AUDIO_MIXER_DEVINFO;
+extern unsigned IOCTL_ATAIOCCOMMAND;
+extern unsigned IOCTL_ATABUSIOSCAN;
+extern unsigned IOCTL_ATABUSIORESET;
+extern unsigned IOCTL_ATABUSIODETACH;
+extern unsigned IOCTL_CDIOCPLAYTRACKS;
+extern unsigned IOCTL_CDIOCPLAYBLOCKS;
+extern unsigned IOCTL_CDIOCREADSUBCHANNEL;
+extern unsigned IOCTL_CDIOREADTOCHEADER;
+extern unsigned IOCTL_CDIOREADTOCENTRIES;
+extern unsigned IOCTL_CDIOREADMSADDR;
+extern unsigned IOCTL_CDIOCSETPATCH;
+extern unsigned IOCTL_CDIOCGETVOL;
+extern unsigned IOCTL_CDIOCSETVOL;
+extern unsigned IOCTL_CDIOCSETMONO;
+extern unsigned IOCTL_CDIOCSETSTEREO;
+extern unsigned IOCTL_CDIOCSETMUTE;
+extern unsigned IOCTL_CDIOCSETLEFT;
+extern unsigned IOCTL_CDIOCSETRIGHT;
+extern unsigned IOCTL_CDIOCSETDEBUG;
+extern unsigned IOCTL_CDIOCCLRDEBUG;
+extern unsigned IOCTL_CDIOCPAUSE;
+extern unsigned IOCTL_CDIOCRESUME;
+extern unsigned IOCTL_CDIOCRESET;
+extern unsigned IOCTL_CDIOCSTART;
+extern unsigned IOCTL_CDIOCSTOP;
+extern unsigned IOCTL_CDIOCEJECT;
+extern unsigned IOCTL_CDIOCALLOW;
+extern unsigned IOCTL_CDIOCPREVENT;
+extern unsigned IOCTL_CDIOCCLOSE;
+extern unsigned IOCTL_CDIOCPLAYMSF;
+extern unsigned IOCTL_CDIOCLOADUNLOAD;
+extern unsigned IOCTL_CHIOMOVE;
+extern unsigned IOCTL_CHIOEXCHANGE;
+extern unsigned IOCTL_CHIOPOSITION;
+extern unsigned IOCTL_CHIOGPICKER;
+extern unsigned IOCTL_CHIOSPICKER;
+extern unsigned IOCTL_CHIOGPARAMS;
+extern unsigned IOCTL_CHIOIELEM;
+extern unsigned IOCTL_OCHIOGSTATUS;
+extern unsigned IOCTL_CHIOGSTATUS;
+extern unsigned IOCTL_CHIOSVOLTAG;
+extern unsigned IOCTL_CLOCKCTL_SETTIMEOFDAY;
+extern unsigned IOCTL_CLOCKCTL_ADJTIME;
+extern unsigned IOCTL_CLOCKCTL_CLOCK_SETTIME;
+extern unsigned IOCTL_CLOCKCTL_NTP_ADJTIME;
+extern unsigned IOCTL_IOC_CPU_SETSTATE;
+extern unsigned IOCTL_IOC_CPU_GETSTATE;
+extern unsigned IOCTL_IOC_CPU_GETCOUNT;
+extern unsigned IOCTL_IOC_CPU_MAPID;
+extern unsigned IOCTL_IOC_CPU_UCODE_GET_VERSION;
+extern unsigned IOCTL_IOC_CPU_UCODE_APPLY;
+extern unsigned IOCTL_DIOCGDINFO;
+extern unsigned IOCTL_DIOCSDINFO;
+extern unsigned IOCTL_DIOCWDINFO;
+extern unsigned IOCTL_DIOCRFORMAT;
+extern unsigned IOCTL_DIOCWFORMAT;
+extern unsigned IOCTL_DIOCSSTEP;
+extern unsigned IOCTL_DIOCSRETRIES;
+extern unsigned IOCTL_DIOCKLABEL;
+extern unsigned IOCTL_DIOCWLABEL;
+extern unsigned IOCTL_DIOCSBAD;
+extern unsigned IOCTL_DIOCEJECT;
+extern unsigned IOCTL_ODIOCEJECT;
+extern unsigned IOCTL_DIOCLOCK;
+extern unsigned IOCTL_DIOCGDEFLABEL;
+extern unsigned IOCTL_DIOCCLRLABEL;
+extern unsigned IOCTL_DIOCGCACHE;
+extern unsigned IOCTL_DIOCSCACHE;
+extern unsigned IOCTL_DIOCCACHESYNC;
+extern unsigned IOCTL_DIOCBSLIST;
+extern unsigned IOCTL_DIOCBSFLUSH;
+extern unsigned IOCTL_DIOCAWEDGE;
+extern unsigned IOCTL_DIOCGWEDGEINFO;
+extern unsigned IOCTL_DIOCDWEDGE;
+extern unsigned IOCTL_DIOCLWEDGES;
+extern unsigned IOCTL_DIOCGSTRATEGY;
+extern unsigned IOCTL_DIOCSSTRATEGY;
+extern unsigned IOCTL_DIOCGDISKINFO;
+extern unsigned IOCTL_DIOCTUR;
+extern unsigned IOCTL_DIOCMWEDGES;
+extern unsigned IOCTL_DIOCGSECTORSIZE;
+extern unsigned IOCTL_DIOCGMEDIASIZE;
+extern unsigned IOCTL_DIOCRMWEDGES;
+extern unsigned IOCTL_DRVDETACHDEV;
+extern unsigned IOCTL_DRVRESCANBUS;
+extern unsigned IOCTL_DRVCTLCOMMAND;
+extern unsigned IOCTL_DRVRESUMEDEV;
+extern unsigned IOCTL_DRVLISTDEV;
+extern unsigned IOCTL_DRVGETEVENT;
+extern unsigned IOCTL_DRVSUSPENDDEV;
+extern unsigned IOCTL_DVD_READ_STRUCT;
+extern unsigned IOCTL_DVD_WRITE_STRUCT;
+extern unsigned IOCTL_DVD_AUTH;
+extern unsigned IOCTL_ENVSYS_GETDICTIONARY;
+extern unsigned IOCTL_ENVSYS_SETDICTIONARY;
+extern unsigned IOCTL_ENVSYS_REMOVEPROPS;
+extern unsigned IOCTL_ENVSYS_GTREDATA;
+extern unsigned IOCTL_ENVSYS_GTREINFO;
+extern unsigned IOCTL_KFILTER_BYFILTER;
+extern unsigned IOCTL_KFILTER_BYNAME;
+extern unsigned IOCTL_FDIOCGETOPTS;
+extern unsigned IOCTL_FDIOCSETOPTS;
+extern unsigned IOCTL_FDIOCSETFORMAT;
+extern unsigned IOCTL_FDIOCGETFORMAT;
+extern unsigned IOCTL_FDIOCFORMAT_TRACK;
+extern unsigned IOCTL_FIOCLEX;
+extern unsigned IOCTL_FIONCLEX;
+extern unsigned IOCTL_FIOSEEKDATA;
+extern unsigned IOCTL_FIOSEEKHOLE;
+extern unsigned IOCTL_FIONREAD;
+extern unsigned IOCTL_FIONBIO;
+extern unsigned IOCTL_FIOASYNC;
+extern unsigned IOCTL_FIOSETOWN;
+extern unsigned IOCTL_FIOGETOWN;
+extern unsigned IOCTL_OFIOGETBMAP;
+extern unsigned IOCTL_FIOGETBMAP;
+extern unsigned IOCTL_FIONWRITE;
+extern unsigned IOCTL_FIONSPACE;
+extern unsigned IOCTL_GPIOINFO;
+extern unsigned IOCTL_GPIOSET;
+extern unsigned IOCTL_GPIOUNSET;
+extern unsigned IOCTL_GPIOREAD;
+extern unsigned IOCTL_GPIOWRITE;
+extern unsigned IOCTL_GPIOTOGGLE;
+extern unsigned IOCTL_GPIOATTACH;
+extern unsigned IOCTL_PTIOCNETBSD;
+extern unsigned IOCTL_PTIOCSUNOS;
+extern unsigned IOCTL_PTIOCLINUX;
+extern unsigned IOCTL_PTIOCFREEBSD;
+extern unsigned IOCTL_PTIOCULTRIX;
+extern unsigned IOCTL_TIOCHPCL;
+extern unsigned IOCTL_TIOCGETP;
+extern unsigned IOCTL_TIOCSETP;
+extern unsigned IOCTL_TIOCSETN;
+extern unsigned IOCTL_TIOCSETC;
+extern unsigned IOCTL_TIOCGETC;
+extern unsigned IOCTL_TIOCLBIS;
+extern unsigned IOCTL_TIOCLBIC;
+extern unsigned IOCTL_TIOCLSET;
+extern unsigned IOCTL_TIOCLGET;
+extern unsigned IOCTL_TIOCSLTC;
+extern unsigned IOCTL_TIOCGLTC;
+extern unsigned IOCTL_OTIOCCONS;
+extern unsigned IOCTL_JOY_SETTIMEOUT;
+extern unsigned IOCTL_JOY_GETTIMEOUT;
+extern unsigned IOCTL_JOY_SET_X_OFFSET;
+extern unsigned IOCTL_JOY_SET_Y_OFFSET;
+extern unsigned IOCTL_JOY_GET_X_OFFSET;
+extern unsigned IOCTL_JOY_GET_Y_OFFSET;
+extern unsigned IOCTL_OKIOCGSYMBOL;
+extern unsigned IOCTL_OKIOCGVALUE;
+extern unsigned IOCTL_KIOCGSIZE;
+extern unsigned IOCTL_KIOCGVALUE;
+extern unsigned IOCTL_KIOCGSYMBOL;
+extern unsigned IOCTL_LUAINFO;
+extern unsigned IOCTL_LUACREATE;
+extern unsigned IOCTL_LUADESTROY;
+extern unsigned IOCTL_LUAREQUIRE;
+extern unsigned IOCTL_LUALOAD;
+extern unsigned IOCTL_MIDI_PRETIME;
+extern unsigned IOCTL_MIDI_MPUMODE;
+extern unsigned IOCTL_MIDI_MPUCMD;
+extern unsigned IOCTL_SEQUENCER_RESET;
+extern unsigned IOCTL_SEQUENCER_SYNC;
+extern unsigned IOCTL_SEQUENCER_INFO;
+extern unsigned IOCTL_SEQUENCER_CTRLRATE;
+extern unsigned IOCTL_SEQUENCER_GETOUTCOUNT;
+extern unsigned IOCTL_SEQUENCER_GETINCOUNT;
+extern unsigned IOCTL_SEQUENCER_RESETSAMPLES;
+extern unsigned IOCTL_SEQUENCER_NRSYNTHS;
+extern unsigned IOCTL_SEQUENCER_NRMIDIS;
+extern unsigned IOCTL_SEQUENCER_THRESHOLD;
+extern unsigned IOCTL_SEQUENCER_MEMAVL;
+extern unsigned IOCTL_SEQUENCER_PANIC;
+extern unsigned IOCTL_SEQUENCER_OUTOFBAND;
+extern unsigned IOCTL_SEQUENCER_GETTIME;
+extern unsigned IOCTL_SEQUENCER_TMR_TIMEBASE;
+extern unsigned IOCTL_SEQUENCER_TMR_START;
+extern unsigned IOCTL_SEQUENCER_TMR_STOP;
+extern unsigned IOCTL_SEQUENCER_TMR_CONTINUE;
+extern unsigned IOCTL_SEQUENCER_TMR_TEMPO;
+extern unsigned IOCTL_SEQUENCER_TMR_SOURCE;
+extern unsigned IOCTL_SEQUENCER_TMR_METRONOME;
+extern unsigned IOCTL_SEQUENCER_TMR_SELECT;
+extern unsigned IOCTL_SPI_IOCTL_CONFIGURE;
+extern unsigned IOCTL_SPI_IOCTL_TRANSFER;
+extern unsigned IOCTL_MTIOCTOP;
+extern unsigned IOCTL_MTIOCGET;
+extern unsigned IOCTL_MTIOCIEOT;
+extern unsigned IOCTL_MTIOCEEOT;
+extern unsigned IOCTL_MTIOCRDSPOS;
+extern unsigned IOCTL_MTIOCRDHPOS;
+extern unsigned IOCTL_MTIOCSLOCATE;
+extern unsigned IOCTL_MTIOCHLOCATE;
+extern unsigned IOCTL_POWER_EVENT_RECVDICT;
+extern unsigned IOCTL_POWER_IOC_GET_TYPE;
+extern unsigned IOCTL_RIOCGINFO;
+extern unsigned IOCTL_RIOCSINFO;
+extern unsigned IOCTL_RIOCSSRCH;
+extern unsigned IOCTL_RNDGETENTCNT;
+extern unsigned IOCTL_RNDGETSRCNUM;
+extern unsigned IOCTL_RNDGETSRCNAME;
+extern unsigned IOCTL_RNDCTL;
+extern unsigned IOCTL_RNDADDDATA;
+extern unsigned IOCTL_RNDGETPOOLSTAT;
+extern unsigned IOCTL_RNDGETESTNUM;
+extern unsigned IOCTL_RNDGETESTNAME;
+extern unsigned IOCTL_SCIOCGET;
+extern unsigned IOCTL_SCIOCSET;
+extern unsigned IOCTL_SCIOCRESTART;
+extern unsigned IOCTL_SCIOC_USE_ADF;
+extern unsigned IOCTL_SCIOCCOMMAND;
+extern unsigned IOCTL_SCIOCDEBUG;
+extern unsigned IOCTL_SCIOCIDENTIFY;
+extern unsigned IOCTL_OSCIOCIDENTIFY;
+extern unsigned IOCTL_SCIOCDECONFIG;
+extern unsigned IOCTL_SCIOCRECONFIG;
+extern unsigned IOCTL_SCIOCRESET;
+extern unsigned IOCTL_SCBUSIOSCAN;
+extern unsigned IOCTL_SCBUSIORESET;
+extern unsigned IOCTL_SCBUSIODETACH;
+extern unsigned IOCTL_SCBUSACCEL;
+extern unsigned IOCTL_SCBUSIOLLSCAN;
+extern unsigned IOCTL_SIOCSHIWAT;
+extern unsigned IOCTL_SIOCGHIWAT;
+extern unsigned IOCTL_SIOCSLOWAT;
+extern unsigned IOCTL_SIOCGLOWAT;
+extern unsigned IOCTL_SIOCATMARK;
+extern unsigned IOCTL_SIOCSPGRP;
+extern unsigned IOCTL_SIOCGPGRP;
+extern unsigned IOCTL_SIOCPEELOFF;
+extern unsigned IOCTL_SIOCADDRT;
+extern unsigned IOCTL_SIOCDELRT;
+extern unsigned IOCTL_SIOCSIFADDR;
+extern unsigned IOCTL_SIOCGIFADDR;
+extern unsigned IOCTL_SIOCSIFDSTADDR;
+extern unsigned IOCTL_SIOCGIFDSTADDR;
+extern unsigned IOCTL_SIOCSIFFLAGS;
+extern unsigned IOCTL_SIOCGIFFLAGS;
+extern unsigned IOCTL_SIOCGIFBRDADDR;
+extern unsigned IOCTL_SIOCSIFBRDADDR;
+extern unsigned IOCTL_SIOCGIFCONF;
+extern unsigned IOCTL_SIOCGIFNETMASK;
+extern unsigned IOCTL_SIOCSIFNETMASK;
+extern unsigned IOCTL_SIOCGIFMETRIC;
+extern unsigned IOCTL_SIOCSIFMETRIC;
+extern unsigned IOCTL_SIOCDIFADDR;
+extern unsigned IOCTL_SIOCAIFADDR;
+extern unsigned IOCTL_SIOCGIFALIAS;
+extern unsigned IOCTL_SIOCGIFAFLAG_IN;
+extern unsigned IOCTL_SIOCALIFADDR;
+extern unsigned IOCTL_SIOCGLIFADDR;
+extern unsigned IOCTL_SIOCDLIFADDR;
+extern unsigned IOCTL_SIOCSIFADDRPREF;
+extern unsigned IOCTL_SIOCGIFADDRPREF;
+extern unsigned IOCTL_SIOCADDMULTI;
+extern unsigned IOCTL_SIOCDELMULTI;
+extern unsigned IOCTL_SIOCGETVIFCNT;
+extern unsigned IOCTL_SIOCGETSGCNT;
+extern unsigned IOCTL_SIOCSIFMEDIA;
+extern unsigned IOCTL_SIOCGIFMEDIA;
+extern unsigned IOCTL_SIOCSIFGENERIC;
+extern unsigned IOCTL_SIOCGIFGENERIC;
+extern unsigned IOCTL_SIOCSIFPHYADDR;
+extern unsigned IOCTL_SIOCGIFPSRCADDR;
+extern unsigned IOCTL_SIOCGIFPDSTADDR;
+extern unsigned IOCTL_SIOCDIFPHYADDR;
+extern unsigned IOCTL_SIOCSLIFPHYADDR;
+extern unsigned IOCTL_SIOCGLIFPHYADDR;
+extern unsigned IOCTL_SIOCSIFMTU;
+extern unsigned IOCTL_SIOCGIFMTU;
+extern unsigned IOCTL_SIOCSDRVSPEC;
+extern unsigned IOCTL_SIOCGDRVSPEC;
+extern unsigned IOCTL_SIOCIFCREATE;
+extern unsigned IOCTL_SIOCIFDESTROY;
+extern unsigned IOCTL_SIOCIFGCLONERS;
+extern unsigned IOCTL_SIOCGIFDLT;
+extern unsigned IOCTL_SIOCGIFCAP;
+extern unsigned IOCTL_SIOCSIFCAP;
+extern unsigned IOCTL_SIOCSVH;
+extern unsigned IOCTL_SIOCGVH;
+extern unsigned IOCTL_SIOCINITIFADDR;
+extern unsigned IOCTL_SIOCGIFDATA;
+extern unsigned IOCTL_SIOCZIFDATA;
+extern unsigned IOCTL_SIOCGLINKSTR;
+extern unsigned IOCTL_SIOCSLINKSTR;
+extern unsigned IOCTL_SIOCGETHERCAP;
+extern unsigned IOCTL_SIOCGIFINDEX;
+extern unsigned IOCTL_SIOCSETHERCAP;
+extern unsigned IOCTL_SIOCSIFDESCR;
+extern unsigned IOCTL_SIOCGIFDESCR;
+extern unsigned IOCTL_SIOCGUMBINFO;
+extern unsigned IOCTL_SIOCSUMBPARAM;
+extern unsigned IOCTL_SIOCGUMBPARAM;
+extern unsigned IOCTL_SIOCSETPFSYNC;
+extern unsigned IOCTL_SIOCGETPFSYNC;
+extern unsigned IOCTL_PPS_IOC_CREATE;
+extern unsigned IOCTL_PPS_IOC_DESTROY;
+extern unsigned IOCTL_PPS_IOC_SETPARAMS;
+extern unsigned IOCTL_PPS_IOC_GETPARAMS;
+extern unsigned IOCTL_PPS_IOC_GETCAP;
+extern unsigned IOCTL_PPS_IOC_FETCH;
+extern unsigned IOCTL_PPS_IOC_KCBIND;
+extern unsigned IOCTL_TIOCEXCL;
+extern unsigned IOCTL_TIOCNXCL;
+extern unsigned IOCTL_TIOCFLUSH;
+extern unsigned IOCTL_TIOCGETA;
+extern unsigned IOCTL_TIOCSETA;
+extern unsigned IOCTL_TIOCSETAW;
+extern unsigned IOCTL_TIOCSETAF;
+extern unsigned IOCTL_TIOCGETD;
+extern unsigned IOCTL_TIOCSETD;
+extern unsigned IOCTL_TIOCGLINED;
+extern unsigned IOCTL_TIOCSLINED;
+extern unsigned IOCTL_TIOCSBRK;
+extern unsigned IOCTL_TIOCCBRK;
+extern unsigned IOCTL_TIOCSDTR;
+extern unsigned IOCTL_TIOCCDTR;
+extern unsigned IOCTL_TIOCGPGRP;
+extern unsigned IOCTL_TIOCSPGRP;
+extern unsigned IOCTL_TIOCOUTQ;
+extern unsigned IOCTL_TIOCSTI;
+extern unsigned IOCTL_TIOCNOTTY;
+extern unsigned IOCTL_TIOCPKT;
+extern unsigned IOCTL_TIOCSTOP;
+extern unsigned IOCTL_TIOCSTART;
+extern unsigned IOCTL_TIOCMSET;
+extern unsigned IOCTL_TIOCMBIS;
+extern unsigned IOCTL_TIOCMBIC;
+extern unsigned IOCTL_TIOCMGET;
+extern unsigned IOCTL_TIOCREMOTE;
+extern unsigned IOCTL_TIOCGWINSZ;
+extern unsigned IOCTL_TIOCSWINSZ;
+extern unsigned IOCTL_TIOCUCNTL;
+extern unsigned IOCTL_TIOCSTAT;
+extern unsigned IOCTL_TIOCGSID;
+extern unsigned IOCTL_TIOCCONS;
+extern unsigned IOCTL_TIOCSCTTY;
+extern unsigned IOCTL_TIOCEXT;
+extern unsigned IOCTL_TIOCSIG;
+extern unsigned IOCTL_TIOCDRAIN;
+extern unsigned IOCTL_TIOCGFLAGS;
+extern unsigned IOCTL_TIOCSFLAGS;
+extern unsigned IOCTL_TIOCDCDTIMESTAMP;
+extern unsigned IOCTL_TIOCRCVFRAME;
+extern unsigned IOCTL_TIOCXMTFRAME;
+extern unsigned IOCTL_TIOCPTMGET;
+extern unsigned IOCTL_TIOCGRANTPT;
+extern unsigned IOCTL_TIOCPTSNAME;
+extern unsigned IOCTL_TIOCSQSIZE;
+extern unsigned IOCTL_TIOCGQSIZE;
+extern unsigned IOCTL_VERIEXEC_LOAD;
+extern unsigned IOCTL_VERIEXEC_TABLESIZE;
+extern unsigned IOCTL_VERIEXEC_DELETE;
+extern unsigned IOCTL_VERIEXEC_QUERY;
+extern unsigned IOCTL_VERIEXEC_DUMP;
+extern unsigned IOCTL_VERIEXEC_FLUSH;
+extern unsigned IOCTL_VIDIOC_QUERYCAP;
+extern unsigned IOCTL_VIDIOC_RESERVED;
+extern unsigned IOCTL_VIDIOC_ENUM_FMT;
+extern unsigned IOCTL_VIDIOC_G_FMT;
+extern unsigned IOCTL_VIDIOC_S_FMT;
+extern unsigned IOCTL_VIDIOC_REQBUFS;
+extern unsigned IOCTL_VIDIOC_QUERYBUF;
+extern unsigned IOCTL_VIDIOC_G_FBUF;
+extern unsigned IOCTL_VIDIOC_S_FBUF;
+extern unsigned IOCTL_VIDIOC_OVERLAY;
+extern unsigned IOCTL_VIDIOC_QBUF;
+extern unsigned IOCTL_VIDIOC_DQBUF;
+extern unsigned IOCTL_VIDIOC_STREAMON;
+extern unsigned IOCTL_VIDIOC_STREAMOFF;
+extern unsigned IOCTL_VIDIOC_G_PARM;
+extern unsigned IOCTL_VIDIOC_S_PARM;
+extern unsigned IOCTL_VIDIOC_G_STD;
+extern unsigned IOCTL_VIDIOC_S_STD;
+extern unsigned IOCTL_VIDIOC_ENUMSTD;
+extern unsigned IOCTL_VIDIOC_ENUMINPUT;
+extern unsigned IOCTL_VIDIOC_G_CTRL;
+extern unsigned IOCTL_VIDIOC_S_CTRL;
+extern unsigned IOCTL_VIDIOC_G_TUNER;
+extern unsigned IOCTL_VIDIOC_S_TUNER;
+extern unsigned IOCTL_VIDIOC_G_AUDIO;
+extern unsigned IOCTL_VIDIOC_S_AUDIO;
+extern unsigned IOCTL_VIDIOC_QUERYCTRL;
+extern unsigned IOCTL_VIDIOC_QUERYMENU;
+extern unsigned IOCTL_VIDIOC_G_INPUT;
+extern unsigned IOCTL_VIDIOC_S_INPUT;
+extern unsigned IOCTL_VIDIOC_G_OUTPUT;
+extern unsigned IOCTL_VIDIOC_S_OUTPUT;
+extern unsigned IOCTL_VIDIOC_ENUMOUTPUT;
+extern unsigned IOCTL_VIDIOC_G_AUDOUT;
+extern unsigned IOCTL_VIDIOC_S_AUDOUT;
+extern unsigned IOCTL_VIDIOC_G_MODULATOR;
+extern unsigned IOCTL_VIDIOC_S_MODULATOR;
+extern unsigned IOCTL_VIDIOC_G_FREQUENCY;
+extern unsigned IOCTL_VIDIOC_S_FREQUENCY;
+extern unsigned IOCTL_VIDIOC_CROPCAP;
+extern unsigned IOCTL_VIDIOC_G_CROP;
+extern unsigned IOCTL_VIDIOC_S_CROP;
+extern unsigned IOCTL_VIDIOC_G_JPEGCOMP;
+extern unsigned IOCTL_VIDIOC_S_JPEGCOMP;
+extern unsigned IOCTL_VIDIOC_QUERYSTD;
+extern unsigned IOCTL_VIDIOC_TRY_FMT;
+extern unsigned IOCTL_VIDIOC_ENUMAUDIO;
+extern unsigned IOCTL_VIDIOC_ENUMAUDOUT;
+extern unsigned IOCTL_VIDIOC_G_PRIORITY;
+extern unsigned IOCTL_VIDIOC_S_PRIORITY;
+extern unsigned IOCTL_VIDIOC_ENUM_FRAMESIZES;
+extern unsigned IOCTL_VIDIOC_ENUM_FRAMEINTERVALS;
+extern unsigned IOCTL_WDOGIOC_GMODE;
+extern unsigned IOCTL_WDOGIOC_SMODE;
+extern unsigned IOCTL_WDOGIOC_WHICH;
+extern unsigned IOCTL_WDOGIOC_TICKLE;
+extern unsigned IOCTL_WDOGIOC_GTICKLER;
+extern unsigned IOCTL_WDOGIOC_GWDOGS;
+extern unsigned IOCTL_KCOV_IOC_SETBUFSIZE;
+extern unsigned IOCTL_KCOV_IOC_ENABLE;
+extern unsigned IOCTL_KCOV_IOC_DISABLE;
+extern unsigned IOCTL_IPMICTL_RECEIVE_MSG_TRUNC;
+extern unsigned IOCTL_IPMICTL_RECEIVE_MSG;
+extern unsigned IOCTL_IPMICTL_SEND_COMMAND;
+extern unsigned IOCTL_IPMICTL_REGISTER_FOR_CMD;
+extern unsigned IOCTL_IPMICTL_UNREGISTER_FOR_CMD;
+extern unsigned IOCTL_IPMICTL_SET_GETS_EVENTS_CMD;
+extern unsigned IOCTL_IPMICTL_SET_MY_ADDRESS_CMD;
+extern unsigned IOCTL_IPMICTL_GET_MY_ADDRESS_CMD;
+extern unsigned IOCTL_IPMICTL_SET_MY_LUN_CMD;
+extern unsigned IOCTL_IPMICTL_GET_MY_LUN_CMD;
+extern unsigned IOCTL_SNDCTL_DSP_RESET;
+extern unsigned IOCTL_SNDCTL_DSP_SYNC;
+extern unsigned IOCTL_SNDCTL_DSP_SPEED;
+extern unsigned IOCTL_SOUND_PCM_READ_RATE;
+extern unsigned IOCTL_SNDCTL_DSP_STEREO;
+extern unsigned IOCTL_SNDCTL_DSP_GETBLKSIZE;
+extern unsigned IOCTL_SNDCTL_DSP_SETFMT;
+extern unsigned IOCTL_SOUND_PCM_READ_BITS;
+extern unsigned IOCTL_SNDCTL_DSP_CHANNELS;
+extern unsigned IOCTL_SOUND_PCM_READ_CHANNELS;
+extern unsigned IOCTL_SOUND_PCM_WRITE_FILTER;
+extern unsigned IOCTL_SOUND_PCM_READ_FILTER;
+extern unsigned IOCTL_SNDCTL_DSP_POST;
+extern unsigned IOCTL_SNDCTL_DSP_SUBDIVIDE;
+extern unsigned IOCTL_SNDCTL_DSP_SETFRAGMENT;
+extern unsigned IOCTL_SNDCTL_DSP_GETFMTS;
+extern unsigned IOCTL_SNDCTL_DSP_GETOSPACE;
+extern unsigned IOCTL_SNDCTL_DSP_GETISPACE;
+extern unsigned IOCTL_SNDCTL_DSP_NONBLOCK;
+extern unsigned IOCTL_SNDCTL_DSP_GETCAPS;
+extern unsigned IOCTL_SNDCTL_DSP_GETTRIGGER;
+extern unsigned IOCTL_SNDCTL_DSP_SETTRIGGER;
+extern unsigned IOCTL_SNDCTL_DSP_GETIPTR;
+extern unsigned IOCTL_SNDCTL_DSP_GETOPTR;
+extern unsigned IOCTL_SNDCTL_DSP_MAPINBUF;
+extern unsigned IOCTL_SNDCTL_DSP_MAPOUTBUF;
+extern unsigned IOCTL_SNDCTL_DSP_SETSYNCRO;
+extern unsigned IOCTL_SNDCTL_DSP_SETDUPLEX;
+extern unsigned IOCTL_SNDCTL_DSP_PROFILE;
+extern unsigned IOCTL_SNDCTL_DSP_GETODELAY;
+extern unsigned IOCTL_SOUND_MIXER_INFO;
+extern unsigned IOCTL_SOUND_OLD_MIXER_INFO;
+extern unsigned IOCTL_OSS_GETVERSION;
+extern unsigned IOCTL_SNDCTL_SYSINFO;
+extern unsigned IOCTL_SNDCTL_AUDIOINFO;
+extern unsigned IOCTL_SNDCTL_ENGINEINFO;
+extern unsigned IOCTL_SNDCTL_DSP_GETPLAYVOL;
+extern unsigned IOCTL_SNDCTL_DSP_SETPLAYVOL;
+extern unsigned IOCTL_SNDCTL_DSP_GETRECVOL;
+extern unsigned IOCTL_SNDCTL_DSP_SETRECVOL;
+extern unsigned IOCTL_SNDCTL_DSP_SKIP;
+extern unsigned IOCTL_SNDCTL_DSP_SILENCE;
+
+extern const int si_SEGV_MAPERR;
+extern const int si_SEGV_ACCERR;
+
+extern const unsigned SHA1_CTX_sz;
+extern const unsigned SHA1_return_length;
+
+extern const unsigned MD4_CTX_sz;
+extern const unsigned MD4_return_length;
+
+extern const unsigned RMD160_CTX_sz;
+extern const unsigned RMD160_return_length;
+
+extern const unsigned MD5_CTX_sz;
+extern const unsigned MD5_return_length;
+
+extern const unsigned fpos_t_sz;
+
+extern const unsigned MD2_CTX_sz;
+extern const unsigned MD2_return_length;
+
+#define SHA2_EXTERN(LEN) \
+ extern const unsigned SHA##LEN##_CTX_sz; \
+ extern const unsigned SHA##LEN##_return_length; \
+ extern const unsigned SHA##LEN##_block_length; \
+ extern const unsigned SHA##LEN##_digest_length
+
+SHA2_EXTERN(224);
+SHA2_EXTERN(256);
+SHA2_EXTERN(384);
+SHA2_EXTERN(512);
+
+#undef SHA2_EXTERN
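+
+// For instance, SHA2_EXTERN(256) above declares SHA256_CTX_sz,
+// SHA256_return_length, SHA256_block_length and SHA256_digest_length.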
+
+extern const int unvis_valid;
+extern const int unvis_validpush;
+
+struct __sanitizer_cdbr {
+ void (*unmap)(void *, void *, uptr);
+ void *cookie;
+ u8 *mmap_base;
+ uptr mmap_size;
+
+ u8 *hash_base;
+ u8 *offset_base;
+ u8 *data_base;
+
+ u32 data_size;
+ u32 entries;
+ u32 entries_index;
+ u32 seed;
+
+ u8 offset_size;
+ u8 index_size;
+
+ u32 entries_m;
+ u32 entries_index_m;
+ u8 entries_s1, entries_s2;
+ u8 entries_index_s1, entries_index_s2;
+};
+
+struct __sanitizer_cdbw {
+ uptr data_counter;
+ uptr data_allocated;
+ uptr data_size;
+ uptr *data_len;
+ void **data_ptr;
+ uptr hash_size;
+ void *hash;
+ uptr key_counter;
+};
+} // namespace __sanitizer
+
+#define CHECK_TYPE_SIZE(TYPE) \
+ COMPILER_CHECK(sizeof(__sanitizer_##TYPE) == sizeof(TYPE))
+
+#define CHECK_SIZE_AND_OFFSET(CLASS, MEMBER) \
+ COMPILER_CHECK(sizeof(((__sanitizer_##CLASS *)NULL)->MEMBER) == \
+ sizeof(((CLASS *)NULL)->MEMBER)); \
+ COMPILER_CHECK(offsetof(__sanitizer_##CLASS, MEMBER) == \
+ offsetof(CLASS, MEMBER))
+
+// For sigaction, which is a function and a struct at the same time, and
+// thus requires an explicit "struct" in the sizeof() expression.
+#define CHECK_STRUCT_SIZE_AND_OFFSET(CLASS, MEMBER) \
+ COMPILER_CHECK(sizeof(((struct __sanitizer_##CLASS *)NULL)->MEMBER) == \
+ sizeof(((struct CLASS *)NULL)->MEMBER)); \
+ COMPILER_CHECK(offsetof(struct __sanitizer_##CLASS, MEMBER) == \
+ offsetof(struct CLASS, MEMBER))
+
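+// Illustrative (hypothetical) invocations of the checks above; the real
+// compile-time checks are emitted in the corresponding .cc file:
+//   CHECK_TYPE_SIZE(dirent);
+//   CHECK_SIZE_AND_OFFSET(dirent, d_fileno);
+//   CHECK_STRUCT_SIZE_AND_OFFSET(sigaction, sa_flags);
+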
+#define SIGACTION_SYMNAME __sigaction14
+
+#endif // SANITIZER_NETBSD
+
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_platform_limits_netbsd.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sancov_flags.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sancov_flags.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sancov_flags.cc (revision 351984)
@@ -0,0 +1,58 @@
+//===-- sancov_flags.cc -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Sanitizer Coverage runtime flags.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sancov_flags.h"
+#include "sanitizer_flag_parser.h"
+#include "sanitizer_platform.h"
+
+SANITIZER_INTERFACE_WEAK_DEF(const char*, __sancov_default_options, void) {
+ return "";
+}
+
+using namespace __sanitizer;
+
+namespace __sancov {
+
+SancovFlags sancov_flags_dont_use_directly; // use via sancov_flags().
+
+void SancovFlags::SetDefaults() {
+#define SANCOV_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
+#include "sancov_flags.inc"
+#undef SANCOV_FLAG
+}
+
+static void RegisterSancovFlags(FlagParser *parser, SancovFlags *f) {
+#define SANCOV_FLAG(Type, Name, DefaultValue, Description) \
+ RegisterFlag(parser, #Name, Description, &f->Name);
+#include "sancov_flags.inc"
+#undef SANCOV_FLAG
+}
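+
+// For the `symbolize` flag declared in sancov_flags.inc, for example, the
+// X-macro above expands to roughly:
+//   RegisterFlag(parser, "symbolize",
+//                "If set, coverage information will be symbolized by "
+//                "sancov tool after dumping.", &f->symbolize);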
+
+static const char *MaybeCallSancovDefaultOptions() {
+ return (&__sancov_default_options) ? __sancov_default_options() : "";
+}
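+
+// The weak default above can be overridden by a strong definition in the
+// instrumented binary, e.g. (illustrative):
+//   extern "C" const char *__sancov_default_options() {
+//     return "symbolize=0";
+//   }
+// (The address check tolerates platforms where the weak symbol may be
+// unresolved.)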
+
+void InitializeSancovFlags() {
+ SancovFlags *f = sancov_flags();
+ f->SetDefaults();
+
+ FlagParser parser;
+ RegisterSancovFlags(&parser, f);
+
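+ // Precedence: compiled-in defaults, then __sancov_default_options(),
+ // then the SANCOV_OPTIONS environment variable; later sources win.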
+ parser.ParseString(MaybeCallSancovDefaultOptions());
+ parser.ParseStringFromEnv("SANCOV_OPTIONS");
+
+ ReportUnrecognizedFlags();
+ if (f->help) parser.PrintFlagDescriptions();
+}
+
+} // namespace __sancov
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sancov_flags.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sancov_flags.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sancov_flags.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sancov_flags.h (revision 351984)
@@ -0,0 +1,39 @@
+//===-- sancov_flags.h ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Sanitizer Coverage runtime flags.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANCOV_FLAGS_H
+#define SANCOV_FLAGS_H
+
+#include "sanitizer_flag_parser.h"
+#include "sanitizer_internal_defs.h"
+
+namespace __sancov {
+
+struct SancovFlags {
+#define SANCOV_FLAG(Type, Name, DefaultValue, Description) Type Name;
+#include "sancov_flags.inc"
+#undef SANCOV_FLAG
+
+ void SetDefaults();
+};
+
+extern SancovFlags sancov_flags_dont_use_directly;
+
+inline SancovFlags* sancov_flags() { return &sancov_flags_dont_use_directly; }
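+// Typical read access (illustrative): if (sancov_flags()->symbolize) ...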
+
+void InitializeSancovFlags();
+
+} // namespace __sancov
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE const char*
+__sancov_default_options();
+
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sancov_flags.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sancov_flags.inc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sancov_flags.inc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sancov_flags.inc (revision 351984)
@@ -0,0 +1,20 @@
+//===-- sancov_flags.inc ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Sanitizer Coverage runtime flags.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANCOV_FLAG
+#error "Defnine SANCOV_FLAG prior to including this file!"
+#endif
+
+SANCOV_FLAG(bool, symbolize, true,
+ "If set, converage information will be symbolized by sancov tool "
+ "after dumping.")
+
+SANCOV_FLAG(bool, help, false, "Print flags help.")
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sancov_flags.inc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_addrhashmap.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_addrhashmap.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_addrhashmap.h (revision 351984)
@@ -0,0 +1,353 @@
+//===-- sanitizer_addrhashmap.h ---------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Concurrent uptr->T hashmap.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_ADDRHASHMAP_H
+#define SANITIZER_ADDRHASHMAP_H
+
+#include "sanitizer_common.h"
+#include "sanitizer_mutex.h"
+#include "sanitizer_atomic.h"
+#include "sanitizer_allocator_internal.h"
+
+namespace __sanitizer {
+
+// Concurrent uptr->T hashmap.
+// T must be a POD type; kSize is preferably a prime but can be any number.
+// Usage example:
+//
+// typedef AddrHashMap<uptr, 11> Map;
+// Map m;
+// {
+// Map::Handle h(&m, addr);
+// use h.operator->() to access the data
+// if h.created() then the element was just created, and the current thread
+// has exclusive access to it
+// otherwise the current thread has only read access to the data
+// }
+// {
+// Map::Handle h(&m, addr, true);
+// this will remove the data from the map in Handle dtor
+// the current thread has exclusive access to the data
+// if !h.exists() then the element never existed
+// }
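+//
+// A concrete sketch of the first pattern (illustrative; `Payload` is a
+// hypothetical POD type):
+//
+//   typedef AddrHashMap<Payload, 11> Map;
+//   static Map m;
+//   void Record(uptr addr, const Payload &p) {
+//     Map::Handle h(&m, addr);
+//     if (h.created())
+//       *h = p;  // freshly created: the current thread has exclusive access
+//   }
+//   void Forget(uptr addr) {
+//     Map::Handle h(&m, addr, /*remove=*/true);
+//     // if h.exists(), the entry is erased in the Handle dtor
+//   }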
+template<typename T, uptr kSize>
+class AddrHashMap {
+ private:
+ struct Cell {
+ atomic_uintptr_t addr;
+ T val;
+ };
+
+ struct AddBucket {
+ uptr cap;
+ uptr size;
+ Cell cells[1]; // variable len
+ };
+
+ static const uptr kBucketSize = 3;
+
+ struct Bucket {
+ RWMutex mtx;
+ atomic_uintptr_t add;
+ Cell cells[kBucketSize];
+ };
+
+ public:
+ AddrHashMap();
+
+ class Handle {
+ public:
+ Handle(AddrHashMap<T, kSize> *map, uptr addr);
+ Handle(AddrHashMap<T, kSize> *map, uptr addr, bool remove);
+ Handle(AddrHashMap<T, kSize> *map, uptr addr, bool remove, bool create);
+
+ ~Handle();
+ T *operator->();
+ T &operator*();
+ const T &operator*() const;
+ bool created() const;
+ bool exists() const;
+
+ private:
+ friend AddrHashMap<T, kSize>;
+ AddrHashMap<T, kSize> *map_;
+ Bucket *bucket_;
+ Cell *cell_;
+ uptr addr_;
+ uptr addidx_;
+ bool created_;
+ bool remove_;
+ bool create_;
+ };
+
+ private:
+ friend class Handle;
+ Bucket *table_;
+
+ void acquire(Handle *h);
+ void release(Handle *h);
+ uptr calcHash(uptr addr);
+};
+
+template<typename T, uptr kSize>
+AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr) {
+ map_ = map;
+ addr_ = addr;
+ remove_ = false;
+ create_ = true;
+ map_->acquire(this);
+}
+
+template<typename T, uptr kSize>
+AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr,
+ bool remove) {
+ map_ = map;
+ addr_ = addr;
+ remove_ = remove;
+ create_ = true;
+ map_->acquire(this);
+}
+
+template<typename T, uptr kSize>
+AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr,
+ bool remove, bool create) {
+ map_ = map;
+ addr_ = addr;
+ remove_ = remove;
+ create_ = create;
+ map_->acquire(this);
+}
+
+template<typename T, uptr kSize>
+AddrHashMap<T, kSize>::Handle::~Handle() {
+ map_->release(this);
+}
+
+template <typename T, uptr kSize>
+T *AddrHashMap<T, kSize>::Handle::operator->() {
+ return &cell_->val;
+}
+
+template <typename T, uptr kSize>
+const T &AddrHashMap<T, kSize>::Handle::operator*() const {
+ return cell_->val;
+}
+
+template <typename T, uptr kSize>
+T &AddrHashMap<T, kSize>::Handle::operator*() {
+ return cell_->val;
+}
+
+template<typename T, uptr kSize>
+bool AddrHashMap<T, kSize>::Handle::created() const {
+ return created_;
+}
+
+template<typename T, uptr kSize>
+bool AddrHashMap<T, kSize>::Handle::exists() const {
+ return cell_ != nullptr;
+}
+
+template<typename T, uptr kSize>
+AddrHashMap<T, kSize>::AddrHashMap() {
+ table_ = (Bucket*)MmapOrDie(kSize * sizeof(table_[0]), "AddrHashMap");
+}
+
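+// Lookup protocol: readers first scan the embedded cells lock-free (the
+// acquire loads pair with the release store in release()); the overflow
+// "add" array is scanned under a read lock; insertion and removal take the
+// bucket write lock and re-check before mutating.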
+template<typename T, uptr kSize>
+void AddrHashMap<T, kSize>::acquire(Handle *h) {
+ uptr addr = h->addr_;
+ uptr hash = calcHash(addr);
+ Bucket *b = &table_[hash];
+
+ h->created_ = false;
+ h->addidx_ = -1U;
+ h->bucket_ = b;
+ h->cell_ = nullptr;
+
+ // If we want to remove the element, we need exclusive access to the bucket,
+ // so skip the lock-free phase.
+ if (h->remove_)
+ goto locked;
+
+ retry:
+ // First try to find an existing element w/o read mutex.
+ CHECK(!h->remove_);
+ // Check the embed cells.
+ for (uptr i = 0; i < kBucketSize; i++) {
+ Cell *c = &b->cells[i];
+ uptr addr1 = atomic_load(&c->addr, memory_order_acquire);
+ if (addr1 == addr) {
+ h->cell_ = c;
+ return;
+ }
+ }
+
+ // Check the add cells with read lock.
+ if (atomic_load(&b->add, memory_order_relaxed)) {
+ b->mtx.ReadLock();
+ AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);
+ for (uptr i = 0; i < add->size; i++) {
+ Cell *c = &add->cells[i];
+ uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
+ if (addr1 == addr) {
+ h->addidx_ = i;
+ h->cell_ = c;
+ return;
+ }
+ }
+ b->mtx.ReadUnlock();
+ }
+
+ locked:
+ // Re-check existence under write lock.
+ // Embed cells.
+ b->mtx.Lock();
+ for (uptr i = 0; i < kBucketSize; i++) {
+ Cell *c = &b->cells[i];
+ uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
+ if (addr1 == addr) {
+ if (h->remove_) {
+ h->cell_ = c;
+ return;
+ }
+ b->mtx.Unlock();
+ goto retry;
+ }
+ }
+
+ // Add cells.
+ AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);
+ if (add) {
+ for (uptr i = 0; i < add->size; i++) {
+ Cell *c = &add->cells[i];
+ uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
+ if (addr1 == addr) {
+ if (h->remove_) {
+ h->addidx_ = i;
+ h->cell_ = c;
+ return;
+ }
+ b->mtx.Unlock();
+ goto retry;
+ }
+ }
+ }
+
+ // The element does not exist; no need to create it if we only want to remove.
+ if (h->remove_ || !h->create_) {
+ b->mtx.Unlock();
+ return;
+ }
+
+ // Now try to create it under the mutex.
+ h->created_ = true;
+ // See if we have a free embed cell.
+ for (uptr i = 0; i < kBucketSize; i++) {
+ Cell *c = &b->cells[i];
+ uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
+ if (addr1 == 0) {
+ h->cell_ = c;
+ return;
+ }
+ }
+
+ // Store in the add cells.
+ if (!add) {
+ // Allocate a new add array.
+ const uptr kInitSize = 64;
+ add = (AddBucket*)InternalAlloc(kInitSize);
+ internal_memset(add, 0, kInitSize);
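+ // sizeof(*add) already includes one trailing Cell (cells[1] above), so the
+ // division yields one less than the true capacity; hence the +1.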
+ add->cap = (kInitSize - sizeof(*add)) / sizeof(add->cells[0]) + 1;
+ add->size = 0;
+ atomic_store(&b->add, (uptr)add, memory_order_relaxed);
+ }
+ if (add->size == add->cap) {
+ // Grow existing add array.
+ uptr oldsize = sizeof(*add) + (add->cap - 1) * sizeof(add->cells[0]);
+ uptr newsize = oldsize * 2;
+ AddBucket *add1 = (AddBucket*)InternalAlloc(newsize);
+ internal_memset(add1, 0, newsize);
+ add1->cap = (newsize - sizeof(*add)) / sizeof(add->cells[0]) + 1;
+ add1->size = add->size;
+ internal_memcpy(add1->cells, add->cells, add->size * sizeof(add->cells[0]));
+ InternalFree(add);
+ atomic_store(&b->add, (uptr)add1, memory_order_relaxed);
+ add = add1;
+ }
+ // Store.
+ uptr i = add->size++;
+ Cell *c = &add->cells[i];
+ CHECK_EQ(atomic_load(&c->addr, memory_order_relaxed), 0);
+ h->addidx_ = i;
+ h->cell_ = c;
+}
+
+template<typename T, uptr kSize>
+void AddrHashMap<T, kSize>::release(Handle *h) {
+ if (!h->cell_)
+ return;
+ Bucket *b = h->bucket_;
+ Cell *c = h->cell_;
+ uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
+ if (h->created_) {
+ // Denote completion of insertion.
+ CHECK_EQ(addr1, 0);
+ // After the following store, the element becomes available
+ // for lock-free reads.
+ atomic_store(&c->addr, h->addr_, memory_order_release);
+ b->mtx.Unlock();
+ } else if (h->remove_) {
+ // Denote that the cell is empty now.
+ CHECK_EQ(addr1, h->addr_);
+ atomic_store(&c->addr, 0, memory_order_release);
+ // See if we need to compact the bucket.
+ AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);
+ if (h->addidx_ == -1U) {
+ // Removed from embed array, move an add element into the freed cell.
+ if (add && add->size != 0) {
+ uptr last = --add->size;
+ Cell *c1 = &add->cells[last];
+ c->val = c1->val;
+ uptr addr1 = atomic_load(&c1->addr, memory_order_relaxed);
+ atomic_store(&c->addr, addr1, memory_order_release);
+ atomic_store(&c1->addr, 0, memory_order_release);
+ }
+ } else {
+ // Removed from add array, compact it.
+ uptr last = --add->size;
+ Cell *c1 = &add->cells[last];
+ if (c != c1) {
+ *c = *c1;
+ atomic_store(&c1->addr, 0, memory_order_relaxed);
+ }
+ }
+ if (add && add->size == 0) {
+ // FIXME(dvyukov): free add?
+ }
+ b->mtx.Unlock();
+ } else {
+ CHECK_EQ(addr1, h->addr_);
+ if (h->addidx_ != -1U)
+ b->mtx.ReadUnlock();
+ }
+}
+
+template<typename T, uptr kSize>
+uptr AddrHashMap<T, kSize>::calcHash(uptr addr) {
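+ // Cheap shift-add/shift-xor mixing spreads the address bits before the
+ // modulo reduction.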
+ addr += addr << 10;
+ addr ^= addr >> 6;
+ return addr % kSize;
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_ADDRHASHMAP_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_addrhashmap.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
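The AddrHashMap code above pairs a cheap add/xor-shift hash with a
find/release handle protocol. A minimal standalone sketch of the calcHash
mixing, assuming 64-bit addresses and a hypothetical bucket count kSize; it
is illustrative only and not part of the vendored sources:

#include <cstdint>
#include <cstdio>

// Hypothetical bucket count mirroring a typical AddrHashMap instantiation.
constexpr uint64_t kSize = 1 << 17;

// Same mixing as AddrHashMap::calcHash: smear low bits before the modulo.
uint64_t CalcHash(uint64_t addr) {
  addr += addr << 10;
  addr ^= addr >> 6;
  return addr % kSize;
}

int main() {
  // Nearby addresses should land in different buckets.
  std::printf("%llu\n", (unsigned long long)CalcHash(0x7f0000001000));
  std::printf("%llu\n", (unsigned long long)CalcHash(0x7f0000001008));
}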
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_allocator.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_allocator.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_allocator.cc (revision 351984)
@@ -0,0 +1,267 @@
+//===-- sanitizer_allocator.cc --------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries.
+// This allocator is used inside run-times.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_allocator.h"
+
+#include "sanitizer_allocator_checks.h"
+#include "sanitizer_allocator_internal.h"
+#include "sanitizer_atomic.h"
+#include "sanitizer_common.h"
+
+namespace __sanitizer {
+
+// Default allocator names.
+const char *PrimaryAllocatorName = "SizeClassAllocator";
+const char *SecondaryAllocatorName = "LargeMmapAllocator";
+
+// ThreadSanitizer for Go uses libc malloc/free.
+#if SANITIZER_GO || defined(SANITIZER_USE_MALLOC)
+# if SANITIZER_LINUX && !SANITIZER_ANDROID
+extern "C" void *__libc_malloc(uptr size);
+# if !SANITIZER_GO
+extern "C" void *__libc_memalign(uptr alignment, uptr size);
+# endif
+extern "C" void *__libc_realloc(void *ptr, uptr size);
+extern "C" void __libc_free(void *ptr);
+# else
+# include <stdlib.h>
+# define __libc_malloc malloc
+# if !SANITIZER_GO
+static void *__libc_memalign(uptr alignment, uptr size) {
+ void *p;
+ uptr error = posix_memalign(&p, alignment, size);
+ if (error) return nullptr;
+ return p;
+}
+# endif
+# define __libc_realloc realloc
+# define __libc_free free
+# endif
+
+static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache,
+ uptr alignment) {
+ (void)cache;
+#if !SANITIZER_GO
+ if (alignment == 0)
+ return __libc_malloc(size);
+ else
+ return __libc_memalign(alignment, size);
+#else
+  // Windows does not provide __libc_memalign/posix_memalign. It provides
+  // __aligned_malloc, but blocks allocated with it can't be passed to free;
+  // they need to be passed to __aligned_free, which the InternalAlloc
+  // interface does not account for. Alignment does not seem to be used
+  // anywhere in the runtime, so just call __libc_malloc for now.
+ DCHECK_EQ(alignment, 0);
+ return __libc_malloc(size);
+#endif
+}
+
+static void *RawInternalRealloc(void *ptr, uptr size,
+ InternalAllocatorCache *cache) {
+ (void)cache;
+ return __libc_realloc(ptr, size);
+}
+
+static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
+ (void)cache;
+ __libc_free(ptr);
+}
+
+InternalAllocator *internal_allocator() {
+ return 0;
+}
+
+#else // SANITIZER_GO || defined(SANITIZER_USE_MALLOC)
+
+static ALIGNED(64) char internal_alloc_placeholder[sizeof(InternalAllocator)];
+static atomic_uint8_t internal_allocator_initialized;
+static StaticSpinMutex internal_alloc_init_mu;
+
+static InternalAllocatorCache internal_allocator_cache;
+static StaticSpinMutex internal_allocator_cache_mu;
+
+InternalAllocator *internal_allocator() {
+ InternalAllocator *internal_allocator_instance =
+ reinterpret_cast<InternalAllocator *>(&internal_alloc_placeholder);
+ if (atomic_load(&internal_allocator_initialized, memory_order_acquire) == 0) {
+ SpinMutexLock l(&internal_alloc_init_mu);
+ if (atomic_load(&internal_allocator_initialized, memory_order_relaxed) ==
+ 0) {
+ internal_allocator_instance->Init(kReleaseToOSIntervalNever);
+ atomic_store(&internal_allocator_initialized, 1, memory_order_release);
+ }
+ }
+ return internal_allocator_instance;
+}
+
+static void *RawInternalAlloc(uptr size, InternalAllocatorCache *cache,
+ uptr alignment) {
+ if (alignment == 0) alignment = 8;
+ if (cache == 0) {
+ SpinMutexLock l(&internal_allocator_cache_mu);
+ return internal_allocator()->Allocate(&internal_allocator_cache, size,
+ alignment);
+ }
+ return internal_allocator()->Allocate(cache, size, alignment);
+}
+
+static void *RawInternalRealloc(void *ptr, uptr size,
+ InternalAllocatorCache *cache) {
+ uptr alignment = 8;
+ if (cache == 0) {
+ SpinMutexLock l(&internal_allocator_cache_mu);
+ return internal_allocator()->Reallocate(&internal_allocator_cache, ptr,
+ size, alignment);
+ }
+ return internal_allocator()->Reallocate(cache, ptr, size, alignment);
+}
+
+static void RawInternalFree(void *ptr, InternalAllocatorCache *cache) {
+ if (!cache) {
+ SpinMutexLock l(&internal_allocator_cache_mu);
+ return internal_allocator()->Deallocate(&internal_allocator_cache, ptr);
+ }
+ internal_allocator()->Deallocate(cache, ptr);
+}
+
+#endif // SANITIZER_GO || defined(SANITIZER_USE_MALLOC)
+
+const u64 kBlockMagic = 0x6A6CB03ABCEBC041ull;
+
+static void NORETURN ReportInternalAllocatorOutOfMemory(uptr requested_size) {
+ SetAllocatorOutOfMemory();
+ Report("FATAL: %s: internal allocator is out of memory trying to allocate "
+ "0x%zx bytes\n", SanitizerToolName, requested_size);
+ Die();
+}
+
+void *InternalAlloc(uptr size, InternalAllocatorCache *cache, uptr alignment) {
+ if (size + sizeof(u64) < size)
+ return nullptr;
+ void *p = RawInternalAlloc(size + sizeof(u64), cache, alignment);
+ if (UNLIKELY(!p))
+ ReportInternalAllocatorOutOfMemory(size + sizeof(u64));
+ ((u64*)p)[0] = kBlockMagic;
+ return (char*)p + sizeof(u64);
+}
+
+void *InternalRealloc(void *addr, uptr size, InternalAllocatorCache *cache) {
+ if (!addr)
+ return InternalAlloc(size, cache);
+ if (size + sizeof(u64) < size)
+ return nullptr;
+ addr = (char*)addr - sizeof(u64);
+ size = size + sizeof(u64);
+ CHECK_EQ(kBlockMagic, ((u64*)addr)[0]);
+ void *p = RawInternalRealloc(addr, size, cache);
+ if (UNLIKELY(!p))
+ ReportInternalAllocatorOutOfMemory(size);
+ return (char*)p + sizeof(u64);
+}
+
+void *InternalReallocArray(void *addr, uptr count, uptr size,
+ InternalAllocatorCache *cache) {
+ if (UNLIKELY(CheckForCallocOverflow(count, size))) {
+ Report(
+ "FATAL: %s: reallocarray parameters overflow: count * size (%zd * %zd) "
+ "cannot be represented in type size_t\n",
+ SanitizerToolName, count, size);
+ Die();
+ }
+ return InternalRealloc(addr, count * size, cache);
+}
+
+void *InternalCalloc(uptr count, uptr size, InternalAllocatorCache *cache) {
+ if (UNLIKELY(CheckForCallocOverflow(count, size))) {
+ Report("FATAL: %s: calloc parameters overflow: count * size (%zd * %zd) "
+ "cannot be represented in type size_t\n", SanitizerToolName, count,
+ size);
+ Die();
+ }
+ void *p = InternalAlloc(count * size, cache);
+ if (LIKELY(p))
+ internal_memset(p, 0, count * size);
+ return p;
+}
+
+void InternalFree(void *addr, InternalAllocatorCache *cache) {
+ if (!addr)
+ return;
+ addr = (char*)addr - sizeof(u64);
+ CHECK_EQ(kBlockMagic, ((u64*)addr)[0]);
+ ((u64*)addr)[0] = 0;
+ RawInternalFree(addr, cache);
+}
+
+// LowLevelAllocator
+constexpr uptr kLowLevelAllocatorDefaultAlignment = 8;
+static uptr low_level_alloc_min_alignment = kLowLevelAllocatorDefaultAlignment;
+static LowLevelAllocateCallback low_level_alloc_callback;
+
+void *LowLevelAllocator::Allocate(uptr size) {
+ // Align allocation size.
+ size = RoundUpTo(size, low_level_alloc_min_alignment);
+ if (allocated_end_ - allocated_current_ < (sptr)size) {
+ uptr size_to_allocate = Max(size, GetPageSizeCached());
+ allocated_current_ =
+ (char*)MmapOrDie(size_to_allocate, __func__);
+ allocated_end_ = allocated_current_ + size_to_allocate;
+ if (low_level_alloc_callback) {
+ low_level_alloc_callback((uptr)allocated_current_,
+ size_to_allocate);
+ }
+ }
+ CHECK(allocated_end_ - allocated_current_ >= (sptr)size);
+ void *res = allocated_current_;
+ allocated_current_ += size;
+ return res;
+}
+
+void SetLowLevelAllocateMinAlignment(uptr alignment) {
+ CHECK(IsPowerOfTwo(alignment));
+ low_level_alloc_min_alignment = Max(alignment, low_level_alloc_min_alignment);
+}
+
+void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback) {
+ low_level_alloc_callback = callback;
+}
+
+// Support for handling allocator OOM and other errors.
+
+static atomic_uint8_t allocator_out_of_memory = {0};
+static atomic_uint8_t allocator_may_return_null = {0};
+
+bool IsAllocatorOutOfMemory() {
+ return atomic_load_relaxed(&allocator_out_of_memory);
+}
+
+void SetAllocatorOutOfMemory() {
+ atomic_store_relaxed(&allocator_out_of_memory, 1);
+}
+
+bool AllocatorMayReturnNull() {
+ return atomic_load(&allocator_may_return_null, memory_order_relaxed);
+}
+
+void SetAllocatorMayReturnNull(bool may_return_null) {
+ atomic_store(&allocator_may_return_null, may_return_null,
+ memory_order_relaxed);
+}
+
+void PrintHintAllocatorCannotReturnNull() {
+ Report("HINT: if you don't care about these errors you may set "
+ "allocator_may_return_null=1\n");
+}
+
+} // namespace __sanitizer
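InternalAlloc/InternalFree above wrap the raw allocator with an 8-byte magic
header (kBlockMagic) that is stamped on allocation and verified and zeroed on
free, so frees of foreign or already-freed pointers trip a CHECK. A
standalone sketch of the same header technique over plain malloc/free;
GuardedAlloc and GuardedFree are hypothetical names, not sanitizer APIs:

#include <cassert>
#include <cstdint>
#include <cstdlib>

constexpr uint64_t kMagic = 0x6A6CB03ABCEBC041ull;

void *GuardedAlloc(std::size_t size) {
  if (size + sizeof(uint64_t) < size) return nullptr;  // size overflow
  void *p = std::malloc(size + sizeof(uint64_t));
  if (!p) return nullptr;
  ((uint64_t *)p)[0] = kMagic;          // stamp the header
  return (char *)p + sizeof(uint64_t);  // hand out the user portion
}

void GuardedFree(void *addr) {
  if (!addr) return;
  addr = (char *)addr - sizeof(uint64_t);
  assert(((uint64_t *)addr)[0] == kMagic);  // catch bad or double free
  ((uint64_t *)addr)[0] = 0;                // poison so a second free trips
  std::free(addr);
}

int main() {
  void *p = GuardedAlloc(32);
  GuardedFree(p);
}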
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_allocator.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_allocator.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_allocator.h (revision 351984)
@@ -0,0 +1,81 @@
+//===-- sanitizer_allocator.h -----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Specialized memory allocator for ThreadSanitizer, MemorySanitizer, etc.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_ALLOCATOR_H
+#define SANITIZER_ALLOCATOR_H
+
+#include "sanitizer_common.h"
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_lfstack.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_list.h"
+#include "sanitizer_local_address_space_view.h"
+#include "sanitizer_mutex.h"
+#include "sanitizer_procmaps.h"
+#include "sanitizer_type_traits.h"
+
+namespace __sanitizer {
+
+// Allows the tools to name their allocations appropriately.
+extern const char *PrimaryAllocatorName;
+extern const char *SecondaryAllocatorName;
+
+// Since flags are immutable while allocator behavior can be changed at
+// runtime (unit tests and ASan on Android are two examples), the
+// allocator_may_return_null flag value is cached here and can be altered
+// later.
+bool AllocatorMayReturnNull();
+void SetAllocatorMayReturnNull(bool may_return_null);
+
+// Returns true if allocator detected OOM condition. Can be used to avoid memory
+// hungry operations.
+bool IsAllocatorOutOfMemory();
+// Should be called by a particular allocator when OOM is detected.
+void SetAllocatorOutOfMemory();
+
+void PrintHintAllocatorCannotReturnNull();
+
+// Allocators call these callbacks on mmap/munmap.
+struct NoOpMapUnmapCallback {
+ void OnMap(uptr p, uptr size) const { }
+ void OnUnmap(uptr p, uptr size) const { }
+};
+
+// Callback type for iterating over chunks.
+typedef void (*ForEachChunkCallback)(uptr chunk, void *arg);
+
+INLINE u32 Rand(u32 *state) { // ANSI C linear congruential PRNG.
+ return (*state = *state * 1103515245 + 12345) >> 16;
+}
+
+INLINE u32 RandN(u32 *state, u32 n) { return Rand(state) % n; } // [0, n)
+
+template<typename T>
+INLINE void RandomShuffle(T *a, u32 n, u32 *rand_state) {
+ if (n <= 1) return;
+ u32 state = *rand_state;
+ for (u32 i = n - 1; i > 0; i--)
+ Swap(a[i], a[RandN(&state, i + 1)]);
+ *rand_state = state;
+}
+
+#include "sanitizer_allocator_size_class_map.h"
+#include "sanitizer_allocator_stats.h"
+#include "sanitizer_allocator_primary64.h"
+#include "sanitizer_allocator_bytemap.h"
+#include "sanitizer_allocator_primary32.h"
+#include "sanitizer_allocator_local_cache.h"
+#include "sanitizer_allocator_secondary.h"
+#include "sanitizer_allocator_combined.h"
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_ALLOCATOR_H
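Rand above is the classic ANSI C linear congruential generator
(state = state * 1103515245 + 12345); shifting the result down by 16 discards
the weaker low bits, and RandomShuffle is a Fisher-Yates pass driven by it. A
standalone sketch with the same constants:

#include <cstdint>
#include <cstdio>
#include <utility>

uint32_t Rand(uint32_t *state) {  // ANSI C linear congruential PRNG
  return (*state = *state * 1103515245 + 12345) >> 16;
}

uint32_t RandN(uint32_t *state, uint32_t n) { return Rand(state) % n; }

template <typename T>
void RandomShuffle(T *a, uint32_t n, uint32_t *rand_state) {
  if (n <= 1) return;
  uint32_t state = *rand_state;
  for (uint32_t i = n - 1; i > 0; i--)  // Fisher-Yates, back to front
    std::swap(a[i], a[RandN(&state, i + 1)]);
  *rand_state = state;
}

int main() {
  int a[8] = {0, 1, 2, 3, 4, 5, 6, 7};
  uint32_t seed = 42;
  RandomShuffle(a, 8, &seed);
  for (int v : a) std::printf("%d ", v);
  std::printf("\n");
}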
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_allocator_bytemap.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_allocator_bytemap.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_allocator_bytemap.h (revision 351984)
@@ -0,0 +1,107 @@
+//===-- sanitizer_allocator_bytemap.h ---------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Part of the Sanitizer Allocator.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_ALLOCATOR_H
+#error This file must be included inside sanitizer_allocator.h
+#endif
+
+// Maps integers in range [0, kSize) to u8 values.
+template <u64 kSize, typename AddressSpaceViewTy = LocalAddressSpaceView>
+class FlatByteMap {
+ public:
+ using AddressSpaceView = AddressSpaceViewTy;
+ void Init() {
+ internal_memset(map_, 0, sizeof(map_));
+ }
+
+ void set(uptr idx, u8 val) {
+ CHECK_LT(idx, kSize);
+ CHECK_EQ(0U, map_[idx]);
+ map_[idx] = val;
+ }
+ u8 operator[] (uptr idx) {
+ CHECK_LT(idx, kSize);
+ // FIXME: CHECK may be too expensive here.
+ return map_[idx];
+ }
+ private:
+ u8 map_[kSize];
+};
+
+// TwoLevelByteMap maps integers in range [0, kSize1*kSize2) to u8 values.
+// It is implemented as a two-dimensional array: array of kSize1 pointers
+// to kSize2-byte arrays. The secondary arrays are mmaped on demand.
+// Each value is initially zero and can be set to something else only once.
+// Setting and getting values from multiple threads is safe w/o extra locking.
+template <u64 kSize1, u64 kSize2,
+ typename AddressSpaceViewTy = LocalAddressSpaceView,
+ class MapUnmapCallback = NoOpMapUnmapCallback>
+class TwoLevelByteMap {
+ public:
+ using AddressSpaceView = AddressSpaceViewTy;
+ void Init() {
+ internal_memset(map1_, 0, sizeof(map1_));
+ mu_.Init();
+ }
+
+ void TestOnlyUnmap() {
+ for (uptr i = 0; i < kSize1; i++) {
+ u8 *p = Get(i);
+ if (!p) continue;
+ MapUnmapCallback().OnUnmap(reinterpret_cast<uptr>(p), kSize2);
+ UnmapOrDie(p, kSize2);
+ }
+ }
+
+ uptr size() const { return kSize1 * kSize2; }
+ uptr size1() const { return kSize1; }
+ uptr size2() const { return kSize2; }
+
+ void set(uptr idx, u8 val) {
+ CHECK_LT(idx, kSize1 * kSize2);
+ u8 *map2 = GetOrCreate(idx / kSize2);
+ CHECK_EQ(0U, map2[idx % kSize2]);
+ map2[idx % kSize2] = val;
+ }
+
+ u8 operator[] (uptr idx) const {
+ CHECK_LT(idx, kSize1 * kSize2);
+ u8 *map2 = Get(idx / kSize2);
+ if (!map2) return 0;
+ auto value_ptr = AddressSpaceView::Load(&map2[idx % kSize2]);
+ return *value_ptr;
+ }
+
+ private:
+ u8 *Get(uptr idx) const {
+ CHECK_LT(idx, kSize1);
+ return reinterpret_cast<u8 *>(
+ atomic_load(&map1_[idx], memory_order_acquire));
+ }
+
+ u8 *GetOrCreate(uptr idx) {
+ u8 *res = Get(idx);
+ if (!res) {
+ SpinMutexLock l(&mu_);
+ if (!(res = Get(idx))) {
+ res = (u8*)MmapOrDie(kSize2, "TwoLevelByteMap");
+ MapUnmapCallback().OnMap(reinterpret_cast<uptr>(res), kSize2);
+ atomic_store(&map1_[idx], reinterpret_cast<uptr>(res),
+ memory_order_release);
+ }
+ }
+ return res;
+ }
+
+ atomic_uintptr_t map1_[kSize1];
+ StaticSpinMutex mu_;
+};
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_allocator_bytemap.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
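TwoLevelByteMap above creates its second-level arrays lazily under a spin
mutex and publishes them with a release store, so readers take a lock-free
acquire-load path once a leaf exists. A standalone sketch of that
double-checked pattern using std::atomic, std::mutex, and heap arrays in
place of mmap; the class name and sizes are hypothetical:

#include <atomic>
#include <cstdint>
#include <mutex>

template <uint64_t kSize1, uint64_t kSize2>
class TwoLevelByteMapSketch {
 public:
  void set(uint64_t idx, uint8_t val) {
    GetOrCreate(idx / kSize2)[idx % kSize2] = val;
  }
  uint8_t get(uint64_t idx) const {
    // Acquire pairs with the release store in GetOrCreate.
    uint8_t *leaf = map1_[idx / kSize2].load(std::memory_order_acquire);
    return leaf ? leaf[idx % kSize2] : 0;
  }

 private:
  uint8_t *GetOrCreate(uint64_t i) {
    uint8_t *leaf = map1_[i].load(std::memory_order_acquire);
    if (!leaf) {
      std::lock_guard<std::mutex> l(mu_);  // serialize creators
      leaf = map1_[i].load(std::memory_order_relaxed);
      if (!leaf) {
        leaf = new uint8_t[kSize2]();      // zero-initialized leaf
        map1_[i].store(leaf, std::memory_order_release);
      }
    }
    return leaf;
  }

  std::atomic<uint8_t *> map1_[kSize1] = {};
  std::mutex mu_;
};

// Usage: a 16 x 4096 map, i.e. indices [0, 65536).
TwoLevelByteMapSketch<16, 4096> g_map;

int main() {
  g_map.set(12345, 7);
  return g_map.get(12345) == 7 ? 0 : 1;
}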
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_allocator_checks.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_allocator_checks.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_allocator_checks.cc (revision 351984)
@@ -0,0 +1,22 @@
+//===-- sanitizer_allocator_checks.cc ---------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Various checks shared between ThreadSanitizer, MemorySanitizer, etc. memory
+// allocators.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_errno.h"
+
+namespace __sanitizer {
+
+void SetErrnoToENOMEM() {
+ errno = errno_ENOMEM;
+}
+
+} // namespace __sanitizer
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_allocator_checks.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_allocator_checks.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_allocator_checks.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_allocator_checks.h (revision 351984)
@@ -0,0 +1,76 @@
+//===-- sanitizer_allocator_checks.h ----------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Various checks shared between ThreadSanitizer, MemorySanitizer, etc. memory
+// allocators.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_ALLOCATOR_CHECKS_H
+#define SANITIZER_ALLOCATOR_CHECKS_H
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_common.h"
+#include "sanitizer_platform.h"
+
+namespace __sanitizer {
+
+// The following is defined in a separate compilation unit to avoid pulling in
+// sanitizer_errno.h in this header, which leads to conflicts when other system
+// headers include errno.h. This is usually the result of an unlikely event,
+// and as such we do not care as much about having it inlined.
+void SetErrnoToENOMEM();
+
+// A common errno setting logic shared by almost all sanitizer allocator APIs.
+INLINE void *SetErrnoOnNull(void *ptr) {
+ if (UNLIKELY(!ptr))
+ SetErrnoToENOMEM();
+ return ptr;
+}
+
+// In case of the check failure, the caller of the following Check... functions
+// should "return POLICY::OnBadRequest();" where POLICY is the current allocator
+// failure handling policy.
+
+// Checks aligned_alloc() parameters, verifies that the alignment is a power of
+// two and that the size is a multiple of alignment for POSIX implementation,
+// and a bit relaxed requirement for non-POSIX ones, that the size is a multiple
+// of alignment.
+INLINE bool CheckAlignedAllocAlignmentAndSize(uptr alignment, uptr size) {
+#if SANITIZER_POSIX
+ return alignment != 0 && IsPowerOfTwo(alignment) &&
+ (size & (alignment - 1)) == 0;
+#else
+ return alignment != 0 && size % alignment == 0;
+#endif
+}
+
+// Checks posix_memalign() parameters, verifies that alignment is a power of two
+// and a multiple of sizeof(void *).
+INLINE bool CheckPosixMemalignAlignment(uptr alignment) {
+ return alignment != 0 && IsPowerOfTwo(alignment) &&
+ (alignment % sizeof(void *)) == 0; // NOLINT
+}
+
+// Returns true if calloc(size, n) call overflows on size*n calculation.
+INLINE bool CheckForCallocOverflow(uptr size, uptr n) {
+ if (!size)
+ return false;
+ uptr max = (uptr)-1L;
+ return (max / size) < n;
+}
+
+// Returns true if the size passed to pvalloc overflows when rounded to the next
+// multiple of page_size.
+INLINE bool CheckForPvallocOverflow(uptr size, uptr page_size) {
+ return RoundUpTo(size, page_size) < size;
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_ALLOCATOR_CHECKS_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_allocator_checks.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
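CheckForCallocOverflow above tests (max / size) < n rather than computing
size * n, so the check itself cannot wrap, and the aligned_alloc check relies
on alignment being a power of two so that (size & (alignment - 1)) tests
divisibility. A standalone illustration with hypothetical values:

#include <cstdint>
#include <cstdio>

// Mirrors CheckForCallocOverflow: the division form cannot itself wrap.
bool CheckForCallocOverflow(uint64_t size, uint64_t n) {
  if (!size) return false;  // 0 * n never overflows
  uint64_t max = UINT64_MAX;
  return (max / size) < n;  // true => size * n would wrap
}

// Mirrors the POSIX branch of CheckAlignedAllocAlignmentAndSize.
bool IsPowerOfTwo(uint64_t x) { return x && (x & (x - 1)) == 0; }
bool CheckAlignedAlloc(uint64_t alignment, uint64_t size) {
  return alignment != 0 && IsPowerOfTwo(alignment) &&
         (size & (alignment - 1)) == 0;
}

int main() {
  std::printf("%d\n", CheckForCallocOverflow(8, 10));                  // 0
  std::printf("%d\n", CheckForCallocOverflow(1ull << 33, 1ull << 33)); // 1
  std::printf("%d\n", CheckAlignedAlloc(16, 64));                      // 1
  std::printf("%d\n", CheckAlignedAlloc(24, 48));                      // 0
}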
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_allocator_combined.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_allocator_combined.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_allocator_combined.h (revision 351984)
@@ -0,0 +1,201 @@
+//===-- sanitizer_allocator_combined.h --------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Part of the Sanitizer Allocator.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_ALLOCATOR_H
+#error This file must be included inside sanitizer_allocator.h
+#endif
+
+// This class implements a complete memory allocator by using two
+// internal allocators:
+// PrimaryAllocator is efficient, but may not allocate some sizes (alignments).
+// When allocating 2^x bytes it should return 2^x aligned chunk.
+// PrimaryAllocator is used via a local AllocatorCache.
+// SecondaryAllocator can allocate anything, but is not efficient.
+template <class PrimaryAllocator,
+ class LargeMmapAllocatorPtrArray = DefaultLargeMmapAllocatorPtrArray>
+class CombinedAllocator {
+ public:
+ using AllocatorCache = typename PrimaryAllocator::AllocatorCache;
+ using SecondaryAllocator =
+ LargeMmapAllocator<typename PrimaryAllocator::MapUnmapCallback,
+ LargeMmapAllocatorPtrArray,
+ typename PrimaryAllocator::AddressSpaceView>;
+
+ void InitLinkerInitialized(s32 release_to_os_interval_ms) {
+ stats_.InitLinkerInitialized();
+ primary_.Init(release_to_os_interval_ms);
+ secondary_.InitLinkerInitialized();
+ }
+
+ void Init(s32 release_to_os_interval_ms) {
+ stats_.Init();
+ primary_.Init(release_to_os_interval_ms);
+ secondary_.Init();
+ }
+
+ void *Allocate(AllocatorCache *cache, uptr size, uptr alignment) {
+ // Returning 0 on malloc(0) may break a lot of code.
+ if (size == 0)
+ size = 1;
+ if (size + alignment < size) {
+ Report("WARNING: %s: CombinedAllocator allocation overflow: "
+ "0x%zx bytes with 0x%zx alignment requested\n",
+ SanitizerToolName, size, alignment);
+ return nullptr;
+ }
+ uptr original_size = size;
+ // If alignment requirements are to be fulfilled by the frontend allocator
+ // rather than by the primary or secondary, passing an alignment lower than
+ // or equal to 8 will prevent any further rounding up, as well as the later
+ // alignment check.
+ if (alignment > 8)
+ size = RoundUpTo(size, alignment);
+ // The primary allocator should return a 2^x aligned allocation when
+ // requested 2^x bytes, hence using the rounded up 'size' when being
+ // serviced by the primary (this is no longer true when the primary is
+ // using a non-fixed base address). The secondary takes care of the
+ // alignment without such requirement, and allocating 'size' would use
+ // extraneous memory, so we employ 'original_size'.
+ void *res;
+ if (primary_.CanAllocate(size, alignment))
+ res = cache->Allocate(&primary_, primary_.ClassID(size));
+ else
+ res = secondary_.Allocate(&stats_, original_size, alignment);
+ if (alignment > 8)
+ CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
+ return res;
+ }
+
+ s32 ReleaseToOSIntervalMs() const {
+ return primary_.ReleaseToOSIntervalMs();
+ }
+
+ void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
+ primary_.SetReleaseToOSIntervalMs(release_to_os_interval_ms);
+ }
+
+ void ForceReleaseToOS() {
+ primary_.ForceReleaseToOS();
+ }
+
+ void Deallocate(AllocatorCache *cache, void *p) {
+ if (!p) return;
+ if (primary_.PointerIsMine(p))
+ cache->Deallocate(&primary_, primary_.GetSizeClass(p), p);
+ else
+ secondary_.Deallocate(&stats_, p);
+ }
+
+ void *Reallocate(AllocatorCache *cache, void *p, uptr new_size,
+ uptr alignment) {
+ if (!p)
+ return Allocate(cache, new_size, alignment);
+ if (!new_size) {
+ Deallocate(cache, p);
+ return nullptr;
+ }
+ CHECK(PointerIsMine(p));
+ uptr old_size = GetActuallyAllocatedSize(p);
+ uptr memcpy_size = Min(new_size, old_size);
+ void *new_p = Allocate(cache, new_size, alignment);
+ if (new_p)
+ internal_memcpy(new_p, p, memcpy_size);
+ Deallocate(cache, p);
+ return new_p;
+ }
+
+ bool PointerIsMine(void *p) {
+ if (primary_.PointerIsMine(p))
+ return true;
+ return secondary_.PointerIsMine(p);
+ }
+
+ bool FromPrimary(void *p) {
+ return primary_.PointerIsMine(p);
+ }
+
+ void *GetMetaData(const void *p) {
+ if (primary_.PointerIsMine(p))
+ return primary_.GetMetaData(p);
+ return secondary_.GetMetaData(p);
+ }
+
+ void *GetBlockBegin(const void *p) {
+ if (primary_.PointerIsMine(p))
+ return primary_.GetBlockBegin(p);
+ return secondary_.GetBlockBegin(p);
+ }
+
+ // This function does the same as GetBlockBegin, but is much faster.
+ // Must be called with the allocator locked.
+ void *GetBlockBeginFastLocked(void *p) {
+ if (primary_.PointerIsMine(p))
+ return primary_.GetBlockBegin(p);
+ return secondary_.GetBlockBeginFastLocked(p);
+ }
+
+ uptr GetActuallyAllocatedSize(void *p) {
+ if (primary_.PointerIsMine(p))
+ return primary_.GetActuallyAllocatedSize(p);
+ return secondary_.GetActuallyAllocatedSize(p);
+ }
+
+ uptr TotalMemoryUsed() {
+ return primary_.TotalMemoryUsed() + secondary_.TotalMemoryUsed();
+ }
+
+ void TestOnlyUnmap() { primary_.TestOnlyUnmap(); }
+
+ void InitCache(AllocatorCache *cache) {
+ cache->Init(&stats_);
+ }
+
+ void DestroyCache(AllocatorCache *cache) {
+ cache->Destroy(&primary_, &stats_);
+ }
+
+ void SwallowCache(AllocatorCache *cache) {
+ cache->Drain(&primary_);
+ }
+
+ void GetStats(AllocatorStatCounters s) const {
+ stats_.Get(s);
+ }
+
+ void PrintStats() {
+ primary_.PrintStats();
+ secondary_.PrintStats();
+ }
+
+ // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
+ // introspection API.
+ void ForceLock() {
+ primary_.ForceLock();
+ secondary_.ForceLock();
+ }
+
+ void ForceUnlock() {
+ secondary_.ForceUnlock();
+ primary_.ForceUnlock();
+ }
+
+ // Iterate over all existing chunks.
+ // The allocator must be locked when calling this function.
+ void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+ primary_.ForEachChunk(callback, arg);
+ secondary_.ForEachChunk(callback, arg);
+ }
+
+ private:
+ PrimaryAllocator primary_;
+ SecondaryAllocator secondary_;
+ AllocatorGlobalStats stats_;
+};
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_allocator_combined.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
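CombinedAllocator::Allocate above applies a fixed decision sequence: treat
size 0 as 1, reject requests where size + alignment wraps, round size up when
alignment exceeds 8, then prefer the primary allocator and fall back to the
secondary. A reduced standalone sketch of just that routing logic;
kPrimaryMaxSize and Route are hypothetical stand-ins:

#include <cstdint>
#include <cstdio>

constexpr uint64_t kPrimaryMaxSize = 1 << 17;  // hypothetical primary limit

uint64_t RoundUpTo(uint64_t size, uint64_t boundary) {
  return (size + boundary - 1) & ~(boundary - 1);
}

// Returns which allocator would service the request, mirroring the checks
// in CombinedAllocator::Allocate.
const char *Route(uint64_t size, uint64_t alignment) {
  if (size == 0) size = 1;                       // malloc(0) must stay usable
  if (size + alignment < size) return "reject";  // request overflow
  if (alignment > 8) size = RoundUpTo(size, alignment);
  if (size <= kPrimaryMaxSize && alignment <= kPrimaryMaxSize)
    return "primary";                            // fast size-class path
  return "secondary";                            // large mmap path
}

int main() {
  std::printf("%s\n", Route(64, 0));        // primary
  std::printf("%s\n", Route(1 << 20, 0));   // secondary
  std::printf("%s\n", Route(~0ull, 4096));  // reject
}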
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_allocator_interface.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_allocator_interface.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_allocator_interface.h (revision 351984)
@@ -0,0 +1,47 @@
+//===-- sanitizer_allocator_interface.h -------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Re-declaration of functions from public sanitizer allocator interface.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_ALLOCATOR_INTERFACE_H
+#define SANITIZER_ALLOCATOR_INTERFACE_H
+
+#include "sanitizer_internal_defs.h"
+
+using __sanitizer::uptr;
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_estimated_allocated_size(uptr size);
+SANITIZER_INTERFACE_ATTRIBUTE int __sanitizer_get_ownership(const void *p);
+SANITIZER_INTERFACE_ATTRIBUTE uptr
+__sanitizer_get_allocated_size(const void *p);
+SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_current_allocated_bytes();
+SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_heap_size();
+SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_free_bytes();
+SANITIZER_INTERFACE_ATTRIBUTE uptr __sanitizer_get_unmapped_bytes();
+
+SANITIZER_INTERFACE_ATTRIBUTE int __sanitizer_install_malloc_and_free_hooks(
+ void (*malloc_hook)(const void *, uptr),
+ void (*free_hook)(const void *));
+
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_malloc_hook(void *ptr, uptr size);
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_free_hook(void *ptr);
+
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_purge_allocator();
+
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+__sanitizer_print_memory_profile(uptr top_percent, uptr max_number_of_contexts);
+} // extern "C"
+
+#endif // SANITIZER_ALLOCATOR_INTERFACE_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_allocator_interface.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
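The hook installers declared above belong to the public sanitizer interface.
A minimal usage sketch, assuming the public <sanitizer/allocator_interface.h>
header (whose hook signatures take const volatile pointers) and a program
built with a sanitizer that provides these symbols:

#include <cstdio>
#include <sanitizer/allocator_interface.h>

static void OnMalloc(const volatile void *ptr, size_t size) {
  std::fprintf(stderr, "alloc %p (%zu bytes)\n", (void *)ptr, size);
}

static void OnFree(const volatile void *ptr) {
  std::fprintf(stderr, "free  %p\n", (void *)ptr);
}

int main() {
  __sanitizer_install_malloc_and_free_hooks(OnMalloc, OnFree);
  delete[] new char[64];  // both hooks fire for this pair under ASan etc.
}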
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_allocator_internal.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_allocator_internal.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_allocator_internal.h (revision 351984)
@@ -0,0 +1,55 @@
+//===-- sanitizer_allocator_internal.h --------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This allocator is used inside run-times.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_ALLOCATOR_INTERNAL_H
+#define SANITIZER_ALLOCATOR_INTERNAL_H
+
+#include "sanitizer_allocator.h"
+#include "sanitizer_internal_defs.h"
+
+namespace __sanitizer {
+
+// FIXME: Check if we may use even more compact size class map for internal
+// purposes.
+typedef CompactSizeClassMap InternalSizeClassMap;
+
+struct AP32 {
+ static const uptr kSpaceBeg = 0;
+ static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
+ static const uptr kMetadataSize = 0;
+ typedef InternalSizeClassMap SizeClassMap;
+ static const uptr kRegionSizeLog = 20;
+ using AddressSpaceView = LocalAddressSpaceView;
+ typedef NoOpMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+};
+typedef SizeClassAllocator32<AP32> PrimaryInternalAllocator;
+
+typedef CombinedAllocator<PrimaryInternalAllocator,
+ LargeMmapAllocatorPtrArrayStatic>
+ InternalAllocator;
+typedef InternalAllocator::AllocatorCache InternalAllocatorCache;
+
+void *InternalAlloc(uptr size, InternalAllocatorCache *cache = nullptr,
+ uptr alignment = 0);
+void *InternalRealloc(void *p, uptr size,
+ InternalAllocatorCache *cache = nullptr);
+void *InternalReallocArray(void *p, uptr count, uptr size,
+ InternalAllocatorCache *cache = nullptr);
+void *InternalCalloc(uptr count, uptr size,
+ InternalAllocatorCache *cache = nullptr);
+void InternalFree(void *p, InternalAllocatorCache *cache = nullptr);
+InternalAllocator *internal_allocator();
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_ALLOCATOR_INTERNAL_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_allocator_internal.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_allocator_local_cache.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_allocator_local_cache.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_allocator_local_cache.h (revision 351984)
@@ -0,0 +1,264 @@
+//===-- sanitizer_allocator_local_cache.h -----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Part of the Sanitizer Allocator.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_ALLOCATOR_H
+#error This file must be included inside sanitizer_allocator.h
+#endif
+
+// Cache used by SizeClassAllocator64.
+template <class SizeClassAllocator>
+struct SizeClassAllocator64LocalCache {
+ typedef SizeClassAllocator Allocator;
+
+ void Init(AllocatorGlobalStats *s) {
+ stats_.Init();
+ if (s)
+ s->Register(&stats_);
+ }
+
+ void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
+ Drain(allocator);
+ if (s)
+ s->Unregister(&stats_);
+ }
+
+ void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
+ CHECK_NE(class_id, 0UL);
+ CHECK_LT(class_id, kNumClasses);
+ PerClass *c = &per_class_[class_id];
+ if (UNLIKELY(c->count == 0)) {
+ if (UNLIKELY(!Refill(c, allocator, class_id)))
+ return nullptr;
+ DCHECK_GT(c->count, 0);
+ }
+ CompactPtrT chunk = c->chunks[--c->count];
+ stats_.Add(AllocatorStatAllocated, c->class_size);
+ return reinterpret_cast<void *>(allocator->CompactPtrToPointer(
+ allocator->GetRegionBeginBySizeClass(class_id), chunk));
+ }
+
+ void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
+ CHECK_NE(class_id, 0UL);
+ CHECK_LT(class_id, kNumClasses);
+ // If the first allocator call on a new thread is a deallocation, then
+ // max_count will be zero, leading to check failure.
+ PerClass *c = &per_class_[class_id];
+ InitCache(c);
+ if (UNLIKELY(c->count == c->max_count))
+ Drain(c, allocator, class_id, c->max_count / 2);
+ CompactPtrT chunk = allocator->PointerToCompactPtr(
+ allocator->GetRegionBeginBySizeClass(class_id),
+ reinterpret_cast<uptr>(p));
+ c->chunks[c->count++] = chunk;
+ stats_.Sub(AllocatorStatAllocated, c->class_size);
+ }
+
+ void Drain(SizeClassAllocator *allocator) {
+ for (uptr i = 1; i < kNumClasses; i++) {
+ PerClass *c = &per_class_[i];
+ while (c->count > 0)
+ Drain(c, allocator, i, c->count);
+ }
+ }
+
+ private:
+ typedef typename Allocator::SizeClassMapT SizeClassMap;
+ static const uptr kNumClasses = SizeClassMap::kNumClasses;
+ typedef typename Allocator::CompactPtrT CompactPtrT;
+
+ struct PerClass {
+ u32 count;
+ u32 max_count;
+ uptr class_size;
+ CompactPtrT chunks[2 * SizeClassMap::kMaxNumCachedHint];
+ };
+ PerClass per_class_[kNumClasses];
+ AllocatorStats stats_;
+
+ void InitCache(PerClass *c) {
+ if (LIKELY(c->max_count))
+ return;
+ for (uptr i = 1; i < kNumClasses; i++) {
+ PerClass *c = &per_class_[i];
+ const uptr size = Allocator::ClassIdToSize(i);
+ c->max_count = 2 * SizeClassMap::MaxCachedHint(size);
+ c->class_size = size;
+ }
+ DCHECK_NE(c->max_count, 0UL);
+ }
+
+ NOINLINE bool Refill(PerClass *c, SizeClassAllocator *allocator,
+ uptr class_id) {
+ InitCache(c);
+ const uptr num_requested_chunks = c->max_count / 2;
+ if (UNLIKELY(!allocator->GetFromAllocator(&stats_, class_id, c->chunks,
+ num_requested_chunks)))
+ return false;
+ c->count = num_requested_chunks;
+ return true;
+ }
+
+ NOINLINE void Drain(PerClass *c, SizeClassAllocator *allocator, uptr class_id,
+ uptr count) {
+ CHECK_GE(c->count, count);
+ const uptr first_idx_to_drain = c->count - count;
+ c->count -= count;
+ allocator->ReturnToAllocator(&stats_, class_id,
+ &c->chunks[first_idx_to_drain], count);
+ }
+};
+
+// Cache used by SizeClassAllocator32.
+template <class SizeClassAllocator>
+struct SizeClassAllocator32LocalCache {
+ typedef SizeClassAllocator Allocator;
+ typedef typename Allocator::TransferBatch TransferBatch;
+
+ void Init(AllocatorGlobalStats *s) {
+ stats_.Init();
+ if (s)
+ s->Register(&stats_);
+ }
+
+ // Returns a TransferBatch suitable for class_id.
+ TransferBatch *CreateBatch(uptr class_id, SizeClassAllocator *allocator,
+ TransferBatch *b) {
+ if (uptr batch_class_id = per_class_[class_id].batch_class_id)
+ return (TransferBatch*)Allocate(allocator, batch_class_id);
+ return b;
+ }
+
+ // Destroys TransferBatch b.
+ void DestroyBatch(uptr class_id, SizeClassAllocator *allocator,
+ TransferBatch *b) {
+ if (uptr batch_class_id = per_class_[class_id].batch_class_id)
+ Deallocate(allocator, batch_class_id, b);
+ }
+
+ void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
+ Drain(allocator);
+ if (s)
+ s->Unregister(&stats_);
+ }
+
+ void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
+ CHECK_NE(class_id, 0UL);
+ CHECK_LT(class_id, kNumClasses);
+ PerClass *c = &per_class_[class_id];
+ if (UNLIKELY(c->count == 0)) {
+ if (UNLIKELY(!Refill(c, allocator, class_id)))
+ return nullptr;
+ DCHECK_GT(c->count, 0);
+ }
+ void *res = c->batch[--c->count];
+ PREFETCH(c->batch[c->count - 1]);
+ stats_.Add(AllocatorStatAllocated, c->class_size);
+ return res;
+ }
+
+ void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
+ CHECK_NE(class_id, 0UL);
+ CHECK_LT(class_id, kNumClasses);
+ // If the first allocator call on a new thread is a deallocation, then
+ // max_count will be zero, leading to check failure.
+ PerClass *c = &per_class_[class_id];
+ InitCache(c);
+ if (UNLIKELY(c->count == c->max_count))
+ Drain(c, allocator, class_id);
+ c->batch[c->count++] = p;
+ stats_.Sub(AllocatorStatAllocated, c->class_size);
+ }
+
+ void Drain(SizeClassAllocator *allocator) {
+ for (uptr i = 1; i < kNumClasses; i++) {
+ PerClass *c = &per_class_[i];
+ while (c->count > 0)
+ Drain(c, allocator, i);
+ }
+ }
+
+ private:
+ typedef typename Allocator::SizeClassMapT SizeClassMap;
+ static const uptr kBatchClassID = SizeClassMap::kBatchClassID;
+ static const uptr kNumClasses = SizeClassMap::kNumClasses;
+ // If kUseSeparateSizeClassForBatch is true, all TransferBatch objects are
+ // allocated from kBatchClassID size class (except for those that are needed
+ // for kBatchClassID itself). The goal is to have TransferBatches in a totally
+ // different region of RAM to improve security.
+ static const bool kUseSeparateSizeClassForBatch =
+ Allocator::kUseSeparateSizeClassForBatch;
+
+ struct PerClass {
+ uptr count;
+ uptr max_count;
+ uptr class_size;
+ uptr batch_class_id;
+ void *batch[2 * TransferBatch::kMaxNumCached];
+ };
+ PerClass per_class_[kNumClasses];
+ AllocatorStats stats_;
+
+ void InitCache(PerClass *c) {
+ if (LIKELY(c->max_count))
+ return;
+ const uptr batch_class_id = SizeClassMap::ClassID(sizeof(TransferBatch));
+ for (uptr i = 1; i < kNumClasses; i++) {
+ PerClass *c = &per_class_[i];
+ const uptr size = Allocator::ClassIdToSize(i);
+ const uptr max_cached = TransferBatch::MaxCached(size);
+ c->max_count = 2 * max_cached;
+ c->class_size = size;
+ // Precompute the class id to use to store batches for the current class
+ // id. 0 means the class size is large enough to store a batch within one
+ // of the chunks. If using a separate size class, it will always be
+ // kBatchClassID, except for kBatchClassID itself.
+ if (kUseSeparateSizeClassForBatch) {
+ c->batch_class_id = (i == kBatchClassID) ? 0 : kBatchClassID;
+ } else {
+ c->batch_class_id = (size <
+ TransferBatch::AllocationSizeRequiredForNElements(max_cached)) ?
+ batch_class_id : 0;
+ }
+ }
+ DCHECK_NE(c->max_count, 0UL);
+ }
+
+ NOINLINE bool Refill(PerClass *c, SizeClassAllocator *allocator,
+ uptr class_id) {
+ InitCache(c);
+ TransferBatch *b = allocator->AllocateBatch(&stats_, this, class_id);
+ if (UNLIKELY(!b))
+ return false;
+ CHECK_GT(b->Count(), 0);
+ b->CopyToArray(c->batch);
+ c->count = b->Count();
+ DestroyBatch(class_id, allocator, b);
+ return true;
+ }
+
+ NOINLINE void Drain(PerClass *c, SizeClassAllocator *allocator,
+ uptr class_id) {
+ const uptr count = Min(c->max_count / 2, c->count);
+ const uptr first_idx_to_drain = c->count - count;
+ TransferBatch *b = CreateBatch(
+ class_id, allocator, (TransferBatch *)c->batch[first_idx_to_drain]);
+    // Failure to allocate a batch while releasing memory is non-recoverable.
+ // TODO(alekseys): Figure out how to do it without allocating a new batch.
+ if (UNLIKELY(!b)) {
+ Report("FATAL: Internal error: %s's allocator failed to allocate a "
+ "transfer batch.\n", SanitizerToolName);
+ Die();
+ }
+ b->SetFromArray(&c->batch[first_idx_to_drain], count);
+ c->count -= count;
+ allocator->DeallocateBatch(&stats_, class_id, b);
+ }
+};
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_allocator_local_cache.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
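Both local caches above follow the same half-refill/half-drain discipline:
an empty per-class cache pulls max_count / 2 chunks from the shared
allocator, and a full one returns half, so a thread oscillating at the
boundary does not bounce every chunk through the shared state. A standalone
sketch with a vector standing in for the shared allocator; all names are
hypothetical:

#include <cstdint>
#include <cstdio>
#include <vector>

struct LocalCacheSketch {
  static constexpr uint32_t kMaxCount = 8;
  void *chunks[kMaxCount];
  uint32_t count = 0;
  std::vector<void *> *global = nullptr;  // stand-in for the shared allocator

  void *Allocate() {
    if (count == 0) {  // refill half on empty
      while (count < kMaxCount / 2 && !global->empty()) {
        chunks[count++] = global->back();
        global->pop_back();
      }
      if (count == 0) return nullptr;  // shared allocator is empty too
    }
    return chunks[--count];
  }

  void Deallocate(void *p) {
    if (count == kMaxCount) {  // drain half on full
      for (uint32_t i = 0; i < kMaxCount / 2; i++)
        global->push_back(chunks[--count]);
    }
    chunks[count++] = p;
  }
};

int main() {
  static char pool[16][8];
  std::vector<void *> global;
  for (auto &c : pool) global.push_back(c);
  LocalCacheSketch cache;
  cache.global = &global;
  void *p = cache.Allocate();  // triggers a refill of 4 chunks
  cache.Deallocate(p);
  std::printf("cached=%u global=%zu\n", cache.count, global.size());
}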
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_allocator_primary32.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_allocator_primary32.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_allocator_primary32.h (revision 351984)
@@ -0,0 +1,380 @@
+//===-- sanitizer_allocator_primary32.h -------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Part of the Sanitizer Allocator.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_ALLOCATOR_H
+#error This file must be included inside sanitizer_allocator.h
+#endif
+
+template<class SizeClassAllocator> struct SizeClassAllocator32LocalCache;
+
+// SizeClassAllocator32 -- allocator for 32-bit address space.
+// This allocator can theoretically be used on 64-bit arch, but there it is less
+// efficient than SizeClassAllocator64.
+//
+// [kSpaceBeg, kSpaceBeg + kSpaceSize) is the range of addresses which can
+// be returned by MmapOrDie().
+//
+// Region:
+// a result of a single call to MmapAlignedOrDieOnFatalError(kRegionSize,
+// kRegionSize).
+// Since the regions are aligned by kRegionSize, there are exactly
+// kNumPossibleRegions possible regions in the address space and so we keep
+// a ByteMap possible_regions to store the size classes of each Region.
+// 0 size class means the region is not used by the allocator.
+//
+// One Region is used to allocate chunks of a single size class.
+// A Region looks like this:
+// UserChunk1 .. UserChunkN <gap> MetaChunkN .. MetaChunk1
+//
+// In order to avoid false sharing, the objects of this class should be
+// cache-line aligned.
+
+struct SizeClassAllocator32FlagMasks { // Bit masks.
+ enum {
+ kRandomShuffleChunks = 1,
+ kUseSeparateSizeClassForBatch = 2,
+ };
+};
+
+template <class Params>
+class SizeClassAllocator32 {
+ private:
+ static const u64 kTwoLevelByteMapSize1 =
+ (Params::kSpaceSize >> Params::kRegionSizeLog) >> 12;
+ static const u64 kMinFirstMapSizeTwoLevelByteMap = 4;
+
+ public:
+ using AddressSpaceView = typename Params::AddressSpaceView;
+ static const uptr kSpaceBeg = Params::kSpaceBeg;
+ static const u64 kSpaceSize = Params::kSpaceSize;
+ static const uptr kMetadataSize = Params::kMetadataSize;
+ typedef typename Params::SizeClassMap SizeClassMap;
+ static const uptr kRegionSizeLog = Params::kRegionSizeLog;
+ typedef typename Params::MapUnmapCallback MapUnmapCallback;
+ using ByteMap = typename conditional<
+ (kTwoLevelByteMapSize1 < kMinFirstMapSizeTwoLevelByteMap),
+ FlatByteMap<(Params::kSpaceSize >> Params::kRegionSizeLog),
+ AddressSpaceView>,
+ TwoLevelByteMap<kTwoLevelByteMapSize1, 1 << 12, AddressSpaceView>>::type;
+
+ COMPILER_CHECK(!SANITIZER_SIGN_EXTENDED_ADDRESSES ||
+ (kSpaceSize & (kSpaceSize - 1)) == 0);
+
+ static const bool kRandomShuffleChunks = Params::kFlags &
+ SizeClassAllocator32FlagMasks::kRandomShuffleChunks;
+ static const bool kUseSeparateSizeClassForBatch = Params::kFlags &
+ SizeClassAllocator32FlagMasks::kUseSeparateSizeClassForBatch;
+
+ struct TransferBatch {
+ static const uptr kMaxNumCached = SizeClassMap::kMaxNumCachedHint - 2;
+ void SetFromArray(void *batch[], uptr count) {
+ DCHECK_LE(count, kMaxNumCached);
+ count_ = count;
+ for (uptr i = 0; i < count; i++)
+ batch_[i] = batch[i];
+ }
+ uptr Count() const { return count_; }
+ void Clear() { count_ = 0; }
+ void Add(void *ptr) {
+ batch_[count_++] = ptr;
+ DCHECK_LE(count_, kMaxNumCached);
+ }
+ void CopyToArray(void *to_batch[]) const {
+ for (uptr i = 0, n = Count(); i < n; i++)
+ to_batch[i] = batch_[i];
+ }
+
+    // How much memory do we need for a batch containing n elements?
+ static uptr AllocationSizeRequiredForNElements(uptr n) {
+ return sizeof(uptr) * 2 + sizeof(void *) * n;
+ }
+ static uptr MaxCached(uptr size) {
+ return Min(kMaxNumCached, SizeClassMap::MaxCachedHint(size));
+ }
+
+ TransferBatch *next;
+
+ private:
+ uptr count_;
+ void *batch_[kMaxNumCached];
+ };
+
+ static const uptr kBatchSize = sizeof(TransferBatch);
+ COMPILER_CHECK((kBatchSize & (kBatchSize - 1)) == 0);
+ COMPILER_CHECK(kBatchSize == SizeClassMap::kMaxNumCachedHint * sizeof(uptr));
+
+ static uptr ClassIdToSize(uptr class_id) {
+ return (class_id == SizeClassMap::kBatchClassID) ?
+ kBatchSize : SizeClassMap::Size(class_id);
+ }
+
+ typedef SizeClassAllocator32<Params> ThisT;
+ typedef SizeClassAllocator32LocalCache<ThisT> AllocatorCache;
+
+ void Init(s32 release_to_os_interval_ms) {
+ possible_regions.Init();
+ internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));
+ }
+
+ s32 ReleaseToOSIntervalMs() const {
+ return kReleaseToOSIntervalNever;
+ }
+
+ void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
+ // This is empty here. Currently only implemented in 64-bit allocator.
+ }
+
+ void ForceReleaseToOS() {
+ // Currently implemented in 64-bit allocator only.
+ }
+
+ void *MapWithCallback(uptr size) {
+ void *res = MmapOrDie(size, PrimaryAllocatorName);
+ MapUnmapCallback().OnMap((uptr)res, size);
+ return res;
+ }
+
+ void UnmapWithCallback(uptr beg, uptr size) {
+ MapUnmapCallback().OnUnmap(beg, size);
+ UnmapOrDie(reinterpret_cast<void *>(beg), size);
+ }
+
+ static bool CanAllocate(uptr size, uptr alignment) {
+ return size <= SizeClassMap::kMaxSize &&
+ alignment <= SizeClassMap::kMaxSize;
+ }
+
+ void *GetMetaData(const void *p) {
+ CHECK(PointerIsMine(p));
+ uptr mem = reinterpret_cast<uptr>(p);
+ uptr beg = ComputeRegionBeg(mem);
+ uptr size = ClassIdToSize(GetSizeClass(p));
+ u32 offset = mem - beg;
+ uptr n = offset / (u32)size; // 32-bit division
+ uptr meta = (beg + kRegionSize) - (n + 1) * kMetadataSize;
+ return reinterpret_cast<void*>(meta);
+ }
+
+ NOINLINE TransferBatch *AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
+ uptr class_id) {
+ DCHECK_LT(class_id, kNumClasses);
+ SizeClassInfo *sci = GetSizeClassInfo(class_id);
+ SpinMutexLock l(&sci->mutex);
+ if (sci->free_list.empty()) {
+ if (UNLIKELY(!PopulateFreeList(stat, c, sci, class_id)))
+ return nullptr;
+ DCHECK(!sci->free_list.empty());
+ }
+ TransferBatch *b = sci->free_list.front();
+ sci->free_list.pop_front();
+ return b;
+ }
+
+ NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id,
+ TransferBatch *b) {
+ DCHECK_LT(class_id, kNumClasses);
+ CHECK_GT(b->Count(), 0);
+ SizeClassInfo *sci = GetSizeClassInfo(class_id);
+ SpinMutexLock l(&sci->mutex);
+ sci->free_list.push_front(b);
+ }
+
+ bool PointerIsMine(const void *p) {
+ uptr mem = reinterpret_cast<uptr>(p);
+ if (SANITIZER_SIGN_EXTENDED_ADDRESSES)
+ mem &= (kSpaceSize - 1);
+ if (mem < kSpaceBeg || mem >= kSpaceBeg + kSpaceSize)
+ return false;
+ return GetSizeClass(p) != 0;
+ }
+
+ uptr GetSizeClass(const void *p) {
+ return possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))];
+ }
+
+ void *GetBlockBegin(const void *p) {
+ CHECK(PointerIsMine(p));
+ uptr mem = reinterpret_cast<uptr>(p);
+ uptr beg = ComputeRegionBeg(mem);
+ uptr size = ClassIdToSize(GetSizeClass(p));
+ u32 offset = mem - beg;
+ u32 n = offset / (u32)size; // 32-bit division
+ uptr res = beg + (n * (u32)size);
+ return reinterpret_cast<void*>(res);
+ }
+
+ uptr GetActuallyAllocatedSize(void *p) {
+ CHECK(PointerIsMine(p));
+ return ClassIdToSize(GetSizeClass(p));
+ }
+
+ static uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }
+
+ uptr TotalMemoryUsed() {
+ // No need to lock here.
+ uptr res = 0;
+ for (uptr i = 0; i < kNumPossibleRegions; i++)
+ if (possible_regions[i])
+ res += kRegionSize;
+ return res;
+ }
+
+ void TestOnlyUnmap() {
+ for (uptr i = 0; i < kNumPossibleRegions; i++)
+ if (possible_regions[i])
+ UnmapWithCallback((i * kRegionSize), kRegionSize);
+ }
+
+ // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
+ // introspection API.
+ void ForceLock() {
+ for (uptr i = 0; i < kNumClasses; i++) {
+ GetSizeClassInfo(i)->mutex.Lock();
+ }
+ }
+
+ void ForceUnlock() {
+ for (int i = kNumClasses - 1; i >= 0; i--) {
+ GetSizeClassInfo(i)->mutex.Unlock();
+ }
+ }
+
+ // Iterate over all existing chunks.
+ // The allocator must be locked when calling this function.
+ void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+ for (uptr region = 0; region < kNumPossibleRegions; region++)
+ if (possible_regions[region]) {
+ uptr chunk_size = ClassIdToSize(possible_regions[region]);
+ uptr max_chunks_in_region = kRegionSize / (chunk_size + kMetadataSize);
+ uptr region_beg = region * kRegionSize;
+ for (uptr chunk = region_beg;
+ chunk < region_beg + max_chunks_in_region * chunk_size;
+ chunk += chunk_size) {
+ // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
+ callback(chunk, arg);
+ }
+ }
+ }
+
+ void PrintStats() {}
+
+ static uptr AdditionalSize() { return 0; }
+
+ typedef SizeClassMap SizeClassMapT;
+ static const uptr kNumClasses = SizeClassMap::kNumClasses;
+
+ private:
+ static const uptr kRegionSize = 1 << kRegionSizeLog;
+ static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize;
+
+ struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) SizeClassInfo {
+ StaticSpinMutex mutex;
+ IntrusiveList<TransferBatch> free_list;
+ u32 rand_state;
+ };
+ COMPILER_CHECK(sizeof(SizeClassInfo) % kCacheLineSize == 0);
+
+ uptr ComputeRegionId(uptr mem) const {
+ if (SANITIZER_SIGN_EXTENDED_ADDRESSES)
+ mem &= (kSpaceSize - 1);
+ const uptr res = mem >> kRegionSizeLog;
+ CHECK_LT(res, kNumPossibleRegions);
+ return res;
+ }
+
+ uptr ComputeRegionBeg(uptr mem) {
+ return mem & ~(kRegionSize - 1);
+ }
+
+ uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
+ DCHECK_LT(class_id, kNumClasses);
+ const uptr res = reinterpret_cast<uptr>(MmapAlignedOrDieOnFatalError(
+ kRegionSize, kRegionSize, PrimaryAllocatorName));
+ if (UNLIKELY(!res))
+ return 0;
+ MapUnmapCallback().OnMap(res, kRegionSize);
+ stat->Add(AllocatorStatMapped, kRegionSize);
+ CHECK(IsAligned(res, kRegionSize));
+ possible_regions.set(ComputeRegionId(res), static_cast<u8>(class_id));
+ return res;
+ }
+
+ SizeClassInfo *GetSizeClassInfo(uptr class_id) {
+ DCHECK_LT(class_id, kNumClasses);
+ return &size_class_info_array[class_id];
+ }
+
+ bool PopulateBatches(AllocatorCache *c, SizeClassInfo *sci, uptr class_id,
+ TransferBatch **current_batch, uptr max_count,
+ uptr *pointers_array, uptr count) {
+ // If using a separate class for batches, we do not need to shuffle it.
+ if (kRandomShuffleChunks && (!kUseSeparateSizeClassForBatch ||
+ class_id != SizeClassMap::kBatchClassID))
+ RandomShuffle(pointers_array, count, &sci->rand_state);
+ TransferBatch *b = *current_batch;
+ for (uptr i = 0; i < count; i++) {
+ if (!b) {
+ b = c->CreateBatch(class_id, this, (TransferBatch*)pointers_array[i]);
+ if (UNLIKELY(!b))
+ return false;
+ b->Clear();
+ }
+ b->Add((void*)pointers_array[i]);
+ if (b->Count() == max_count) {
+ sci->free_list.push_back(b);
+ b = nullptr;
+ }
+ }
+ *current_batch = b;
+ return true;
+ }
+
+ bool PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
+ SizeClassInfo *sci, uptr class_id) {
+ const uptr region = AllocateRegion(stat, class_id);
+ if (UNLIKELY(!region))
+ return false;
+ if (kRandomShuffleChunks)
+ if (UNLIKELY(sci->rand_state == 0))
+ // The random state is initialized from ASLR (PIE) and time.
+ sci->rand_state = reinterpret_cast<uptr>(sci) ^ NanoTime();
+ const uptr size = ClassIdToSize(class_id);
+ const uptr n_chunks = kRegionSize / (size + kMetadataSize);
+ const uptr max_count = TransferBatch::MaxCached(size);
+ DCHECK_GT(max_count, 0);
+ TransferBatch *b = nullptr;
+ constexpr uptr kShuffleArraySize = 48;
+ uptr shuffle_array[kShuffleArraySize];
+ uptr count = 0;
+ for (uptr i = region; i < region + n_chunks * size; i += size) {
+ shuffle_array[count++] = i;
+ if (count == kShuffleArraySize) {
+ if (UNLIKELY(!PopulateBatches(c, sci, class_id, &b, max_count,
+ shuffle_array, count)))
+ return false;
+ count = 0;
+ }
+ }
+ if (count) {
+ if (UNLIKELY(!PopulateBatches(c, sci, class_id, &b, max_count,
+ shuffle_array, count)))
+ return false;
+ }
+ if (b) {
+ CHECK_GT(b->Count(), 0);
+ sci->free_list.push_back(b);
+ }
+ return true;
+ }
+
+ ByteMap possible_regions;
+ SizeClassInfo size_class_info_array[kNumClasses];
+};
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_allocator_primary32.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
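Because each region in SizeClassAllocator32 above is kRegionSize-aligned, a
chunk's region id, region base, and block start all fall out of bit
arithmetic plus one 32-bit division. A standalone sketch with a hypothetical
20-bit region log:

#include <cstdint>
#include <cstdio>

constexpr uint64_t kRegionSizeLog = 20;  // 1 MiB regions
constexpr uint64_t kRegionSize = 1ull << kRegionSizeLog;

uint64_t ComputeRegionId(uint64_t mem) { return mem >> kRegionSizeLog; }
uint64_t ComputeRegionBeg(uint64_t mem) { return mem & ~(kRegionSize - 1); }

// Rounds an interior pointer down to the start of its chunk, mirroring
// SizeClassAllocator32::GetBlockBegin.
uint64_t GetBlockBegin(uint64_t mem, uint32_t chunk_size) {
  uint64_t beg = ComputeRegionBeg(mem);
  uint32_t offset = (uint32_t)(mem - beg);
  uint32_t n = offset / chunk_size;  // 32-bit division, as in the source
  return beg + (uint64_t)n * chunk_size;
}

int main() {
  uint64_t p = 3 * kRegionSize + 1000;  // interior pointer, 48-byte chunks
  std::printf("region=%llu block=0x%llx\n",
              (unsigned long long)ComputeRegionId(p),
              (unsigned long long)GetBlockBegin(p, 48));
}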
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_allocator_primary64.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_allocator_primary64.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_allocator_primary64.h (revision 351984)
@@ -0,0 +1,859 @@
+//===-- sanitizer_allocator_primary64.h -------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Part of the Sanitizer Allocator.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_ALLOCATOR_H
+#error This file must be included inside sanitizer_allocator.h
+#endif
+
+template<class SizeClassAllocator> struct SizeClassAllocator64LocalCache;
+
+// SizeClassAllocator64 -- allocator for 64-bit address space.
+// The template parameter Params is a class containing the actual parameters.
+//
+// Space: a portion of address space of kSpaceSize bytes starting at SpaceBeg.
+//    If kSpaceBeg is ~0 then SpaceBeg is chosen dynamically by mmap.
+// Otherwise SpaceBeg=kSpaceBeg (fixed address).
+// kSpaceSize is a power of two.
+// At the beginning the entire space is mprotect-ed, then small parts of it
+// are mapped on demand.
+//
+// Region: a part of Space dedicated to a single size class.
+// There are kNumClasses Regions of equal size.
+//
+// UserChunk: a piece of memory returned to user.
+// MetaChunk: kMetadataSize bytes of metadata associated with a UserChunk.
+
+// FreeArray is an array of free-d chunks (stored as 4-byte offsets)
+//
+// A Region looks like this:
+// UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1 FreeArray
+
+struct SizeClassAllocator64FlagMasks { // Bit masks.
+ enum {
+ kRandomShuffleChunks = 1,
+ };
+};
+
+template <class Params>
+class SizeClassAllocator64 {
+ public:
+ using AddressSpaceView = typename Params::AddressSpaceView;
+ static const uptr kSpaceBeg = Params::kSpaceBeg;
+ static const uptr kSpaceSize = Params::kSpaceSize;
+ static const uptr kMetadataSize = Params::kMetadataSize;
+ typedef typename Params::SizeClassMap SizeClassMap;
+ typedef typename Params::MapUnmapCallback MapUnmapCallback;
+
+ static const bool kRandomShuffleChunks =
+ Params::kFlags & SizeClassAllocator64FlagMasks::kRandomShuffleChunks;
+
+ typedef SizeClassAllocator64<Params> ThisT;
+ typedef SizeClassAllocator64LocalCache<ThisT> AllocatorCache;
+
+ // When we know the size class (the region base) we can represent a pointer
+ // as a 4-byte integer (offset from the region start shifted right by 4).
+ typedef u32 CompactPtrT;
+ static const uptr kCompactPtrScale = 4;
+ CompactPtrT PointerToCompactPtr(uptr base, uptr ptr) const {
+ return static_cast<CompactPtrT>((ptr - base) >> kCompactPtrScale);
+ }
+ uptr CompactPtrToPointer(uptr base, CompactPtrT ptr32) const {
+ return base + (static_cast<uptr>(ptr32) << kCompactPtrScale);
+ }
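+
+  // Worked example (an illustration, not part of the upstream source): with
+  // kCompactPtrScale == 4, a chunk at base + 0x150 compacts to 0x15:
+  //   CompactPtrT c = PointerToCompactPtr(base, base + 0x150);  // c == 0x15
+  //   uptr p = CompactPtrToPointer(base, c);           // p == base + 0x150
+  // Only 16-byte-aligned offsets round-trip exactly, which the size-class
+  // layout of the regions relies on.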
+
+ void Init(s32 release_to_os_interval_ms) {
+ uptr TotalSpaceSize = kSpaceSize + AdditionalSize();
+ if (kUsingConstantSpaceBeg) {
+ CHECK_EQ(kSpaceBeg, address_range.Init(TotalSpaceSize,
+ PrimaryAllocatorName, kSpaceBeg));
+ } else {
+ NonConstSpaceBeg = address_range.Init(TotalSpaceSize,
+ PrimaryAllocatorName);
+ CHECK_NE(NonConstSpaceBeg, ~(uptr)0);
+ }
+ SetReleaseToOSIntervalMs(release_to_os_interval_ms);
+ MapWithCallbackOrDie(SpaceEnd(), AdditionalSize(),
+ "SizeClassAllocator: region info");
+ // Check that the RegionInfo array is aligned on the CacheLine size.
+ DCHECK_EQ(SpaceEnd() % kCacheLineSize, 0);
+ }
+
+ s32 ReleaseToOSIntervalMs() const {
+ return atomic_load(&release_to_os_interval_ms_, memory_order_relaxed);
+ }
+
+ void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
+ atomic_store(&release_to_os_interval_ms_, release_to_os_interval_ms,
+ memory_order_relaxed);
+ }
+
+ void ForceReleaseToOS() {
+ for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
+ BlockingMutexLock l(&GetRegionInfo(class_id)->mutex);
+ MaybeReleaseToOS(class_id, true /*force*/);
+ }
+ }
+
+ static bool CanAllocate(uptr size, uptr alignment) {
+ return size <= SizeClassMap::kMaxSize &&
+ alignment <= SizeClassMap::kMaxSize;
+ }
+
+ NOINLINE void ReturnToAllocator(AllocatorStats *stat, uptr class_id,
+ const CompactPtrT *chunks, uptr n_chunks) {
+ RegionInfo *region = GetRegionInfo(class_id);
+ uptr region_beg = GetRegionBeginBySizeClass(class_id);
+ CompactPtrT *free_array = GetFreeArray(region_beg);
+
+ BlockingMutexLock l(&region->mutex);
+ uptr old_num_chunks = region->num_freed_chunks;
+ uptr new_num_freed_chunks = old_num_chunks + n_chunks;
+    // Failure to allocate free array space while releasing memory is
+    // non-recoverable.
+ if (UNLIKELY(!EnsureFreeArraySpace(region, region_beg,
+ new_num_freed_chunks))) {
+ Report("FATAL: Internal error: %s's allocator exhausted the free list "
+ "space for size class %zd (%zd bytes).\n", SanitizerToolName,
+ class_id, ClassIdToSize(class_id));
+ Die();
+ }
+ for (uptr i = 0; i < n_chunks; i++)
+ free_array[old_num_chunks + i] = chunks[i];
+ region->num_freed_chunks = new_num_freed_chunks;
+ region->stats.n_freed += n_chunks;
+
+ MaybeReleaseToOS(class_id, false /*force*/);
+ }
+
+ NOINLINE bool GetFromAllocator(AllocatorStats *stat, uptr class_id,
+ CompactPtrT *chunks, uptr n_chunks) {
+ RegionInfo *region = GetRegionInfo(class_id);
+ uptr region_beg = GetRegionBeginBySizeClass(class_id);
+ CompactPtrT *free_array = GetFreeArray(region_beg);
+
+ BlockingMutexLock l(&region->mutex);
+ if (UNLIKELY(region->num_freed_chunks < n_chunks)) {
+ if (UNLIKELY(!PopulateFreeArray(stat, class_id, region,
+ n_chunks - region->num_freed_chunks)))
+ return false;
+ CHECK_GE(region->num_freed_chunks, n_chunks);
+ }
+ region->num_freed_chunks -= n_chunks;
+ uptr base_idx = region->num_freed_chunks;
+ for (uptr i = 0; i < n_chunks; i++)
+ chunks[i] = free_array[base_idx + i];
+ region->stats.n_allocated += n_chunks;
+ return true;
+ }
+
+ bool PointerIsMine(const void *p) const {
+ uptr P = reinterpret_cast<uptr>(p);
+ if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0)
+ return P / kSpaceSize == kSpaceBeg / kSpaceSize;
+ return P >= SpaceBeg() && P < SpaceEnd();
+ }
+
+ uptr GetRegionBegin(const void *p) {
+ if (kUsingConstantSpaceBeg)
+ return reinterpret_cast<uptr>(p) & ~(kRegionSize - 1);
+ uptr space_beg = SpaceBeg();
+ return ((reinterpret_cast<uptr>(p) - space_beg) & ~(kRegionSize - 1)) +
+ space_beg;
+ }
+
+ uptr GetRegionBeginBySizeClass(uptr class_id) const {
+ return SpaceBeg() + kRegionSize * class_id;
+ }
+
+ uptr GetSizeClass(const void *p) {
+ if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0)
+ return ((reinterpret_cast<uptr>(p)) / kRegionSize) % kNumClassesRounded;
+ return ((reinterpret_cast<uptr>(p) - SpaceBeg()) / kRegionSize) %
+ kNumClassesRounded;
+ }
+
+ void *GetBlockBegin(const void *p) {
+ uptr class_id = GetSizeClass(p);
+ uptr size = ClassIdToSize(class_id);
+ if (!size) return nullptr;
+ uptr chunk_idx = GetChunkIdx((uptr)p, size);
+ uptr reg_beg = GetRegionBegin(p);
+ uptr beg = chunk_idx * size;
+ uptr next_beg = beg + size;
+ if (class_id >= kNumClasses) return nullptr;
+ const RegionInfo *region = AddressSpaceView::Load(GetRegionInfo(class_id));
+ if (region->mapped_user >= next_beg)
+ return reinterpret_cast<void*>(reg_beg + beg);
+ return nullptr;
+ }
+
+ uptr GetActuallyAllocatedSize(void *p) {
+ CHECK(PointerIsMine(p));
+ return ClassIdToSize(GetSizeClass(p));
+ }
+
+ static uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }
+
+ void *GetMetaData(const void *p) {
+ uptr class_id = GetSizeClass(p);
+ uptr size = ClassIdToSize(class_id);
+ uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), size);
+ uptr region_beg = GetRegionBeginBySizeClass(class_id);
+ return reinterpret_cast<void *>(GetMetadataEnd(region_beg) -
+ (1 + chunk_idx) * kMetadataSize);
+ }
+
+ uptr TotalMemoryUsed() {
+ uptr res = 0;
+ for (uptr i = 0; i < kNumClasses; i++)
+ res += GetRegionInfo(i)->allocated_user;
+ return res;
+ }
+
+ // Test-only.
+ void TestOnlyUnmap() {
+ UnmapWithCallbackOrDie(SpaceBeg(), kSpaceSize + AdditionalSize());
+ }
+
+ static void FillMemoryProfile(uptr start, uptr rss, bool file, uptr *stats,
+ uptr stats_size) {
+ for (uptr class_id = 0; class_id < stats_size; class_id++)
+ if (stats[class_id] == start)
+ stats[class_id] = rss;
+ }
+
+ void PrintStats(uptr class_id, uptr rss) {
+ RegionInfo *region = GetRegionInfo(class_id);
+ if (region->mapped_user == 0) return;
+ uptr in_use = region->stats.n_allocated - region->stats.n_freed;
+ uptr avail_chunks = region->allocated_user / ClassIdToSize(class_id);
+ Printf(
+ "%s %02zd (%6zd): mapped: %6zdK allocs: %7zd frees: %7zd inuse: %6zd "
+ "num_freed_chunks %7zd avail: %6zd rss: %6zdK releases: %6zd "
+ "last released: %6zdK region: 0x%zx\n",
+ region->exhausted ? "F" : " ", class_id, ClassIdToSize(class_id),
+ region->mapped_user >> 10, region->stats.n_allocated,
+ region->stats.n_freed, in_use, region->num_freed_chunks, avail_chunks,
+ rss >> 10, region->rtoi.num_releases,
+ region->rtoi.last_released_bytes >> 10,
+ SpaceBeg() + kRegionSize * class_id);
+ }
+
+ void PrintStats() {
+ uptr rss_stats[kNumClasses];
+ for (uptr class_id = 0; class_id < kNumClasses; class_id++)
+ rss_stats[class_id] = SpaceBeg() + kRegionSize * class_id;
+ GetMemoryProfile(FillMemoryProfile, rss_stats, kNumClasses);
+
+ uptr total_mapped = 0;
+ uptr total_rss = 0;
+ uptr n_allocated = 0;
+ uptr n_freed = 0;
+ for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
+ RegionInfo *region = GetRegionInfo(class_id);
+ if (region->mapped_user != 0) {
+ total_mapped += region->mapped_user;
+ total_rss += rss_stats[class_id];
+ }
+ n_allocated += region->stats.n_allocated;
+ n_freed += region->stats.n_freed;
+ }
+
+ Printf("Stats: SizeClassAllocator64: %zdM mapped (%zdM rss) in "
+ "%zd allocations; remains %zd\n", total_mapped >> 20,
+ total_rss >> 20, n_allocated, n_allocated - n_freed);
+ for (uptr class_id = 1; class_id < kNumClasses; class_id++)
+ PrintStats(class_id, rss_stats[class_id]);
+ }
+
+  // ForceLock() and ForceUnlock() are needed to implement the Darwin malloc
+  // zone introspection API.
+ void ForceLock() {
+ for (uptr i = 0; i < kNumClasses; i++) {
+ GetRegionInfo(i)->mutex.Lock();
+ }
+ }
+
+ void ForceUnlock() {
+ for (int i = (int)kNumClasses - 1; i >= 0; i--) {
+ GetRegionInfo(i)->mutex.Unlock();
+ }
+ }
+
+ // Iterate over all existing chunks.
+ // The allocator must be locked when calling this function.
+ void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+ for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
+ RegionInfo *region = GetRegionInfo(class_id);
+ uptr chunk_size = ClassIdToSize(class_id);
+ uptr region_beg = SpaceBeg() + class_id * kRegionSize;
+ uptr region_allocated_user_size =
+ AddressSpaceView::Load(region)->allocated_user;
+ for (uptr chunk = region_beg;
+ chunk < region_beg + region_allocated_user_size;
+ chunk += chunk_size) {
+ // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
+ callback(chunk, arg);
+ }
+ }
+ }
+
+ static uptr ClassIdToSize(uptr class_id) {
+ return SizeClassMap::Size(class_id);
+ }
+
+ static uptr AdditionalSize() {
+ return RoundUpTo(sizeof(RegionInfo) * kNumClassesRounded,
+ GetPageSizeCached());
+ }
+
+ typedef SizeClassMap SizeClassMapT;
+ static const uptr kNumClasses = SizeClassMap::kNumClasses;
+ static const uptr kNumClassesRounded = SizeClassMap::kNumClassesRounded;
+
+ // A packed array of counters. Each counter occupies 2^n bits, enough to store
+  // counter's max_value. The ctor will try to allocate the required buffer
+  // via mapper->MapPackedCounterArrayBuffer and the caller is expected to
+  // check whether the initialization succeeded by checking the IsAllocated()
+  // result. For performance reasons none of the accessors checks the
+  // validity of its arguments: it is assumed that the index is always in the
+  // [0, n) range and that the value is never incremented past max_value.
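+  //
+  // Worked example (an illustration, not part of the upstream source): for
+  // max_value == 14, counter_size_bits == RoundUpToPowerOfTwo(3 + 1) == 4,
+  // so counter_mask == 0xF and one u64 packs 64 / 4 == 16 counters. For
+  // n == 100 counters the buffer is RoundUpTo(100, 16) / 16 == 7 u64s, i.e.
+  // 56 bytes requested from mapper->MapPackedCounterArrayBuffer().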
+ template<class MemoryMapperT>
+ class PackedCounterArray {
+ public:
+ PackedCounterArray(u64 num_counters, u64 max_value, MemoryMapperT *mapper)
+ : n(num_counters), memory_mapper(mapper) {
+ CHECK_GT(num_counters, 0);
+ CHECK_GT(max_value, 0);
+ constexpr u64 kMaxCounterBits = sizeof(*buffer) * 8ULL;
+      // Rounding the counter storage size up to a power of two allows the
+      // use of bit shifts when calculating a particular counter's index and
+      // offset.
+ uptr counter_size_bits =
+ RoundUpToPowerOfTwo(MostSignificantSetBitIndex(max_value) + 1);
+ CHECK_LE(counter_size_bits, kMaxCounterBits);
+ counter_size_bits_log = Log2(counter_size_bits);
+ counter_mask = ~0ULL >> (kMaxCounterBits - counter_size_bits);
+
+ uptr packing_ratio = kMaxCounterBits >> counter_size_bits_log;
+ CHECK_GT(packing_ratio, 0);
+ packing_ratio_log = Log2(packing_ratio);
+ bit_offset_mask = packing_ratio - 1;
+
+ buffer_size =
+ (RoundUpTo(n, 1ULL << packing_ratio_log) >> packing_ratio_log) *
+ sizeof(*buffer);
+ buffer = reinterpret_cast<u64*>(
+ memory_mapper->MapPackedCounterArrayBuffer(buffer_size));
+ }
+ ~PackedCounterArray() {
+ if (buffer) {
+ memory_mapper->UnmapPackedCounterArrayBuffer(
+ reinterpret_cast<uptr>(buffer), buffer_size);
+ }
+ }
+
+ bool IsAllocated() const {
+ return !!buffer;
+ }
+
+ u64 GetCount() const {
+ return n;
+ }
+
+ uptr Get(uptr i) const {
+ DCHECK_LT(i, n);
+ uptr index = i >> packing_ratio_log;
+ uptr bit_offset = (i & bit_offset_mask) << counter_size_bits_log;
+ return (buffer[index] >> bit_offset) & counter_mask;
+ }
+
+ void Inc(uptr i) const {
+ DCHECK_LT(Get(i), counter_mask);
+ uptr index = i >> packing_ratio_log;
+ uptr bit_offset = (i & bit_offset_mask) << counter_size_bits_log;
+ buffer[index] += 1ULL << bit_offset;
+ }
+
+ void IncRange(uptr from, uptr to) const {
+ DCHECK_LE(from, to);
+ for (uptr i = from; i <= to; i++)
+ Inc(i);
+ }
+
+ private:
+ const u64 n;
+ u64 counter_size_bits_log;
+ u64 counter_mask;
+ u64 packing_ratio_log;
+ u64 bit_offset_mask;
+
+ MemoryMapperT* const memory_mapper;
+ u64 buffer_size;
+ u64* buffer;
+ };
+
+ template<class MemoryMapperT>
+ class FreePagesRangeTracker {
+ public:
+ explicit FreePagesRangeTracker(MemoryMapperT* mapper)
+ : memory_mapper(mapper),
+ page_size_scaled_log(Log2(GetPageSizeCached() >> kCompactPtrScale)),
+ in_the_range(false), current_page(0), current_range_start_page(0) {}
+
+ void NextPage(bool freed) {
+ if (freed) {
+ if (!in_the_range) {
+ current_range_start_page = current_page;
+ in_the_range = true;
+ }
+ } else {
+ CloseOpenedRange();
+ }
+ current_page++;
+ }
+
+ void Done() {
+ CloseOpenedRange();
+ }
+
+ private:
+ void CloseOpenedRange() {
+ if (in_the_range) {
+ memory_mapper->ReleasePageRangeToOS(
+ current_range_start_page << page_size_scaled_log,
+ current_page << page_size_scaled_log);
+ in_the_range = false;
+ }
+ }
+
+ MemoryMapperT* const memory_mapper;
+ const uptr page_size_scaled_log;
+ bool in_the_range;
+ uptr current_page;
+ uptr current_range_start_page;
+ };
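+
+  // Example of the tracker above (an illustration, not part of the upstream
+  // source): feeding NextPage() the sequence freed, freed, used, freed and
+  // then calling Done() releases two ranges, pages [0, 2) and [3, 4), each
+  // converted from page numbers to compact-scaled offsets via
+  // page_size_scaled_log before memory_mapper->ReleasePageRangeToOS() runs.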
+
+  // Iterates over the free_array to identify memory pages containing only
+  // freed chunks and returns these pages back to the OS.
+ // allocated_pages_count is the total number of pages allocated for the
+ // current bucket.
+ template<class MemoryMapperT>
+ static void ReleaseFreeMemoryToOS(CompactPtrT *free_array,
+ uptr free_array_count, uptr chunk_size,
+ uptr allocated_pages_count,
+ MemoryMapperT *memory_mapper) {
+ const uptr page_size = GetPageSizeCached();
+
+ // Figure out the number of chunks per page and whether we can take a fast
+ // path (the number of chunks per page is the same for all pages).
+ uptr full_pages_chunk_count_max;
+ bool same_chunk_count_per_page;
+ if (chunk_size <= page_size && page_size % chunk_size == 0) {
+      // Same number of chunks per page, no crossovers.
+ full_pages_chunk_count_max = page_size / chunk_size;
+ same_chunk_count_per_page = true;
+ } else if (chunk_size <= page_size && page_size % chunk_size != 0 &&
+ chunk_size % (page_size % chunk_size) == 0) {
+ // Some chunks are crossing page boundaries, which means that the page
+ // contains one or two partial chunks, but all pages contain the same
+ // number of chunks.
+ full_pages_chunk_count_max = page_size / chunk_size + 1;
+ same_chunk_count_per_page = true;
+ } else if (chunk_size <= page_size) {
+ // Some chunks are crossing page boundaries, which means that the page
+ // contains one or two partial chunks.
+ full_pages_chunk_count_max = page_size / chunk_size + 2;
+ same_chunk_count_per_page = false;
+ } else if (chunk_size > page_size && chunk_size % page_size == 0) {
+      // One chunk covers multiple pages, no crossovers.
+ full_pages_chunk_count_max = 1;
+ same_chunk_count_per_page = true;
+ } else if (chunk_size > page_size) {
+      // One chunk covers multiple pages and some chunks cross page
+      // boundaries. Some pages contain one chunk, some contain two.
+ full_pages_chunk_count_max = 2;
+ same_chunk_count_per_page = false;
+ } else {
+ UNREACHABLE("All chunk_size/page_size ratios must be handled.");
+ }
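+
+    // Worked example (an illustration, not part of the upstream source):
+    // with a 4096-byte page and chunk_size == 48, page_size % chunk_size ==
+    // 16 and 48 % 16 == 0, so every page holds the same 4096 / 48 + 1 == 86
+    // (full or partial) chunks. With chunk_size == 112 the remainder test
+    // fails (112 % 64 != 0), so pages hold at most 36 + 2 == 38 chunks and
+    // the slow per-page counting path below is taken.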
+
+ PackedCounterArray<MemoryMapperT> counters(allocated_pages_count,
+ full_pages_chunk_count_max,
+ memory_mapper);
+ if (!counters.IsAllocated())
+ return;
+
+ const uptr chunk_size_scaled = chunk_size >> kCompactPtrScale;
+ const uptr page_size_scaled = page_size >> kCompactPtrScale;
+ const uptr page_size_scaled_log = Log2(page_size_scaled);
+
+ // Iterate over free chunks and count how many free chunks affect each
+ // allocated page.
+ if (chunk_size <= page_size && page_size % chunk_size == 0) {
+ // Each chunk affects one page only.
+ for (uptr i = 0; i < free_array_count; i++)
+ counters.Inc(free_array[i] >> page_size_scaled_log);
+ } else {
+ // In all other cases chunks might affect more than one page.
+ for (uptr i = 0; i < free_array_count; i++) {
+ counters.IncRange(
+ free_array[i] >> page_size_scaled_log,
+ (free_array[i] + chunk_size_scaled - 1) >> page_size_scaled_log);
+ }
+ }
+
+ // Iterate over pages detecting ranges of pages with chunk counters equal
+ // to the expected number of chunks for the particular page.
+ FreePagesRangeTracker<MemoryMapperT> range_tracker(memory_mapper);
+ if (same_chunk_count_per_page) {
+ // Fast path, every page has the same number of chunks affecting it.
+ for (uptr i = 0; i < counters.GetCount(); i++)
+ range_tracker.NextPage(counters.Get(i) == full_pages_chunk_count_max);
+ } else {
+      // Slow path: go through the pages keeping count of how many chunks
+      // affect each page.
+ const uptr pn =
+ chunk_size < page_size ? page_size_scaled / chunk_size_scaled : 1;
+ const uptr pnc = pn * chunk_size_scaled;
+ // The idea is to increment the current page pointer by the first chunk
+ // size, middle portion size (the portion of the page covered by chunks
+ // except the first and the last one) and then the last chunk size, adding
+ // up the number of chunks on the current page and checking on every step
+ // whether the page boundary was crossed.
+ uptr prev_page_boundary = 0;
+ uptr current_boundary = 0;
+ for (uptr i = 0; i < counters.GetCount(); i++) {
+ uptr page_boundary = prev_page_boundary + page_size_scaled;
+ uptr chunks_per_page = pn;
+ if (current_boundary < page_boundary) {
+ if (current_boundary > prev_page_boundary)
+ chunks_per_page++;
+ current_boundary += pnc;
+ if (current_boundary < page_boundary) {
+ chunks_per_page++;
+ current_boundary += chunk_size_scaled;
+ }
+ }
+ prev_page_boundary = page_boundary;
+
+ range_tracker.NextPage(counters.Get(i) == chunks_per_page);
+ }
+ }
+ range_tracker.Done();
+ }
+
+ private:
+ friend class MemoryMapper;
+
+ ReservedAddressRange address_range;
+
+ static const uptr kRegionSize = kSpaceSize / kNumClassesRounded;
+ // FreeArray is the array of free-d chunks (stored as 4-byte offsets).
+  // In the worst case it may require kRegionSize/SizeClassMap::kMinSize
+ // elements, but in reality this will not happen. For simplicity we
+ // dedicate 1/8 of the region's virtual space to FreeArray.
+ static const uptr kFreeArraySize = kRegionSize / 8;
+
+ static const bool kUsingConstantSpaceBeg = kSpaceBeg != ~(uptr)0;
+ uptr NonConstSpaceBeg;
+ uptr SpaceBeg() const {
+ return kUsingConstantSpaceBeg ? kSpaceBeg : NonConstSpaceBeg;
+ }
+ uptr SpaceEnd() const { return SpaceBeg() + kSpaceSize; }
+ // kRegionSize must be >= 2^32.
+ COMPILER_CHECK((kRegionSize) >= (1ULL << (SANITIZER_WORDSIZE / 2)));
+ // kRegionSize must be <= 2^36, see CompactPtrT.
+ COMPILER_CHECK((kRegionSize) <= (1ULL << (SANITIZER_WORDSIZE / 2 + 4)));
+ // Call mmap for user memory with at least this size.
+ static const uptr kUserMapSize = 1 << 16;
+ // Call mmap for metadata memory with at least this size.
+ static const uptr kMetaMapSize = 1 << 16;
+ // Call mmap for free array memory with at least this size.
+ static const uptr kFreeArrayMapSize = 1 << 16;
+
+ atomic_sint32_t release_to_os_interval_ms_;
+
+ struct Stats {
+ uptr n_allocated;
+ uptr n_freed;
+ };
+
+ struct ReleaseToOsInfo {
+ uptr n_freed_at_last_release;
+ uptr num_releases;
+ u64 last_release_at_ns;
+ u64 last_released_bytes;
+ };
+
+ struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) RegionInfo {
+ BlockingMutex mutex;
+ uptr num_freed_chunks; // Number of elements in the freearray.
+ uptr mapped_free_array; // Bytes mapped for freearray.
+ uptr allocated_user; // Bytes allocated for user memory.
+ uptr allocated_meta; // Bytes allocated for metadata.
+ uptr mapped_user; // Bytes mapped for user memory.
+ uptr mapped_meta; // Bytes mapped for metadata.
+ u32 rand_state; // Seed for random shuffle, used if kRandomShuffleChunks.
+ bool exhausted; // Whether region is out of space for new chunks.
+ Stats stats;
+ ReleaseToOsInfo rtoi;
+ };
+ COMPILER_CHECK(sizeof(RegionInfo) % kCacheLineSize == 0);
+
+ RegionInfo *GetRegionInfo(uptr class_id) const {
+ DCHECK_LT(class_id, kNumClasses);
+ RegionInfo *regions = reinterpret_cast<RegionInfo *>(SpaceEnd());
+ return &regions[class_id];
+ }
+
+ uptr GetMetadataEnd(uptr region_beg) const {
+ return region_beg + kRegionSize - kFreeArraySize;
+ }
+
+ uptr GetChunkIdx(uptr chunk, uptr size) const {
+ if (!kUsingConstantSpaceBeg)
+ chunk -= SpaceBeg();
+
+ uptr offset = chunk % kRegionSize;
+ // Here we divide by a non-constant. This is costly.
+ // size always fits into 32-bits. If the offset fits too, use 32-bit div.
+ if (offset >> (SANITIZER_WORDSIZE / 2))
+ return offset / size;
+ return (u32)offset / (u32)size;
+ }
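+
+  // Example (an illustration, not part of the upstream source): with
+  // size == 48, an offset of 4800 bytes into the region gives
+  // chunk_idx == 100 via the cheap 32-bit division; the 64-bit division is
+  // only needed for offsets >= 2^32, which are possible because kRegionSize
+  // may exceed 4GB (see the COMPILER_CHECKs above).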
+
+ CompactPtrT *GetFreeArray(uptr region_beg) const {
+ return reinterpret_cast<CompactPtrT *>(GetMetadataEnd(region_beg));
+ }
+
+ bool MapWithCallback(uptr beg, uptr size, const char *name) {
+ uptr mapped = address_range.Map(beg, size, name);
+ if (UNLIKELY(!mapped))
+ return false;
+ CHECK_EQ(beg, mapped);
+ MapUnmapCallback().OnMap(beg, size);
+ return true;
+ }
+
+ void MapWithCallbackOrDie(uptr beg, uptr size, const char *name) {
+ CHECK_EQ(beg, address_range.MapOrDie(beg, size, name));
+ MapUnmapCallback().OnMap(beg, size);
+ }
+
+ void UnmapWithCallbackOrDie(uptr beg, uptr size) {
+ MapUnmapCallback().OnUnmap(beg, size);
+ address_range.Unmap(beg, size);
+ }
+
+ bool EnsureFreeArraySpace(RegionInfo *region, uptr region_beg,
+ uptr num_freed_chunks) {
+ uptr needed_space = num_freed_chunks * sizeof(CompactPtrT);
+ if (region->mapped_free_array < needed_space) {
+ uptr new_mapped_free_array = RoundUpTo(needed_space, kFreeArrayMapSize);
+ CHECK_LE(new_mapped_free_array, kFreeArraySize);
+ uptr current_map_end = reinterpret_cast<uptr>(GetFreeArray(region_beg)) +
+ region->mapped_free_array;
+ uptr new_map_size = new_mapped_free_array - region->mapped_free_array;
+ if (UNLIKELY(!MapWithCallback(current_map_end, new_map_size,
+ "SizeClassAllocator: freearray")))
+ return false;
+ region->mapped_free_array = new_mapped_free_array;
+ }
+ return true;
+ }
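+
+  // Worked example (an illustration, not part of the upstream source):
+  // storing 100000 freed chunks needs 100000 * sizeof(CompactPtrT) ==
+  // 400000 bytes, which rounds up to RoundUpTo(400000, 1 << 16) == 458752
+  // bytes (seven 64K blocks); only the delta beyond the already mapped
+  // region->mapped_free_array is actually mapped via MapWithCallback().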
+
+ // Check whether this size class is exhausted.
+ bool IsRegionExhausted(RegionInfo *region, uptr class_id,
+ uptr additional_map_size) {
+ if (LIKELY(region->mapped_user + region->mapped_meta +
+ additional_map_size <= kRegionSize - kFreeArraySize))
+ return false;
+ if (!region->exhausted) {
+ region->exhausted = true;
+ Printf("%s: Out of memory. ", SanitizerToolName);
+ Printf("The process has exhausted %zuMB for size class %zu.\n",
+ kRegionSize >> 20, ClassIdToSize(class_id));
+ }
+ return true;
+ }
+
+ NOINLINE bool PopulateFreeArray(AllocatorStats *stat, uptr class_id,
+ RegionInfo *region, uptr requested_count) {
+ // region->mutex is held.
+ const uptr region_beg = GetRegionBeginBySizeClass(class_id);
+ const uptr size = ClassIdToSize(class_id);
+
+ const uptr total_user_bytes =
+ region->allocated_user + requested_count * size;
+ // Map more space for chunks, if necessary.
+ if (LIKELY(total_user_bytes > region->mapped_user)) {
+ if (UNLIKELY(region->mapped_user == 0)) {
+ if (!kUsingConstantSpaceBeg && kRandomShuffleChunks)
+ // The random state is initialized from ASLR.
+ region->rand_state = static_cast<u32>(region_beg >> 12);
+        // Postpone the first release-to-OS attempt by ReleaseToOSIntervalMs,
+        // preventing just-allocated memory from being released sooner than
+        // necessary and also preventing extraneous ReleaseMemoryPagesToOS
+        // calls for short-lived processes.
+ // Do it only when the feature is turned on, to avoid a potentially
+ // extraneous syscall.
+ if (ReleaseToOSIntervalMs() >= 0)
+ region->rtoi.last_release_at_ns = MonotonicNanoTime();
+ }
+ // Do the mmap for the user memory.
+ const uptr user_map_size =
+ RoundUpTo(total_user_bytes - region->mapped_user, kUserMapSize);
+ if (UNLIKELY(IsRegionExhausted(region, class_id, user_map_size)))
+ return false;
+ if (UNLIKELY(!MapWithCallback(region_beg + region->mapped_user,
+ user_map_size,
+ "SizeClassAllocator: region data")))
+ return false;
+ stat->Add(AllocatorStatMapped, user_map_size);
+ region->mapped_user += user_map_size;
+ }
+ const uptr new_chunks_count =
+ (region->mapped_user - region->allocated_user) / size;
+
+ if (kMetadataSize) {
+ // Calculate the required space for metadata.
+ const uptr total_meta_bytes =
+ region->allocated_meta + new_chunks_count * kMetadataSize;
+ const uptr meta_map_size = (total_meta_bytes > region->mapped_meta) ?
+ RoundUpTo(total_meta_bytes - region->mapped_meta, kMetaMapSize) : 0;
+ // Map more space for metadata, if necessary.
+ if (meta_map_size) {
+ if (UNLIKELY(IsRegionExhausted(region, class_id, meta_map_size)))
+ return false;
+ if (UNLIKELY(!MapWithCallback(
+ GetMetadataEnd(region_beg) - region->mapped_meta - meta_map_size,
+ meta_map_size, "SizeClassAllocator: region metadata")))
+ return false;
+ region->mapped_meta += meta_map_size;
+ }
+ }
+
+ // If necessary, allocate more space for the free array and populate it with
+ // newly allocated chunks.
+ const uptr total_freed_chunks = region->num_freed_chunks + new_chunks_count;
+ if (UNLIKELY(!EnsureFreeArraySpace(region, region_beg, total_freed_chunks)))
+ return false;
+ CompactPtrT *free_array = GetFreeArray(region_beg);
+ for (uptr i = 0, chunk = region->allocated_user; i < new_chunks_count;
+ i++, chunk += size)
+ free_array[total_freed_chunks - 1 - i] = PointerToCompactPtr(0, chunk);
+ if (kRandomShuffleChunks)
+ RandomShuffle(&free_array[region->num_freed_chunks], new_chunks_count,
+ &region->rand_state);
+
+ // All necessary memory is mapped and now it is safe to advance all
+ // 'allocated_*' counters.
+ region->num_freed_chunks += new_chunks_count;
+ region->allocated_user += new_chunks_count * size;
+ CHECK_LE(region->allocated_user, region->mapped_user);
+ region->allocated_meta += new_chunks_count * kMetadataSize;
+ CHECK_LE(region->allocated_meta, region->mapped_meta);
+ region->exhausted = false;
+
+ // TODO(alekseyshl): Consider bumping last_release_at_ns here to prevent
+ // MaybeReleaseToOS from releasing just allocated pages or protect these
+ // not yet used chunks some other way.
+
+ return true;
+ }
+
+ class MemoryMapper {
+ public:
+ MemoryMapper(const ThisT& base_allocator, uptr class_id)
+ : allocator(base_allocator),
+ region_base(base_allocator.GetRegionBeginBySizeClass(class_id)),
+ released_ranges_count(0),
+ released_bytes(0) {
+ }
+
+ uptr GetReleasedRangesCount() const {
+ return released_ranges_count;
+ }
+
+ uptr GetReleasedBytes() const {
+ return released_bytes;
+ }
+
+ uptr MapPackedCounterArrayBuffer(uptr buffer_size) {
+ // TODO(alekseyshl): The idea to explore is to check if we have enough
+ // space between num_freed_chunks*sizeof(CompactPtrT) and
+ // mapped_free_array to fit buffer_size bytes and use that space instead
+ // of mapping a temporary one.
+ return reinterpret_cast<uptr>(
+ MmapOrDieOnFatalError(buffer_size, "ReleaseToOSPageCounters"));
+ }
+
+ void UnmapPackedCounterArrayBuffer(uptr buffer, uptr buffer_size) {
+ UnmapOrDie(reinterpret_cast<void *>(buffer), buffer_size);
+ }
+
+ // Releases [from, to) range of pages back to OS.
+ void ReleasePageRangeToOS(CompactPtrT from, CompactPtrT to) {
+ const uptr from_page = allocator.CompactPtrToPointer(region_base, from);
+ const uptr to_page = allocator.CompactPtrToPointer(region_base, to);
+ ReleaseMemoryPagesToOS(from_page, to_page);
+ released_ranges_count++;
+ released_bytes += to_page - from_page;
+ }
+
+ private:
+ const ThisT& allocator;
+ const uptr region_base;
+ uptr released_ranges_count;
+ uptr released_bytes;
+ };
+
+ // Attempts to release RAM occupied by freed chunks back to OS. The region is
+ // expected to be locked.
+ void MaybeReleaseToOS(uptr class_id, bool force) {
+ RegionInfo *region = GetRegionInfo(class_id);
+ const uptr chunk_size = ClassIdToSize(class_id);
+ const uptr page_size = GetPageSizeCached();
+
+ uptr n = region->num_freed_chunks;
+ if (n * chunk_size < page_size)
+ return; // No chance to release anything.
+ if ((region->stats.n_freed -
+ region->rtoi.n_freed_at_last_release) * chunk_size < page_size) {
+ return; // Nothing new to release.
+ }
+
+ if (!force) {
+ s32 interval_ms = ReleaseToOSIntervalMs();
+ if (interval_ms < 0)
+ return;
+
+ if (region->rtoi.last_release_at_ns + interval_ms * 1000000ULL >
+ MonotonicNanoTime()) {
+ return; // Memory was returned recently.
+ }
+ }
+
+ MemoryMapper memory_mapper(*this, class_id);
+
+ ReleaseFreeMemoryToOS<MemoryMapper>(
+ GetFreeArray(GetRegionBeginBySizeClass(class_id)), n, chunk_size,
+ RoundUpTo(region->allocated_user, page_size) / page_size,
+ &memory_mapper);
+
+ if (memory_mapper.GetReleasedRangesCount() > 0) {
+ region->rtoi.n_freed_at_last_release = region->stats.n_freed;
+ region->rtoi.num_releases += memory_mapper.GetReleasedRangesCount();
+ region->rtoi.last_released_bytes = memory_mapper.GetReleasedBytes();
+ }
+ region->rtoi.last_release_at_ns = MonotonicNanoTime();
+ }
+};
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_allocator_primary64.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_allocator_report.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_allocator_report.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_allocator_report.cc (revision 351984)
@@ -0,0 +1,136 @@
+//===-- sanitizer_allocator_report.cc ---------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// Shared allocator error reporting for ThreadSanitizer, MemorySanitizer, etc.
+///
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_allocator.h"
+#include "sanitizer_allocator_report.h"
+#include "sanitizer_common.h"
+#include "sanitizer_report_decorator.h"
+
+namespace __sanitizer {
+
+class ScopedAllocatorErrorReport {
+ public:
+ ScopedAllocatorErrorReport(const char *error_summary_,
+ const StackTrace *stack_)
+ : error_summary(error_summary_),
+ stack(stack_) {
+ Printf("%s", d.Error());
+ }
+ ~ScopedAllocatorErrorReport() {
+ Printf("%s", d.Default());
+ stack->Print();
+ PrintHintAllocatorCannotReturnNull();
+ ReportErrorSummary(error_summary, stack);
+ }
+
+ private:
+ ScopedErrorReportLock lock;
+ const char *error_summary;
+ const StackTrace* const stack;
+ const SanitizerCommonDecorator d;
+};
+
+void NORETURN ReportCallocOverflow(uptr count, uptr size,
+ const StackTrace *stack) {
+ {
+ ScopedAllocatorErrorReport report("calloc-overflow", stack);
+ Report("ERROR: %s: calloc parameters overflow: count * size (%zd * %zd) "
+ "cannot be represented in type size_t\n", SanitizerToolName, count,
+ size);
+ }
+ Die();
+}
+
+void NORETURN ReportReallocArrayOverflow(uptr count, uptr size,
+ const StackTrace *stack) {
+ {
+ ScopedAllocatorErrorReport report("reallocarray-overflow", stack);
+ Report(
+ "ERROR: %s: reallocarray parameters overflow: count * size (%zd * %zd) "
+ "cannot be represented in type size_t\n",
+ SanitizerToolName, count, size);
+ }
+ Die();
+}
+
+void NORETURN ReportPvallocOverflow(uptr size, const StackTrace *stack) {
+ {
+ ScopedAllocatorErrorReport report("pvalloc-overflow", stack);
+ Report("ERROR: %s: pvalloc parameters overflow: size 0x%zx rounded up to "
+ "system page size 0x%zx cannot be represented in type size_t\n",
+ SanitizerToolName, size, GetPageSizeCached());
+ }
+ Die();
+}
+
+void NORETURN ReportInvalidAllocationAlignment(uptr alignment,
+ const StackTrace *stack) {
+ {
+ ScopedAllocatorErrorReport report("invalid-allocation-alignment", stack);
+ Report("ERROR: %s: invalid allocation alignment: %zd, alignment must be a "
+ "power of two\n", SanitizerToolName, alignment);
+ }
+ Die();
+}
+
+void NORETURN ReportInvalidAlignedAllocAlignment(uptr size, uptr alignment,
+ const StackTrace *stack) {
+ {
+ ScopedAllocatorErrorReport report("invalid-aligned-alloc-alignment", stack);
+#if SANITIZER_POSIX
+ Report("ERROR: %s: invalid alignment requested in "
+ "aligned_alloc: %zd, alignment must be a power of two and the "
+ "requested size 0x%zx must be a multiple of alignment\n",
+ SanitizerToolName, alignment, size);
+#else
+ Report("ERROR: %s: invalid alignment requested in aligned_alloc: %zd, "
+ "the requested size 0x%zx must be a multiple of alignment\n",
+ SanitizerToolName, alignment, size);
+#endif
+ }
+ Die();
+}
+
+void NORETURN ReportInvalidPosixMemalignAlignment(uptr alignment,
+ const StackTrace *stack) {
+ {
+ ScopedAllocatorErrorReport report("invalid-posix-memalign-alignment",
+ stack);
+ Report("ERROR: %s: invalid alignment requested in "
+ "posix_memalign: %zd, alignment must be a power of two and a "
+ "multiple of sizeof(void*) == %zd\n", SanitizerToolName, alignment,
+ sizeof(void*)); // NOLINT
+ }
+ Die();
+}
+
+void NORETURN ReportAllocationSizeTooBig(uptr user_size, uptr max_size,
+ const StackTrace *stack) {
+ {
+ ScopedAllocatorErrorReport report("allocation-size-too-big", stack);
+ Report("ERROR: %s: requested allocation size 0x%zx exceeds maximum "
+ "supported size of 0x%zx\n", SanitizerToolName, user_size, max_size);
+ }
+ Die();
+}
+
+void NORETURN ReportOutOfMemory(uptr requested_size, const StackTrace *stack) {
+ {
+ ScopedAllocatorErrorReport report("out-of-memory", stack);
+ Report("ERROR: %s: allocator is out of memory trying to allocate 0x%zx "
+ "bytes\n", SanitizerToolName, requested_size);
+ }
+ Die();
+}
+
+} // namespace __sanitizer
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_allocator_report.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_allocator_report.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_allocator_report.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_allocator_report.h (revision 351984)
@@ -0,0 +1,39 @@
+//===-- sanitizer_allocator_report.h ----------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// Shared allocator error reporting for ThreadSanitizer, MemorySanitizer, etc.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_ALLOCATOR_REPORT_H
+#define SANITIZER_ALLOCATOR_REPORT_H
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_stacktrace.h"
+
+namespace __sanitizer {
+
+void NORETURN ReportCallocOverflow(uptr count, uptr size,
+ const StackTrace *stack);
+void NORETURN ReportReallocArrayOverflow(uptr count, uptr size,
+ const StackTrace *stack);
+void NORETURN ReportPvallocOverflow(uptr size, const StackTrace *stack);
+void NORETURN ReportInvalidAllocationAlignment(uptr alignment,
+ const StackTrace *stack);
+void NORETURN ReportInvalidAlignedAllocAlignment(uptr size, uptr alignment,
+ const StackTrace *stack);
+void NORETURN ReportInvalidPosixMemalignAlignment(uptr alignment,
+ const StackTrace *stack);
+void NORETURN ReportAllocationSizeTooBig(uptr user_size, uptr max_size,
+ const StackTrace *stack);
+void NORETURN ReportOutOfMemory(uptr requested_size, const StackTrace *stack);
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_ALLOCATOR_REPORT_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_allocator_report.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_allocator_secondary.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_allocator_secondary.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_allocator_secondary.h (revision 351984)
@@ -0,0 +1,326 @@
+//===-- sanitizer_allocator_secondary.h -------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Part of the Sanitizer Allocator.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_ALLOCATOR_H
+#error This file must be included inside sanitizer_allocator.h
+#endif
+
+// Fixed array to store the LargeMmapAllocator chunks list, limited to 32K
+// total allocated chunks. To be used in memory-constrained or not memory
+// hungry cases (currently, 32-bit platforms and the internal allocator).
+class LargeMmapAllocatorPtrArrayStatic {
+ public:
+ INLINE void *Init() { return &p_[0]; }
+ INLINE void EnsureSpace(uptr n) { CHECK_LT(n, kMaxNumChunks); }
+ private:
+ static const int kMaxNumChunks = 1 << 15;
+ uptr p_[kMaxNumChunks];
+};
+
+// A much less restricted LargeMmapAllocator chunks list (compared to
+// PtrArrayStatic). Backed by an mmaped memory region, it can hold up to 1M
+// chunks. ReservedAddressRange is used instead of just MAP_NORESERVE to
+// achieve the same functionality on Fuchsia, which does not support
+// MAP_NORESERVE.
+class LargeMmapAllocatorPtrArrayDynamic {
+ public:
+ INLINE void *Init() {
+ uptr p = address_range_.Init(kMaxNumChunks * sizeof(uptr),
+ SecondaryAllocatorName);
+ CHECK(p);
+ return reinterpret_cast<void*>(p);
+ }
+
+ INLINE void EnsureSpace(uptr n) {
+ CHECK_LT(n, kMaxNumChunks);
+ DCHECK(n <= n_reserved_);
+ if (UNLIKELY(n == n_reserved_)) {
+ address_range_.MapOrDie(
+ reinterpret_cast<uptr>(address_range_.base()) +
+ n_reserved_ * sizeof(uptr),
+ kChunksBlockCount * sizeof(uptr));
+ n_reserved_ += kChunksBlockCount;
+ }
+ }
+
+ private:
+ static const int kMaxNumChunks = 1 << 20;
+ static const int kChunksBlockCount = 1 << 14;
+ ReservedAddressRange address_range_;
+ uptr n_reserved_;
+};
+
+#if SANITIZER_WORDSIZE == 32
+typedef LargeMmapAllocatorPtrArrayStatic DefaultLargeMmapAllocatorPtrArray;
+#else
+typedef LargeMmapAllocatorPtrArrayDynamic DefaultLargeMmapAllocatorPtrArray;
+#endif
+
+// This class can (de)allocate only large chunks of memory using mmap/unmap.
+// The main purpose of this allocator is to cover large and rare allocation
+// sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
+template <class MapUnmapCallback = NoOpMapUnmapCallback,
+ class PtrArrayT = DefaultLargeMmapAllocatorPtrArray,
+ class AddressSpaceViewTy = LocalAddressSpaceView>
+class LargeMmapAllocator {
+ public:
+ using AddressSpaceView = AddressSpaceViewTy;
+ void InitLinkerInitialized() {
+ page_size_ = GetPageSizeCached();
+ chunks_ = reinterpret_cast<Header**>(ptr_array_.Init());
+ }
+
+ void Init() {
+ internal_memset(this, 0, sizeof(*this));
+ InitLinkerInitialized();
+ }
+
+ void *Allocate(AllocatorStats *stat, uptr size, uptr alignment) {
+ CHECK(IsPowerOfTwo(alignment));
+ uptr map_size = RoundUpMapSize(size);
+ if (alignment > page_size_)
+ map_size += alignment;
+ // Overflow.
+ if (map_size < size) {
+ Report("WARNING: %s: LargeMmapAllocator allocation overflow: "
+ "0x%zx bytes with 0x%zx alignment requested\n",
+ SanitizerToolName, map_size, alignment);
+ return nullptr;
+ }
+ uptr map_beg = reinterpret_cast<uptr>(
+ MmapOrDieOnFatalError(map_size, SecondaryAllocatorName));
+ if (!map_beg)
+ return nullptr;
+ CHECK(IsAligned(map_beg, page_size_));
+ MapUnmapCallback().OnMap(map_beg, map_size);
+ uptr map_end = map_beg + map_size;
+ uptr res = map_beg + page_size_;
+ if (res & (alignment - 1)) // Align.
+ res += alignment - (res & (alignment - 1));
+ CHECK(IsAligned(res, alignment));
+ CHECK(IsAligned(res, page_size_));
+ CHECK_GE(res + size, map_beg);
+ CHECK_LE(res + size, map_end);
+ Header *h = GetHeader(res);
+ h->size = size;
+ h->map_beg = map_beg;
+ h->map_size = map_size;
+ uptr size_log = MostSignificantSetBitIndex(map_size);
+ CHECK_LT(size_log, ARRAY_SIZE(stats.by_size_log));
+ {
+ SpinMutexLock l(&mutex_);
+ ptr_array_.EnsureSpace(n_chunks_);
+ uptr idx = n_chunks_++;
+ h->chunk_idx = idx;
+ chunks_[idx] = h;
+ chunks_sorted_ = false;
+ stats.n_allocs++;
+ stats.currently_allocated += map_size;
+ stats.max_allocated = Max(stats.max_allocated, stats.currently_allocated);
+ stats.by_size_log[size_log]++;
+ stat->Add(AllocatorStatAllocated, map_size);
+ stat->Add(AllocatorStatMapped, map_size);
+ }
+ return reinterpret_cast<void*>(res);
+ }
+
+ void Deallocate(AllocatorStats *stat, void *p) {
+ Header *h = GetHeader(p);
+ {
+ SpinMutexLock l(&mutex_);
+ uptr idx = h->chunk_idx;
+ CHECK_EQ(chunks_[idx], h);
+ CHECK_LT(idx, n_chunks_);
+ chunks_[idx] = chunks_[--n_chunks_];
+ chunks_[idx]->chunk_idx = idx;
+ chunks_sorted_ = false;
+ stats.n_frees++;
+ stats.currently_allocated -= h->map_size;
+ stat->Sub(AllocatorStatAllocated, h->map_size);
+ stat->Sub(AllocatorStatMapped, h->map_size);
+ }
+ MapUnmapCallback().OnUnmap(h->map_beg, h->map_size);
+ UnmapOrDie(reinterpret_cast<void*>(h->map_beg), h->map_size);
+ }
+
+ uptr TotalMemoryUsed() {
+ SpinMutexLock l(&mutex_);
+ uptr res = 0;
+ for (uptr i = 0; i < n_chunks_; i++) {
+ Header *h = chunks_[i];
+ CHECK_EQ(h->chunk_idx, i);
+ res += RoundUpMapSize(h->size);
+ }
+ return res;
+ }
+
+ bool PointerIsMine(const void *p) {
+ return GetBlockBegin(p) != nullptr;
+ }
+
+ uptr GetActuallyAllocatedSize(void *p) {
+ return RoundUpTo(GetHeader(p)->size, page_size_);
+ }
+
+  // At least page_size_/2 metadata bytes are available.
+ void *GetMetaData(const void *p) {
+ // Too slow: CHECK_EQ(p, GetBlockBegin(p));
+ if (!IsAligned(reinterpret_cast<uptr>(p), page_size_)) {
+ Printf("%s: bad pointer %p\n", SanitizerToolName, p);
+ CHECK(IsAligned(reinterpret_cast<uptr>(p), page_size_));
+ }
+ return GetHeader(p) + 1;
+ }
+
+ void *GetBlockBegin(const void *ptr) {
+ uptr p = reinterpret_cast<uptr>(ptr);
+ SpinMutexLock l(&mutex_);
+ uptr nearest_chunk = 0;
+ Header *const *chunks = AddressSpaceView::Load(chunks_, n_chunks_);
+ // Cache-friendly linear search.
+ for (uptr i = 0; i < n_chunks_; i++) {
+ uptr ch = reinterpret_cast<uptr>(chunks[i]);
+      if (p < ch) continue;  // p is to the left of this chunk, skip it.
+ if (p - ch < p - nearest_chunk)
+ nearest_chunk = ch;
+ }
+ if (!nearest_chunk)
+ return nullptr;
+ const Header *h =
+ AddressSpaceView::Load(reinterpret_cast<Header *>(nearest_chunk));
+ Header *h_ptr = reinterpret_cast<Header *>(nearest_chunk);
+ CHECK_GE(nearest_chunk, h->map_beg);
+ CHECK_LT(nearest_chunk, h->map_beg + h->map_size);
+ CHECK_LE(nearest_chunk, p);
+ if (h->map_beg + h->map_size <= p)
+ return nullptr;
+ return GetUser(h_ptr);
+ }
+
+ void EnsureSortedChunks() {
+ if (chunks_sorted_) return;
+ Header **chunks = AddressSpaceView::LoadWritable(chunks_, n_chunks_);
+ Sort(reinterpret_cast<uptr *>(chunks), n_chunks_);
+ for (uptr i = 0; i < n_chunks_; i++)
+ AddressSpaceView::LoadWritable(chunks[i])->chunk_idx = i;
+ chunks_sorted_ = true;
+ }
+
+ // This function does the same as GetBlockBegin, but is much faster.
+ // Must be called with the allocator locked.
+ void *GetBlockBeginFastLocked(void *ptr) {
+ mutex_.CheckLocked();
+ uptr p = reinterpret_cast<uptr>(ptr);
+ uptr n = n_chunks_;
+ if (!n) return nullptr;
+ EnsureSortedChunks();
+ Header *const *chunks = AddressSpaceView::Load(chunks_, n_chunks_);
+ auto min_mmap_ = reinterpret_cast<uptr>(chunks[0]);
+ auto max_mmap_ = reinterpret_cast<uptr>(chunks[n - 1]) +
+ AddressSpaceView::Load(chunks[n - 1])->map_size;
+ if (p < min_mmap_ || p >= max_mmap_)
+ return nullptr;
+ uptr beg = 0, end = n - 1;
+ // This loop is a log(n) lower_bound. It does not check for the exact match
+ // to avoid expensive cache-thrashing loads.
+ while (end - beg >= 2) {
+ uptr mid = (beg + end) / 2; // Invariant: mid >= beg + 1
+ if (p < reinterpret_cast<uptr>(chunks[mid]))
+ end = mid - 1; // We are not interested in chunks[mid].
+ else
+ beg = mid; // chunks[mid] may still be what we want.
+ }
+
+ if (beg < end) {
+ CHECK_EQ(beg + 1, end);
+ // There are 2 chunks left, choose one.
+ if (p >= reinterpret_cast<uptr>(chunks[end]))
+ beg = end;
+ }
+
+ const Header *h = AddressSpaceView::Load(chunks[beg]);
+ Header *h_ptr = chunks[beg];
+ if (h->map_beg + h->map_size <= p || p < h->map_beg)
+ return nullptr;
+ return GetUser(h_ptr);
+ }
+
+ void PrintStats() {
+ Printf("Stats: LargeMmapAllocator: allocated %zd times, "
+ "remains %zd (%zd K) max %zd M; by size logs: ",
+ stats.n_allocs, stats.n_allocs - stats.n_frees,
+ stats.currently_allocated >> 10, stats.max_allocated >> 20);
+ for (uptr i = 0; i < ARRAY_SIZE(stats.by_size_log); i++) {
+ uptr c = stats.by_size_log[i];
+ if (!c) continue;
+ Printf("%zd:%zd; ", i, c);
+ }
+ Printf("\n");
+ }
+
+  // ForceLock() and ForceUnlock() are needed to implement the Darwin malloc
+  // zone introspection API.
+ void ForceLock() {
+ mutex_.Lock();
+ }
+
+ void ForceUnlock() {
+ mutex_.Unlock();
+ }
+
+ // Iterate over all existing chunks.
+ // The allocator must be locked when calling this function.
+ void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+ EnsureSortedChunks(); // Avoid doing the sort while iterating.
+ const Header *const *chunks = AddressSpaceView::Load(chunks_, n_chunks_);
+ for (uptr i = 0; i < n_chunks_; i++) {
+ const Header *t = chunks[i];
+ callback(reinterpret_cast<uptr>(GetUser(t)), arg);
+ // Consistency check: verify that the array did not change.
+ CHECK_EQ(chunks[i], t);
+ CHECK_EQ(AddressSpaceView::Load(chunks[i])->chunk_idx, i);
+ }
+ }
+
+ private:
+ struct Header {
+ uptr map_beg;
+ uptr map_size;
+ uptr size;
+ uptr chunk_idx;
+ };
+
+ Header *GetHeader(uptr p) {
+ CHECK(IsAligned(p, page_size_));
+ return reinterpret_cast<Header*>(p - page_size_);
+ }
+ Header *GetHeader(const void *p) {
+ return GetHeader(reinterpret_cast<uptr>(p));
+ }
+
+ void *GetUser(const Header *h) {
+ CHECK(IsAligned((uptr)h, page_size_));
+ return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + page_size_);
+ }
+
+ uptr RoundUpMapSize(uptr size) {
+ return RoundUpTo(size, page_size_) + page_size_;
+ }
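+
+  // Layout sketch (an illustration, not part of the upstream source): for
+  // page_size_ == 4096 and size == 10000, RoundUpMapSize() yields
+  // 12288 + 4096 == 16384 mapped bytes. The first page holds the Header
+  // (GetHeader(p) == p - page_size_), and Allocate() returns
+  // map_beg + page_size_ when no extra alignment is requested.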
+
+ uptr page_size_;
+ Header **chunks_;
+ PtrArrayT ptr_array_;
+ uptr n_chunks_;
+ bool chunks_sorted_;
+ struct Stats {
+ uptr n_allocs, n_frees, currently_allocated, max_allocated, by_size_log[64];
+ } stats;
+ StaticSpinMutex mutex_;
+};
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_allocator_secondary.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_allocator_size_class_map.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_allocator_size_class_map.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_allocator_size_class_map.h (revision 351984)
@@ -0,0 +1,241 @@
+//===-- sanitizer_allocator_size_class_map.h --------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Part of the Sanitizer Allocator.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_ALLOCATOR_H
+#error This file must be included inside sanitizer_allocator.h
+#endif
+
+// SizeClassMap maps allocation sizes into size classes and back.
+// Class 0 always corresponds to size 0.
+// The other sizes are controlled by the template parameters:
+// kMinSizeLog: defines the class 1 as 2^kMinSizeLog.
+// kMaxSizeLog: defines the last class as 2^kMaxSizeLog.
+//  kMidSizeLog: the classes starting from 1 increase in steps of
+//    2^kMinSizeLog until 2^kMidSizeLog.
+// kNumBits: the number of non-zero bits in sizes after 2^kMidSizeLog.
+// E.g. with kNumBits==3 all size classes after 2^kMidSizeLog
+// look like 0b1xx0..0, where x is either 0 or 1.
+//
+// Example: kNumBits=3, kMinSizeLog=4, kMidSizeLog=8, kMaxSizeLog=17:
+//
+// Classes 1 - 16 correspond to sizes 16 to 256 (size = class_id * 16).
+// Next 4 classes: 256 + i * 64 (i = 1 to 4).
+// Next 4 classes: 512 + i * 128 (i = 1 to 4).
+// ...
+// Next 4 classes: 2^k + i * 2^(k-2) (i = 1 to 4).
+// Last class corresponds to kMaxSize = 1 << kMaxSizeLog.
+//
+// This structure of the size class map gives us:
+// - Efficient table-free class-to-size and size-to-class functions.
+// - The difference between two consecutive size classes is between 14% and 25%.
+//
+// This class also gives a hint to a thread-caching allocator about the
+// number of chunks that need to be cached per thread:
+// - kMaxNumCachedHint is a hint for maximal number of chunks per size class.
+// The actual number is computed in TransferBatch.
+// - (1 << kMaxBytesCachedLog) is the maximal number of bytes per size class.
+//
+// Part of output of SizeClassMap::Print():
+// c00 => s: 0 diff: +0 00% l 0 cached: 0 0; id 0
+// c01 => s: 16 diff: +16 00% l 4 cached: 256 4096; id 1
+// c02 => s: 32 diff: +16 100% l 5 cached: 256 8192; id 2
+// c03 => s: 48 diff: +16 50% l 5 cached: 256 12288; id 3
+// c04 => s: 64 diff: +16 33% l 6 cached: 256 16384; id 4
+// c05 => s: 80 diff: +16 25% l 6 cached: 256 20480; id 5
+// c06 => s: 96 diff: +16 20% l 6 cached: 256 24576; id 6
+// c07 => s: 112 diff: +16 16% l 6 cached: 256 28672; id 7
+//
+// c08 => s: 128 diff: +16 14% l 7 cached: 256 32768; id 8
+// c09 => s: 144 diff: +16 12% l 7 cached: 256 36864; id 9
+// c10 => s: 160 diff: +16 11% l 7 cached: 256 40960; id 10
+// c11 => s: 176 diff: +16 10% l 7 cached: 256 45056; id 11
+// c12 => s: 192 diff: +16 09% l 7 cached: 256 49152; id 12
+// c13 => s: 208 diff: +16 08% l 7 cached: 256 53248; id 13
+// c14 => s: 224 diff: +16 07% l 7 cached: 256 57344; id 14
+// c15 => s: 240 diff: +16 07% l 7 cached: 256 61440; id 15
+//
+// c16 => s: 256 diff: +16 06% l 8 cached: 256 65536; id 16
+// c17 => s: 320 diff: +64 25% l 8 cached: 204 65280; id 17
+// c18 => s: 384 diff: +64 20% l 8 cached: 170 65280; id 18
+// c19 => s: 448 diff: +64 16% l 8 cached: 146 65408; id 19
+//
+// c20 => s: 512 diff: +64 14% l 9 cached: 128 65536; id 20
+// c21 => s: 640 diff: +128 25% l 9 cached: 102 65280; id 21
+// c22 => s: 768 diff: +128 20% l 9 cached: 85 65280; id 22
+// c23 => s: 896 diff: +128 16% l 9 cached: 73 65408; id 23
+//
+// c24 => s: 1024 diff: +128 14% l 10 cached: 64 65536; id 24
+// c25 => s: 1280 diff: +256 25% l 10 cached: 51 65280; id 25
+// c26 => s: 1536 diff: +256 20% l 10 cached: 42 64512; id 26
+// c27 => s: 1792 diff: +256 16% l 10 cached: 36 64512; id 27
+//
+// ...
+//
+// c48 => s: 65536 diff: +8192 14% l 16 cached: 1 65536; id 48
+// c49 => s: 81920 diff: +16384 25% l 16 cached: 1 81920; id 49
+// c50 => s: 98304 diff: +16384 20% l 16 cached: 1 98304; id 50
+// c51 => s: 114688 diff: +16384 16% l 16 cached: 1 114688; id 51
+//
+// c52 => s: 131072 diff: +16384 14% l 17 cached: 1 131072; id 52
+//
+//
+// Another example (kNumBits=2):
+// c00 => s: 0 diff: +0 00% l 0 cached: 0 0; id 0
+// c01 => s: 32 diff: +32 00% l 5 cached: 64 2048; id 1
+// c02 => s: 64 diff: +32 100% l 6 cached: 64 4096; id 2
+// c03 => s: 96 diff: +32 50% l 6 cached: 64 6144; id 3
+// c04 => s: 128 diff: +32 33% l 7 cached: 64 8192; id 4
+// c05 => s: 160 diff: +32 25% l 7 cached: 64 10240; id 5
+// c06 => s: 192 diff: +32 20% l 7 cached: 64 12288; id 6
+// c07 => s: 224 diff: +32 16% l 7 cached: 64 14336; id 7
+// c08 => s: 256 diff: +32 14% l 8 cached: 64 16384; id 8
+// c09 => s: 384 diff: +128 50% l 8 cached: 42 16128; id 9
+// c10 => s: 512 diff: +128 33% l 9 cached: 32 16384; id 10
+// c11 => s: 768 diff: +256 50% l 9 cached: 21 16128; id 11
+// c12 => s: 1024 diff: +256 33% l 10 cached: 16 16384; id 12
+// c13 => s: 1536 diff: +512 50% l 10 cached: 10 15360; id 13
+// c14 => s: 2048 diff: +512 33% l 11 cached: 8 16384; id 14
+// c15 => s: 3072 diff: +1024 50% l 11 cached: 5 15360; id 15
+// c16 => s: 4096 diff: +1024 33% l 12 cached: 4 16384; id 16
+// c17 => s: 6144 diff: +2048 50% l 12 cached: 2 12288; id 17
+// c18 => s: 8192 diff: +2048 33% l 13 cached: 2 16384; id 18
+// c19 => s: 12288 diff: +4096 50% l 13 cached: 1 12288; id 19
+// c20 => s: 16384 diff: +4096 33% l 14 cached: 1 16384; id 20
+// c21 => s: 24576 diff: +8192 50% l 14 cached: 1 24576; id 21
+// c22 => s: 32768 diff: +8192 33% l 15 cached: 1 32768; id 22
+// c23 => s: 49152 diff: +16384 50% l 15 cached: 1 49152; id 23
+// c24 => s: 65536 diff: +16384 33% l 16 cached: 1 65536; id 24
+// c25 => s: 98304 diff: +32768 50% l 16 cached: 1 98304; id 25
+// c26 => s: 131072 diff: +32768 33% l 17 cached: 1 131072; id 26
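+//
+// Worked example (an illustration, not part of the upstream source): for a
+// map with kNumBits=3, kMinSizeLog=4, kMidSizeLog=8 (so S == 2 and
+// kMidClass == 16), ClassID(320) computes l == 8, hbits == (320 >> 6) & 3 ==
+// 1, lbits == 0 and l1 == 0, giving 16 + 0 + 1 + 0 == 17. Size(17) maps back
+// via t == 256 << 0 == 256 to 256 + (256 >> 2) * 1 == 320, matching row c17
+// in the first table above.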
+
+template <uptr kNumBits, uptr kMinSizeLog, uptr kMidSizeLog, uptr kMaxSizeLog,
+ uptr kMaxNumCachedHintT, uptr kMaxBytesCachedLog>
+class SizeClassMap {
+ static const uptr kMinSize = 1 << kMinSizeLog;
+ static const uptr kMidSize = 1 << kMidSizeLog;
+ static const uptr kMidClass = kMidSize / kMinSize;
+ static const uptr S = kNumBits - 1;
+ static const uptr M = (1 << S) - 1;
+
+ public:
+ // kMaxNumCachedHintT is a power of two. It serves as a hint
+  // for the size of TransferBatch; the actual size could be a bit smaller.
+ static const uptr kMaxNumCachedHint = kMaxNumCachedHintT;
+ COMPILER_CHECK((kMaxNumCachedHint & (kMaxNumCachedHint - 1)) == 0);
+
+ static const uptr kMaxSize = 1UL << kMaxSizeLog;
+ static const uptr kNumClasses =
+ kMidClass + ((kMaxSizeLog - kMidSizeLog) << S) + 1 + 1;
+ static const uptr kLargestClassID = kNumClasses - 2;
+ static const uptr kBatchClassID = kNumClasses - 1;
+ COMPILER_CHECK(kNumClasses >= 16 && kNumClasses <= 256);
+ static const uptr kNumClassesRounded =
+ kNumClasses <= 32 ? 32 :
+ kNumClasses <= 64 ? 64 :
+ kNumClasses <= 128 ? 128 : 256;
+
+ static uptr Size(uptr class_id) {
+ // Estimate the result for kBatchClassID because this class does not know
+ // the exact size of TransferBatch. It's OK since we are using the actual
+ // sizeof(TransferBatch) where it matters.
+ if (UNLIKELY(class_id == kBatchClassID))
+ return kMaxNumCachedHint * sizeof(uptr);
+ if (class_id <= kMidClass)
+ return kMinSize * class_id;
+ class_id -= kMidClass;
+ uptr t = kMidSize << (class_id >> S);
+ return t + (t >> S) * (class_id & M);
+ }
+
+ static uptr ClassID(uptr size) {
+ if (UNLIKELY(size > kMaxSize))
+ return 0;
+ if (size <= kMidSize)
+ return (size + kMinSize - 1) >> kMinSizeLog;
+ const uptr l = MostSignificantSetBitIndex(size);
+ const uptr hbits = (size >> (l - S)) & M;
+ const uptr lbits = size & ((1U << (l - S)) - 1);
+ const uptr l1 = l - kMidSizeLog;
+ return kMidClass + (l1 << S) + hbits + (lbits > 0);
+ }
+
+ static uptr MaxCachedHint(uptr size) {
+ DCHECK_LE(size, kMaxSize);
+ if (UNLIKELY(size == 0))
+ return 0;
+ uptr n;
+ // Force a 32-bit division if the template parameters allow for it.
+ if (kMaxBytesCachedLog > 31 || kMaxSizeLog > 31)
+ n = (1UL << kMaxBytesCachedLog) / size;
+ else
+ n = (1U << kMaxBytesCachedLog) / static_cast<u32>(size);
+ return Max<uptr>(1U, Min(kMaxNumCachedHint, n));
+ }
+
+ static void Print() {
+ uptr prev_s = 0;
+ uptr total_cached = 0;
+ for (uptr i = 0; i < kNumClasses; i++) {
+ uptr s = Size(i);
+ if (s >= kMidSize / 2 && (s & (s - 1)) == 0)
+ Printf("\n");
+ uptr d = s - prev_s;
+ uptr p = prev_s ? (d * 100 / prev_s) : 0;
+ uptr l = s ? MostSignificantSetBitIndex(s) : 0;
+ uptr cached = MaxCachedHint(s) * s;
+ if (i == kBatchClassID)
+ d = p = l = 0;
+ Printf("c%02zd => s: %zd diff: +%zd %02zd%% l %zd "
+ "cached: %zd %zd; id %zd\n",
+ i, Size(i), d, p, l, MaxCachedHint(s), cached, ClassID(s));
+ total_cached += cached;
+ prev_s = s;
+ }
+ Printf("Total cached: %zd\n", total_cached);
+ }
+
+ static void Validate() {
+ for (uptr c = 1; c < kNumClasses; c++) {
+ // Printf("Validate: c%zd\n", c);
+ uptr s = Size(c);
+ CHECK_NE(s, 0U);
+ if (c == kBatchClassID)
+ continue;
+ CHECK_EQ(ClassID(s), c);
+ if (c < kLargestClassID)
+ CHECK_EQ(ClassID(s + 1), c + 1);
+ CHECK_EQ(ClassID(s - 1), c);
+ CHECK_GT(Size(c), Size(c - 1));
+ }
+ CHECK_EQ(ClassID(kMaxSize + 1), 0);
+
+ for (uptr s = 1; s <= kMaxSize; s++) {
+ uptr c = ClassID(s);
+ // Printf("s%zd => c%zd\n", s, c);
+ CHECK_LT(c, kNumClasses);
+ CHECK_GE(Size(c), s);
+ if (c > 0)
+ CHECK_LT(Size(c - 1), s);
+ }
+ }
+};
+
+typedef SizeClassMap<3, 4, 8, 17, 128, 16> DefaultSizeClassMap;
+typedef SizeClassMap<3, 4, 8, 17, 64, 14> CompactSizeClassMap;
+typedef SizeClassMap<2, 5, 9, 16, 64, 14> VeryCompactSizeClassMap;
+
+// The following SizeClassMap only holds a very small number of cached entries,
+// allowing for denser per-class arrays, a smaller memory footprint and usually
+// better performance in threaded environments.
+typedef SizeClassMap<3, 4, 8, 17, 8, 10> DenseSizeClassMap;
+// Similar to the VeryCompact map above, this one has a small number of
+// different size classes and also reduced thread-local caches.
+typedef SizeClassMap<2, 5, 9, 16, 8, 10> VeryDenseSizeClassMap;
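+
+// Illustrative usage sketch (hypothetical caller code, not part of the maps
+// themselves): pick the class for a request, then the rounded size.
+//   typedef DefaultSizeClassMap Map;
+//   uptr c = Map::ClassID(100);  // smallest class with Size(c) >= 100; c == 7
+//   uptr s = Map::Size(c);       // s == kMinSize * c == 112
+//   CHECK_GE(s, 100);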
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_allocator_size_class_map.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_allocator_stats.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_allocator_stats.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_allocator_stats.h (revision 351984)
@@ -0,0 +1,106 @@
+//===-- sanitizer_allocator_stats.h -----------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Part of the Sanitizer Allocator.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_ALLOCATOR_H
+#error This file must be included inside sanitizer_allocator.h
+#endif
+
+// Memory allocator statistics
+enum AllocatorStat {
+ AllocatorStatAllocated,
+ AllocatorStatMapped,
+ AllocatorStatCount
+};
+
+typedef uptr AllocatorStatCounters[AllocatorStatCount];
+
+// Per-thread stats, live in per-thread cache.
+class AllocatorStats {
+ public:
+ void Init() {
+ internal_memset(this, 0, sizeof(*this));
+ }
+ void InitLinkerInitialized() {}
+
+ void Add(AllocatorStat i, uptr v) {
+ v += atomic_load(&stats_[i], memory_order_relaxed);
+ atomic_store(&stats_[i], v, memory_order_relaxed);
+ }
+
+ void Sub(AllocatorStat i, uptr v) {
+ v = atomic_load(&stats_[i], memory_order_relaxed) - v;
+ atomic_store(&stats_[i], v, memory_order_relaxed);
+ }
+
+ void Set(AllocatorStat i, uptr v) {
+ atomic_store(&stats_[i], v, memory_order_relaxed);
+ }
+
+ uptr Get(AllocatorStat i) const {
+ return atomic_load(&stats_[i], memory_order_relaxed);
+ }
+
+ private:
+ friend class AllocatorGlobalStats;
+ AllocatorStats *next_;
+ AllocatorStats *prev_;
+ atomic_uintptr_t stats_[AllocatorStatCount];
+};
+
+// Global stats, used for aggregation and querying.
+class AllocatorGlobalStats : public AllocatorStats {
+ public:
+ void InitLinkerInitialized() {
+ next_ = this;
+ prev_ = this;
+ }
+ void Init() {
+ internal_memset(this, 0, sizeof(*this));
+ InitLinkerInitialized();
+ }
+
+ void Register(AllocatorStats *s) {
+ SpinMutexLock l(&mu_);
+ s->next_ = next_;
+ s->prev_ = this;
+ next_->prev_ = s;
+ next_ = s;
+ }
+
+ void Unregister(AllocatorStats *s) {
+ SpinMutexLock l(&mu_);
+ s->prev_->next_ = s->next_;
+ s->next_->prev_ = s->prev_;
+ for (int i = 0; i < AllocatorStatCount; i++)
+ Add(AllocatorStat(i), s->Get(AllocatorStat(i)));
+ }
+
+ void Get(AllocatorStatCounters s) const {
+ internal_memset(s, 0, AllocatorStatCount * sizeof(uptr));
+ SpinMutexLock l(&mu_);
+ const AllocatorStats *stats = this;
+ for (;;) {
+ for (int i = 0; i < AllocatorStatCount; i++)
+ s[i] += stats->Get(AllocatorStat(i));
+ stats = stats->next_;
+ if (stats == this)
+ break;
+ }
+ // All stats must be non-negative.
+ for (int i = 0; i < AllocatorStatCount; i++)
+ s[i] = ((sptr)s[i]) >= 0 ? s[i] : 0;
+ }
+
+ private:
+ mutable StaticSpinMutex mu_;
+};
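+
+// Illustrative flow (hypothetical caller code): per-thread stats are spliced
+// into the ring and aggregated on demand.
+//   static AllocatorGlobalStats global_stats;  // linker-initialized ring head
+//   global_stats.Init();
+//   AllocatorStats thread_stats;
+//   thread_stats.Init();
+//   global_stats.Register(&thread_stats);
+//   thread_stats.Add(AllocatorStatAllocated, 128);
+//   AllocatorStatCounters totals;
+//   global_stats.Get(totals);  // walks the ring, sums per-thread counters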
+
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_allocator_stats.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_asm.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_asm.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_asm.h (revision 351984)
@@ -0,0 +1,66 @@
+//===-- sanitizer_asm.h -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Various support for assembler.
+//
+//===----------------------------------------------------------------------===//
+
+// Some toolchains do not support .cfi asm directives, so we have to hide
+// them inside macros.
+#if defined(__clang__) || \
+ (defined(__GNUC__) && defined(__GCC_HAVE_DWARF2_CFI_ASM))
+  // GCC defines __GCC_HAVE_DWARF2_CFI_ASM if it supports CFI.
+ // Clang seems to support CFI by default (or not?).
+ // We need two versions of macros: for inline asm and standalone asm files.
+# define CFI_INL_ADJUST_CFA_OFFSET(n) ".cfi_adjust_cfa_offset " #n ";"
+
+# define CFI_STARTPROC .cfi_startproc
+# define CFI_ENDPROC .cfi_endproc
+# define CFI_ADJUST_CFA_OFFSET(n) .cfi_adjust_cfa_offset n
+# define CFI_DEF_CFA_OFFSET(n) .cfi_def_cfa_offset n
+# define CFI_REL_OFFSET(reg, n) .cfi_rel_offset reg, n
+# define CFI_OFFSET(reg, n) .cfi_offset reg, n
+# define CFI_DEF_CFA_REGISTER(reg) .cfi_def_cfa_register reg
+# define CFI_DEF_CFA(reg, n) .cfi_def_cfa reg, n
+# define CFI_RESTORE(reg) .cfi_restore reg
+
+#else // No CFI
+# define CFI_INL_ADJUST_CFA_OFFSET(n)
+# define CFI_STARTPROC
+# define CFI_ENDPROC
+# define CFI_ADJUST_CFA_OFFSET(n)
+# define CFI_DEF_CFA_OFFSET(n)
+# define CFI_REL_OFFSET(reg, n)
+# define CFI_OFFSET(reg, n)
+# define CFI_DEF_CFA_REGISTER(reg)
+# define CFI_DEF_CFA(reg, n)
+# define CFI_RESTORE(reg)
+#endif
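+
+// For instance (illustrative only): the string form is concatenated into
+// inline asm, while standalone .S files use the bare directives.
+//   asm volatile("pushq %%rbx;" CFI_INL_ADJUST_CFA_OFFSET(8)
+//                "popq %%rbx;"  CFI_INL_ADJUST_CFA_OFFSET(-8) ::: "memory");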
+
+#if !defined(__APPLE__)
+# define ASM_HIDDEN(symbol) .hidden symbol
+# define ASM_TYPE_FUNCTION(symbol) .type symbol, %function
+# define ASM_SIZE(symbol) .size symbol, .-symbol
+# define ASM_SYMBOL(symbol) symbol
+# define ASM_SYMBOL_INTERCEPTOR(symbol) symbol
+# define ASM_WRAPPER_NAME(symbol) __interceptor_##symbol
+#else
+# define ASM_HIDDEN(symbol)
+# define ASM_TYPE_FUNCTION(symbol)
+# define ASM_SIZE(symbol)
+# define ASM_SYMBOL(symbol) _##symbol
+# define ASM_SYMBOL_INTERCEPTOR(symbol) _wrap_##symbol
+# define ASM_WRAPPER_NAME(symbol) __interceptor_##symbol
+#endif
+
+#if defined(__ELF__) && (defined(__GNU__) || defined(__FreeBSD__) || \
+ defined(__Fuchsia__) || defined(__linux__))
+#define NO_EXEC_STACK_DIRECTIVE .section .note.GNU-stack,"",%progbits // NOLINT
+#else
+#define NO_EXEC_STACK_DIRECTIVE
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_asm.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_atomic.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_atomic.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_atomic.h (revision 351984)
@@ -0,0 +1,86 @@
+//===-- sanitizer_atomic.h --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_ATOMIC_H
+#define SANITIZER_ATOMIC_H
+
+#include "sanitizer_internal_defs.h"
+
+namespace __sanitizer {
+
+enum memory_order {
+ memory_order_relaxed = 1 << 0,
+ memory_order_consume = 1 << 1,
+ memory_order_acquire = 1 << 2,
+ memory_order_release = 1 << 3,
+ memory_order_acq_rel = 1 << 4,
+ memory_order_seq_cst = 1 << 5
+};
+
+struct atomic_uint8_t {
+ typedef u8 Type;
+ volatile Type val_dont_use;
+};
+
+struct atomic_uint16_t {
+ typedef u16 Type;
+ volatile Type val_dont_use;
+};
+
+struct atomic_sint32_t {
+ typedef s32 Type;
+ volatile Type val_dont_use;
+};
+
+struct atomic_uint32_t {
+ typedef u32 Type;
+ volatile Type val_dont_use;
+};
+
+struct atomic_uint64_t {
+ typedef u64 Type;
+  // On 32-bit platforms u64 is not necessarily aligned on 8 bytes.
+ volatile ALIGNED(8) Type val_dont_use;
+};
+
+struct atomic_uintptr_t {
+ typedef uptr Type;
+ volatile Type val_dont_use;
+};
+
+} // namespace __sanitizer
+
+#if defined(__clang__) || defined(__GNUC__)
+# include "sanitizer_atomic_clang.h"
+#elif defined(_MSC_VER)
+# include "sanitizer_atomic_msvc.h"
+#else
+# error "Unsupported compiler"
+#endif
+
+namespace __sanitizer {
+
+// Clutter-reducing helpers.
+
+template<typename T>
+INLINE typename T::Type atomic_load_relaxed(const volatile T *a) {
+ return atomic_load(a, memory_order_relaxed);
+}
+
+template<typename T>
+INLINE void atomic_store_relaxed(volatile T *a, typename T::Type v) {
+ atomic_store(a, v, memory_order_relaxed);
+}
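+
+// e.g. (illustrative):
+//   atomic_uint32_t flag;
+//   atomic_store_relaxed(&flag, 1);
+//   u32 v = atomic_load_relaxed(&flag);  // v == 1; no ordering implied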
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_ATOMIC_H
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_atomic_clang.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_atomic_clang.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_atomic_clang.h (revision 351984)
@@ -0,0 +1,105 @@
+//===-- sanitizer_atomic_clang.h --------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
+// Not intended for direct inclusion. Include sanitizer_atomic.h.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_ATOMIC_CLANG_H
+#define SANITIZER_ATOMIC_CLANG_H
+
+#if defined(__i386__) || defined(__x86_64__)
+# include "sanitizer_atomic_clang_x86.h"
+#else
+# include "sanitizer_atomic_clang_other.h"
+#endif
+
+namespace __sanitizer {
+
+// We would like to just use compiler builtin atomic operations
+// for loads and stores, but they are mostly broken in clang:
+// - they lead to vastly inefficient code generation
+// (http://llvm.org/bugs/show_bug.cgi?id=17281)
+// - 64-bit atomic operations are not implemented on x86_32
+// (http://llvm.org/bugs/show_bug.cgi?id=15034)
+// - they are not implemented on ARM
+// error: undefined reference to '__atomic_load_4'
+
+// See http://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html
+// for mappings of the memory model to different processors.
+
+INLINE void atomic_signal_fence(memory_order) {
+ __asm__ __volatile__("" ::: "memory");
+}
+
+INLINE void atomic_thread_fence(memory_order) {
+ __sync_synchronize();
+}
+
+template<typename T>
+INLINE typename T::Type atomic_fetch_add(volatile T *a,
+ typename T::Type v, memory_order mo) {
+ (void)mo;
+ DCHECK(!((uptr)a % sizeof(*a)));
+ return __sync_fetch_and_add(&a->val_dont_use, v);
+}
+
+template<typename T>
+INLINE typename T::Type atomic_fetch_sub(volatile T *a,
+ typename T::Type v, memory_order mo) {
+ (void)mo;
+ DCHECK(!((uptr)a % sizeof(*a)));
+ return __sync_fetch_and_add(&a->val_dont_use, -v);
+}
+
+template<typename T>
+INLINE typename T::Type atomic_exchange(volatile T *a,
+ typename T::Type v, memory_order mo) {
+ DCHECK(!((uptr)a % sizeof(*a)));
+ if (mo & (memory_order_release | memory_order_acq_rel | memory_order_seq_cst))
+ __sync_synchronize();
+ v = __sync_lock_test_and_set(&a->val_dont_use, v);
+ if (mo == memory_order_seq_cst)
+ __sync_synchronize();
+ return v;
+}
+
+template <typename T>
+INLINE bool atomic_compare_exchange_strong(volatile T *a, typename T::Type *cmp,
+ typename T::Type xchg,
+ memory_order mo) {
+ typedef typename T::Type Type;
+ Type cmpv = *cmp;
+ Type prev;
+ prev = __sync_val_compare_and_swap(&a->val_dont_use, cmpv, xchg);
+ if (prev == cmpv) return true;
+ *cmp = prev;
+ return false;
+}
+
+template<typename T>
+INLINE bool atomic_compare_exchange_weak(volatile T *a,
+ typename T::Type *cmp,
+ typename T::Type xchg,
+ memory_order mo) {
+ return atomic_compare_exchange_strong(a, cmp, xchg, mo);
+}
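+
+// A typical increment loop built on these primitives (illustrative):
+//   atomic_uintptr_t counter;
+//   uptr cur = atomic_load(&counter, memory_order_relaxed);
+//   while (!atomic_compare_exchange_weak(&counter, &cur, cur + 1,
+//                                        memory_order_relaxed)) {
+//     // On failure 'cur' is refreshed with the observed value; retry.
+//   }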
+
+} // namespace __sanitizer
+
+// This include provides explicit template instantiations for atomic_uint64_t
+// on MIPS32, which does not directly support 8 byte atomics. It has to
+// come after the template definitions above.
+#if defined(_MIPS_SIM) && defined(_ABIO32)
+ #include "sanitizer_atomic_clang_mips.h"
+#endif
+
+#undef ATOMIC_ORDER
+
+#endif // SANITIZER_ATOMIC_CLANG_H
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_atomic_clang_mips.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_atomic_clang_mips.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_atomic_clang_mips.h (revision 351984)
@@ -0,0 +1,117 @@
+//===-- sanitizer_atomic_clang_mips.h ---------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
+// Not intended for direct inclusion. Include sanitizer_atomic.h.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_ATOMIC_CLANG_MIPS_H
+#define SANITIZER_ATOMIC_CLANG_MIPS_H
+
+namespace __sanitizer {
+
+// MIPS32 does not support atomics > 4 bytes. To address this lack of
+// functionality, the sanitizer library provides helper methods which use an
+// internal spin lock mechanism to emulate atomic operations when the size is
+// 8 bytes.
+static void __spin_lock(volatile int *lock) {
+ while (__sync_lock_test_and_set(lock, 1))
+ while (*lock) {
+ }
+}
+
+static void __spin_unlock(volatile int *lock) { __sync_lock_release(lock); }
+
+// Make sure the lock is on its own cache line to prevent false sharing.
+// Put it inside a struct that is aligned and padded to the typical MIPS
+// cacheline which is 32 bytes.
+static struct {
+ int lock;
+ char pad[32 - sizeof(int)];
+} __attribute__((aligned(32))) lock = {0, {0}};
+
+template <>
+INLINE atomic_uint64_t::Type atomic_fetch_add(volatile atomic_uint64_t *ptr,
+ atomic_uint64_t::Type val,
+ memory_order mo) {
+ DCHECK(mo &
+         (memory_order_relaxed | memory_order_release | memory_order_seq_cst));
+ DCHECK(!((uptr)ptr % sizeof(*ptr)));
+
+ atomic_uint64_t::Type ret;
+
+ __spin_lock(&lock.lock);
+ ret = *(const_cast<atomic_uint64_t::Type volatile *>(&ptr->val_dont_use));
+ ptr->val_dont_use = ret + val;
+ __spin_unlock(&lock.lock);
+
+ return ret;
+}
+
+template <>
+INLINE atomic_uint64_t::Type atomic_fetch_sub(volatile atomic_uint64_t *ptr,
+ atomic_uint64_t::Type val,
+ memory_order mo) {
+ return atomic_fetch_add(ptr, -val, mo);
+}
+
+template <>
+INLINE bool atomic_compare_exchange_strong(volatile atomic_uint64_t *ptr,
+ atomic_uint64_t::Type *cmp,
+ atomic_uint64_t::Type xchg,
+ memory_order mo) {
+ DCHECK(mo &
+         (memory_order_relaxed | memory_order_release | memory_order_seq_cst));
+ DCHECK(!((uptr)ptr % sizeof(*ptr)));
+
+ typedef atomic_uint64_t::Type Type;
+ Type cmpv = *cmp;
+ Type prev;
+ bool ret = false;
+
+ __spin_lock(&lock.lock);
+ prev = *(const_cast<Type volatile *>(&ptr->val_dont_use));
+ if (prev == cmpv) {
+ ret = true;
+ ptr->val_dont_use = xchg;
+ }
+ __spin_unlock(&lock.lock);
+
+ return ret;
+}
+
+template <>
+INLINE atomic_uint64_t::Type atomic_load(const volatile atomic_uint64_t *ptr,
+ memory_order mo) {
+ DCHECK(mo &
+         (memory_order_relaxed | memory_order_release | memory_order_seq_cst));
+ DCHECK(!((uptr)ptr % sizeof(*ptr)));
+
+ atomic_uint64_t::Type zero = 0;
+ volatile atomic_uint64_t *Newptr =
+ const_cast<volatile atomic_uint64_t *>(ptr);
+ return atomic_fetch_add(Newptr, zero, mo);
+}
+
+template <>
+INLINE void atomic_store(volatile atomic_uint64_t *ptr, atomic_uint64_t::Type v,
+ memory_order mo) {
+ DCHECK(mo &
+         (memory_order_relaxed | memory_order_release | memory_order_seq_cst));
+ DCHECK(!((uptr)ptr % sizeof(*ptr)));
+
+ __spin_lock(&lock.lock);
+ ptr->val_dont_use = v;
+ __spin_unlock(&lock.lock);
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_ATOMIC_CLANG_MIPS_H
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_atomic_clang_mips.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_atomic_clang_other.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_atomic_clang_other.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_atomic_clang_other.h (revision 351984)
@@ -0,0 +1,97 @@
+//===-- sanitizer_atomic_clang_other.h --------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
+// Not intended for direct inclusion. Include sanitizer_atomic.h.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_ATOMIC_CLANG_OTHER_H
+#define SANITIZER_ATOMIC_CLANG_OTHER_H
+
+namespace __sanitizer {
+
+
+INLINE void proc_yield(int cnt) {
+ __asm__ __volatile__("" ::: "memory");
+}
+
+template<typename T>
+INLINE typename T::Type atomic_load(
+ const volatile T *a, memory_order mo) {
+ DCHECK(mo & (memory_order_relaxed | memory_order_consume
+ | memory_order_acquire | memory_order_seq_cst));
+ DCHECK(!((uptr)a % sizeof(*a)));
+ typename T::Type v;
+
+ if (sizeof(*a) < 8 || sizeof(void*) == 8) {
+ // Assume that aligned loads are atomic.
+ if (mo == memory_order_relaxed) {
+ v = a->val_dont_use;
+ } else if (mo == memory_order_consume) {
+ // Assume that processor respects data dependencies
+ // (and that compiler won't break them).
+ __asm__ __volatile__("" ::: "memory");
+ v = a->val_dont_use;
+ __asm__ __volatile__("" ::: "memory");
+ } else if (mo == memory_order_acquire) {
+ __asm__ __volatile__("" ::: "memory");
+ v = a->val_dont_use;
+ __sync_synchronize();
+ } else { // seq_cst
+ // E.g. on POWER we need a hw fence even before the store.
+ __sync_synchronize();
+ v = a->val_dont_use;
+ __sync_synchronize();
+ }
+ } else {
+ // 64-bit load on 32-bit platform.
+ // Gross, but simple and reliable.
+ // Assume that it is not in read-only memory.
+ v = __sync_fetch_and_add(
+ const_cast<typename T::Type volatile *>(&a->val_dont_use), 0);
+ }
+ return v;
+}
+
+template<typename T>
+INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
+ DCHECK(mo & (memory_order_relaxed | memory_order_release
+ | memory_order_seq_cst));
+ DCHECK(!((uptr)a % sizeof(*a)));
+
+ if (sizeof(*a) < 8 || sizeof(void*) == 8) {
+    // Assume that aligned stores are atomic.
+ if (mo == memory_order_relaxed) {
+ a->val_dont_use = v;
+ } else if (mo == memory_order_release) {
+ __sync_synchronize();
+ a->val_dont_use = v;
+ __asm__ __volatile__("" ::: "memory");
+ } else { // seq_cst
+ __sync_synchronize();
+ a->val_dont_use = v;
+ __sync_synchronize();
+ }
+ } else {
+ // 64-bit store on 32-bit platform.
+ // Gross, but simple and reliable.
+ typename T::Type cmp = a->val_dont_use;
+ typename T::Type cur;
+ for (;;) {
+ cur = __sync_val_compare_and_swap(&a->val_dont_use, cmp, v);
+ if (cur == cmp || cur == v)
+ break;
+ cmp = cur;
+ }
+ }
+}
+
+} // namespace __sanitizer
+
+#endif // #ifndef SANITIZER_ATOMIC_CLANG_OTHER_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_atomic_clang_other.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_atomic_clang_x86.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_atomic_clang_x86.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_atomic_clang_x86.h (revision 351984)
@@ -0,0 +1,113 @@
+//===-- sanitizer_atomic_clang_x86.h ----------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
+// Not intended for direct inclusion. Include sanitizer_atomic.h.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_ATOMIC_CLANG_X86_H
+#define SANITIZER_ATOMIC_CLANG_X86_H
+
+namespace __sanitizer {
+
+INLINE void proc_yield(int cnt) {
+ __asm__ __volatile__("" ::: "memory");
+ for (int i = 0; i < cnt; i++)
+ __asm__ __volatile__("pause");
+ __asm__ __volatile__("" ::: "memory");
+}
+
+template<typename T>
+INLINE typename T::Type atomic_load(
+ const volatile T *a, memory_order mo) {
+ DCHECK(mo & (memory_order_relaxed | memory_order_consume
+ | memory_order_acquire | memory_order_seq_cst));
+ DCHECK(!((uptr)a % sizeof(*a)));
+ typename T::Type v;
+
+ if (sizeof(*a) < 8 || sizeof(void*) == 8) {
+ // Assume that aligned loads are atomic.
+ if (mo == memory_order_relaxed) {
+ v = a->val_dont_use;
+ } else if (mo == memory_order_consume) {
+ // Assume that processor respects data dependencies
+ // (and that compiler won't break them).
+ __asm__ __volatile__("" ::: "memory");
+ v = a->val_dont_use;
+ __asm__ __volatile__("" ::: "memory");
+ } else if (mo == memory_order_acquire) {
+ __asm__ __volatile__("" ::: "memory");
+ v = a->val_dont_use;
+ // On x86 loads are implicitly acquire.
+ __asm__ __volatile__("" ::: "memory");
+ } else { // seq_cst
+      // On x86 a plain MOV is enough for a seq_cst load.
+ __asm__ __volatile__("" ::: "memory");
+ v = a->val_dont_use;
+ __asm__ __volatile__("" ::: "memory");
+ }
+ } else {
+ // 64-bit load on 32-bit platform.
+ __asm__ __volatile__(
+ "movq %1, %%mm0;" // Use mmx reg for 64-bit atomic moves
+ "movq %%mm0, %0;" // (ptr could be read-only)
+ "emms;" // Empty mmx state/Reset FP regs
+ : "=m" (v)
+ : "m" (a->val_dont_use)
+ : // mark the mmx registers as clobbered
+#ifdef __MMX__
+ "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7",
+#endif // #ifdef __MMX__
+ "memory");
+ }
+ return v;
+}
+
+template<typename T>
+INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
+ DCHECK(mo & (memory_order_relaxed | memory_order_release
+ | memory_order_seq_cst));
+ DCHECK(!((uptr)a % sizeof(*a)));
+
+ if (sizeof(*a) < 8 || sizeof(void*) == 8) {
+    // Assume that aligned stores are atomic.
+ if (mo == memory_order_relaxed) {
+ a->val_dont_use = v;
+ } else if (mo == memory_order_release) {
+ // On x86 stores are implicitly release.
+ __asm__ __volatile__("" ::: "memory");
+ a->val_dont_use = v;
+ __asm__ __volatile__("" ::: "memory");
+ } else { // seq_cst
+ // On x86 stores are implicitly release.
+ __asm__ __volatile__("" ::: "memory");
+ a->val_dont_use = v;
+ __sync_synchronize();
+ }
+ } else {
+ // 64-bit store on 32-bit platform.
+ __asm__ __volatile__(
+ "movq %1, %%mm0;" // Use mmx reg for 64-bit atomic moves
+ "movq %%mm0, %0;"
+ "emms;" // Empty mmx state/Reset FP regs
+ : "=m" (a->val_dont_use)
+ : "m" (v)
+ : // mark the mmx registers as clobbered
+#ifdef __MMX__
+ "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7",
+#endif // #ifdef __MMX__
+ "memory");
+ if (mo == memory_order_seq_cst)
+ __sync_synchronize();
+ }
+}
+
+} // namespace __sanitizer
+
+#endif // #ifndef SANITIZER_ATOMIC_CLANG_X86_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_atomic_clang_x86.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_atomic_msvc.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_atomic_msvc.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_atomic_msvc.h (revision 351984)
@@ -0,0 +1,265 @@
+//===-- sanitizer_atomic_msvc.h ---------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
+// Not intended for direct inclusion. Include sanitizer_atomic.h.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_ATOMIC_MSVC_H
+#define SANITIZER_ATOMIC_MSVC_H
+
+extern "C" void _ReadWriteBarrier();
+#pragma intrinsic(_ReadWriteBarrier)
+extern "C" void _mm_mfence();
+#pragma intrinsic(_mm_mfence)
+extern "C" void _mm_pause();
+#pragma intrinsic(_mm_pause)
+extern "C" char _InterlockedExchange8( // NOLINT
+ char volatile *Addend, char Value); // NOLINT
+#pragma intrinsic(_InterlockedExchange8)
+extern "C" short _InterlockedExchange16( // NOLINT
+ short volatile *Addend, short Value); // NOLINT
+#pragma intrinsic(_InterlockedExchange16)
+extern "C" long _InterlockedExchange( // NOLINT
+ long volatile *Addend, long Value); // NOLINT
+#pragma intrinsic(_InterlockedExchange)
+extern "C" long _InterlockedExchangeAdd( // NOLINT
+ long volatile * Addend, long Value); // NOLINT
+#pragma intrinsic(_InterlockedExchangeAdd)
+extern "C" char _InterlockedCompareExchange8( // NOLINT
+ char volatile *Destination, // NOLINT
+ char Exchange, char Comparand); // NOLINT
+#pragma intrinsic(_InterlockedCompareExchange8)
+extern "C" short _InterlockedCompareExchange16( // NOLINT
+ short volatile *Destination, // NOLINT
+ short Exchange, short Comparand); // NOLINT
+#pragma intrinsic(_InterlockedCompareExchange16)
+extern "C"
+long long _InterlockedCompareExchange64( // NOLINT
+ long long volatile *Destination, // NOLINT
+ long long Exchange, long long Comparand); // NOLINT
+#pragma intrinsic(_InterlockedCompareExchange64)
+extern "C" void *_InterlockedCompareExchangePointer(
+ void *volatile *Destination,
+ void *Exchange, void *Comparand);
+#pragma intrinsic(_InterlockedCompareExchangePointer)
+extern "C"
+long __cdecl _InterlockedCompareExchange( // NOLINT
+ long volatile *Destination, // NOLINT
+ long Exchange, long Comparand); // NOLINT
+#pragma intrinsic(_InterlockedCompareExchange)
+
+#ifdef _WIN64
+extern "C" long long _InterlockedExchangeAdd64( // NOLINT
+ long long volatile * Addend, long long Value); // NOLINT
+#pragma intrinsic(_InterlockedExchangeAdd64)
+#endif
+
+namespace __sanitizer {
+
+INLINE void atomic_signal_fence(memory_order) {
+ _ReadWriteBarrier();
+}
+
+INLINE void atomic_thread_fence(memory_order) {
+ _mm_mfence();
+}
+
+INLINE void proc_yield(int cnt) {
+ for (int i = 0; i < cnt; i++)
+ _mm_pause();
+}
+
+template<typename T>
+INLINE typename T::Type atomic_load(
+ const volatile T *a, memory_order mo) {
+ DCHECK(mo & (memory_order_relaxed | memory_order_consume
+ | memory_order_acquire | memory_order_seq_cst));
+ DCHECK(!((uptr)a % sizeof(*a)));
+ typename T::Type v;
+ // FIXME(dvyukov): 64-bit load is not atomic on 32-bits.
+ if (mo == memory_order_relaxed) {
+ v = a->val_dont_use;
+ } else {
+ atomic_signal_fence(memory_order_seq_cst);
+ v = a->val_dont_use;
+ atomic_signal_fence(memory_order_seq_cst);
+ }
+ return v;
+}
+
+template<typename T>
+INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
+ DCHECK(mo & (memory_order_relaxed | memory_order_release
+ | memory_order_seq_cst));
+ DCHECK(!((uptr)a % sizeof(*a)));
+ // FIXME(dvyukov): 64-bit store is not atomic on 32-bits.
+ if (mo == memory_order_relaxed) {
+ a->val_dont_use = v;
+ } else {
+ atomic_signal_fence(memory_order_seq_cst);
+ a->val_dont_use = v;
+ atomic_signal_fence(memory_order_seq_cst);
+ }
+ if (mo == memory_order_seq_cst)
+ atomic_thread_fence(memory_order_seq_cst);
+}
+
+INLINE u32 atomic_fetch_add(volatile atomic_uint32_t *a,
+ u32 v, memory_order mo) {
+ (void)mo;
+ DCHECK(!((uptr)a % sizeof(*a)));
+ return (u32)_InterlockedExchangeAdd(
+ (volatile long*)&a->val_dont_use, (long)v); // NOLINT
+}
+
+INLINE uptr atomic_fetch_add(volatile atomic_uintptr_t *a,
+ uptr v, memory_order mo) {
+ (void)mo;
+ DCHECK(!((uptr)a % sizeof(*a)));
+#ifdef _WIN64
+ return (uptr)_InterlockedExchangeAdd64(
+ (volatile long long*)&a->val_dont_use, (long long)v); // NOLINT
+#else
+ return (uptr)_InterlockedExchangeAdd(
+ (volatile long*)&a->val_dont_use, (long)v); // NOLINT
+#endif
+}
+
+INLINE u32 atomic_fetch_sub(volatile atomic_uint32_t *a,
+ u32 v, memory_order mo) {
+ (void)mo;
+ DCHECK(!((uptr)a % sizeof(*a)));
+ return (u32)_InterlockedExchangeAdd(
+ (volatile long*)&a->val_dont_use, -(long)v); // NOLINT
+}
+
+INLINE uptr atomic_fetch_sub(volatile atomic_uintptr_t *a,
+ uptr v, memory_order mo) {
+ (void)mo;
+ DCHECK(!((uptr)a % sizeof(*a)));
+#ifdef _WIN64
+ return (uptr)_InterlockedExchangeAdd64(
+ (volatile long long*)&a->val_dont_use, -(long long)v); // NOLINT
+#else
+ return (uptr)_InterlockedExchangeAdd(
+ (volatile long*)&a->val_dont_use, -(long)v); // NOLINT
+#endif
+}
+
+INLINE u8 atomic_exchange(volatile atomic_uint8_t *a,
+ u8 v, memory_order mo) {
+ (void)mo;
+ DCHECK(!((uptr)a % sizeof(*a)));
+ return (u8)_InterlockedExchange8((volatile char*)&a->val_dont_use, v);
+}
+
+INLINE u16 atomic_exchange(volatile atomic_uint16_t *a,
+ u16 v, memory_order mo) {
+ (void)mo;
+ DCHECK(!((uptr)a % sizeof(*a)));
+ return (u16)_InterlockedExchange16((volatile short*)&a->val_dont_use, v);
+}
+
+INLINE u32 atomic_exchange(volatile atomic_uint32_t *a,
+ u32 v, memory_order mo) {
+ (void)mo;
+ DCHECK(!((uptr)a % sizeof(*a)));
+ return (u32)_InterlockedExchange((volatile long*)&a->val_dont_use, v);
+}
+
+INLINE bool atomic_compare_exchange_strong(volatile atomic_uint8_t *a,
+ u8 *cmp,
+ u8 xchgv,
+ memory_order mo) {
+ (void)mo;
+ DCHECK(!((uptr)a % sizeof(*a)));
+ u8 cmpv = *cmp;
+#ifdef _WIN64
+ u8 prev = (u8)_InterlockedCompareExchange8(
+ (volatile char*)&a->val_dont_use, (char)xchgv, (char)cmpv);
+#else
+ u8 prev;
+ __asm {
+ mov al, cmpv
+ mov ecx, a
+ mov dl, xchgv
+ lock cmpxchg [ecx], dl
+ mov prev, al
+ }
+#endif
+ if (prev == cmpv)
+ return true;
+ *cmp = prev;
+ return false;
+}
+
+INLINE bool atomic_compare_exchange_strong(volatile atomic_uintptr_t *a,
+ uptr *cmp,
+ uptr xchg,
+ memory_order mo) {
+ uptr cmpv = *cmp;
+ uptr prev = (uptr)_InterlockedCompareExchangePointer(
+ (void*volatile*)&a->val_dont_use, (void*)xchg, (void*)cmpv);
+ if (prev == cmpv)
+ return true;
+ *cmp = prev;
+ return false;
+}
+
+INLINE bool atomic_compare_exchange_strong(volatile atomic_uint16_t *a,
+ u16 *cmp,
+ u16 xchg,
+ memory_order mo) {
+ u16 cmpv = *cmp;
+ u16 prev = (u16)_InterlockedCompareExchange16(
+ (volatile short*)&a->val_dont_use, (short)xchg, (short)cmpv);
+ if (prev == cmpv)
+ return true;
+ *cmp = prev;
+ return false;
+}
+
+INLINE bool atomic_compare_exchange_strong(volatile atomic_uint32_t *a,
+ u32 *cmp,
+ u32 xchg,
+ memory_order mo) {
+ u32 cmpv = *cmp;
+ u32 prev = (u32)_InterlockedCompareExchange(
+ (volatile long*)&a->val_dont_use, (long)xchg, (long)cmpv);
+ if (prev == cmpv)
+ return true;
+ *cmp = prev;
+ return false;
+}
+
+INLINE bool atomic_compare_exchange_strong(volatile atomic_uint64_t *a,
+ u64 *cmp,
+ u64 xchg,
+ memory_order mo) {
+ u64 cmpv = *cmp;
+ u64 prev = (u64)_InterlockedCompareExchange64(
+ (volatile long long*)&a->val_dont_use, (long long)xchg, (long long)cmpv);
+ if (prev == cmpv)
+ return true;
+ *cmp = prev;
+ return false;
+}
+
+template<typename T>
+INLINE bool atomic_compare_exchange_weak(volatile T *a,
+ typename T::Type *cmp,
+ typename T::Type xchg,
+ memory_order mo) {
+ return atomic_compare_exchange_strong(a, cmp, xchg, mo);
+}
+
+} // namespace __sanitizer
+
+#endif  // SANITIZER_ATOMIC_MSVC_H
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_bitvector.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_bitvector.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_bitvector.h (revision 351984)
@@ -0,0 +1,350 @@
+//===-- sanitizer_bitvector.h -----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Specialized BitVector implementation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_BITVECTOR_H
+#define SANITIZER_BITVECTOR_H
+
+#include "sanitizer_common.h"
+
+namespace __sanitizer {
+
+// Fixed size bit vector based on a single basic integer.
+template <class basic_int_t = uptr>
+class BasicBitVector {
+ public:
+ enum SizeEnum : uptr { kSize = sizeof(basic_int_t) * 8 };
+
+ uptr size() const { return kSize; }
+ // No CTOR.
+ void clear() { bits_ = 0; }
+ void setAll() { bits_ = ~(basic_int_t)0; }
+ bool empty() const { return bits_ == 0; }
+
+ // Returns true if the bit has changed from 0 to 1.
+ bool setBit(uptr idx) {
+ basic_int_t old = bits_;
+ bits_ |= mask(idx);
+ return bits_ != old;
+ }
+
+ // Returns true if the bit has changed from 1 to 0.
+ bool clearBit(uptr idx) {
+ basic_int_t old = bits_;
+ bits_ &= ~mask(idx);
+ return bits_ != old;
+ }
+
+ bool getBit(uptr idx) const { return (bits_ & mask(idx)) != 0; }
+
+ uptr getAndClearFirstOne() {
+ CHECK(!empty());
+ uptr idx = LeastSignificantSetBitIndex(bits_);
+ clearBit(idx);
+ return idx;
+ }
+
+ // Do "this |= v" and return whether new bits have been added.
+ bool setUnion(const BasicBitVector &v) {
+ basic_int_t old = bits_;
+ bits_ |= v.bits_;
+ return bits_ != old;
+ }
+
+ // Do "this &= v" and return whether any bits have been removed.
+ bool setIntersection(const BasicBitVector &v) {
+ basic_int_t old = bits_;
+ bits_ &= v.bits_;
+ return bits_ != old;
+ }
+
+ // Do "this &= ~v" and return whether any bits have been removed.
+ bool setDifference(const BasicBitVector &v) {
+ basic_int_t old = bits_;
+ bits_ &= ~v.bits_;
+ return bits_ != old;
+ }
+
+ void copyFrom(const BasicBitVector &v) { bits_ = v.bits_; }
+
+ // Returns true if 'this' intersects with 'v'.
+ bool intersectsWith(const BasicBitVector &v) const {
+ return (bits_ & v.bits_) != 0;
+ }
+
+ // for (BasicBitVector<>::Iterator it(bv); it.hasNext();) {
+ // uptr idx = it.next();
+ // use(idx);
+ // }
+ class Iterator {
+ public:
+ Iterator() { }
+ explicit Iterator(const BasicBitVector &bv) : bv_(bv) {}
+ bool hasNext() const { return !bv_.empty(); }
+ uptr next() { return bv_.getAndClearFirstOne(); }
+ void clear() { bv_.clear(); }
+ private:
+ BasicBitVector bv_;
+ };
+
+ private:
+ basic_int_t mask(uptr idx) const {
+ CHECK_LT(idx, size());
+ return (basic_int_t)1UL << idx;
+ }
+ basic_int_t bits_;
+};
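+
+// e.g. (illustrative; 'use' is a hypothetical consumer):
+//   BasicBitVector<> bv;
+//   bv.clear();  // no CTOR, so explicit initialization is required
+//   bv.setBit(3);
+//   bv.setBit(7);
+//   while (!bv.empty())
+//     use(bv.getAndClearFirstOne());  // visits 3, then 7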
+
+// Fixed size bit vector of (kLevel1Size*BV::kSize**2) bits.
+// The implementation is optimized for better performance on
+// sparse bit vectors, i.e. those with few set bits.
+template <uptr kLevel1Size = 1, class BV = BasicBitVector<> >
+class TwoLevelBitVector {
+ // This is essentially a 2-level bit vector.
+  // A set bit in the first-level BV indicates that there are set bits
+  // in the corresponding BV of the second level.
+  // This structure allows O(kLevel1Size) time for clear() and empty(),
+  // as well as fast handling of sparse BVs.
+ public:
+ enum SizeEnum : uptr { kSize = BV::kSize * BV::kSize * kLevel1Size };
+ // No CTOR.
+
+ uptr size() const { return kSize; }
+
+ void clear() {
+ for (uptr i = 0; i < kLevel1Size; i++)
+ l1_[i].clear();
+ }
+
+ void setAll() {
+ for (uptr i0 = 0; i0 < kLevel1Size; i0++) {
+ l1_[i0].setAll();
+ for (uptr i1 = 0; i1 < BV::kSize; i1++)
+ l2_[i0][i1].setAll();
+ }
+ }
+
+ bool empty() const {
+ for (uptr i = 0; i < kLevel1Size; i++)
+ if (!l1_[i].empty())
+ return false;
+ return true;
+ }
+
+ // Returns true if the bit has changed from 0 to 1.
+ bool setBit(uptr idx) {
+ check(idx);
+ uptr i0 = idx0(idx);
+ uptr i1 = idx1(idx);
+ uptr i2 = idx2(idx);
+ if (!l1_[i0].getBit(i1)) {
+ l1_[i0].setBit(i1);
+ l2_[i0][i1].clear();
+ }
+ bool res = l2_[i0][i1].setBit(i2);
+ // Printf("%s: %zd => %zd %zd %zd; %d\n", __func__,
+ // idx, i0, i1, i2, res);
+ return res;
+ }
+
+ bool clearBit(uptr idx) {
+ check(idx);
+ uptr i0 = idx0(idx);
+ uptr i1 = idx1(idx);
+ uptr i2 = idx2(idx);
+ bool res = false;
+ if (l1_[i0].getBit(i1)) {
+ res = l2_[i0][i1].clearBit(i2);
+ if (l2_[i0][i1].empty())
+ l1_[i0].clearBit(i1);
+ }
+ return res;
+ }
+
+ bool getBit(uptr idx) const {
+ check(idx);
+ uptr i0 = idx0(idx);
+ uptr i1 = idx1(idx);
+ uptr i2 = idx2(idx);
+ // Printf("%s: %zd => %zd %zd %zd\n", __func__, idx, i0, i1, i2);
+ return l1_[i0].getBit(i1) && l2_[i0][i1].getBit(i2);
+ }
+
+ uptr getAndClearFirstOne() {
+ for (uptr i0 = 0; i0 < kLevel1Size; i0++) {
+ if (l1_[i0].empty()) continue;
+ uptr i1 = l1_[i0].getAndClearFirstOne();
+ uptr i2 = l2_[i0][i1].getAndClearFirstOne();
+ if (!l2_[i0][i1].empty())
+ l1_[i0].setBit(i1);
+ uptr res = i0 * BV::kSize * BV::kSize + i1 * BV::kSize + i2;
+ // Printf("getAndClearFirstOne: %zd %zd %zd => %zd\n", i0, i1, i2, res);
+ return res;
+ }
+ CHECK(0);
+ return 0;
+ }
+
+ // Do "this |= v" and return whether new bits have been added.
+ bool setUnion(const TwoLevelBitVector &v) {
+ bool res = false;
+ for (uptr i0 = 0; i0 < kLevel1Size; i0++) {
+ BV t = v.l1_[i0];
+ while (!t.empty()) {
+ uptr i1 = t.getAndClearFirstOne();
+ if (l1_[i0].setBit(i1))
+ l2_[i0][i1].clear();
+ if (l2_[i0][i1].setUnion(v.l2_[i0][i1]))
+ res = true;
+ }
+ }
+ return res;
+ }
+
+ // Do "this &= v" and return whether any bits have been removed.
+ bool setIntersection(const TwoLevelBitVector &v) {
+ bool res = false;
+ for (uptr i0 = 0; i0 < kLevel1Size; i0++) {
+ if (l1_[i0].setIntersection(v.l1_[i0]))
+ res = true;
+ if (!l1_[i0].empty()) {
+ BV t = l1_[i0];
+ while (!t.empty()) {
+ uptr i1 = t.getAndClearFirstOne();
+ if (l2_[i0][i1].setIntersection(v.l2_[i0][i1]))
+ res = true;
+ if (l2_[i0][i1].empty())
+ l1_[i0].clearBit(i1);
+ }
+ }
+ }
+ return res;
+ }
+
+ // Do "this &= ~v" and return whether any bits have been removed.
+ bool setDifference(const TwoLevelBitVector &v) {
+ bool res = false;
+ for (uptr i0 = 0; i0 < kLevel1Size; i0++) {
+ BV t = l1_[i0];
+ t.setIntersection(v.l1_[i0]);
+ while (!t.empty()) {
+ uptr i1 = t.getAndClearFirstOne();
+ if (l2_[i0][i1].setDifference(v.l2_[i0][i1]))
+ res = true;
+ if (l2_[i0][i1].empty())
+ l1_[i0].clearBit(i1);
+ }
+ }
+ return res;
+ }
+
+ void copyFrom(const TwoLevelBitVector &v) {
+ clear();
+ setUnion(v);
+ }
+
+ // Returns true if 'this' intersects with 'v'.
+ bool intersectsWith(const TwoLevelBitVector &v) const {
+ for (uptr i0 = 0; i0 < kLevel1Size; i0++) {
+ BV t = l1_[i0];
+ t.setIntersection(v.l1_[i0]);
+ while (!t.empty()) {
+ uptr i1 = t.getAndClearFirstOne();
+ if (!v.l1_[i0].getBit(i1)) continue;
+ if (l2_[i0][i1].intersectsWith(v.l2_[i0][i1]))
+ return true;
+ }
+ }
+ return false;
+ }
+
+ // for (TwoLevelBitVector<>::Iterator it(bv); it.hasNext();) {
+ // uptr idx = it.next();
+ // use(idx);
+ // }
+ class Iterator {
+ public:
+ Iterator() { }
+ explicit Iterator(const TwoLevelBitVector &bv) : bv_(bv), i0_(0), i1_(0) {
+ it1_.clear();
+ it2_.clear();
+ }
+
+ bool hasNext() const {
+ if (it1_.hasNext()) return true;
+ for (uptr i = i0_; i < kLevel1Size; i++)
+ if (!bv_.l1_[i].empty()) return true;
+ return false;
+ }
+
+ uptr next() {
+ // Printf("++++: %zd %zd; %d %d; size %zd\n", i0_, i1_, it1_.hasNext(),
+ // it2_.hasNext(), kSize);
+ if (!it1_.hasNext() && !it2_.hasNext()) {
+ for (; i0_ < kLevel1Size; i0_++) {
+ if (bv_.l1_[i0_].empty()) continue;
+ it1_ = typename BV::Iterator(bv_.l1_[i0_]);
+ // Printf("+i0: %zd %zd; %d %d; size %zd\n", i0_, i1_, it1_.hasNext(),
+ // it2_.hasNext(), kSize);
+ break;
+ }
+ }
+ if (!it2_.hasNext()) {
+ CHECK(it1_.hasNext());
+ i1_ = it1_.next();
+ it2_ = typename BV::Iterator(bv_.l2_[i0_][i1_]);
+ // Printf("++i1: %zd %zd; %d %d; size %zd\n", i0_, i1_, it1_.hasNext(),
+ // it2_.hasNext(), kSize);
+ }
+ CHECK(it2_.hasNext());
+ uptr i2 = it2_.next();
+ uptr res = i0_ * BV::kSize * BV::kSize + i1_ * BV::kSize + i2;
+ // Printf("+ret: %zd %zd; %d %d; size %zd; res: %zd\n", i0_, i1_,
+ // it1_.hasNext(), it2_.hasNext(), kSize, res);
+ if (!it1_.hasNext() && !it2_.hasNext())
+ i0_++;
+ return res;
+ }
+
+ private:
+ const TwoLevelBitVector &bv_;
+ uptr i0_, i1_;
+ typename BV::Iterator it1_, it2_;
+ };
+
+ private:
+  void check(uptr idx) const { CHECK_LT(idx, size()); }
+
+ uptr idx0(uptr idx) const {
+ uptr res = idx / (BV::kSize * BV::kSize);
+    CHECK_LT(res, kLevel1Size);
+ return res;
+ }
+
+ uptr idx1(uptr idx) const {
+ uptr res = (idx / BV::kSize) % BV::kSize;
+    CHECK_LT(res, BV::kSize);
+ return res;
+ }
+
+ uptr idx2(uptr idx) const {
+ uptr res = idx % BV::kSize;
+    CHECK_LT(res, BV::kSize);
+ return res;
+ }
+
+ BV l1_[kLevel1Size];
+ BV l2_[kLevel1Size][BV::kSize];
+};
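+
+// Index decomposition (illustrative, with BV::kSize == 64 on 64-bit hosts):
+// idx 4163 maps to i0 = 4163 / 4096 = 1, i1 = (4163 / 64) % 64 = 1,
+// i2 = 4163 % 64 = 3, i.e. bit 3 of l2_[1][1].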
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_BITVECTOR_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_bitvector.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_bvgraph.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_bvgraph.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_bvgraph.h (revision 351984)
@@ -0,0 +1,164 @@
+//===-- sanitizer_bvgraph.h -------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of Sanitizer runtime.
+// BVGraph -- a directed graph.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_BVGRAPH_H
+#define SANITIZER_BVGRAPH_H
+
+#include "sanitizer_common.h"
+#include "sanitizer_bitvector.h"
+
+namespace __sanitizer {
+
+// Directed graph of fixed size implemented as an array of bit vectors.
+// Not thread-safe, all accesses should be protected by an external lock.
+template<class BV>
+class BVGraph {
+ public:
+ enum SizeEnum : uptr { kSize = BV::kSize };
+ uptr size() const { return kSize; }
+ // No CTOR.
+ void clear() {
+ for (uptr i = 0; i < size(); i++)
+ v[i].clear();
+ }
+
+ bool empty() const {
+ for (uptr i = 0; i < size(); i++)
+ if (!v[i].empty())
+ return false;
+ return true;
+ }
+
+ // Returns true if a new edge was added.
+ bool addEdge(uptr from, uptr to) {
+ check(from, to);
+ return v[from].setBit(to);
+ }
+
+  // Adds an edge node=>'to' for every node set in 'from'. Records up to
+  // 'max_added_edges' of the newly added source nodes in 'added_edges' and
+  // returns the number recorded.
+ uptr addEdges(const BV &from, uptr to, uptr added_edges[],
+ uptr max_added_edges) {
+ uptr res = 0;
+ t1.copyFrom(from);
+ while (!t1.empty()) {
+ uptr node = t1.getAndClearFirstOne();
+ if (v[node].setBit(to))
+ if (res < max_added_edges)
+ added_edges[res++] = node;
+ }
+ return res;
+ }
+
+ // *EXPERIMENTAL*
+  // Returns true if an edge from=>to exists.
+ // This function does not use any global state except for 'this' itself,
+ // and thus can be called from different threads w/o locking.
+ // This would be racy.
+ // FIXME: investigate how much we can prove about this race being "benign".
+ bool hasEdge(uptr from, uptr to) { return v[from].getBit(to); }
+
+ // Returns true if the edge from=>to was removed.
+ bool removeEdge(uptr from, uptr to) {
+ return v[from].clearBit(to);
+ }
+
+ // Returns true if at least one edge *=>to was removed.
+ bool removeEdgesTo(const BV &to) {
+    bool res = false;
+ for (uptr from = 0; from < size(); from++) {
+ if (v[from].setDifference(to))
+ res = true;
+ }
+ return res;
+ }
+
+ // Returns true if at least one edge from=>* was removed.
+ bool removeEdgesFrom(const BV &from) {
+ bool res = false;
+ t1.copyFrom(from);
+ while (!t1.empty()) {
+ uptr idx = t1.getAndClearFirstOne();
+ if (!v[idx].empty()) {
+ v[idx].clear();
+ res = true;
+ }
+ }
+ return res;
+ }
+
+ void removeEdgesFrom(uptr from) {
+ return v[from].clear();
+ }
+
+ bool hasEdge(uptr from, uptr to) const {
+ check(from, to);
+ return v[from].getBit(to);
+ }
+
+ // Returns true if there is a path from the node 'from'
+ // to any of the nodes in 'targets'.
+ bool isReachable(uptr from, const BV &targets) {
+ BV &to_visit = t1,
+ &visited = t2;
+ to_visit.copyFrom(v[from]);
+ visited.clear();
+ visited.setBit(from);
+ while (!to_visit.empty()) {
+ uptr idx = to_visit.getAndClearFirstOne();
+ if (visited.setBit(idx))
+ to_visit.setUnion(v[idx]);
+ }
+ return targets.intersectsWith(visited);
+ }
+
+ // Finds a path from 'from' to one of the nodes in 'target',
+ // stores up to 'path_size' items of the path into 'path',
+ // returns the path length, or 0 if there is no path of size 'path_size'.
+ uptr findPath(uptr from, const BV &targets, uptr *path, uptr path_size) {
+ if (path_size == 0)
+ return 0;
+ path[0] = from;
+ if (targets.getBit(from))
+ return 1;
+    // The function is recursive, so we don't want to create a BV on the stack.
+ // Instead of a getAndClearFirstOne loop we use the slower iterator.
+ for (typename BV::Iterator it(v[from]); it.hasNext(); ) {
+ uptr idx = it.next();
+ if (uptr res = findPath(idx, targets, path + 1, path_size - 1))
+ return res + 1;
+ }
+ return 0;
+ }
+
+ // Same as findPath, but finds a shortest path.
+ uptr findShortestPath(uptr from, const BV &targets, uptr *path,
+ uptr path_size) {
+ for (uptr p = 1; p <= path_size; p++)
+ if (findPath(from, targets, path, p) == p)
+ return p;
+ return 0;
+ }
+
+ private:
+ void check(uptr idx1, uptr idx2) const {
+ CHECK_LT(idx1, size());
+ CHECK_LT(idx2, size());
+ }
+ BV v[kSize];
+  // Keep temporary vectors here since we cannot create large objects on stack.
+ BV t1, t2;
+};
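+
+// Illustrative use (hypothetical deadlock detector; the lock ids and the
+// reporting hook are assumed): an edge a=>b means "a held while acquiring b".
+//   static BVGraph<TwoLevelBitVector<> > g;  // too large for the stack
+//   g.clear();
+//   g.addEdge(id_a, id_b);
+//   TwoLevelBitVector<> targets;
+//   targets.clear();
+//   targets.setBit(id_a);
+//   if (g.isReachable(id_b, targets))
+//     ReportLockInversion();  // hypothetical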
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_BVGRAPH_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_bvgraph.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_common.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_common.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_common.cc (revision 351984)
@@ -0,0 +1,346 @@
+//===-- sanitizer_common.cc -----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common.h"
+#include "sanitizer_allocator_interface.h"
+#include "sanitizer_allocator_internal.h"
+#include "sanitizer_atomic.h"
+#include "sanitizer_flags.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_placement_new.h"
+
+namespace __sanitizer {
+
+const char *SanitizerToolName = "SanitizerTool";
+
+atomic_uint32_t current_verbosity;
+uptr PageSizeCached;
+u32 NumberOfCPUsCached;
+
+// PID of the tracer task in StopTheWorld. It shares the address space with the
+// main process, but has a different PID and thus requires special handling.
+uptr stoptheworld_tracer_pid = 0;
+// Cached pid of parent process - if the parent process dies, we want to keep
+// writing to the same log file.
+uptr stoptheworld_tracer_ppid = 0;
+
+void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
+ const char *mmap_type, error_t err,
+ bool raw_report) {
+ static int recursion_count;
+ if (SANITIZER_RTEMS || raw_report || recursion_count) {
+    // If we are on RTEMS, a raw report is requested, or we went into recursion,
+ // just die. The Report() and CHECK calls below may call mmap recursively
+ // and fail.
+ RawWrite("ERROR: Failed to mmap\n");
+ Die();
+ }
+ recursion_count++;
+ Report("ERROR: %s failed to "
+ "%s 0x%zx (%zd) bytes of %s (error code: %d)\n",
+ SanitizerToolName, mmap_type, size, size, mem_type, err);
+#if !SANITIZER_GO
+ DumpProcessMap();
+#endif
+ UNREACHABLE("unable to mmap");
+}
+
+typedef bool UptrComparisonFunction(const uptr &a, const uptr &b);
+typedef bool U32ComparisonFunction(const u32 &a, const u32 &b);
+
+const char *StripPathPrefix(const char *filepath,
+ const char *strip_path_prefix) {
+ if (!filepath) return nullptr;
+ if (!strip_path_prefix) return filepath;
+ const char *res = filepath;
+ if (const char *pos = internal_strstr(filepath, strip_path_prefix))
+ res = pos + internal_strlen(strip_path_prefix);
+ if (res[0] == '.' && res[1] == '/')
+ res += 2;
+ return res;
+}
+
+const char *StripModuleName(const char *module) {
+ if (!module)
+ return nullptr;
+ if (SANITIZER_WINDOWS) {
+ // On Windows, both slash and backslash are possible.
+ // Pick the one that goes last.
+ if (const char *bslash_pos = internal_strrchr(module, '\\'))
+ return StripModuleName(bslash_pos + 1);
+ }
+ if (const char *slash_pos = internal_strrchr(module, '/')) {
+ return slash_pos + 1;
+ }
+ return module;
+}
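+
+// e.g. (illustrative): StripModuleName("/usr/lib/libfoo.so") yields
+// "libfoo.so"; StripPathPrefix("./a/b.cc", "a/") yields "b.cc".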
+
+void ReportErrorSummary(const char *error_message, const char *alt_tool_name) {
+ if (!common_flags()->print_summary)
+ return;
+ InternalScopedString buff(kMaxSummaryLength);
+ buff.append("SUMMARY: %s: %s",
+ alt_tool_name ? alt_tool_name : SanitizerToolName, error_message);
+ __sanitizer_report_error_summary(buff.data());
+}
+
+// Removes the ANSI escape sequences from the input string (in-place).
+void RemoveANSIEscapeSequencesFromString(char *str) {
+ if (!str)
+ return;
+
+ // We are going to remove the escape sequences in place.
+ char *s = str;
+ char *z = str;
+ while (*s != '\0') {
+ CHECK_GE(s, z);
+ // Skip over ANSI escape sequences with pointer 's'.
+ if (*s == '\033' && *(s + 1) == '[') {
+ s = internal_strchrnul(s, 'm');
+ if (*s == '\0') {
+ break;
+ }
+ s++;
+ continue;
+ }
+ // 's' now points at a character we want to keep. Copy over the buffer
+    // content if an escape sequence was previously skipped, and advance
+ // both pointers.
+ if (s != z)
+ *z = *s;
+
+ // If we have not seen an escape sequence, just advance both pointers.
+ z++;
+ s++;
+ }
+
+ // Null terminate the string.
+ *z = '\0';
+}
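+
+// Usage example (illustrative): a colorized buffer such as
+//   "\033[1m\033[31mERROR\033[0m: foo"
+// is rewritten in place to "ERROR: foo". Pointer 's' skips each
+// "\033[...m" sequence while 'z' trails behind, compacting the kept
+// characters.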
+
+void LoadedModule::set(const char *module_name, uptr base_address) {
+ clear();
+ full_name_ = internal_strdup(module_name);
+ base_address_ = base_address;
+}
+
+void LoadedModule::set(const char *module_name, uptr base_address,
+ ModuleArch arch, u8 uuid[kModuleUUIDSize],
+ bool instrumented) {
+ set(module_name, base_address);
+ arch_ = arch;
+ internal_memcpy(uuid_, uuid, sizeof(uuid_));
+ instrumented_ = instrumented;
+}
+
+void LoadedModule::clear() {
+ InternalFree(full_name_);
+ base_address_ = 0;
+ max_executable_address_ = 0;
+ full_name_ = nullptr;
+ arch_ = kModuleArchUnknown;
+ internal_memset(uuid_, 0, kModuleUUIDSize);
+ instrumented_ = false;
+ while (!ranges_.empty()) {
+ AddressRange *r = ranges_.front();
+ ranges_.pop_front();
+ InternalFree(r);
+ }
+}
+
+void LoadedModule::addAddressRange(uptr beg, uptr end, bool executable,
+ bool writable, const char *name) {
+ void *mem = InternalAlloc(sizeof(AddressRange));
+ AddressRange *r =
+ new(mem) AddressRange(beg, end, executable, writable, name);
+ ranges_.push_back(r);
+ if (executable && end > max_executable_address_)
+ max_executable_address_ = end;
+}
+
+bool LoadedModule::containsAddress(uptr address) const {
+ for (const AddressRange &r : ranges()) {
+ if (r.beg <= address && address < r.end)
+ return true;
+ }
+ return false;
+}
+
+static atomic_uintptr_t g_total_mmaped;
+
+void IncreaseTotalMmap(uptr size) {
+ if (!common_flags()->mmap_limit_mb) return;
+ uptr total_mmaped =
+ atomic_fetch_add(&g_total_mmaped, size, memory_order_relaxed) + size;
+ // Since for now mmap_limit_mb is not a user-facing flag, just kill
+ // the program. Use RAW_CHECK to avoid extra mmaps in reporting.
+ RAW_CHECK((total_mmaped >> 20) < common_flags()->mmap_limit_mb);
+}
+
+void DecreaseTotalMmap(uptr size) {
+ if (!common_flags()->mmap_limit_mb) return;
+ atomic_fetch_sub(&g_total_mmaped, size, memory_order_relaxed);
+}
+
+bool TemplateMatch(const char *templ, const char *str) {
+ if ((!str) || str[0] == 0)
+ return false;
+ bool start = false;
+ if (templ && templ[0] == '^') {
+ start = true;
+ templ++;
+ }
+ bool asterisk = false;
+ while (templ && templ[0]) {
+ if (templ[0] == '*') {
+ templ++;
+ start = false;
+ asterisk = true;
+ continue;
+ }
+ if (templ[0] == '$')
+ return str[0] == 0 || asterisk;
+ if (str[0] == 0)
+ return false;
+ char *tpos = (char*)internal_strchr(templ, '*');
+ char *tpos1 = (char*)internal_strchr(templ, '$');
+ if ((!tpos) || (tpos1 && tpos1 < tpos))
+ tpos = tpos1;
+ if (tpos)
+ tpos[0] = 0;
+ const char *str0 = str;
+ const char *spos = internal_strstr(str, templ);
+ str = spos + internal_strlen(templ);
+ templ = tpos;
+ if (tpos)
+ tpos[0] = tpos == tpos1 ? '$' : '*';
+ if (!spos)
+ return false;
+ if (start && spos != str0)
+ return false;
+ start = false;
+ asterisk = false;
+ }
+ return true;
+}
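+
+// Usage examples (illustrative):
+//   TemplateMatch("foo", "a_foo_b")  -> true  (substring match)
+//   TemplateMatch("^foo", "a_foo_b") -> false ('^' anchors at the start)
+//   TemplateMatch("foo$", "a_foo")   -> true  ('$' anchors at the end)
+//   TemplateMatch("a*c", "a_b_c")    -> true  ('*' matches any run)
+// Note that the matcher temporarily writes into 'templ', so the pattern
+// must live in writable memory.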
+
+static char binary_name_cache_str[kMaxPathLength];
+static char process_name_cache_str[kMaxPathLength];
+
+const char *GetProcessName() {
+ return process_name_cache_str;
+}
+
+static uptr ReadProcessName(/*out*/ char *buf, uptr buf_len) {
+ ReadLongProcessName(buf, buf_len);
+ char *s = const_cast<char *>(StripModuleName(buf));
+ uptr len = internal_strlen(s);
+ if (s != buf) {
+ internal_memmove(buf, s, len);
+ buf[len] = '\0';
+ }
+ return len;
+}
+
+void UpdateProcessName() {
+ ReadProcessName(process_name_cache_str, sizeof(process_name_cache_str));
+}
+
+// Call once to make sure that binary_name_cache_str is initialized.
+void CacheBinaryName() {
+ if (binary_name_cache_str[0] != '\0')
+ return;
+ ReadBinaryName(binary_name_cache_str, sizeof(binary_name_cache_str));
+ ReadProcessName(process_name_cache_str, sizeof(process_name_cache_str));
+}
+
+uptr ReadBinaryNameCached(/*out*/char *buf, uptr buf_len) {
+ CacheBinaryName();
+ if (buf_len == 0)
+ return 0;
+ uptr name_len = internal_strlen(binary_name_cache_str);
+ // Note: buf_len - 1 below would underflow if buf_len were 0.
+ name_len = (name_len < buf_len - 1) ? name_len : buf_len - 1;
+ internal_memcpy(buf, binary_name_cache_str, name_len);
+ buf[name_len] = '\0';
+ return name_len;
+}
+
+void PrintCmdline() {
+ char **argv = GetArgv();
+ if (!argv) return;
+ Printf("\nCommand: ");
+ for (uptr i = 0; argv[i]; ++i)
+ Printf("%s ", argv[i]);
+ Printf("\n\n");
+}
+
+// Malloc hooks.
+static const int kMaxMallocFreeHooks = 5;
+struct MallocFreeHook {
+ void (*malloc_hook)(const void *, uptr);
+ void (*free_hook)(const void *);
+};
+
+static MallocFreeHook MFHooks[kMaxMallocFreeHooks];
+
+void RunMallocHooks(const void *ptr, uptr size) {
+ for (int i = 0; i < kMaxMallocFreeHooks; i++) {
+ auto hook = MFHooks[i].malloc_hook;
+ if (!hook) return;
+ hook(ptr, size);
+ }
+}
+
+void RunFreeHooks(const void *ptr) {
+ for (int i = 0; i < kMaxMallocFreeHooks; i++) {
+ auto hook = MFHooks[i].free_hook;
+ if (!hook) return;
+ hook(ptr);
+ }
+}
+
+static int InstallMallocFreeHooks(void (*malloc_hook)(const void *, uptr),
+ void (*free_hook)(const void *)) {
+ if (!malloc_hook || !free_hook) return 0;
+ for (int i = 0; i < kMaxMallocFreeHooks; i++) {
+ if (MFHooks[i].malloc_hook == nullptr) {
+ MFHooks[i].malloc_hook = malloc_hook;
+ MFHooks[i].free_hook = free_hook;
+ return i + 1;
+ }
+ }
+ return 0;
+}
+
+} // namespace __sanitizer
+
+using namespace __sanitizer; // NOLINT
+
+extern "C" {
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_report_error_summary,
+ const char *error_summary) {
+ Printf("%s\n", error_summary);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __sanitizer_acquire_crash_state() {
+ static atomic_uint8_t in_crash_state = {};
+ return !atomic_exchange(&in_crash_state, 1, memory_order_relaxed);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __sanitizer_install_malloc_and_free_hooks(void (*malloc_hook)(const void *,
+ uptr),
+ void (*free_hook)(const void *)) {
+ return InstallMallocFreeHooks(malloc_hook, free_hook);
+}
+} // extern "C"
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_common.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_common.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_common.h (revision 351984)
@@ -0,0 +1,978 @@
+//===-- sanitizer_common.h --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between run-time libraries of sanitizers.
+//
+// It declares common functions and classes that are used in both runtimes.
+// Implementations of some functions are provided in sanitizer_common, while
+// others must be defined by the run-time library itself.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_COMMON_H
+#define SANITIZER_COMMON_H
+
+#include "sanitizer_flags.h"
+#include "sanitizer_interface_internal.h"
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_list.h"
+#include "sanitizer_mutex.h"
+
+#if defined(_MSC_VER) && !defined(__clang__)
+extern "C" void _ReadWriteBarrier();
+#pragma intrinsic(_ReadWriteBarrier)
+#endif
+
+namespace __sanitizer {
+
+struct AddressInfo;
+struct BufferedStackTrace;
+struct SignalContext;
+struct StackTrace;
+
+// Constants.
+const uptr kWordSize = SANITIZER_WORDSIZE / 8;
+const uptr kWordSizeInBits = 8 * kWordSize;
+
+const uptr kCacheLineSize = SANITIZER_CACHE_LINE_SIZE;
+
+const uptr kMaxPathLength = 4096;
+
+const uptr kMaxThreadStackSize = 1 << 30; // 1Gb
+
+static const uptr kErrorMessageBufferSize = 1 << 16;
+
+// Denotes fake PC values that come from JIT/JAVA/etc.
+// For such PC values __tsan_symbolize_external_ex() will be called.
+const u64 kExternalPCBit = 1ULL << 60;
+
+extern const char *SanitizerToolName; // Can be changed by the tool.
+
+extern atomic_uint32_t current_verbosity;
+INLINE void SetVerbosity(int verbosity) {
+ atomic_store(&current_verbosity, verbosity, memory_order_relaxed);
+}
+INLINE int Verbosity() {
+ return atomic_load(&current_verbosity, memory_order_relaxed);
+}
+
+#if SANITIZER_ANDROID
+INLINE uptr GetPageSize() {
+// Android post-M sysconf(_SC_PAGESIZE) crashes if called from .preinit_array.
+ return 4096;
+}
+INLINE uptr GetPageSizeCached() {
+ return 4096;
+}
+#else
+uptr GetPageSize();
+extern uptr PageSizeCached;
+INLINE uptr GetPageSizeCached() {
+ if (!PageSizeCached)
+ PageSizeCached = GetPageSize();
+ return PageSizeCached;
+}
+#endif
+uptr GetMmapGranularity();
+uptr GetMaxVirtualAddress();
+uptr GetMaxUserVirtualAddress();
+// Threads
+tid_t GetTid();
+int TgKill(pid_t pid, tid_t tid, int sig);
+uptr GetThreadSelf();
+void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
+ uptr *stack_bottom);
+void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
+ uptr *tls_addr, uptr *tls_size);
+
+// Memory management
+void *MmapOrDie(uptr size, const char *mem_type, bool raw_report = false);
+INLINE void *MmapOrDieQuietly(uptr size, const char *mem_type) {
+ return MmapOrDie(size, mem_type, /*raw_report*/ true);
+}
+void UnmapOrDie(void *addr, uptr size);
+// Behaves just like MmapOrDie, but tolerates an out-of-memory condition, in
+// which case it returns nullptr.
+void *MmapOrDieOnFatalError(uptr size, const char *mem_type);
+bool MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name = nullptr)
+ WARN_UNUSED_RESULT;
+void *MmapNoReserveOrDie(uptr size, const char *mem_type);
+void *MmapFixedOrDie(uptr fixed_addr, uptr size, const char *name = nullptr);
+// Behaves just like MmapFixedOrDie, but tolerates an out-of-memory condition,
+// in which case it returns nullptr.
+void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size,
+ const char *name = nullptr);
+void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name = nullptr);
+void *MmapNoAccess(uptr size);
+// Map aligned chunk of address space; size and alignment are powers of two.
+// Dies on all but out of memory errors, in the latter case returns nullptr.
+void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
+ const char *mem_type);
+// Disallow access to a memory range. Use MmapFixedNoAccess to allocate
+// inaccessible memory.
+bool MprotectNoAccess(uptr addr, uptr size);
+bool MprotectReadOnly(uptr addr, uptr size);
+
+void MprotectMallocZones(void *addr, int prot);
+
+// Find an available address space.
+uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
+ uptr *largest_gap_found, uptr *max_occupied_addr);
+
+// Used to check if we can map shadow memory to a fixed location.
+bool MemoryRangeIsAvailable(uptr range_start, uptr range_end);
+// Releases memory pages entirely within the [beg, end] address range. Noop if
+// the provided range does not contain at least one entire page.
+void ReleaseMemoryPagesToOS(uptr beg, uptr end);
+void IncreaseTotalMmap(uptr size);
+void DecreaseTotalMmap(uptr size);
+uptr GetRSS();
+bool NoHugePagesInRegion(uptr addr, uptr length);
+bool DontDumpShadowMemory(uptr addr, uptr length);
+// Check if the built VMA size matches the runtime one.
+void CheckVMASize();
+void RunMallocHooks(const void *ptr, uptr size);
+void RunFreeHooks(const void *ptr);
+
+class ReservedAddressRange {
+ public:
+ uptr Init(uptr size, const char *name = nullptr, uptr fixed_addr = 0);
+ uptr Map(uptr fixed_addr, uptr size, const char *name = nullptr);
+ uptr MapOrDie(uptr fixed_addr, uptr size, const char *name = nullptr);
+ void Unmap(uptr addr, uptr size);
+ void *base() const { return base_; }
+ uptr size() const { return size_; }
+
+ private:
+ void* base_;
+ uptr size_;
+ const char* name_;
+ uptr os_handle_;
+};
+
+typedef void (*fill_profile_f)(uptr start, uptr rss, bool file,
+ /*out*/uptr *stats, uptr stats_size);
+
+// Parse the contents of /proc/self/smaps and generate a memory profile.
+// |cb| is a tool-specific callback that fills the |stats| array containing
+// |stats_size| elements.
+void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size);
+
+// Simple low-level (mmap-based) allocator for internal use. It doesn't have
+// a constructor, so all instances of LowLevelAllocator must be
+// linker-initialized.
+class LowLevelAllocator {
+ public:
+ // Requires an external lock.
+ void *Allocate(uptr size);
+ private:
+ char *allocated_end_;
+ char *allocated_current_;
+};
+// Set the min alignment of LowLevelAllocator to at least alignment.
+void SetLowLevelAllocateMinAlignment(uptr alignment);
+typedef void (*LowLevelAllocateCallback)(uptr ptr, uptr size);
+// Registers a tool-specific callback for LowLevelAllocator.
+// Passing NULL removes the callback.
+void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback);
+
+// IO
+void CatastrophicErrorWrite(const char *buffer, uptr length);
+void RawWrite(const char *buffer);
+bool ColorizeReports();
+void RemoveANSIEscapeSequencesFromString(char *buffer);
+void Printf(const char *format, ...);
+void Report(const char *format, ...);
+void SetPrintfAndReportCallback(void (*callback)(const char *));
+#define VReport(level, ...) \
+ do { \
+ if ((uptr)Verbosity() >= (level)) Report(__VA_ARGS__); \
+ } while (0)
+#define VPrintf(level, ...) \
+ do { \
+ if ((uptr)Verbosity() >= (level)) Printf(__VA_ARGS__); \
+ } while (0)
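+
+// Usage example (illustrative): VReport/VPrintf gate output on the runtime
+// verbosity flag, e.g.
+//   VReport(1, "Shadow memory is mapped at %p\n", (void *)addr);
+// prints only when the tool runs with verbosity >= 1.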
+
+// Locks sanitizer error reporting and protects against nested errors.
+class ScopedErrorReportLock {
+ public:
+ ScopedErrorReportLock();
+ ~ScopedErrorReportLock();
+
+ static void CheckLocked();
+};
+
+extern uptr stoptheworld_tracer_pid;
+extern uptr stoptheworld_tracer_ppid;
+
+bool IsAccessibleMemoryRange(uptr beg, uptr size);
+
+// Error report formatting.
+const char *StripPathPrefix(const char *filepath,
+ const char *strip_file_prefix);
+// Strip the directories from the module name.
+const char *StripModuleName(const char *module);
+
+// OS
+uptr ReadBinaryName(/*out*/char *buf, uptr buf_len);
+uptr ReadBinaryNameCached(/*out*/char *buf, uptr buf_len);
+uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len);
+const char *GetProcessName();
+void UpdateProcessName();
+void CacheBinaryName();
+void DisableCoreDumperIfNecessary();
+void DumpProcessMap();
+void PrintModuleMap();
+const char *GetEnv(const char *name);
+bool SetEnv(const char *name, const char *value);
+
+u32 GetUid();
+void ReExec();
+void CheckASLR();
+void CheckMPROTECT();
+char **GetArgv();
+char **GetEnviron();
+void PrintCmdline();
+bool StackSizeIsUnlimited();
+void SetStackSizeLimitInBytes(uptr limit);
+bool AddressSpaceIsUnlimited();
+void SetAddressSpaceUnlimited();
+void AdjustStackSize(void *attr);
+void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args);
+void SetSandboxingCallback(void (*f)());
+
+void InitializeCoverage(bool enabled, const char *coverage_dir);
+
+void InitTlsSize();
+uptr GetTlsSize();
+
+// Other
+void SleepForSeconds(int seconds);
+void SleepForMillis(int millis);
+u64 NanoTime();
+u64 MonotonicNanoTime();
+int Atexit(void (*function)(void));
+bool TemplateMatch(const char *templ, const char *str);
+
+// Exit
+void NORETURN Abort();
+void NORETURN Die();
+void NORETURN
+CheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2);
+void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
+ const char *mmap_type, error_t err,
+ bool raw_report = false);
+
+// Specific tools may override behavior of "Die" and "CheckFailed" functions
+// to do tool-specific job.
+typedef void (*DieCallbackType)(void);
+
+// It's possible to add several callbacks that will be run when "Die" is
+// called. The callbacks are run in reverse order of registration. Tools are
+// strongly recommended to set up all callbacks during initialization, while
+// there is only a single thread.
+bool AddDieCallback(DieCallbackType callback);
+bool RemoveDieCallback(DieCallbackType callback);
+
+void SetUserDieCallback(DieCallbackType callback);
+
+typedef void (*CheckFailedCallbackType)(const char *, int, const char *,
+ u64, u64);
+void SetCheckFailedCallback(CheckFailedCallbackType callback);
+
+// Callback will be called if soft_rss_limit_mb is given and the limit is
+// exceeded (exceeded==true) or if rss went down below the limit
+// (exceeded==false).
+// The callback should be registered once at the tool init time.
+void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded));
+
+// Functions related to signal handling.
+typedef void (*SignalHandlerType)(int, void *, void *);
+HandleSignalMode GetHandleSignalMode(int signum);
+void InstallDeadlySignalHandlers(SignalHandlerType handler);
+
+// Signal reporting.
+// Each sanitizer uses a slightly different implementation of stack unwinding.
+typedef void (*UnwindSignalStackCallbackType)(const SignalContext &sig,
+ const void *callback_context,
+ BufferedStackTrace *stack);
+// Print deadly signal report and die.
+void HandleDeadlySignal(void *siginfo, void *context, u32 tid,
+ UnwindSignalStackCallbackType unwind,
+ const void *unwind_context);
+
+// Part of HandleDeadlySignal, exposed for asan.
+void StartReportDeadlySignal();
+// Part of HandleDeadlySignal, exposed for asan.
+void ReportDeadlySignal(const SignalContext &sig, u32 tid,
+ UnwindSignalStackCallbackType unwind,
+ const void *unwind_context);
+
+// Alternative signal stack (POSIX-only).
+void SetAlternateSignalStack();
+void UnsetAlternateSignalStack();
+
+// We don't want the summary to be too long.
+const int kMaxSummaryLength = 1024;
+// Construct a one-line string:
+// SUMMARY: SanitizerToolName: error_message
+// and pass it to __sanitizer_report_error_summary.
+// If alt_tool_name is provided, it's used in place of SanitizerToolName.
+void ReportErrorSummary(const char *error_message,
+ const char *alt_tool_name = nullptr);
+// Same as above, but construct error_message as:
+// error_type file:line[:column][ function]
+void ReportErrorSummary(const char *error_type, const AddressInfo &info,
+ const char *alt_tool_name = nullptr);
+// Same as above, but obtains AddressInfo by symbolizing top stack trace frame.
+void ReportErrorSummary(const char *error_type, const StackTrace *trace,
+ const char *alt_tool_name = nullptr);
+
+void ReportMmapWriteExec(int prot);
+
+// Math
+#if SANITIZER_WINDOWS && !defined(__clang__) && !defined(__GNUC__)
+extern "C" {
+unsigned char _BitScanForward(unsigned long *index, unsigned long mask); // NOLINT
+unsigned char _BitScanReverse(unsigned long *index, unsigned long mask); // NOLINT
+#if defined(_WIN64)
+unsigned char _BitScanForward64(unsigned long *index, unsigned __int64 mask); // NOLINT
+unsigned char _BitScanReverse64(unsigned long *index, unsigned __int64 mask); // NOLINT
+#endif
+}
+#endif
+
+INLINE uptr MostSignificantSetBitIndex(uptr x) {
+ CHECK_NE(x, 0U);
+ unsigned long up; // NOLINT
+#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
+# ifdef _WIN64
+ up = SANITIZER_WORDSIZE - 1 - __builtin_clzll(x);
+# else
+ up = SANITIZER_WORDSIZE - 1 - __builtin_clzl(x);
+# endif
+#elif defined(_WIN64)
+ _BitScanReverse64(&up, x);
+#else
+ _BitScanReverse(&up, x);
+#endif
+ return up;
+}
+
+INLINE uptr LeastSignificantSetBitIndex(uptr x) {
+ CHECK_NE(x, 0U);
+ unsigned long up; // NOLINT
+#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
+# ifdef _WIN64
+ up = __builtin_ctzll(x);
+# else
+ up = __builtin_ctzl(x);
+# endif
+#elif defined(_WIN64)
+ _BitScanForward64(&up, x);
+#else
+ _BitScanForward(&up, x);
+#endif
+ return up;
+}
+
+INLINE bool IsPowerOfTwo(uptr x) {
+ return (x & (x - 1)) == 0;
+}
+
+INLINE uptr RoundUpToPowerOfTwo(uptr size) {
+ CHECK(size);
+ if (IsPowerOfTwo(size)) return size;
+
+ uptr up = MostSignificantSetBitIndex(size);
+ CHECK_LT(size, (1ULL << (up + 1)));
+ CHECK_GT(size, (1ULL << up));
+ return 1ULL << (up + 1);
+}
+
+INLINE uptr RoundUpTo(uptr size, uptr boundary) {
+ RAW_CHECK(IsPowerOfTwo(boundary));
+ return (size + boundary - 1) & ~(boundary - 1);
+}
+
+INLINE uptr RoundDownTo(uptr x, uptr boundary) {
+ return x & ~(boundary - 1);
+}
+
+INLINE bool IsAligned(uptr a, uptr alignment) {
+ return (a & (alignment - 1)) == 0;
+}
+
+INLINE uptr Log2(uptr x) {
+ CHECK(IsPowerOfTwo(x));
+ return LeastSignificantSetBitIndex(x);
+}
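+
+// Worked examples (illustrative):
+//   MostSignificantSetBitIndex(40) == 5   (40 == 0b101000)
+//   RoundUpToPowerOfTwo(40)        == 64
+//   RoundUpTo(40, 16)              == 48  (boundary must be a power of two)
+//   RoundDownTo(40, 16)            == 32
+//   Log2(64)                       == 6   (argument must be a power of two)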
+
+// Don't use std::min, std::max or std::swap, to minimize dependency
+// on libstdc++.
+template<class T> T Min(T a, T b) { return a < b ? a : b; }
+template<class T> T Max(T a, T b) { return a > b ? a : b; }
+template<class T> void Swap(T& a, T& b) {
+ T tmp = a;
+ a = b;
+ b = tmp;
+}
+
+// Char handling
+INLINE bool IsSpace(int c) {
+ return (c == ' ') || (c == '\n') || (c == '\t') ||
+ (c == '\f') || (c == '\r') || (c == '\v');
+}
+INLINE bool IsDigit(int c) {
+ return (c >= '0') && (c <= '9');
+}
+INLINE int ToLower(int c) {
+ return (c >= 'A' && c <= 'Z') ? (c + 'a' - 'A') : c;
+}
+
+// A low-level vector based on mmap. May incur a significant memory overhead for
+// small vectors.
+// WARNING: The current implementation supports only POD types.
+template<typename T>
+class InternalMmapVectorNoCtor {
+ public:
+ void Initialize(uptr initial_capacity) {
+ capacity_bytes_ = 0;
+ size_ = 0;
+ data_ = 0;
+ reserve(initial_capacity);
+ }
+ void Destroy() { UnmapOrDie(data_, capacity_bytes_); }
+ T &operator[](uptr i) {
+ CHECK_LT(i, size_);
+ return data_[i];
+ }
+ const T &operator[](uptr i) const {
+ CHECK_LT(i, size_);
+ return data_[i];
+ }
+ void push_back(const T &element) {
+ CHECK_LE(size_, capacity());
+ if (size_ == capacity()) {
+ uptr new_capacity = RoundUpToPowerOfTwo(size_ + 1);
+ Realloc(new_capacity);
+ }
+ internal_memcpy(&data_[size_++], &element, sizeof(T));
+ }
+ T &back() {
+ CHECK_GT(size_, 0);
+ return data_[size_ - 1];
+ }
+ void pop_back() {
+ CHECK_GT(size_, 0);
+ size_--;
+ }
+ uptr size() const {
+ return size_;
+ }
+ const T *data() const {
+ return data_;
+ }
+ T *data() {
+ return data_;
+ }
+ uptr capacity() const { return capacity_bytes_ / sizeof(T); }
+ void reserve(uptr new_size) {
+ // Never downsize internal buffer.
+ if (new_size > capacity())
+ Realloc(new_size);
+ }
+ void resize(uptr new_size) {
+ if (new_size > size_) {
+ reserve(new_size);
+ internal_memset(&data_[size_], 0, sizeof(T) * (new_size - size_));
+ }
+ size_ = new_size;
+ }
+
+ void clear() { size_ = 0; }
+ bool empty() const { return size() == 0; }
+
+ const T *begin() const {
+ return data();
+ }
+ T *begin() {
+ return data();
+ }
+ const T *end() const {
+ return data() + size();
+ }
+ T *end() {
+ return data() + size();
+ }
+
+ void swap(InternalMmapVectorNoCtor &other) {
+ Swap(data_, other.data_);
+ Swap(capacity_bytes_, other.capacity_bytes_);
+ Swap(size_, other.size_);
+ }
+
+ private:
+ void Realloc(uptr new_capacity) {
+ CHECK_GT(new_capacity, 0);
+ CHECK_LE(size_, new_capacity);
+ uptr new_capacity_bytes =
+ RoundUpTo(new_capacity * sizeof(T), GetPageSizeCached());
+ T *new_data = (T *)MmapOrDie(new_capacity_bytes, "InternalMmapVector");
+ internal_memcpy(new_data, data_, size_ * sizeof(T));
+ UnmapOrDie(data_, capacity_bytes_);
+ data_ = new_data;
+ capacity_bytes_ = new_capacity_bytes;
+ }
+
+ T *data_;
+ uptr capacity_bytes_;
+ uptr size_;
+};
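+
+// Usage example (illustrative): the NoCtor variant is intended for
+// global/linker-initialized storage where no constructor runs:
+//   static InternalMmapVectorNoCtor<uptr> g_addrs;  // zero-initialized
+//   ...
+//   g_addrs.Initialize(128);  // must be called before first use
+//   g_addrs.push_back(0x1000);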
+
+template <typename T>
+bool operator==(const InternalMmapVectorNoCtor<T> &lhs,
+ const InternalMmapVectorNoCtor<T> &rhs) {
+ if (lhs.size() != rhs.size()) return false;
+ return internal_memcmp(lhs.data(), rhs.data(), lhs.size() * sizeof(T)) == 0;
+}
+
+template <typename T>
+bool operator!=(const InternalMmapVectorNoCtor<T> &lhs,
+ const InternalMmapVectorNoCtor<T> &rhs) {
+ return !(lhs == rhs);
+}
+
+template<typename T>
+class InternalMmapVector : public InternalMmapVectorNoCtor<T> {
+ public:
+ InternalMmapVector() { InternalMmapVectorNoCtor<T>::Initialize(1); }
+ explicit InternalMmapVector(uptr cnt) {
+ InternalMmapVectorNoCtor<T>::Initialize(cnt);
+ this->resize(cnt);
+ }
+ ~InternalMmapVector() { InternalMmapVectorNoCtor<T>::Destroy(); }
+ // Disallow copies and moves.
+ InternalMmapVector(const InternalMmapVector &) = delete;
+ InternalMmapVector &operator=(const InternalMmapVector &) = delete;
+ InternalMmapVector(InternalMmapVector &&) = delete;
+ InternalMmapVector &operator=(InternalMmapVector &&) = delete;
+};
+
+class InternalScopedString : public InternalMmapVector<char> {
+ public:
+ explicit InternalScopedString(uptr max_length)
+ : InternalMmapVector<char>(max_length), length_(0) {
+ (*this)[0] = '\0';
+ }
+ uptr length() { return length_; }
+ void clear() {
+ (*this)[0] = '\0';
+ length_ = 0;
+ }
+ void append(const char *format, ...);
+
+ private:
+ uptr length_;
+};
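+
+// Usage example (illustrative): building a report line without touching
+// the user's malloc:
+//   InternalScopedString str(kMaxSummaryLength);
+//   str.append("thread T%d", tid);
+//   Printf("%s\n", str.data());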
+
+template <class T>
+struct CompareLess {
+ bool operator()(const T &a, const T &b) const { return a < b; }
+};
+
+// HeapSort for arrays and InternalMmapVector.
+template <class T, class Compare = CompareLess<T>>
+void Sort(T *v, uptr size, Compare comp = {}) {
+ if (size < 2)
+ return;
+ // Stage 1: insert elements to the heap.
+ for (uptr i = 1; i < size; i++) {
+ uptr j, p;
+ for (j = i; j > 0; j = p) {
+ p = (j - 1) / 2;
+ if (comp(v[p], v[j]))
+ Swap(v[j], v[p]);
+ else
+ break;
+ }
+ }
+ // Stage 2: swap largest element with the last one,
+ // and sink the new top.
+ for (uptr i = size - 1; i > 0; i--) {
+ Swap(v[0], v[i]);
+ uptr j, max_ind;
+ for (j = 0; j < i; j = max_ind) {
+ uptr left = 2 * j + 1;
+ uptr right = 2 * j + 2;
+ max_ind = j;
+ if (left < i && comp(v[max_ind], v[left]))
+ max_ind = left;
+ if (right < i && comp(v[max_ind], v[right]))
+ max_ind = right;
+ if (max_ind != j)
+ Swap(v[j], v[max_ind]);
+ else
+ break;
+ }
+ }
+}
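+
+// Usage example (illustrative): sorting in descending order with a custom
+// comparator:
+//   InternalMmapVector<uptr> v;
+//   v.push_back(3); v.push_back(1); v.push_back(2);
+//   Sort(v.data(), v.size(),
+//        [](const uptr &a, const uptr &b) { return a > b; });
+//   // v is now {3, 2, 1}.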
+
+// Works like std::lower_bound: finds the first element that is not less
+// than the val.
+template <class Container, class Value, class Compare>
+uptr InternalLowerBound(const Container &v, uptr first, uptr last,
+ const Value &val, Compare comp) {
+ while (last > first) {
+ uptr mid = (first + last) / 2;
+ if (comp(v[mid], val))
+ first = mid + 1;
+ else
+ last = mid;
+ }
+ return first;
+}
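+
+// Usage example (illustrative): for a sorted InternalMmapVector<uptr>
+// v = {10, 20, 20, 30},
+//   InternalLowerBound(v, 0, v.size(), (uptr)20, CompareLess<uptr>())
+// returns 1, the index of the first element that is not less than 20.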
+
+enum ModuleArch {
+ kModuleArchUnknown,
+ kModuleArchI386,
+ kModuleArchX86_64,
+ kModuleArchX86_64H,
+ kModuleArchARMV6,
+ kModuleArchARMV7,
+ kModuleArchARMV7S,
+ kModuleArchARMV7K,
+ kModuleArchARM64
+};
+
+// Opens the file 'file_name" and reads up to 'max_len' bytes.
+// The resulting buffer is mmaped and stored in '*buff'.
+// Returns true if file was successfully opened and read.
+bool ReadFileToVector(const char *file_name,
+ InternalMmapVectorNoCtor<char> *buff,
+ uptr max_len = 1 << 26, error_t *errno_p = nullptr);
+
+// Opens the file 'file_name" and reads up to 'max_len' bytes.
+// This function is less I/O efficient than ReadFileToVector as it may reread
+// file multiple times to avoid mmap during read attempts. It's used to read
+// procmap, so short reads with mmap in between can produce inconsistent result.
+// The resulting buffer is mmaped and stored in '*buff'.
+// The size of the mmaped region is stored in '*buff_size'.
+// The total number of read bytes is stored in '*read_len'.
+// Returns true if file was successfully opened and read.
+bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
+ uptr *read_len, uptr max_len = 1 << 26,
+ error_t *errno_p = nullptr);
+
+// When adding a new architecture, don't forget to also update
+// script/asan_symbolize.py and sanitizer_symbolizer_libcdep.cc.
+inline const char *ModuleArchToString(ModuleArch arch) {
+ switch (arch) {
+ case kModuleArchUnknown:
+ return "";
+ case kModuleArchI386:
+ return "i386";
+ case kModuleArchX86_64:
+ return "x86_64";
+ case kModuleArchX86_64H:
+ return "x86_64h";
+ case kModuleArchARMV6:
+ return "armv6";
+ case kModuleArchARMV7:
+ return "armv7";
+ case kModuleArchARMV7S:
+ return "armv7s";
+ case kModuleArchARMV7K:
+ return "armv7k";
+ case kModuleArchARM64:
+ return "arm64";
+ }
+ CHECK(0 && "Invalid module arch");
+ return "";
+}
+
+const uptr kModuleUUIDSize = 16;
+const uptr kMaxSegName = 16;
+
+// Represents a binary loaded into virtual memory (e.g. this can be an
+// executable or a shared object).
+class LoadedModule {
+ public:
+ LoadedModule()
+ : full_name_(nullptr),
+ base_address_(0),
+ max_executable_address_(0),
+ arch_(kModuleArchUnknown),
+ instrumented_(false) {
+ internal_memset(uuid_, 0, kModuleUUIDSize);
+ ranges_.clear();
+ }
+ void set(const char *module_name, uptr base_address);
+ void set(const char *module_name, uptr base_address, ModuleArch arch,
+ u8 uuid[kModuleUUIDSize], bool instrumented);
+ void clear();
+ void addAddressRange(uptr beg, uptr end, bool executable, bool writable,
+ const char *name = nullptr);
+ bool containsAddress(uptr address) const;
+
+ const char *full_name() const { return full_name_; }
+ uptr base_address() const { return base_address_; }
+ uptr max_executable_address() const { return max_executable_address_; }
+ ModuleArch arch() const { return arch_; }
+ const u8 *uuid() const { return uuid_; }
+ bool instrumented() const { return instrumented_; }
+
+ struct AddressRange {
+ AddressRange *next;
+ uptr beg;
+ uptr end;
+ bool executable;
+ bool writable;
+ char name[kMaxSegName];
+
+ AddressRange(uptr beg, uptr end, bool executable, bool writable,
+ const char *name)
+ : next(nullptr),
+ beg(beg),
+ end(end),
+ executable(executable),
+ writable(writable) {
+ internal_strncpy(this->name, (name ? name : ""), ARRAY_SIZE(this->name));
+ }
+ };
+
+ const IntrusiveList<AddressRange> &ranges() const { return ranges_; }
+
+ private:
+ char *full_name_; // Owned.
+ uptr base_address_;
+ uptr max_executable_address_;
+ ModuleArch arch_;
+ u8 uuid_[kModuleUUIDSize];
+ bool instrumented_;
+ IntrusiveList<AddressRange> ranges_;
+};
+
+// List of LoadedModules. OS-dependent implementation is responsible for
+// filling this information.
+class ListOfModules {
+ public:
+ ListOfModules() : initialized(false) {}
+ ~ListOfModules() { clear(); }
+ void init();
+ void fallbackInit(); // Uses fallback init if available, otherwise clears
+ const LoadedModule *begin() const { return modules_.begin(); }
+ LoadedModule *begin() { return modules_.begin(); }
+ const LoadedModule *end() const { return modules_.end(); }
+ LoadedModule *end() { return modules_.end(); }
+ uptr size() const { return modules_.size(); }
+ const LoadedModule &operator[](uptr i) const {
+ CHECK_LT(i, modules_.size());
+ return modules_[i];
+ }
+
+ private:
+ void clear() {
+ for (auto &module : modules_) module.clear();
+ modules_.clear();
+ }
+ void clearOrInit() {
+ initialized ? clear() : modules_.Initialize(kInitialCapacity);
+ initialized = true;
+ }
+
+ InternalMmapVectorNoCtor<LoadedModule> modules_;
+ // We rarely have more than 16K loaded modules.
+ static const uptr kInitialCapacity = 1 << 14;
+ bool initialized;
+};
+
+// Callback type for iterating over a set of memory ranges.
+typedef void (*RangeIteratorCallback)(uptr begin, uptr end, void *arg);
+
+enum AndroidApiLevel {
+ ANDROID_NOT_ANDROID = 0,
+ ANDROID_KITKAT = 19,
+ ANDROID_LOLLIPOP_MR1 = 22,
+ ANDROID_POST_LOLLIPOP = 23
+};
+
+void WriteToSyslog(const char *buffer);
+
+#if defined(SANITIZER_WINDOWS) && defined(_MSC_VER) && !defined(__clang__)
+#define SANITIZER_WIN_TRACE 1
+#else
+#define SANITIZER_WIN_TRACE 0
+#endif
+
+#if SANITIZER_MAC || SANITIZER_WIN_TRACE
+void LogFullErrorReport(const char *buffer);
+#else
+INLINE void LogFullErrorReport(const char *buffer) {}
+#endif
+
+#if SANITIZER_LINUX || SANITIZER_MAC
+void WriteOneLineToSyslog(const char *s);
+void LogMessageOnPrintf(const char *str);
+#else
+INLINE void WriteOneLineToSyslog(const char *s) {}
+INLINE void LogMessageOnPrintf(const char *str) {}
+#endif
+
+#if SANITIZER_LINUX || SANITIZER_WIN_TRACE
+// Initialize Android logging. Any writes before this are silently lost.
+void AndroidLogInit();
+void SetAbortMessage(const char *);
+#else
+INLINE void AndroidLogInit() {}
+// FIXME: MacOS implementation could use CRSetCrashLogMessage.
+INLINE void SetAbortMessage(const char *) {}
+#endif
+
+#if SANITIZER_ANDROID
+void SanitizerInitializeUnwinder();
+AndroidApiLevel AndroidGetApiLevel();
+#else
+INLINE void AndroidLogWrite(const char *buffer_unused) {}
+INLINE void SanitizerInitializeUnwinder() {}
+INLINE AndroidApiLevel AndroidGetApiLevel() { return ANDROID_NOT_ANDROID; }
+#endif
+
+INLINE uptr GetPthreadDestructorIterations() {
+#if SANITIZER_ANDROID
+ return (AndroidGetApiLevel() == ANDROID_LOLLIPOP_MR1) ? 8 : 4;
+#elif SANITIZER_POSIX
+ return 4;
+#else
+// Unused on Windows.
+ return 0;
+#endif
+}
+
+void *internal_start_thread(void(*func)(void*), void *arg);
+void internal_join_thread(void *th);
+void MaybeStartBackgroudThread();
+
+// Make the compiler think that something is going on there.
+// Use this inside a loop that looks like memset/memcpy/etc to prevent the
+// compiler from recognising it and turning it into an actual call to
+// memset/memcpy/etc.
+static inline void SanitizerBreakOptimization(void *arg) {
+#if defined(_MSC_VER) && !defined(__clang__)
+ _ReadWriteBarrier();
+#else
+ __asm__ __volatile__("" : : "r" (arg) : "memory");
+#endif
+}
+
+struct SignalContext {
+ void *siginfo;
+ void *context;
+ uptr addr;
+ uptr pc;
+ uptr sp;
+ uptr bp;
+ bool is_memory_access;
+ enum WriteFlag { UNKNOWN, READ, WRITE } write_flag;
+
+ // VS2013 doesn't implement unrestricted unions, so we need a trivial default
+// constructor.
+ SignalContext() = default;
+
+ // Creates signal context in a platform-specific manner.
+ // SignalContext is going to keep pointers to siginfo and context without
+ // owning them.
+ SignalContext(void *siginfo, void *context)
+ : siginfo(siginfo),
+ context(context),
+ addr(GetAddress()),
+ is_memory_access(IsMemoryAccess()),
+ write_flag(GetWriteFlag()) {
+ InitPcSpBp();
+ }
+
+ static void DumpAllRegisters(void *context);
+
+ // Type of signal e.g. SIGSEGV or EXCEPTION_ACCESS_VIOLATION.
+ int GetType() const;
+
+ // String description of the signal.
+ const char *Describe() const;
+
+ // Returns true if signal is stack overflow.
+ bool IsStackOverflow() const;
+
+ private:
+ // Platform specific initialization.
+ void InitPcSpBp();
+ uptr GetAddress() const;
+ WriteFlag GetWriteFlag() const;
+ bool IsMemoryAccess() const;
+};
+
+void InitializePlatformEarly();
+void MaybeReexec();
+
+template <typename Fn>
+class RunOnDestruction {
+ public:
+ explicit RunOnDestruction(Fn fn) : fn_(fn) {}
+ ~RunOnDestruction() { fn_(); }
+
+ private:
+ Fn fn_;
+};
+
+// A simple scope guard. Usage:
+// auto cleanup = at_scope_exit([]{ do_cleanup; });
+template <typename Fn>
+RunOnDestruction<Fn> at_scope_exit(Fn fn) {
+ return RunOnDestruction<Fn>(fn);
+}
+
+// Linux on 64-bit s390 had a nasty bug that crashed the whole machine
+// if a process used virtual memory above 4TB (as many sanitizers like
+// to do). This function aborts the process if it is running on a kernel
+// that looks vulnerable.
+#if SANITIZER_LINUX && SANITIZER_S390_64
+void AvoidCVE_2016_2143();
+#else
+INLINE void AvoidCVE_2016_2143() {}
+#endif
+
+struct StackDepotStats {
+ uptr n_uniq_ids;
+ uptr allocated;
+};
+
+// The default value for allocator_release_to_os_interval_ms common flag to
+// indicate that sanitizer allocator should not attempt to release memory to OS.
+const s32 kReleaseToOSIntervalNever = -1;
+
+void CheckNoDeepBind(const char *filename, int flag);
+
+// Returns the requested amount of random data (up to 256 bytes) that can then
+// be used to seed a PRNG. Defaults to blocking like the underlying syscall.
+bool GetRandom(void *buffer, uptr length, bool blocking = true);
+
+// Returns the number of logical processors on the system.
+u32 GetNumberOfCPUs();
+extern u32 NumberOfCPUsCached;
+INLINE u32 GetNumberOfCPUsCached() {
+ if (!NumberOfCPUsCached)
+ NumberOfCPUsCached = GetNumberOfCPUs();
+ return NumberOfCPUsCached;
+}
+
+} // namespace __sanitizer
+
+inline void *operator new(__sanitizer::operator_new_size_type size,
+ __sanitizer::LowLevelAllocator &alloc) {
+ return alloc.Allocate(size);
+}
+
+#endif // SANITIZER_COMMON_H
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_common_interceptors.inc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_common_interceptors.inc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_common_interceptors.inc (revision 351984)
@@ -0,0 +1,9849 @@
+//===-- sanitizer_common_interceptors.inc -----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Common function interceptors for tools like AddressSanitizer,
+// ThreadSanitizer, MemorySanitizer, etc.
+//
+// This file should be included into the tool's interceptor file,
+// which has to define its own macros:
+// COMMON_INTERCEPTOR_ENTER
+// COMMON_INTERCEPTOR_ENTER_NOIGNORE
+// COMMON_INTERCEPTOR_READ_RANGE
+// COMMON_INTERCEPTOR_WRITE_RANGE
+// COMMON_INTERCEPTOR_INITIALIZE_RANGE
+// COMMON_INTERCEPTOR_DIR_ACQUIRE
+// COMMON_INTERCEPTOR_FD_ACQUIRE
+// COMMON_INTERCEPTOR_FD_RELEASE
+// COMMON_INTERCEPTOR_FD_ACCESS
+// COMMON_INTERCEPTOR_SET_THREAD_NAME
+// COMMON_INTERCEPTOR_ON_DLOPEN
+// COMMON_INTERCEPTOR_ON_EXIT
+// COMMON_INTERCEPTOR_MUTEX_PRE_LOCK
+// COMMON_INTERCEPTOR_MUTEX_POST_LOCK
+// COMMON_INTERCEPTOR_MUTEX_UNLOCK
+// COMMON_INTERCEPTOR_MUTEX_REPAIR
+// COMMON_INTERCEPTOR_SET_PTHREAD_NAME
+// COMMON_INTERCEPTOR_HANDLE_RECVMSG
+// COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED
+// COMMON_INTERCEPTOR_MEMSET_IMPL
+// COMMON_INTERCEPTOR_MEMMOVE_IMPL
+// COMMON_INTERCEPTOR_MEMCPY_IMPL
+// COMMON_INTERCEPTOR_MMAP_IMPL
+// COMMON_INTERCEPTOR_COPY_STRING
+// COMMON_INTERCEPTOR_STRNDUP_IMPL
+//===----------------------------------------------------------------------===//
+
+#include "interception/interception.h"
+#include "sanitizer_addrhashmap.h"
+#include "sanitizer_errno.h"
+#include "sanitizer_placement_new.h"
+#include "sanitizer_platform_interceptors.h"
+#include "sanitizer_symbolizer.h"
+#include "sanitizer_tls_get_addr.h"
+
+#include <stdarg.h>
+
+#if SANITIZER_INTERCEPTOR_HOOKS
+#define CALL_WEAK_INTERCEPTOR_HOOK(f, ...) f(__VA_ARGS__);
+#define DECLARE_WEAK_INTERCEPTOR_HOOK(f, ...) \
+ SANITIZER_INTERFACE_WEAK_DEF(void, f, __VA_ARGS__) {}
+#else
+#define DECLARE_WEAK_INTERCEPTOR_HOOK(f, ...)
+#define CALL_WEAK_INTERCEPTOR_HOOK(f, ...)
+
+#endif // SANITIZER_INTERCEPTOR_HOOKS
+
+#if SANITIZER_WINDOWS && !defined(va_copy)
+#define va_copy(dst, src) ((dst) = (src))
+#endif // SANITIZER_WINDOWS && !defined(va_copy)
+
+#if SANITIZER_FREEBSD
+#define pthread_setname_np pthread_set_name_np
+#define inet_aton __inet_aton
+#define inet_pton __inet_pton
+#define iconv __bsd_iconv
+#endif
+
+#if SANITIZER_NETBSD
+#define clock_getres __clock_getres50
+#define clock_gettime __clock_gettime50
+#define clock_settime __clock_settime50
+#define ctime __ctime50
+#define ctime_r __ctime_r50
+#define devname __devname50
+#define fgetpos __fgetpos50
+#define fsetpos __fsetpos50
+#define fts_children __fts_children60
+#define fts_close __fts_close60
+#define fts_open __fts_open60
+#define fts_read __fts_read60
+#define fts_set __fts_set60
+#define getitimer __getitimer50
+#define getmntinfo __getmntinfo13
+#define getpwent __getpwent50
+#define getpwnam __getpwnam50
+#define getpwnam_r __getpwnam_r50
+#define getpwuid __getpwuid50
+#define getpwuid_r __getpwuid_r50
+#define getutent __getutent50
+#define getutxent __getutxent50
+#define getutxid __getutxid50
+#define getutxline __getutxline50
+#define pututxline __pututxline50
+#define glob __glob30
+#define gmtime __gmtime50
+#define gmtime_r __gmtime_r50
+#define localtime __localtime50
+#define localtime_r __localtime_r50
+#define mktime __mktime50
+#define lstat __lstat50
+#define opendir __opendir30
+#define readdir __readdir30
+#define readdir_r __readdir_r30
+#define scandir __scandir30
+#define setitimer __setitimer50
+#define setlocale __setlocale50
+#define shmctl __shmctl50
+#define sigemptyset __sigemptyset14
+#define sigfillset __sigfillset14
+#define sigpending __sigpending14
+#define sigprocmask __sigprocmask14
+#define sigtimedwait __sigtimedwait50
+#define stat __stat50
+#define time __time50
+#define times __times13
+#define unvis __unvis50
+#define wait3 __wait350
+#define wait4 __wait450
+extern const unsigned short *_ctype_tab_;
+extern const short *_toupper_tab_;
+extern const short *_tolower_tab_;
+#endif
+
+// Platform-specific options.
+#if SANITIZER_MAC
+namespace __sanitizer {
+bool PlatformHasDifferentMemcpyAndMemmove();
+}
+#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE \
+ (__sanitizer::PlatformHasDifferentMemcpyAndMemmove())
+#elif SANITIZER_WINDOWS64
+#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE false
+#else
+#define PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE true
+#endif // SANITIZER_MAC
+
+#ifndef COMMON_INTERCEPTOR_INITIALIZE_RANGE
+#define COMMON_INTERCEPTOR_INITIALIZE_RANGE(p, size) {}
+#endif
+
+#ifndef COMMON_INTERCEPTOR_UNPOISON_PARAM
+#define COMMON_INTERCEPTOR_UNPOISON_PARAM(count) {}
+#endif
+
+#ifndef COMMON_INTERCEPTOR_FD_ACCESS
+#define COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd) {}
+#endif
+
+#ifndef COMMON_INTERCEPTOR_MUTEX_PRE_LOCK
+#define COMMON_INTERCEPTOR_MUTEX_PRE_LOCK(ctx, m) {}
+#endif
+
+#ifndef COMMON_INTERCEPTOR_MUTEX_POST_LOCK
+#define COMMON_INTERCEPTOR_MUTEX_POST_LOCK(ctx, m) {}
+#endif
+
+#ifndef COMMON_INTERCEPTOR_MUTEX_UNLOCK
+#define COMMON_INTERCEPTOR_MUTEX_UNLOCK(ctx, m) {}
+#endif
+
+#ifndef COMMON_INTERCEPTOR_MUTEX_REPAIR
+#define COMMON_INTERCEPTOR_MUTEX_REPAIR(ctx, m) {}
+#endif
+
+#ifndef COMMON_INTERCEPTOR_MUTEX_INVALID
+#define COMMON_INTERCEPTOR_MUTEX_INVALID(ctx, m) {}
+#endif
+
+#ifndef COMMON_INTERCEPTOR_HANDLE_RECVMSG
+#define COMMON_INTERCEPTOR_HANDLE_RECVMSG(ctx, msg) ((void)(msg))
+#endif
+
+#ifndef COMMON_INTERCEPTOR_FILE_OPEN
+#define COMMON_INTERCEPTOR_FILE_OPEN(ctx, file, path) {}
+#endif
+
+#ifndef COMMON_INTERCEPTOR_FILE_CLOSE
+#define COMMON_INTERCEPTOR_FILE_CLOSE(ctx, file) {}
+#endif
+
+#ifndef COMMON_INTERCEPTOR_LIBRARY_LOADED
+#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) {}
+#endif
+
+#ifndef COMMON_INTERCEPTOR_LIBRARY_UNLOADED
+#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() {}
+#endif
+
+#ifndef COMMON_INTERCEPTOR_ENTER_NOIGNORE
+#define COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, ...) \
+ COMMON_INTERCEPTOR_ENTER(ctx, __VA_ARGS__)
+#endif
+
+#ifndef COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED
+#define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED (0)
+#endif
+
+#define COMMON_INTERCEPTOR_READ_STRING(ctx, s, n) \
+ COMMON_INTERCEPTOR_READ_RANGE((ctx), (s), \
+ common_flags()->strict_string_checks ? (REAL(strlen)(s)) + 1 : (n) )
+
+#ifndef COMMON_INTERCEPTOR_ON_DLOPEN
+#define COMMON_INTERCEPTOR_ON_DLOPEN(filename, flag) \
+ CheckNoDeepBind(filename, flag);
+#endif
+
+#ifndef COMMON_INTERCEPTOR_GET_TLS_RANGE
+#define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end) *begin = *end = 0;
+#endif
+
+#ifndef COMMON_INTERCEPTOR_ACQUIRE
+#define COMMON_INTERCEPTOR_ACQUIRE(ctx, u) {}
+#endif
+
+#ifndef COMMON_INTERCEPTOR_RELEASE
+#define COMMON_INTERCEPTOR_RELEASE(ctx, u) {}
+#endif
+
+#ifndef COMMON_INTERCEPTOR_USER_CALLBACK_START
+#define COMMON_INTERCEPTOR_USER_CALLBACK_START() {}
+#endif
+
+#ifndef COMMON_INTERCEPTOR_USER_CALLBACK_END
+#define COMMON_INTERCEPTOR_USER_CALLBACK_END() {}
+#endif
+
+#ifdef SANITIZER_NLDBL_VERSION
+#define COMMON_INTERCEPT_FUNCTION_LDBL(fn) \
+ COMMON_INTERCEPT_FUNCTION_VER(fn, SANITIZER_NLDBL_VERSION)
+#else
+#define COMMON_INTERCEPT_FUNCTION_LDBL(fn) \
+ COMMON_INTERCEPT_FUNCTION(fn)
+#endif
+
+#ifndef COMMON_INTERCEPTOR_MEMSET_IMPL
+#define COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, dst, v, size) \
+ { \
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) \
+ return internal_memset(dst, v, size); \
+ COMMON_INTERCEPTOR_ENTER(ctx, memset, dst, v, size); \
+ if (common_flags()->intercept_intrin) \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size); \
+ return REAL(memset)(dst, v, size); \
+ }
+#endif
+
+#ifndef COMMON_INTERCEPTOR_MEMMOVE_IMPL
+#define COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size) \
+ { \
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) \
+ return internal_memmove(dst, src, size); \
+ COMMON_INTERCEPTOR_ENTER(ctx, memmove, dst, src, size); \
+ if (common_flags()->intercept_intrin) { \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size); \
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, size); \
+ } \
+ return REAL(memmove)(dst, src, size); \
+ }
+#endif
+
+#ifndef COMMON_INTERCEPTOR_MEMCPY_IMPL
+#define COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, dst, src, size) \
+ { \
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED) { \
+ return internal_memmove(dst, src, size); \
+ } \
+ COMMON_INTERCEPTOR_ENTER(ctx, memcpy, dst, src, size); \
+ if (common_flags()->intercept_intrin) { \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, size); \
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, size); \
+ } \
+ return REAL(memcpy)(dst, src, size); \
+ }
+#endif
+
+#ifndef COMMON_INTERCEPTOR_MMAP_IMPL
+#define COMMON_INTERCEPTOR_MMAP_IMPL(ctx, mmap, addr, sz, prot, flags, fd, \
+ off) \
+ { return REAL(mmap)(addr, sz, prot, flags, fd, off); }
+#endif
+
+#ifndef COMMON_INTERCEPTOR_COPY_STRING
+#define COMMON_INTERCEPTOR_COPY_STRING(ctx, to, from, size) {}
+#endif
+
+#ifndef COMMON_INTERCEPTOR_STRNDUP_IMPL
+#define COMMON_INTERCEPTOR_STRNDUP_IMPL(ctx, s, size) \
+ COMMON_INTERCEPTOR_ENTER(ctx, strndup, s, size); \
+ uptr copy_length = internal_strnlen(s, size); \
+ char *new_mem = (char *)WRAP(malloc)(copy_length + 1); \
+ if (common_flags()->intercept_strndup) { \
+ COMMON_INTERCEPTOR_READ_STRING(ctx, s, Min(size, copy_length + 1)); \
+ } \
+ COMMON_INTERCEPTOR_COPY_STRING(ctx, new_mem, s, copy_length); \
+ internal_memcpy(new_mem, s, copy_length); \
+ new_mem[copy_length] = '\0'; \
+ return new_mem;
+#endif
+
+struct FileMetadata {
+ // For open_memstream().
+ char **addr;
+ SIZE_T *size;
+};
+
+struct CommonInterceptorMetadata {
+ enum {
+ CIMT_INVALID = 0,
+ CIMT_FILE
+ } type;
+ union {
+ FileMetadata file;
+ };
+};
+
+typedef AddrHashMap<CommonInterceptorMetadata, 31051> MetadataHashMap;
+
+static MetadataHashMap *interceptor_metadata_map;
+
+#if SI_POSIX
+UNUSED static void SetInterceptorMetadata(__sanitizer_FILE *addr,
+ const FileMetadata &file) {
+ MetadataHashMap::Handle h(interceptor_metadata_map, (uptr)addr);
+ CHECK(h.created());
+ h->type = CommonInterceptorMetadata::CIMT_FILE;
+ h->file = file;
+}
+
+UNUSED static const FileMetadata *GetInterceptorMetadata(
+ __sanitizer_FILE *addr) {
+ MetadataHashMap::Handle h(interceptor_metadata_map, (uptr)addr,
+ /* remove */ false,
+ /* create */ false);
+ if (addr && h.exists()) {
+ CHECK(!h.created());
+ CHECK(h->type == CommonInterceptorMetadata::CIMT_FILE);
+ return &h->file;
+ } else {
+ return 0;
+ }
+}
+
+UNUSED static void DeleteInterceptorMetadata(void *addr) {
+ MetadataHashMap::Handle h(interceptor_metadata_map, (uptr)addr, true);
+ CHECK(h.exists());
+}
+#endif // SI_POSIX
+
+#if SANITIZER_INTERCEPT_STRLEN
+INTERCEPTOR(SIZE_T, strlen, const char *s) {
+ // Sometimes strlen is called prior to InitializeCommonInterceptors,
+ // in which case the REAL(strlen) typically used in
+ // COMMON_INTERCEPTOR_ENTER will fail. We use internal_strlen here
+ // to handle that.
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
+ return internal_strlen(s);
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strlen, s);
+ SIZE_T result = REAL(strlen)(s);
+ if (common_flags()->intercept_strlen)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s, result + 1);
+ return result;
+}
+#define INIT_STRLEN COMMON_INTERCEPT_FUNCTION(strlen)
+#else
+#define INIT_STRLEN
+#endif
+
+#if SANITIZER_INTERCEPT_STRNLEN
+INTERCEPTOR(SIZE_T, strnlen, const char *s, SIZE_T maxlen) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strnlen, s, maxlen);
+ SIZE_T length = REAL(strnlen)(s, maxlen);
+ if (common_flags()->intercept_strlen)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s, Min(length + 1, maxlen));
+ return length;
+}
+#define INIT_STRNLEN COMMON_INTERCEPT_FUNCTION(strnlen)
+#else
+#define INIT_STRNLEN
+#endif
+
+#if SANITIZER_INTERCEPT_STRNDUP
+INTERCEPTOR(char*, strndup, const char *s, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_STRNDUP_IMPL(ctx, s, size);
+}
+#define INIT_STRNDUP COMMON_INTERCEPT_FUNCTION(strndup)
+#else
+#define INIT_STRNDUP
+#endif // SANITIZER_INTERCEPT_STRNDUP
+
+#if SANITIZER_INTERCEPT___STRNDUP
+INTERCEPTOR(char*, __strndup, const char *s, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_STRNDUP_IMPL(ctx, s, size);
+}
+#define INIT___STRNDUP COMMON_INTERCEPT_FUNCTION(__strndup)
+#else
+#define INIT___STRNDUP
+#endif // SANITIZER_INTERCEPT___STRNDUP
+
+#if SANITIZER_INTERCEPT_TEXTDOMAIN
+INTERCEPTOR(char*, textdomain, const char *domainname) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, textdomain, domainname);
+ if (domainname) COMMON_INTERCEPTOR_READ_STRING(ctx, domainname, 0);
+ char *domain = REAL(textdomain)(domainname);
+ if (domain) {
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(domain, REAL(strlen)(domain) + 1);
+ }
+ return domain;
+}
+#define INIT_TEXTDOMAIN COMMON_INTERCEPT_FUNCTION(textdomain)
+#else
+#define INIT_TEXTDOMAIN
+#endif
+
+#if SANITIZER_INTERCEPT_STRCMP
+static inline int CharCmpX(unsigned char c1, unsigned char c2) {
+ return (c1 == c2) ? 0 : (c1 < c2) ? -1 : 1;
+}
+
+DECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strcmp, uptr called_pc,
+ const char *s1, const char *s2, int result)
+
+INTERCEPTOR(int, strcmp, const char *s1, const char *s2) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strcmp, s1, s2);
+ unsigned char c1, c2;
+ uptr i;
+ for (i = 0;; i++) {
+ c1 = (unsigned char)s1[i];
+ c2 = (unsigned char)s2[i];
+ if (c1 != c2 || c1 == '\0') break;
+ }
+ COMMON_INTERCEPTOR_READ_STRING(ctx, s1, i + 1);
+ COMMON_INTERCEPTOR_READ_STRING(ctx, s2, i + 1);
+ int result = CharCmpX(c1, c2);
+ CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strcmp, GET_CALLER_PC(), s1,
+ s2, result);
+ return result;
+}
+
+DECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strncmp, uptr called_pc,
+ const char *s1, const char *s2, uptr n,
+ int result)
+
+INTERCEPTOR(int, strncmp, const char *s1, const char *s2, uptr size) {
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
+ return internal_strncmp(s1, s2, size);
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strncmp, s1, s2, size);
+ unsigned char c1 = 0, c2 = 0;
+ uptr i;
+ for (i = 0; i < size; i++) {
+ c1 = (unsigned char)s1[i];
+ c2 = (unsigned char)s2[i];
+ if (c1 != c2 || c1 == '\0') break;
+ }
+ uptr i1 = i;
+ uptr i2 = i;
+ if (common_flags()->strict_string_checks) {
+ for (; i1 < size && s1[i1]; i1++) {}
+ for (; i2 < size && s2[i2]; i2++) {}
+ }
+ COMMON_INTERCEPTOR_READ_RANGE((ctx), (s1), Min(i1 + 1, size));
+ COMMON_INTERCEPTOR_READ_RANGE((ctx), (s2), Min(i2 + 1, size));
+ int result = CharCmpX(c1, c2);
+ CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strncmp, GET_CALLER_PC(), s1,
+ s2, size, result);
+ return result;
+}
+
+#define INIT_STRCMP COMMON_INTERCEPT_FUNCTION(strcmp)
+#define INIT_STRNCMP COMMON_INTERCEPT_FUNCTION(strncmp)
+#else
+#define INIT_STRCMP
+#define INIT_STRNCMP
+#endif
+
+#if SANITIZER_INTERCEPT_STRCASECMP
+static inline int CharCaseCmp(unsigned char c1, unsigned char c2) {
+ int c1_low = ToLower(c1);
+ int c2_low = ToLower(c2);
+ return c1_low - c2_low;
+}
+
+DECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strcasecmp, uptr called_pc,
+ const char *s1, const char *s2, int result)
+
+INTERCEPTOR(int, strcasecmp, const char *s1, const char *s2) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strcasecmp, s1, s2);
+ unsigned char c1 = 0, c2 = 0;
+ uptr i;
+ for (i = 0;; i++) {
+ c1 = (unsigned char)s1[i];
+ c2 = (unsigned char)s2[i];
+ if (CharCaseCmp(c1, c2) != 0 || c1 == '\0') break;
+ }
+ COMMON_INTERCEPTOR_READ_STRING(ctx, s1, i + 1);
+ COMMON_INTERCEPTOR_READ_STRING(ctx, s2, i + 1);
+ int result = CharCaseCmp(c1, c2);
+ CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strcasecmp, GET_CALLER_PC(),
+ s1, s2, result);
+ return result;
+}
+
+DECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strncasecmp, uptr called_pc,
+ const char *s1, const char *s2, uptr size,
+ int result)
+
+INTERCEPTOR(int, strncasecmp, const char *s1, const char *s2, SIZE_T size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strncasecmp, s1, s2, size);
+ unsigned char c1 = 0, c2 = 0;
+ uptr i;
+ for (i = 0; i < size; i++) {
+ c1 = (unsigned char)s1[i];
+ c2 = (unsigned char)s2[i];
+ if (CharCaseCmp(c1, c2) != 0 || c1 == '\0') break;
+ }
+ uptr i1 = i;
+ uptr i2 = i;
+ if (common_flags()->strict_string_checks) {
+ for (; i1 < size && s1[i1]; i1++) {}
+ for (; i2 < size && s2[i2]; i2++) {}
+ }
+ COMMON_INTERCEPTOR_READ_RANGE((ctx), (s1), Min(i1 + 1, size));
+ COMMON_INTERCEPTOR_READ_RANGE((ctx), (s2), Min(i2 + 1, size));
+ int result = CharCaseCmp(c1, c2);
+ CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strncasecmp, GET_CALLER_PC(),
+ s1, s2, size, result);
+ return result;
+}
+
+#define INIT_STRCASECMP COMMON_INTERCEPT_FUNCTION(strcasecmp)
+#define INIT_STRNCASECMP COMMON_INTERCEPT_FUNCTION(strncasecmp)
+#else
+#define INIT_STRCASECMP
+#define INIT_STRNCASECMP
+#endif
+
+#if SANITIZER_INTERCEPT_STRSTR || SANITIZER_INTERCEPT_STRCASESTR
+static inline void StrstrCheck(void *ctx, char *r, const char *s1,
+ const char *s2) {
+ uptr len1 = REAL(strlen)(s1);
+ uptr len2 = REAL(strlen)(s2);
+ COMMON_INTERCEPTOR_READ_STRING(ctx, s1, r ? r - s1 + len2 : len1 + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, len2 + 1);
+}
+#endif
+
+#if SANITIZER_INTERCEPT_STRSTR
+
+DECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strstr, uptr called_pc,
+ const char *s1, const char *s2, char *result)
+
+INTERCEPTOR(char*, strstr, const char *s1, const char *s2) {
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
+ return internal_strstr(s1, s2);
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strstr, s1, s2);
+ char *r = REAL(strstr)(s1, s2);
+ if (common_flags()->intercept_strstr)
+ StrstrCheck(ctx, r, s1, s2);
+ CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strstr, GET_CALLER_PC(), s1,
+ s2, r);
+ return r;
+}
+
+#define INIT_STRSTR COMMON_INTERCEPT_FUNCTION(strstr);
+#else
+#define INIT_STRSTR
+#endif
+
+#if SANITIZER_INTERCEPT_STRCASESTR
+
+DECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strcasestr, uptr called_pc,
+ const char *s1, const char *s2, char *result)
+
+INTERCEPTOR(char*, strcasestr, const char *s1, const char *s2) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strcasestr, s1, s2);
+ char *r = REAL(strcasestr)(s1, s2);
+ if (common_flags()->intercept_strstr)
+ StrstrCheck(ctx, r, s1, s2);
+ CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_strcasestr, GET_CALLER_PC(),
+ s1, s2, r);
+ return r;
+}
+
+#define INIT_STRCASESTR COMMON_INTERCEPT_FUNCTION(strcasestr);
+#else
+#define INIT_STRCASESTR
+#endif
+
+#if SANITIZER_INTERCEPT_STRTOK
+
+INTERCEPTOR(char*, strtok, char *str, const char *delimiters) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strtok, str, delimiters);
+ if (!common_flags()->intercept_strtok) {
+ return REAL(strtok)(str, delimiters);
+ }
+ if (common_flags()->strict_string_checks) {
+ // If strict_string_checks is enabled, we check the whole first argument
+ // string on the first call (strtok saves this string in a static buffer
+ // for subsequent calls). We do not need to check strtok's result.
+ // As the delimiters can change, we check them every call.
+ if (str != nullptr) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, str, REAL(strlen)(str) + 1);
+ }
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, delimiters,
+ REAL(strlen)(delimiters) + 1);
+ return REAL(strtok)(str, delimiters);
+ } else {
+ // However, when strict_string_checks is disabled we cannot check the
+ // whole string on the first call. Instead, we check the result string
+ // which is guaranteed to be a NULL-terminated substring of the first
+ // argument. We also conservatively check one character of str and the
+ // delimiters.
+ if (str != nullptr) {
+ COMMON_INTERCEPTOR_READ_STRING(ctx, str, 1);
+ }
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, delimiters, 1);
+ char *result = REAL(strtok)(str, delimiters);
+ if (result != nullptr) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, result, REAL(strlen)(result) + 1);
+ } else if (str != nullptr) {
+ // No delimiters were found; it's safe to assume that the entire str was
+ // scanned.
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, str, REAL(strlen)(str) + 1);
+ }
+ return result;
+ }
+}
+
+#define INIT_STRTOK COMMON_INTERCEPT_FUNCTION(strtok)
+#else
+#define INIT_STRTOK
+#endif
+
+#if SANITIZER_INTERCEPT_MEMMEM
+DECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_memmem, uptr called_pc,
+ const void *s1, SIZE_T len1, const void *s2,
+ SIZE_T len2, void *result)
+
+INTERCEPTOR(void*, memmem, const void *s1, SIZE_T len1, const void *s2,
+ SIZE_T len2) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, memmem, s1, len1, s2, len2);
+ void *r = REAL(memmem)(s1, len1, s2, len2);
+ if (common_flags()->intercept_memmem) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s1, len1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, len2);
+ }
+ CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_memmem, GET_CALLER_PC(),
+ s1, len1, s2, len2, r);
+ return r;
+}
+
+#define INIT_MEMMEM COMMON_INTERCEPT_FUNCTION(memmem);
+#else
+#define INIT_MEMMEM
+#endif // SANITIZER_INTERCEPT_MEMMEM
+
+#if SANITIZER_INTERCEPT_STRCHR
+INTERCEPTOR(char*, strchr, const char *s, int c) {
+ void *ctx;
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
+ return internal_strchr(s, c);
+ COMMON_INTERCEPTOR_ENTER(ctx, strchr, s, c);
+ char *result = REAL(strchr)(s, c);
+ if (common_flags()->intercept_strchr) {
+    // Keep strlen as a macro argument, as the macro may ignore it.
+ COMMON_INTERCEPTOR_READ_STRING(ctx, s,
+ (result ? result - s : REAL(strlen)(s)) + 1);
+ }
+ return result;
+}
+#define INIT_STRCHR COMMON_INTERCEPT_FUNCTION(strchr)
+#else
+#define INIT_STRCHR
+#endif
+
+#if SANITIZER_INTERCEPT_STRCHRNUL
+INTERCEPTOR(char*, strchrnul, const char *s, int c) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strchrnul, s, c);
+ char *result = REAL(strchrnul)(s, c);
+ uptr len = result - s + 1;
+ if (common_flags()->intercept_strchr)
+ COMMON_INTERCEPTOR_READ_STRING(ctx, s, len);
+ return result;
+}
+#define INIT_STRCHRNUL COMMON_INTERCEPT_FUNCTION(strchrnul)
+#else
+#define INIT_STRCHRNUL
+#endif
+
+#if SANITIZER_INTERCEPT_STRRCHR
+INTERCEPTOR(char*, strrchr, const char *s, int c) {
+ void *ctx;
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
+ return internal_strrchr(s, c);
+ COMMON_INTERCEPTOR_ENTER(ctx, strrchr, s, c);
+ if (common_flags()->intercept_strchr)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s, REAL(strlen)(s) + 1);
+ return REAL(strrchr)(s, c);
+}
+#define INIT_STRRCHR COMMON_INTERCEPT_FUNCTION(strrchr)
+#else
+#define INIT_STRRCHR
+#endif
+
+#if SANITIZER_INTERCEPT_STRSPN
+INTERCEPTOR(SIZE_T, strspn, const char *s1, const char *s2) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strspn, s1, s2);
+ SIZE_T r = REAL(strspn)(s1, s2);
+ if (common_flags()->intercept_strspn) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, REAL(strlen)(s2) + 1);
+ COMMON_INTERCEPTOR_READ_STRING(ctx, s1, r + 1);
+ }
+ return r;
+}
+
+INTERCEPTOR(SIZE_T, strcspn, const char *s1, const char *s2) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strcspn, s1, s2);
+ SIZE_T r = REAL(strcspn)(s1, s2);
+ if (common_flags()->intercept_strspn) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, REAL(strlen)(s2) + 1);
+ COMMON_INTERCEPTOR_READ_STRING(ctx, s1, r + 1);
+ }
+ return r;
+}
+
+#define INIT_STRSPN \
+ COMMON_INTERCEPT_FUNCTION(strspn); \
+ COMMON_INTERCEPT_FUNCTION(strcspn);
+#else
+#define INIT_STRSPN
+#endif
+
+#if SANITIZER_INTERCEPT_STRPBRK
+INTERCEPTOR(char *, strpbrk, const char *s1, const char *s2) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strpbrk, s1, s2);
+ char *r = REAL(strpbrk)(s1, s2);
+ if (common_flags()->intercept_strpbrk) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, REAL(strlen)(s2) + 1);
+ COMMON_INTERCEPTOR_READ_STRING(ctx, s1,
+ r ? r - s1 + 1 : REAL(strlen)(s1) + 1);
+ }
+ return r;
+}
+
+#define INIT_STRPBRK COMMON_INTERCEPT_FUNCTION(strpbrk);
+#else
+#define INIT_STRPBRK
+#endif
+
+#if SANITIZER_INTERCEPT_MEMSET
+INTERCEPTOR(void *, memset, void *dst, int v, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, dst, v, size);
+}
+
+#define INIT_MEMSET COMMON_INTERCEPT_FUNCTION(memset)
+#else
+#define INIT_MEMSET
+#endif
+
+#if SANITIZER_INTERCEPT_MEMMOVE
+INTERCEPTOR(void *, memmove, void *dst, const void *src, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size);
+}
+
+#define INIT_MEMMOVE COMMON_INTERCEPT_FUNCTION(memmove)
+#else
+#define INIT_MEMMOVE
+#endif
+
+#if SANITIZER_INTERCEPT_MEMCPY
+INTERCEPTOR(void *, memcpy, void *dst, const void *src, uptr size) {
+  // N.B.: on OS X, memcpy and memmove are aliases of the same
+  // implementation, so calling internal_memcpy here would cause memory
+  // corruption; if this is ever switched to an internal_ call, it must use
+  // internal_memmove.
+ void *ctx;
+ if (PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE) {
+ COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, dst, src, size);
+ } else {
+ COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, dst, src, size);
+ }
+}
+
+#define INIT_MEMCPY \
+ do { \
+ if (PLATFORM_HAS_DIFFERENT_MEMCPY_AND_MEMMOVE) { \
+ COMMON_INTERCEPT_FUNCTION(memcpy); \
+ } else { \
+ ASSIGN_REAL(memcpy, memmove); \
+ } \
+ CHECK(REAL(memcpy)); \
+ } while (false)
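+// Note on the macro above: when the platform does not distinguish memcpy
+// from memmove, memcpy is not intercepted separately; REAL(memcpy) is
+// aliased to the already-intercepted memmove via ASSIGN_REAL, so the
+// trailing CHECK(REAL(memcpy)) holds on both kinds of platform.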
+
+#else
+#define INIT_MEMCPY
+#endif
+
+#if SANITIZER_INTERCEPT_MEMCMP
+DECLARE_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_memcmp, uptr called_pc,
+ const void *s1, const void *s2, uptr n,
+ int result)
+
+// Common code for `memcmp` and `bcmp`.
+int MemcmpInterceptorCommon(void *ctx,
+ int (*real_fn)(const void *, const void *, uptr),
+ const void *a1, const void *a2, uptr size) {
+ if (common_flags()->intercept_memcmp) {
+ if (common_flags()->strict_memcmp) {
+      // Check both regions in full even if the first bytes of the buffers
+      // differ.
+      COMMON_INTERCEPTOR_READ_RANGE(ctx, a1, size);
+      COMMON_INTERCEPTOR_READ_RANGE(ctx, a2, size);
+      // Fall through to the real_fn call below.
+ } else {
+ unsigned char c1 = 0, c2 = 0;
+ const unsigned char *s1 = (const unsigned char*)a1;
+ const unsigned char *s2 = (const unsigned char*)a2;
+ uptr i;
+ for (i = 0; i < size; i++) {
+ c1 = s1[i];
+ c2 = s2[i];
+ if (c1 != c2) break;
+ }
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s1, Min(i + 1, size));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s2, Min(i + 1, size));
+ int r = CharCmpX(c1, c2);
+ CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_memcmp, GET_CALLER_PC(),
+ a1, a2, size, r);
+ return r;
+ }
+ }
+ int result = real_fn(a1, a2, size);
+ CALL_WEAK_INTERCEPTOR_HOOK(__sanitizer_weak_hook_memcmp, GET_CALLER_PC(), a1,
+ a2, size, result);
+ return result;
+}
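+// Worked example for the non-strict path (hypothetical buffers): for two
+// 100-byte buffers that first differ at index 1, the loop stops there, so
+// only Min(1 + 1, 100) == 2 bytes of each buffer are reported as read,
+// which is all a byte-wise memcmp needs to access before returning.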
+
+INTERCEPTOR(int, memcmp, const void *a1, const void *a2, uptr size) {
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
+ return internal_memcmp(a1, a2, size);
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, memcmp, a1, a2, size);
+ return MemcmpInterceptorCommon(ctx, REAL(memcmp), a1, a2, size);
+}
+
+#define INIT_MEMCMP COMMON_INTERCEPT_FUNCTION(memcmp)
+#else
+#define INIT_MEMCMP
+#endif
+
+#if SANITIZER_INTERCEPT_BCMP
+INTERCEPTOR(int, bcmp, const void *a1, const void *a2, uptr size) {
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
+ return internal_memcmp(a1, a2, size);
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, bcmp, a1, a2, size);
+ return MemcmpInterceptorCommon(ctx, REAL(bcmp), a1, a2, size);
+}
+
+#define INIT_BCMP COMMON_INTERCEPT_FUNCTION(bcmp)
+#else
+#define INIT_BCMP
+#endif
+
+#if SANITIZER_INTERCEPT_MEMCHR
+INTERCEPTOR(void*, memchr, const void *s, int c, SIZE_T n) {
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
+ return internal_memchr(s, c, n);
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, memchr, s, c, n);
+#if SANITIZER_WINDOWS
+ void *res;
+ if (REAL(memchr)) {
+ res = REAL(memchr)(s, c, n);
+ } else {
+ res = internal_memchr(s, c, n);
+ }
+#else
+ void *res = REAL(memchr)(s, c, n);
+#endif
+ uptr len = res ? (char *)res - (const char *)s + 1 : n;
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s, len);
+ return res;
+}
+
+#define INIT_MEMCHR COMMON_INTERCEPT_FUNCTION(memchr)
+#else
+#define INIT_MEMCHR
+#endif
+
+#if SANITIZER_INTERCEPT_MEMRCHR
+INTERCEPTOR(void*, memrchr, const void *s, int c, SIZE_T n) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, memrchr, s, c, n);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s, n);
+ return REAL(memrchr)(s, c, n);
+}
+
+#define INIT_MEMRCHR COMMON_INTERCEPT_FUNCTION(memrchr)
+#else
+#define INIT_MEMRCHR
+#endif
+
+#if SANITIZER_INTERCEPT_FREXP
+INTERCEPTOR(double, frexp, double x, int *exp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, frexp, x, exp);
+ // Assuming frexp() always writes to |exp|.
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, exp, sizeof(*exp));
+ double res = REAL(frexp)(x, exp);
+ return res;
+}
+
+#define INIT_FREXP COMMON_INTERCEPT_FUNCTION(frexp);
+#else
+#define INIT_FREXP
+#endif // SANITIZER_INTERCEPT_FREXP
+
+#if SANITIZER_INTERCEPT_FREXPF_FREXPL
+INTERCEPTOR(float, frexpf, float x, int *exp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, frexpf, x, exp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ float res = REAL(frexpf)(x, exp);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, exp, sizeof(*exp));
+ return res;
+}
+
+INTERCEPTOR(long double, frexpl, long double x, int *exp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, frexpl, x, exp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ long double res = REAL(frexpl)(x, exp);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, exp, sizeof(*exp));
+ return res;
+}
+
+#define INIT_FREXPF_FREXPL \
+ COMMON_INTERCEPT_FUNCTION(frexpf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(frexpl)
+#else
+#define INIT_FREXPF_FREXPL
+#endif // SANITIZER_INTERCEPT_FREXPF_FREXPL
+
+#if SI_POSIX
+static void write_iovec(void *ctx, struct __sanitizer_iovec *iovec,
+ SIZE_T iovlen, SIZE_T maxlen) {
+ for (SIZE_T i = 0; i < iovlen && maxlen; ++i) {
+ SSIZE_T sz = Min(iovec[i].iov_len, maxlen);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, iovec[i].iov_base, sz);
+ maxlen -= sz;
+ }
+}
+
+static void read_iovec(void *ctx, struct __sanitizer_iovec *iovec,
+ SIZE_T iovlen, SIZE_T maxlen) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, iovec, sizeof(*iovec) * iovlen);
+ for (SIZE_T i = 0; i < iovlen && maxlen; ++i) {
+ SSIZE_T sz = Min(iovec[i].iov_len, maxlen);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, iovec[i].iov_base, sz);
+ maxlen -= sz;
+ }
+}
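+// Note on the helpers above: maxlen caps the checked bytes at what the call
+// actually transferred. Hypothetical example: for a readv into two 10-byte
+// iovecs that returns 15, write_iovec marks all 10 bytes of iov[0] but only
+// the first 5 bytes of iov[1] as written.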
+#endif
+
+#if SANITIZER_INTERCEPT_READ
+INTERCEPTOR(SSIZE_T, read, int fd, void *ptr, SIZE_T count) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, read, fd, ptr, count);
+ COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ SSIZE_T res = REAL(read)(fd, ptr, count);
+ if (res > 0) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, res);
+ if (res >= 0 && fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
+ return res;
+}
+#define INIT_READ COMMON_INTERCEPT_FUNCTION(read)
+#else
+#define INIT_READ
+#endif
+
+#if SANITIZER_INTERCEPT_FREAD
+INTERCEPTOR(SIZE_T, fread, void *ptr, SIZE_T size, SIZE_T nmemb, void *file) {
+ // libc file streams can call user-supplied functions, see fopencookie.
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fread, ptr, size, nmemb, file);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ SIZE_T res = REAL(fread)(ptr, size, nmemb, file);
+ if (res > 0) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, res * size);
+ return res;
+}
+#define INIT_FREAD COMMON_INTERCEPT_FUNCTION(fread)
+#else
+#define INIT_FREAD
+#endif
+
+#if SANITIZER_INTERCEPT_PREAD
+INTERCEPTOR(SSIZE_T, pread, int fd, void *ptr, SIZE_T count, OFF_T offset) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, pread, fd, ptr, count, offset);
+ COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ SSIZE_T res = REAL(pread)(fd, ptr, count, offset);
+ if (res > 0) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, res);
+ if (res >= 0 && fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
+ return res;
+}
+#define INIT_PREAD COMMON_INTERCEPT_FUNCTION(pread)
+#else
+#define INIT_PREAD
+#endif
+
+#if SANITIZER_INTERCEPT_PREAD64
+INTERCEPTOR(SSIZE_T, pread64, int fd, void *ptr, SIZE_T count, OFF64_T offset) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, pread64, fd, ptr, count, offset);
+ COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ SSIZE_T res = REAL(pread64)(fd, ptr, count, offset);
+ if (res > 0) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, res);
+ if (res >= 0 && fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
+ return res;
+}
+#define INIT_PREAD64 COMMON_INTERCEPT_FUNCTION(pread64)
+#else
+#define INIT_PREAD64
+#endif
+
+#if SANITIZER_INTERCEPT_READV
+INTERCEPTOR_WITH_SUFFIX(SSIZE_T, readv, int fd, __sanitizer_iovec *iov,
+ int iovcnt) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, readv, fd, iov, iovcnt);
+ COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ SSIZE_T res = REAL(readv)(fd, iov, iovcnt);
+ if (res > 0) write_iovec(ctx, iov, iovcnt, res);
+ if (res >= 0 && fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
+ return res;
+}
+#define INIT_READV COMMON_INTERCEPT_FUNCTION(readv)
+#else
+#define INIT_READV
+#endif
+
+#if SANITIZER_INTERCEPT_PREADV
+INTERCEPTOR(SSIZE_T, preadv, int fd, __sanitizer_iovec *iov, int iovcnt,
+ OFF_T offset) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, preadv, fd, iov, iovcnt, offset);
+ COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ SSIZE_T res = REAL(preadv)(fd, iov, iovcnt, offset);
+ if (res > 0) write_iovec(ctx, iov, iovcnt, res);
+ if (res >= 0 && fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
+ return res;
+}
+#define INIT_PREADV COMMON_INTERCEPT_FUNCTION(preadv)
+#else
+#define INIT_PREADV
+#endif
+
+#if SANITIZER_INTERCEPT_PREADV64
+INTERCEPTOR(SSIZE_T, preadv64, int fd, __sanitizer_iovec *iov, int iovcnt,
+ OFF64_T offset) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, preadv64, fd, iov, iovcnt, offset);
+ COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ SSIZE_T res = REAL(preadv64)(fd, iov, iovcnt, offset);
+ if (res > 0) write_iovec(ctx, iov, iovcnt, res);
+ if (res >= 0 && fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
+ return res;
+}
+#define INIT_PREADV64 COMMON_INTERCEPT_FUNCTION(preadv64)
+#else
+#define INIT_PREADV64
+#endif
+
+#if SANITIZER_INTERCEPT_WRITE
+INTERCEPTOR(SSIZE_T, write, int fd, void *ptr, SIZE_T count) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, write, fd, ptr, count);
+ COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ if (fd >= 0) COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);
+ SSIZE_T res = REAL(write)(fd, ptr, count);
+ // FIXME: this check should be _before_ the call to REAL(write), not after
+ if (res > 0) COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, res);
+ return res;
+}
+#define INIT_WRITE COMMON_INTERCEPT_FUNCTION(write)
+#else
+#define INIT_WRITE
+#endif
+
+#if SANITIZER_INTERCEPT_FWRITE
+INTERCEPTOR(SIZE_T, fwrite, const void *p, uptr size, uptr nmemb, void *file) {
+ // libc file streams can call user-supplied functions, see fopencookie.
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fwrite, p, size, nmemb, file);
+ SIZE_T res = REAL(fwrite)(p, size, nmemb, file);
+ if (res > 0) COMMON_INTERCEPTOR_READ_RANGE(ctx, p, res * size);
+ return res;
+}
+#define INIT_FWRITE COMMON_INTERCEPT_FUNCTION(fwrite)
+#else
+#define INIT_FWRITE
+#endif
+
+#if SANITIZER_INTERCEPT_PWRITE
+INTERCEPTOR(SSIZE_T, pwrite, int fd, void *ptr, SIZE_T count, OFF_T offset) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, pwrite, fd, ptr, count, offset);
+ COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ if (fd >= 0) COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);
+ SSIZE_T res = REAL(pwrite)(fd, ptr, count, offset);
+ if (res > 0) COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, res);
+ return res;
+}
+#define INIT_PWRITE COMMON_INTERCEPT_FUNCTION(pwrite)
+#else
+#define INIT_PWRITE
+#endif
+
+#if SANITIZER_INTERCEPT_PWRITE64
+INTERCEPTOR(SSIZE_T, pwrite64, int fd, void *ptr, OFF64_T count,
+ OFF64_T offset) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, pwrite64, fd, ptr, count, offset);
+ COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ if (fd >= 0) COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);
+ SSIZE_T res = REAL(pwrite64)(fd, ptr, count, offset);
+ if (res > 0) COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, res);
+ return res;
+}
+#define INIT_PWRITE64 COMMON_INTERCEPT_FUNCTION(pwrite64)
+#else
+#define INIT_PWRITE64
+#endif
+
+#if SANITIZER_INTERCEPT_WRITEV
+INTERCEPTOR_WITH_SUFFIX(SSIZE_T, writev, int fd, __sanitizer_iovec *iov,
+ int iovcnt) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, writev, fd, iov, iovcnt);
+ COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ if (fd >= 0) COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);
+ SSIZE_T res = REAL(writev)(fd, iov, iovcnt);
+ if (res > 0) read_iovec(ctx, iov, iovcnt, res);
+ return res;
+}
+#define INIT_WRITEV COMMON_INTERCEPT_FUNCTION(writev)
+#else
+#define INIT_WRITEV
+#endif
+
+#if SANITIZER_INTERCEPT_PWRITEV
+INTERCEPTOR(SSIZE_T, pwritev, int fd, __sanitizer_iovec *iov, int iovcnt,
+ OFF_T offset) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, pwritev, fd, iov, iovcnt, offset);
+ COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ if (fd >= 0) COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);
+ SSIZE_T res = REAL(pwritev)(fd, iov, iovcnt, offset);
+ if (res > 0) read_iovec(ctx, iov, iovcnt, res);
+ return res;
+}
+#define INIT_PWRITEV COMMON_INTERCEPT_FUNCTION(pwritev)
+#else
+#define INIT_PWRITEV
+#endif
+
+#if SANITIZER_INTERCEPT_PWRITEV64
+INTERCEPTOR(SSIZE_T, pwritev64, int fd, __sanitizer_iovec *iov, int iovcnt,
+ OFF64_T offset) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, pwritev64, fd, iov, iovcnt, offset);
+ COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ if (fd >= 0) COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);
+ SSIZE_T res = REAL(pwritev64)(fd, iov, iovcnt, offset);
+ if (res > 0) read_iovec(ctx, iov, iovcnt, res);
+ return res;
+}
+#define INIT_PWRITEV64 COMMON_INTERCEPT_FUNCTION(pwritev64)
+#else
+#define INIT_PWRITEV64
+#endif
+
+#if SANITIZER_INTERCEPT_FGETS
+INTERCEPTOR(char *, fgets, char *s, SIZE_T size, void *file) {
+ // libc file streams can call user-supplied functions, see fopencookie.
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fgets, s, size, file);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ char *res = REAL(fgets)(s, size, file);
+ if (res)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, s, REAL(strlen)(s) + 1);
+ return res;
+}
+#define INIT_FGETS COMMON_INTERCEPT_FUNCTION(fgets)
+#else
+#define INIT_FGETS
+#endif
+
+#if SANITIZER_INTERCEPT_FPUTS
+INTERCEPTOR_WITH_SUFFIX(int, fputs, char *s, void *file) {
+ // libc file streams can call user-supplied functions, see fopencookie.
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fputs, s, file);
+ if (!SANITIZER_MAC || s)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s, REAL(strlen)(s) + 1);
+ return REAL(fputs)(s, file);
+}
+#define INIT_FPUTS COMMON_INTERCEPT_FUNCTION(fputs)
+#else
+#define INIT_FPUTS
+#endif
+
+#if SANITIZER_INTERCEPT_PUTS
+INTERCEPTOR(int, puts, char *s) {
+ // libc file streams can call user-supplied functions, see fopencookie.
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, puts, s);
+ if (!SANITIZER_MAC || s)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s, REAL(strlen)(s) + 1);
+ return REAL(puts)(s);
+}
+#define INIT_PUTS COMMON_INTERCEPT_FUNCTION(puts)
+#else
+#define INIT_PUTS
+#endif
+
+#if SANITIZER_INTERCEPT_PRCTL
+INTERCEPTOR(int, prctl, int option, unsigned long arg2,
+ unsigned long arg3, // NOLINT
+ unsigned long arg4, unsigned long arg5) { // NOLINT
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, prctl, option, arg2, arg3, arg4, arg5);
+ static const int PR_SET_NAME = 15;
+  int res = REAL(prctl)(option, arg2, arg3, arg4, arg5);
+ if (option == PR_SET_NAME) {
+ char buff[16];
+ internal_strncpy(buff, (char *)arg2, 15);
+ buff[15] = 0;
+ COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, buff);
+ }
+ return res;
+}
+#define INIT_PRCTL COMMON_INTERCEPT_FUNCTION(prctl)
+#else
+#define INIT_PRCTL
+#endif // SANITIZER_INTERCEPT_PRCTL
+
+#if SANITIZER_INTERCEPT_TIME
+INTERCEPTOR(unsigned long, time, unsigned long *t) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, time, t);
+ unsigned long local_t;
+ unsigned long res = REAL(time)(&local_t);
+ if (t && res != (unsigned long)-1) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, t, sizeof(*t));
+ *t = local_t;
+ }
+ return res;
+}
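+// Note: unlike the read()/fread() interceptors above, time() is routed
+// through a stack-local local_t, so REAL(time) never stores through the
+// user pointer; t is written (and reported as written) only once the call
+// is known to have succeeded.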
+#define INIT_TIME COMMON_INTERCEPT_FUNCTION(time);
+#else
+#define INIT_TIME
+#endif // SANITIZER_INTERCEPT_TIME
+
+#if SANITIZER_INTERCEPT_LOCALTIME_AND_FRIENDS
+static void unpoison_tm(void *ctx, __sanitizer_tm *tm) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, tm, sizeof(*tm));
+#if !SANITIZER_SOLARIS
+ if (tm->tm_zone) {
+    // Cannot use COMMON_INTERCEPTOR_WRITE_RANGE here, because tm->tm_zone
+    // can point to shared memory and tsan would report a data race.
+    COMMON_INTERCEPTOR_INITIALIZE_RANGE(tm->tm_zone,
+                                        REAL(strlen)(tm->tm_zone) + 1);
+ }
+#endif
+}
+INTERCEPTOR(__sanitizer_tm *, localtime, unsigned long *timep) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, localtime, timep);
+ __sanitizer_tm *res = REAL(localtime)(timep);
+ if (res) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, timep, sizeof(*timep));
+ unpoison_tm(ctx, res);
+ }
+ return res;
+}
+INTERCEPTOR(__sanitizer_tm *, localtime_r, unsigned long *timep, void *result) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, localtime_r, timep, result);
+ __sanitizer_tm *res = REAL(localtime_r)(timep, result);
+ if (res) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, timep, sizeof(*timep));
+ unpoison_tm(ctx, res);
+ }
+ return res;
+}
+INTERCEPTOR(__sanitizer_tm *, gmtime, unsigned long *timep) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, gmtime, timep);
+ __sanitizer_tm *res = REAL(gmtime)(timep);
+ if (res) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, timep, sizeof(*timep));
+ unpoison_tm(ctx, res);
+ }
+ return res;
+}
+INTERCEPTOR(__sanitizer_tm *, gmtime_r, unsigned long *timep, void *result) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, gmtime_r, timep, result);
+ __sanitizer_tm *res = REAL(gmtime_r)(timep, result);
+ if (res) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, timep, sizeof(*timep));
+ unpoison_tm(ctx, res);
+ }
+ return res;
+}
+INTERCEPTOR(char *, ctime, unsigned long *timep) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, ctime, timep);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ char *res = REAL(ctime)(timep);
+ if (res) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, timep, sizeof(*timep));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ }
+ return res;
+}
+INTERCEPTOR(char *, ctime_r, unsigned long *timep, char *result) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, ctime_r, timep, result);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ char *res = REAL(ctime_r)(timep, result);
+ if (res) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, timep, sizeof(*timep));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ }
+ return res;
+}
+INTERCEPTOR(char *, asctime, __sanitizer_tm *tm) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, asctime, tm);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ char *res = REAL(asctime)(tm);
+ if (res) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, tm, sizeof(*tm));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ }
+ return res;
+}
+INTERCEPTOR(char *, asctime_r, __sanitizer_tm *tm, char *result) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, asctime_r, tm, result);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ char *res = REAL(asctime_r)(tm, result);
+ if (res) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, tm, sizeof(*tm));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ }
+ return res;
+}
+INTERCEPTOR(long, mktime, __sanitizer_tm *tm) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, mktime, tm);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, &tm->tm_sec, sizeof(tm->tm_sec));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, &tm->tm_min, sizeof(tm->tm_min));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, &tm->tm_hour, sizeof(tm->tm_hour));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, &tm->tm_mday, sizeof(tm->tm_mday));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, &tm->tm_mon, sizeof(tm->tm_mon));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, &tm->tm_year, sizeof(tm->tm_year));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, &tm->tm_isdst, sizeof(tm->tm_isdst));
+ long res = REAL(mktime)(tm);
+ if (res != -1) unpoison_tm(ctx, tm);
+ return res;
+}
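+// Note on the per-field reads above: mktime() ignores tm_wday and tm_yday
+// (it recomputes them), so a whole-struct read check would flag fields that
+// callers may legitimately leave uninitialized; only the seven fields that
+// mktime actually consumes are checked.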
+#define INIT_LOCALTIME_AND_FRIENDS \
+ COMMON_INTERCEPT_FUNCTION(localtime); \
+ COMMON_INTERCEPT_FUNCTION(localtime_r); \
+ COMMON_INTERCEPT_FUNCTION(gmtime); \
+ COMMON_INTERCEPT_FUNCTION(gmtime_r); \
+ COMMON_INTERCEPT_FUNCTION(ctime); \
+ COMMON_INTERCEPT_FUNCTION(ctime_r); \
+ COMMON_INTERCEPT_FUNCTION(asctime); \
+ COMMON_INTERCEPT_FUNCTION(asctime_r); \
+ COMMON_INTERCEPT_FUNCTION(mktime);
+#else
+#define INIT_LOCALTIME_AND_FRIENDS
+#endif // SANITIZER_INTERCEPT_LOCALTIME_AND_FRIENDS
+
+#if SANITIZER_INTERCEPT_STRPTIME
+INTERCEPTOR(char *, strptime, char *s, char *format, __sanitizer_tm *tm) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strptime, s, format, tm);
+ if (format)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, format, REAL(strlen)(format) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ char *res = REAL(strptime)(s, format, tm);
+ COMMON_INTERCEPTOR_READ_STRING(ctx, s, res ? res - s : 0);
+ if (res && tm) {
+    // Do not call unpoison_tm here, because strptime does not, in fact,
+    // initialize the entire struct tm. For example, the tm_zone pointer is
+    // left uninitialized.
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, tm, sizeof(*tm));
+ }
+ return res;
+}
+#define INIT_STRPTIME COMMON_INTERCEPT_FUNCTION(strptime);
+#else
+#define INIT_STRPTIME
+#endif
+
+#if SANITIZER_INTERCEPT_SCANF || SANITIZER_INTERCEPT_PRINTF
+#include "sanitizer_common_interceptors_format.inc"
+
+#define FORMAT_INTERCEPTOR_IMPL(name, vname, ...) \
+ { \
+ void *ctx; \
+ va_list ap; \
+ va_start(ap, format); \
+ COMMON_INTERCEPTOR_ENTER(ctx, vname, __VA_ARGS__, ap); \
+ int res = WRAP(vname)(__VA_ARGS__, ap); \
+ va_end(ap); \
+ return res; \
+ }
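+// Note on the macro above: the variadic interceptors built from it do no
+// checking of their own; each packages its arguments into a va_list and
+// forwards to the corresponding v* interceptor via WRAP, so the
+// format-string handling for each function family lives in one place.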
+
+#endif
+
+#if SANITIZER_INTERCEPT_SCANF
+
+#define VSCANF_INTERCEPTOR_IMPL(vname, allowGnuMalloc, ...) \
+ { \
+ void *ctx; \
+ COMMON_INTERCEPTOR_ENTER(ctx, vname, __VA_ARGS__); \
+ va_list aq; \
+ va_copy(aq, ap); \
+ int res = REAL(vname)(__VA_ARGS__); \
+ if (res > 0) \
+ scanf_common(ctx, res, allowGnuMalloc, format, aq); \
+ va_end(aq); \
+ return res; \
+ }
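+// Note: the va_copy above is essential; REAL(vname) consumes ap, so
+// scanf_common must re-walk the argument list from the copy aq in order to
+// mark each of the res successfully converted outputs as written.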
+
+INTERCEPTOR(int, vscanf, const char *format, va_list ap)
+VSCANF_INTERCEPTOR_IMPL(vscanf, true, format, ap)
+
+INTERCEPTOR(int, vsscanf, const char *str, const char *format, va_list ap)
+VSCANF_INTERCEPTOR_IMPL(vsscanf, true, str, format, ap)
+
+INTERCEPTOR(int, vfscanf, void *stream, const char *format, va_list ap)
+VSCANF_INTERCEPTOR_IMPL(vfscanf, true, stream, format, ap)
+
+#if SANITIZER_INTERCEPT_ISOC99_SCANF
+INTERCEPTOR(int, __isoc99_vscanf, const char *format, va_list ap)
+VSCANF_INTERCEPTOR_IMPL(__isoc99_vscanf, false, format, ap)
+
+INTERCEPTOR(int, __isoc99_vsscanf, const char *str, const char *format,
+ va_list ap)
+VSCANF_INTERCEPTOR_IMPL(__isoc99_vsscanf, false, str, format, ap)
+
+INTERCEPTOR(int, __isoc99_vfscanf, void *stream, const char *format, va_list ap)
+VSCANF_INTERCEPTOR_IMPL(__isoc99_vfscanf, false, stream, format, ap)
+#endif // SANITIZER_INTERCEPT_ISOC99_SCANF
+
+INTERCEPTOR(int, scanf, const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(scanf, vscanf, format)
+
+INTERCEPTOR(int, fscanf, void *stream, const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(fscanf, vfscanf, stream, format)
+
+INTERCEPTOR(int, sscanf, const char *str, const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(sscanf, vsscanf, str, format)
+
+#if SANITIZER_INTERCEPT_ISOC99_SCANF
+INTERCEPTOR(int, __isoc99_scanf, const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(__isoc99_scanf, __isoc99_vscanf, format)
+
+INTERCEPTOR(int, __isoc99_fscanf, void *stream, const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(__isoc99_fscanf, __isoc99_vfscanf, stream, format)
+
+INTERCEPTOR(int, __isoc99_sscanf, const char *str, const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(__isoc99_sscanf, __isoc99_vsscanf, str, format)
+#endif
+
+#endif
+
+#if SANITIZER_INTERCEPT_SCANF
+#define INIT_SCANF \
+ COMMON_INTERCEPT_FUNCTION_LDBL(scanf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(sscanf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(fscanf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(vscanf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(vsscanf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(vfscanf);
+#else
+#define INIT_SCANF
+#endif
+
+#if SANITIZER_INTERCEPT_ISOC99_SCANF
+#define INIT_ISOC99_SCANF \
+ COMMON_INTERCEPT_FUNCTION(__isoc99_scanf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc99_sscanf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc99_fscanf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc99_vscanf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc99_vsscanf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc99_vfscanf);
+#else
+#define INIT_ISOC99_SCANF
+#endif
+
+#if SANITIZER_INTERCEPT_PRINTF
+
+#define VPRINTF_INTERCEPTOR_ENTER(vname, ...) \
+ void *ctx; \
+ COMMON_INTERCEPTOR_ENTER(ctx, vname, __VA_ARGS__); \
+ va_list aq; \
+ va_copy(aq, ap);
+
+#define VPRINTF_INTERCEPTOR_RETURN() \
+ va_end(aq);
+
+#define VPRINTF_INTERCEPTOR_IMPL(vname, ...) \
+ { \
+ VPRINTF_INTERCEPTOR_ENTER(vname, __VA_ARGS__); \
+ if (common_flags()->check_printf) \
+ printf_common(ctx, format, aq); \
+ int res = REAL(vname)(__VA_ARGS__); \
+ VPRINTF_INTERCEPTOR_RETURN(); \
+ return res; \
+ }
+
+// FIXME: under ASan the REAL() call below may write to freed memory and
+// corrupt its metadata. See
+// https://github.com/google/sanitizers/issues/321.
+#define VSPRINTF_INTERCEPTOR_IMPL(vname, str, ...) \
+ { \
+ VPRINTF_INTERCEPTOR_ENTER(vname, str, __VA_ARGS__) \
+ if (common_flags()->check_printf) { \
+ printf_common(ctx, format, aq); \
+ } \
+ int res = REAL(vname)(str, __VA_ARGS__); \
+ if (res >= 0) { \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, str, res + 1); \
+ } \
+ VPRINTF_INTERCEPTOR_RETURN(); \
+ return res; \
+ }
+
+// FIXME: under ASan the REAL() call below may write to freed memory and
+// corrupt its metadata. See
+// https://github.com/google/sanitizers/issues/321.
+#define VSNPRINTF_INTERCEPTOR_IMPL(vname, str, size, ...) \
+ { \
+ VPRINTF_INTERCEPTOR_ENTER(vname, str, size, __VA_ARGS__) \
+ if (common_flags()->check_printf) { \
+ printf_common(ctx, format, aq); \
+ } \
+ int res = REAL(vname)(str, size, __VA_ARGS__); \
+ if (res >= 0) { \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, str, Min(size, (SIZE_T)(res + 1))); \
+ } \
+ VPRINTF_INTERCEPTOR_RETURN(); \
+ return res; \
+ }
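+// Worked example (hypothetical call): vsnprintf(buf, 8, "%s", "hello world")
+// returns 11, but only 8 bytes of buf are actually written (7 characters
+// plus the truncating NUL); Min(size, (SIZE_T)(res + 1)) == Min(8, 12)
+// reports exactly that range.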
+
+// FIXME: under ASan the REAL() call below may write to freed memory and
+// corrupt its metadata. See
+// https://github.com/google/sanitizers/issues/321.
+#define VASPRINTF_INTERCEPTOR_IMPL(vname, strp, ...) \
+ { \
+ VPRINTF_INTERCEPTOR_ENTER(vname, strp, __VA_ARGS__) \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, strp, sizeof(char *)); \
+ if (common_flags()->check_printf) { \
+ printf_common(ctx, format, aq); \
+ } \
+ int res = REAL(vname)(strp, __VA_ARGS__); \
+ if (res >= 0) { \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *strp, res + 1); \
+ } \
+ VPRINTF_INTERCEPTOR_RETURN(); \
+ return res; \
+ }
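+// Note on the macro above: two writes are reported; the pointer slot strp
+// (sizeof(char *) bytes) is marked before the call, and on success the
+// freshly allocated string of res + 1 bytes behind *strp is marked as well.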
+
+INTERCEPTOR(int, vprintf, const char *format, va_list ap)
+VPRINTF_INTERCEPTOR_IMPL(vprintf, format, ap)
+
+INTERCEPTOR(int, vfprintf, __sanitizer_FILE *stream, const char *format,
+ va_list ap)
+VPRINTF_INTERCEPTOR_IMPL(vfprintf, stream, format, ap)
+
+INTERCEPTOR(int, vsnprintf, char *str, SIZE_T size, const char *format,
+ va_list ap)
+VSNPRINTF_INTERCEPTOR_IMPL(vsnprintf, str, size, format, ap)
+
+#if SANITIZER_INTERCEPT___PRINTF_CHK
+INTERCEPTOR(int, __vsnprintf_chk, char *str, SIZE_T size, int flag,
+ SIZE_T size_to, const char *format, va_list ap)
+VSNPRINTF_INTERCEPTOR_IMPL(vsnprintf, str, size, format, ap)
+#endif
+
+#if SANITIZER_INTERCEPT_PRINTF_L
+INTERCEPTOR(int, vsnprintf_l, char *str, SIZE_T size, void *loc,
+ const char *format, va_list ap)
+VSNPRINTF_INTERCEPTOR_IMPL(vsnprintf_l, str, size, loc, format, ap)
+
+INTERCEPTOR(int, snprintf_l, char *str, SIZE_T size, void *loc,
+ const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(snprintf_l, vsnprintf_l, str, size, loc, format)
+#endif // SANITIZER_INTERCEPT_PRINTF_L
+
+INTERCEPTOR(int, vsprintf, char *str, const char *format, va_list ap)
+VSPRINTF_INTERCEPTOR_IMPL(vsprintf, str, format, ap)
+
+#if SANITIZER_INTERCEPT___PRINTF_CHK
+INTERCEPTOR(int, __vsprintf_chk, char *str, int flag, SIZE_T size_to,
+ const char *format, va_list ap)
+VSPRINTF_INTERCEPTOR_IMPL(vsprintf, str, format, ap)
+#endif
+
+INTERCEPTOR(int, vasprintf, char **strp, const char *format, va_list ap)
+VASPRINTF_INTERCEPTOR_IMPL(vasprintf, strp, format, ap)
+
+#if SANITIZER_INTERCEPT_ISOC99_PRINTF
+INTERCEPTOR(int, __isoc99_vprintf, const char *format, va_list ap)
+VPRINTF_INTERCEPTOR_IMPL(__isoc99_vprintf, format, ap)
+
+INTERCEPTOR(int, __isoc99_vfprintf, __sanitizer_FILE *stream,
+ const char *format, va_list ap)
+VPRINTF_INTERCEPTOR_IMPL(__isoc99_vfprintf, stream, format, ap)
+
+INTERCEPTOR(int, __isoc99_vsnprintf, char *str, SIZE_T size, const char *format,
+ va_list ap)
+VSNPRINTF_INTERCEPTOR_IMPL(__isoc99_vsnprintf, str, size, format, ap)
+
+INTERCEPTOR(int, __isoc99_vsprintf, char *str, const char *format,
+ va_list ap)
+VSPRINTF_INTERCEPTOR_IMPL(__isoc99_vsprintf, str, format, ap)
+
+#endif // SANITIZER_INTERCEPT_ISOC99_PRINTF
+
+INTERCEPTOR(int, printf, const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(printf, vprintf, format)
+
+INTERCEPTOR(int, fprintf, __sanitizer_FILE *stream, const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(fprintf, vfprintf, stream, format)
+
+#if SANITIZER_INTERCEPT___PRINTF_CHK
+INTERCEPTOR(int, __fprintf_chk, __sanitizer_FILE *stream, SIZE_T size,
+ const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(__fprintf_chk, vfprintf, stream, format)
+#endif
+
+INTERCEPTOR(int, sprintf, char *str, const char *format, ...) // NOLINT
+FORMAT_INTERCEPTOR_IMPL(sprintf, vsprintf, str, format) // NOLINT
+
+#if SANITIZER_INTERCEPT___PRINTF_CHK
+INTERCEPTOR(int, __sprintf_chk, char *str, int flag, SIZE_T size_to,
+ const char *format, ...) // NOLINT
+FORMAT_INTERCEPTOR_IMPL(__sprintf_chk, vsprintf, str, format) // NOLINT
+#endif
+
+INTERCEPTOR(int, snprintf, char *str, SIZE_T size, const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(snprintf, vsnprintf, str, size, format)
+
+#if SANITIZER_INTERCEPT___PRINTF_CHK
+INTERCEPTOR(int, __snprintf_chk, char *str, SIZE_T size, int flag,
+ SIZE_T size_to, const char *format, ...) // NOLINT
+FORMAT_INTERCEPTOR_IMPL(__snprintf_chk, vsnprintf, str, size, format) // NOLINT
+#endif
+
+INTERCEPTOR(int, asprintf, char **strp, const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(asprintf, vasprintf, strp, format)
+
+#if SANITIZER_INTERCEPT_ISOC99_PRINTF
+INTERCEPTOR(int, __isoc99_printf, const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(__isoc99_printf, __isoc99_vprintf, format)
+
+INTERCEPTOR(int, __isoc99_fprintf, __sanitizer_FILE *stream, const char *format,
+ ...)
+FORMAT_INTERCEPTOR_IMPL(__isoc99_fprintf, __isoc99_vfprintf, stream, format)
+
+INTERCEPTOR(int, __isoc99_sprintf, char *str, const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(__isoc99_sprintf, __isoc99_vsprintf, str, format)
+
+INTERCEPTOR(int, __isoc99_snprintf, char *str, SIZE_T size,
+ const char *format, ...)
+FORMAT_INTERCEPTOR_IMPL(__isoc99_snprintf, __isoc99_vsnprintf, str, size,
+ format)
+
+#endif // SANITIZER_INTERCEPT_ISOC99_PRINTF
+
+#endif // SANITIZER_INTERCEPT_PRINTF
+
+#if SANITIZER_INTERCEPT_PRINTF
+#define INIT_PRINTF \
+ COMMON_INTERCEPT_FUNCTION_LDBL(printf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(sprintf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(snprintf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(asprintf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(fprintf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(vprintf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(vsprintf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(vsnprintf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(vasprintf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(vfprintf);
+#else
+#define INIT_PRINTF
+#endif
+
+#if SANITIZER_INTERCEPT___PRINTF_CHK
+#define INIT___PRINTF_CHK \
+ COMMON_INTERCEPT_FUNCTION(__sprintf_chk); \
+ COMMON_INTERCEPT_FUNCTION(__snprintf_chk); \
+ COMMON_INTERCEPT_FUNCTION(__vsprintf_chk); \
+ COMMON_INTERCEPT_FUNCTION(__vsnprintf_chk); \
+ COMMON_INTERCEPT_FUNCTION(__fprintf_chk);
+#else
+#define INIT___PRINTF_CHK
+#endif
+
+#if SANITIZER_INTERCEPT_PRINTF_L
+#define INIT_PRINTF_L \
+ COMMON_INTERCEPT_FUNCTION(snprintf_l); \
+ COMMON_INTERCEPT_FUNCTION(vsnprintf_l);
+#else
+#define INIT_PRINTF_L
+#endif
+
+#if SANITIZER_INTERCEPT_ISOC99_PRINTF
+#define INIT_ISOC99_PRINTF \
+ COMMON_INTERCEPT_FUNCTION(__isoc99_printf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc99_sprintf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc99_snprintf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc99_fprintf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc99_vprintf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc99_vsprintf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc99_vsnprintf); \
+ COMMON_INTERCEPT_FUNCTION(__isoc99_vfprintf);
+#else
+#define INIT_ISOC99_PRINTF
+#endif
+
+#if SANITIZER_INTERCEPT_IOCTL
+#include "sanitizer_common_interceptors_ioctl.inc"
+#include "sanitizer_interceptors_ioctl_netbsd.inc"
+INTERCEPTOR(int, ioctl, int d, unsigned long request, ...) {
+ // We need a frame pointer, because we call into ioctl_common_[pre|post] which
+ // can trigger a report and we need to be able to unwind through this
+ // function. On Mac in debug mode we might not have a frame pointer, because
+ // ioctl_common_[pre|post] doesn't get inlined here.
+ ENABLE_FRAME_POINTER;
+
+ void *ctx;
+ va_list ap;
+ va_start(ap, request);
+ void *arg = va_arg(ap, void *);
+ va_end(ap);
+ COMMON_INTERCEPTOR_ENTER(ctx, ioctl, d, request, arg);
+
+ CHECK(ioctl_initialized);
+
+ // Note: TSan does not use common flags, and they are zero-initialized.
+ // This effectively disables ioctl handling in TSan.
+ if (!common_flags()->handle_ioctl) return REAL(ioctl)(d, request, arg);
+
+ // Although request is unsigned long, the rest of the interceptor uses it
+ // as just "unsigned" to save space, because we know that all values fit in
+ // "unsigned" - they are compile-time constants.
+
+ const ioctl_desc *desc = ioctl_lookup(request);
+ ioctl_desc decoded_desc;
+ if (!desc) {
+ VPrintf(2, "Decoding unknown ioctl 0x%x\n", request);
+ if (!ioctl_decode(request, &decoded_desc))
+ Printf("WARNING: failed decoding unknown ioctl 0x%x\n", request);
+ else
+ desc = &decoded_desc;
+ }
+
+ if (desc) ioctl_common_pre(ctx, desc, d, request, arg);
+ int res = REAL(ioctl)(d, request, arg);
+ // FIXME: some ioctls have different return values for success and failure.
+ if (desc && res != -1) ioctl_common_post(ctx, desc, res, d, request, arg);
+ return res;
+}
+#define INIT_IOCTL \
+ ioctl_init(); \
+ COMMON_INTERCEPT_FUNCTION(ioctl);
+#else
+#define INIT_IOCTL
+#endif
+
+#if SANITIZER_POSIX
+UNUSED static void unpoison_passwd(void *ctx, __sanitizer_passwd *pwd) {
+ if (pwd) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwd, sizeof(*pwd));
+ if (pwd->pw_name)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwd->pw_name,
+ REAL(strlen)(pwd->pw_name) + 1);
+ if (pwd->pw_passwd)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwd->pw_passwd,
+ REAL(strlen)(pwd->pw_passwd) + 1);
+#if !SANITIZER_ANDROID
+ if (pwd->pw_gecos)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwd->pw_gecos,
+ REAL(strlen)(pwd->pw_gecos) + 1);
+#endif
+#if SANITIZER_MAC || SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_OPENBSD
+ if (pwd->pw_class)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwd->pw_class,
+ REAL(strlen)(pwd->pw_class) + 1);
+#endif
+ if (pwd->pw_dir)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwd->pw_dir,
+ REAL(strlen)(pwd->pw_dir) + 1);
+ if (pwd->pw_shell)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwd->pw_shell,
+ REAL(strlen)(pwd->pw_shell) + 1);
+ }
+}
+
+UNUSED static void unpoison_group(void *ctx, __sanitizer_group *grp) {
+ if (grp) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, grp, sizeof(*grp));
+ if (grp->gr_name)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, grp->gr_name,
+ REAL(strlen)(grp->gr_name) + 1);
+ if (grp->gr_passwd)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, grp->gr_passwd,
+ REAL(strlen)(grp->gr_passwd) + 1);
+ char **p = grp->gr_mem;
+ for (; *p; ++p) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *p, REAL(strlen)(*p) + 1);
+ }
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, grp->gr_mem,
+ (p - grp->gr_mem + 1) * sizeof(*p));
+ }
+}
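+// Note on the loop above: gr_mem is a NULL-terminated vector, so each member
+// string is unpoisoned through its NUL first, and then the pointer array
+// itself, including the terminating NULL slot (hence p - grp->gr_mem + 1
+// entries).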
+#endif // SANITIZER_POSIX
+
+#if SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS
+INTERCEPTOR(__sanitizer_passwd *, getpwnam, const char *name) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getpwnam, name);
+ if (name)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ __sanitizer_passwd *res = REAL(getpwnam)(name);
+ unpoison_passwd(ctx, res);
+ return res;
+}
+INTERCEPTOR(__sanitizer_passwd *, getpwuid, u32 uid) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getpwuid, uid);
+ __sanitizer_passwd *res = REAL(getpwuid)(uid);
+ unpoison_passwd(ctx, res);
+ return res;
+}
+INTERCEPTOR(__sanitizer_group *, getgrnam, const char *name) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getgrnam, name);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ __sanitizer_group *res = REAL(getgrnam)(name);
+ unpoison_group(ctx, res);
+ return res;
+}
+INTERCEPTOR(__sanitizer_group *, getgrgid, u32 gid) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getgrgid, gid);
+ __sanitizer_group *res = REAL(getgrgid)(gid);
+ unpoison_group(ctx, res);
+ return res;
+}
+#define INIT_GETPWNAM_AND_FRIENDS \
+ COMMON_INTERCEPT_FUNCTION(getpwnam); \
+ COMMON_INTERCEPT_FUNCTION(getpwuid); \
+ COMMON_INTERCEPT_FUNCTION(getgrnam); \
+ COMMON_INTERCEPT_FUNCTION(getgrgid);
+#else
+#define INIT_GETPWNAM_AND_FRIENDS
+#endif
+
+#if SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS
+INTERCEPTOR(int, getpwnam_r, const char *name, __sanitizer_passwd *pwd,
+ char *buf, SIZE_T buflen, __sanitizer_passwd **result) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getpwnam_r, name, pwd, buf, buflen, result);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(getpwnam_r)(name, pwd, buf, buflen, result);
+ if (!res && result)
+ unpoison_passwd(ctx, *result);
+ if (result) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
+ return res;
+}
+INTERCEPTOR(int, getpwuid_r, u32 uid, __sanitizer_passwd *pwd, char *buf,
+ SIZE_T buflen, __sanitizer_passwd **result) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getpwuid_r, uid, pwd, buf, buflen, result);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(getpwuid_r)(uid, pwd, buf, buflen, result);
+ if (!res && result)
+ unpoison_passwd(ctx, *result);
+ if (result) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
+ return res;
+}
+INTERCEPTOR(int, getgrnam_r, const char *name, __sanitizer_group *grp,
+ char *buf, SIZE_T buflen, __sanitizer_group **result) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getgrnam_r, name, grp, buf, buflen, result);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(getgrnam_r)(name, grp, buf, buflen, result);
+ if (!res && result)
+ unpoison_group(ctx, *result);
+ if (result) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
+ return res;
+}
+INTERCEPTOR(int, getgrgid_r, u32 gid, __sanitizer_group *grp, char *buf,
+ SIZE_T buflen, __sanitizer_group **result) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getgrgid_r, gid, grp, buf, buflen, result);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(getgrgid_r)(gid, grp, buf, buflen, result);
+ if (!res && result)
+ unpoison_group(ctx, *result);
+ if (result) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
+ return res;
+}
+#define INIT_GETPWNAM_R_AND_FRIENDS \
+ COMMON_INTERCEPT_FUNCTION(getpwnam_r); \
+ COMMON_INTERCEPT_FUNCTION(getpwuid_r); \
+ COMMON_INTERCEPT_FUNCTION(getgrnam_r); \
+ COMMON_INTERCEPT_FUNCTION(getgrgid_r);
+#else
+#define INIT_GETPWNAM_R_AND_FRIENDS
+#endif
+
+#if SANITIZER_INTERCEPT_GETPWENT
+INTERCEPTOR(__sanitizer_passwd *, getpwent, int dummy) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getpwent, dummy);
+ __sanitizer_passwd *res = REAL(getpwent)(dummy);
+ unpoison_passwd(ctx, res);
+ return res;
+}
+INTERCEPTOR(__sanitizer_group *, getgrent, int dummy) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getgrent, dummy);
+ __sanitizer_group *res = REAL(getgrent)(dummy);
+ unpoison_group(ctx, res);
+ return res;
+}
+#define INIT_GETPWENT \
+ COMMON_INTERCEPT_FUNCTION(getpwent); \
+ COMMON_INTERCEPT_FUNCTION(getgrent);
+#else
+#define INIT_GETPWENT
+#endif
+
+#if SANITIZER_INTERCEPT_FGETPWENT
+INTERCEPTOR(__sanitizer_passwd *, fgetpwent, void *fp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fgetpwent, fp);
+ __sanitizer_passwd *res = REAL(fgetpwent)(fp);
+ unpoison_passwd(ctx, res);
+ return res;
+}
+INTERCEPTOR(__sanitizer_group *, fgetgrent, void *fp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fgetgrent, fp);
+ __sanitizer_group *res = REAL(fgetgrent)(fp);
+ unpoison_group(ctx, res);
+ return res;
+}
+#define INIT_FGETPWENT \
+ COMMON_INTERCEPT_FUNCTION(fgetpwent); \
+ COMMON_INTERCEPT_FUNCTION(fgetgrent);
+#else
+#define INIT_FGETPWENT
+#endif
+
+#if SANITIZER_INTERCEPT_GETPWENT_R
+INTERCEPTOR(int, getpwent_r, __sanitizer_passwd *pwbuf, char *buf,
+ SIZE_T buflen, __sanitizer_passwd **pwbufp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getpwent_r, pwbuf, buf, buflen, pwbufp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(getpwent_r)(pwbuf, buf, buflen, pwbufp);
+ if (!res && pwbufp)
+ unpoison_passwd(ctx, *pwbufp);
+ if (pwbufp) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwbufp, sizeof(*pwbufp));
+ return res;
+}
+INTERCEPTOR(int, getgrent_r, __sanitizer_group *pwbuf, char *buf, SIZE_T buflen,
+ __sanitizer_group **pwbufp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getgrent_r, pwbuf, buf, buflen, pwbufp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(getgrent_r)(pwbuf, buf, buflen, pwbufp);
+ if (!res && pwbufp)
+ unpoison_group(ctx, *pwbufp);
+ if (pwbufp) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwbufp, sizeof(*pwbufp));
+ return res;
+}
+#define INIT_GETPWENT_R \
+ COMMON_INTERCEPT_FUNCTION(getpwent_r); \
+ COMMON_INTERCEPT_FUNCTION(getgrent_r);
+#else
+#define INIT_GETPWENT_R
+#endif
+
+#if SANITIZER_INTERCEPT_FGETPWENT_R
+INTERCEPTOR(int, fgetpwent_r, void *fp, __sanitizer_passwd *pwbuf, char *buf,
+ SIZE_T buflen, __sanitizer_passwd **pwbufp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fgetpwent_r, fp, pwbuf, buf, buflen, pwbufp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(fgetpwent_r)(fp, pwbuf, buf, buflen, pwbufp);
+ if (!res && pwbufp)
+ unpoison_passwd(ctx, *pwbufp);
+ if (pwbufp) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwbufp, sizeof(*pwbufp));
+ return res;
+}
+#define INIT_FGETPWENT_R \
+ COMMON_INTERCEPT_FUNCTION(fgetpwent_r);
+#else
+#define INIT_FGETPWENT_R
+#endif
+
+#if SANITIZER_INTERCEPT_FGETGRENT_R
+INTERCEPTOR(int, fgetgrent_r, void *fp, __sanitizer_group *pwbuf, char *buf,
+ SIZE_T buflen, __sanitizer_group **pwbufp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fgetgrent_r, fp, pwbuf, buf, buflen, pwbufp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(fgetgrent_r)(fp, pwbuf, buf, buflen, pwbufp);
+ if (!res && pwbufp)
+ unpoison_group(ctx, *pwbufp);
+ if (pwbufp) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pwbufp, sizeof(*pwbufp));
+ return res;
+}
+#define INIT_FGETGRENT_R \
+ COMMON_INTERCEPT_FUNCTION(fgetgrent_r);
+#else
+#define INIT_FGETGRENT_R
+#endif
+
+#if SANITIZER_INTERCEPT_SETPWENT
+// The only thing these interceptors do is disable any nested interceptors.
+// These functions may open nss modules and call uninstrumented functions from
+// them, and we don't want interceptors like the one for strlen() to trigger
+// on those calls.
+INTERCEPTOR(void, setpwent, int dummy) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, setpwent, dummy);
+ REAL(setpwent)(dummy);
+}
+INTERCEPTOR(void, endpwent, int dummy) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, endpwent, dummy);
+ REAL(endpwent)(dummy);
+}
+INTERCEPTOR(void, setgrent, int dummy) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, setgrent, dummy);
+ REAL(setgrent)(dummy);
+}
+INTERCEPTOR(void, endgrent, int dummy) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, endgrent, dummy);
+ REAL(endgrent)(dummy);
+}
+#define INIT_SETPWENT \
+ COMMON_INTERCEPT_FUNCTION(setpwent); \
+ COMMON_INTERCEPT_FUNCTION(endpwent); \
+ COMMON_INTERCEPT_FUNCTION(setgrent); \
+ COMMON_INTERCEPT_FUNCTION(endgrent);
+#else
+#define INIT_SETPWENT
+#endif
+
+#if SANITIZER_INTERCEPT_CLOCK_GETTIME
+INTERCEPTOR(int, clock_getres, u32 clk_id, void *tp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, clock_getres, clk_id, tp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(clock_getres)(clk_id, tp);
+ if (!res && tp) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, tp, struct_timespec_sz);
+ }
+ return res;
+}
+INTERCEPTOR(int, clock_gettime, u32 clk_id, void *tp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, clock_gettime, clk_id, tp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(clock_gettime)(clk_id, tp);
+ if (!res) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, tp, struct_timespec_sz);
+ }
+ return res;
+}
+namespace __sanitizer {
+extern "C" {
+int real_clock_gettime(u32 clk_id, void *tp) {
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
+ return internal_clock_gettime(clk_id, tp);
+ return REAL(clock_gettime)(clk_id, tp);
+}
+} // extern "C"
+} // namespace __sanitizer
+INTERCEPTOR(int, clock_settime, u32 clk_id, const void *tp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, clock_settime, clk_id, tp);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, tp, struct_timespec_sz);
+ return REAL(clock_settime)(clk_id, tp);
+}
+#define INIT_CLOCK_GETTIME \
+ COMMON_INTERCEPT_FUNCTION(clock_getres); \
+ COMMON_INTERCEPT_FUNCTION(clock_gettime); \
+ COMMON_INTERCEPT_FUNCTION(clock_settime);
+#else
+#define INIT_CLOCK_GETTIME
+#endif
+
+#if SANITIZER_INTERCEPT_GETITIMER
+INTERCEPTOR(int, getitimer, int which, void *curr_value) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getitimer, which, curr_value);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(getitimer)(which, curr_value);
+ if (!res && curr_value) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, curr_value, struct_itimerval_sz);
+ }
+ return res;
+}
+INTERCEPTOR(int, setitimer, int which, const void *new_value, void *old_value) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, setitimer, which, new_value, old_value);
+ if (new_value) {
+ // itimerval can contain padding that may be legitimately uninitialized
+ const struct __sanitizer_itimerval *nv =
+ (const struct __sanitizer_itimerval *)new_value;
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, &nv->it_interval.tv_sec,
+ sizeof(__sanitizer_time_t));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, &nv->it_interval.tv_usec,
+ sizeof(__sanitizer_suseconds_t));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, &nv->it_value.tv_sec,
+ sizeof(__sanitizer_time_t));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, &nv->it_value.tv_usec,
+ sizeof(__sanitizer_suseconds_t));
+ }
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(setitimer)(which, new_value, old_value);
+ if (!res && old_value) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, old_value, struct_itimerval_sz);
+ }
+ return res;
+}
+#define INIT_GETITIMER \
+ COMMON_INTERCEPT_FUNCTION(getitimer); \
+ COMMON_INTERCEPT_FUNCTION(setitimer);
+#else
+#define INIT_GETITIMER
+#endif
+
+#if SANITIZER_INTERCEPT_GLOB
+static void unpoison_glob_t(void *ctx, __sanitizer_glob_t *pglob) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pglob, sizeof(*pglob));
+  // +1 for the NULL pointer at the end.
+ if (pglob->gl_pathv)
+ COMMON_INTERCEPTOR_WRITE_RANGE(
+ ctx, pglob->gl_pathv, (pglob->gl_pathc + 1) * sizeof(*pglob->gl_pathv));
+ for (SIZE_T i = 0; i < pglob->gl_pathc; ++i) {
+ char *p = pglob->gl_pathv[i];
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, REAL(strlen)(p) + 1);
+ }
+}
+
+#if SANITIZER_SOLARIS
+INTERCEPTOR(int, glob, const char *pattern, int flags,
+ int (*errfunc)(const char *epath, int eerrno),
+ __sanitizer_glob_t *pglob) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, glob, pattern, flags, errfunc, pglob);
+ COMMON_INTERCEPTOR_READ_STRING(ctx, pattern, 0);
+ int res = REAL(glob)(pattern, flags, errfunc, pglob);
+ if ((!res || res == glob_nomatch) && pglob) unpoison_glob_t(ctx, pglob);
+ return res;
+}
+#else
+static THREADLOCAL __sanitizer_glob_t *pglob_copy;
+
+static void wrapped_gl_closedir(void *dir) {
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(1);
+ pglob_copy->gl_closedir(dir);
+}
+
+static void *wrapped_gl_readdir(void *dir) {
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(1);
+ return pglob_copy->gl_readdir(dir);
+}
+
+static void *wrapped_gl_opendir(const char *s) {
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(1);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(s, REAL(strlen)(s) + 1);
+ return pglob_copy->gl_opendir(s);
+}
+
+static int wrapped_gl_lstat(const char *s, void *st) {
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(2);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(s, REAL(strlen)(s) + 1);
+ return pglob_copy->gl_lstat(s, st);
+}
+
+static int wrapped_gl_stat(const char *s, void *st) {
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(2);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(s, REAL(strlen)(s) + 1);
+ return pglob_copy->gl_stat(s, st);
+}
+
+static const __sanitizer_glob_t kGlobCopy = {
+ 0, 0, 0,
+ 0, wrapped_gl_closedir, wrapped_gl_readdir,
+ wrapped_gl_opendir, wrapped_gl_lstat, wrapped_gl_stat};
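+// Note on the wrappers above: with glob_altdirfunc set, glob() calls back
+// into user-supplied directory functions, and those calls can originate in
+// uninstrumented libc code. The interceptor below therefore swaps the user
+// callbacks out for these wrappers, which unpoison the incoming parameters
+// (and pathname strings) before chaining to the original callbacks saved in
+// pglob_copy.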
+
+INTERCEPTOR(int, glob, const char *pattern, int flags,
+ int (*errfunc)(const char *epath, int eerrno),
+ __sanitizer_glob_t *pglob) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, glob, pattern, flags, errfunc, pglob);
+ COMMON_INTERCEPTOR_READ_STRING(ctx, pattern, 0);
+ __sanitizer_glob_t glob_copy;
+ internal_memcpy(&glob_copy, &kGlobCopy, sizeof(glob_copy));
+ if (flags & glob_altdirfunc) {
+ Swap(pglob->gl_closedir, glob_copy.gl_closedir);
+ Swap(pglob->gl_readdir, glob_copy.gl_readdir);
+ Swap(pglob->gl_opendir, glob_copy.gl_opendir);
+ Swap(pglob->gl_lstat, glob_copy.gl_lstat);
+ Swap(pglob->gl_stat, glob_copy.gl_stat);
+ pglob_copy = &glob_copy;
+ }
+ int res = REAL(glob)(pattern, flags, errfunc, pglob);
+ if (flags & glob_altdirfunc) {
+ Swap(pglob->gl_closedir, glob_copy.gl_closedir);
+ Swap(pglob->gl_readdir, glob_copy.gl_readdir);
+ Swap(pglob->gl_opendir, glob_copy.gl_opendir);
+ Swap(pglob->gl_lstat, glob_copy.gl_lstat);
+ Swap(pglob->gl_stat, glob_copy.gl_stat);
+ }
+ pglob_copy = 0;
+ if ((!res || res == glob_nomatch) && pglob) unpoison_glob_t(ctx, pglob);
+ return res;
+}
+#endif // SANITIZER_SOLARIS
+#define INIT_GLOB \
+ COMMON_INTERCEPT_FUNCTION(glob);
+#else // SANITIZER_INTERCEPT_GLOB
+#define INIT_GLOB
+#endif // SANITIZER_INTERCEPT_GLOB
+
+#if SANITIZER_INTERCEPT_GLOB64
+INTERCEPTOR(int, glob64, const char *pattern, int flags,
+ int (*errfunc)(const char *epath, int eerrno),
+ __sanitizer_glob_t *pglob) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, glob64, pattern, flags, errfunc, pglob);
+ COMMON_INTERCEPTOR_READ_STRING(ctx, pattern, 0);
+ __sanitizer_glob_t glob_copy;
+ internal_memcpy(&glob_copy, &kGlobCopy, sizeof(glob_copy));
+ if (flags & glob_altdirfunc) {
+ Swap(pglob->gl_closedir, glob_copy.gl_closedir);
+ Swap(pglob->gl_readdir, glob_copy.gl_readdir);
+ Swap(pglob->gl_opendir, glob_copy.gl_opendir);
+ Swap(pglob->gl_lstat, glob_copy.gl_lstat);
+ Swap(pglob->gl_stat, glob_copy.gl_stat);
+ pglob_copy = &glob_copy;
+ }
+ int res = REAL(glob64)(pattern, flags, errfunc, pglob);
+ if (flags & glob_altdirfunc) {
+ Swap(pglob->gl_closedir, glob_copy.gl_closedir);
+ Swap(pglob->gl_readdir, glob_copy.gl_readdir);
+ Swap(pglob->gl_opendir, glob_copy.gl_opendir);
+ Swap(pglob->gl_lstat, glob_copy.gl_lstat);
+ Swap(pglob->gl_stat, glob_copy.gl_stat);
+ }
+ pglob_copy = 0;
+ if ((!res || res == glob_nomatch) && pglob) unpoison_glob_t(ctx, pglob);
+ return res;
+}
+#define INIT_GLOB64 \
+ COMMON_INTERCEPT_FUNCTION(glob64);
+#else // SANITIZER_INTERCEPT_GLOB64
+#define INIT_GLOB64
+#endif // SANITIZER_INTERCEPT_GLOB64
+
+#if SANITIZER_INTERCEPT_WAIT
+// According to sys/wait.h, wait(), waitid(), waitpid() may have symbol version
+// suffixes on Darwin. See the declaration of INTERCEPTOR_WITH_SUFFIX for
+// details.
+INTERCEPTOR_WITH_SUFFIX(int, wait, int *status) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, wait, status);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(wait)(status);
+ if (res != -1 && status)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, status, sizeof(*status));
+ return res;
+}
+// On FreeBSD, id_t is always 64 bits wide.
+#if SANITIZER_FREEBSD && (SANITIZER_WORDSIZE == 32)
+INTERCEPTOR_WITH_SUFFIX(int, waitid, int idtype, long long id, void *infop,
+ int options) {
+#else
+INTERCEPTOR_WITH_SUFFIX(int, waitid, int idtype, int id, void *infop,
+ int options) {
+#endif
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, waitid, idtype, id, infop, options);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(waitid)(idtype, id, infop, options);
+ if (res != -1 && infop)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, infop, siginfo_t_sz);
+ return res;
+}
+INTERCEPTOR_WITH_SUFFIX(int, waitpid, int pid, int *status, int options) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, waitpid, pid, status, options);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(waitpid)(pid, status, options);
+ if (res != -1 && status)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, status, sizeof(*status));
+ return res;
+}
+INTERCEPTOR(int, wait3, int *status, int options, void *rusage) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, wait3, status, options, rusage);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(wait3)(status, options, rusage);
+ if (res != -1) {
+ if (status) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, status, sizeof(*status));
+ if (rusage) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, rusage, struct_rusage_sz);
+ }
+ return res;
+}
+#if SANITIZER_ANDROID
+INTERCEPTOR(int, __wait4, int pid, int *status, int options, void *rusage) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __wait4, pid, status, options, rusage);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(__wait4)(pid, status, options, rusage);
+ if (res != -1) {
+ if (status) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, status, sizeof(*status));
+ if (rusage) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, rusage, struct_rusage_sz);
+ }
+ return res;
+}
+#define INIT_WAIT4 COMMON_INTERCEPT_FUNCTION(__wait4);
+#else
+INTERCEPTOR(int, wait4, int pid, int *status, int options, void *rusage) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, wait4, pid, status, options, rusage);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(wait4)(pid, status, options, rusage);
+ if (res != -1) {
+ if (status) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, status, sizeof(*status));
+ if (rusage) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, rusage, struct_rusage_sz);
+ }
+ return res;
+}
+#define INIT_WAIT4 COMMON_INTERCEPT_FUNCTION(wait4);
+#endif // SANITIZER_ANDROID
+#define INIT_WAIT \
+ COMMON_INTERCEPT_FUNCTION(wait); \
+ COMMON_INTERCEPT_FUNCTION(waitid); \
+ COMMON_INTERCEPT_FUNCTION(waitpid); \
+ COMMON_INTERCEPT_FUNCTION(wait3);
+#else
+#define INIT_WAIT
+#define INIT_WAIT4
+#endif
+
+#if SANITIZER_INTERCEPT_INET
+INTERCEPTOR(char *, inet_ntop, int af, const void *src, char *dst, u32 size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, inet_ntop, af, src, dst, size);
+ uptr sz = __sanitizer_in_addr_sz(af);
+ if (sz) COMMON_INTERCEPTOR_READ_RANGE(ctx, src, sz);
+ // FIXME: figure out read size based on the address family.
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ char *res = REAL(inet_ntop)(af, src, dst, size);
+ if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ return res;
+}
+INTERCEPTOR(int, inet_pton, int af, const char *src, void *dst) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, inet_pton, af, src, dst);
+ COMMON_INTERCEPTOR_READ_STRING(ctx, src, 0);
+ // FIXME: figure out read size based on the address family.
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(inet_pton)(af, src, dst);
+ if (res == 1) {
+ uptr sz = __sanitizer_in_addr_sz(af);
+ if (sz) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, sz);
+ }
+ return res;
+}
+#define INIT_INET \
+ COMMON_INTERCEPT_FUNCTION(inet_ntop); \
+ COMMON_INTERCEPT_FUNCTION(inet_pton);
+#else
+#define INIT_INET
+#endif
+
+#if SANITIZER_INTERCEPT_INET
+INTERCEPTOR(int, inet_aton, const char *cp, void *dst) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, inet_aton, cp, dst);
+ if (cp) COMMON_INTERCEPTOR_READ_RANGE(ctx, cp, REAL(strlen)(cp) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(inet_aton)(cp, dst);
+ if (res != 0) {
+ uptr sz = __sanitizer_in_addr_sz(af_inet);
+ if (sz) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, sz);
+ }
+ return res;
+}
+#define INIT_INET_ATON COMMON_INTERCEPT_FUNCTION(inet_aton);
+#else
+#define INIT_INET_ATON
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_GETSCHEDPARAM
+INTERCEPTOR(int, pthread_getschedparam, uptr thread, int *policy, int *param) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, pthread_getschedparam, thread, policy, param);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(pthread_getschedparam)(thread, policy, param);
+ if (res == 0) {
+ if (policy) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, policy, sizeof(*policy));
+ if (param) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, param, sizeof(*param));
+ }
+ return res;
+}
+#define INIT_PTHREAD_GETSCHEDPARAM \
+ COMMON_INTERCEPT_FUNCTION(pthread_getschedparam);
+#else
+#define INIT_PTHREAD_GETSCHEDPARAM
+#endif
+
+#if SANITIZER_INTERCEPT_GETADDRINFO
+INTERCEPTOR(int, getaddrinfo, char *node, char *service,
+ struct __sanitizer_addrinfo *hints,
+ struct __sanitizer_addrinfo **out) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getaddrinfo, node, service, hints, out);
+ if (node) COMMON_INTERCEPTOR_READ_RANGE(ctx, node, REAL(strlen)(node) + 1);
+ if (service)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, service, REAL(strlen)(service) + 1);
+ if (hints)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, hints, sizeof(__sanitizer_addrinfo));
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(getaddrinfo)(node, service, hints, out);
+ if (res == 0 && out) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, out, sizeof(*out));
+ struct __sanitizer_addrinfo *p = *out;
+ while (p) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p));
+ if (p->ai_addr)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->ai_addr, p->ai_addrlen);
+ if (p->ai_canonname)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->ai_canonname,
+ REAL(strlen)(p->ai_canonname) + 1);
+ p = p->ai_next;
+ }
+ }
+ return res;
+}
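+// Illustrative caller-side sketch of the list the loop above unpoisons
+// (standard getaddrinfo usage; host/port values are examples):
+//
+//   struct addrinfo hints = {}, *res;
+//   hints.ai_socktype = SOCK_STREAM;
+//   if (getaddrinfo("example.org", "80", &hints, &res) == 0) {
+//     for (struct addrinfo *p = res; p; p = p->ai_next)
+//       ;  // p->ai_addr holds p->ai_addrlen valid bytes
+//     freeaddrinfo(res);
+//   }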
+#define INIT_GETADDRINFO COMMON_INTERCEPT_FUNCTION(getaddrinfo);
+#else
+#define INIT_GETADDRINFO
+#endif
+
+#if SANITIZER_INTERCEPT_GETNAMEINFO
+INTERCEPTOR(int, getnameinfo, void *sockaddr, unsigned salen, char *host,
+ unsigned hostlen, char *serv, unsigned servlen, int flags) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getnameinfo, sockaddr, salen, host, hostlen,
+ serv, servlen, flags);
+ // FIXME: consider adding READ_RANGE(sockaddr, salen).
+ // Padding in in_addr may make this too noisy.
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res =
+ REAL(getnameinfo)(sockaddr, salen, host, hostlen, serv, servlen, flags);
+ if (res == 0) {
+ if (host && hostlen)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, host, REAL(strlen)(host) + 1);
+ if (serv && servlen)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, serv, REAL(strlen)(serv) + 1);
+ }
+ return res;
+}
+#define INIT_GETNAMEINFO COMMON_INTERCEPT_FUNCTION(getnameinfo);
+#else
+#define INIT_GETNAMEINFO
+#endif
+
+#if SANITIZER_INTERCEPT_GETSOCKNAME
+INTERCEPTOR(int, getsockname, int sock_fd, void *addr, int *addrlen) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getsockname, sock_fd, addr, addrlen);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, addrlen, sizeof(*addrlen));
+ int addrlen_in = *addrlen;
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(getsockname)(sock_fd, addr, addrlen);
+ if (res == 0) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, addr, Min(addrlen_in, *addrlen));
+ }
+ return res;
+}
+#define INIT_GETSOCKNAME COMMON_INTERCEPT_FUNCTION(getsockname);
+#else
+#define INIT_GETSOCKNAME
+#endif
+
+#if SANITIZER_INTERCEPT_GETHOSTBYNAME || SANITIZER_INTERCEPT_GETHOSTBYNAME_R
+static void write_hostent(void *ctx, struct __sanitizer_hostent *h) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h, sizeof(__sanitizer_hostent));
+ if (h->h_name)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h->h_name, REAL(strlen)(h->h_name) + 1);
+ char **p = h->h_aliases;
+ while (*p) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *p, REAL(strlen)(*p) + 1);
+ ++p;
+ }
+ COMMON_INTERCEPTOR_WRITE_RANGE(
+ ctx, h->h_aliases, (p - h->h_aliases + 1) * sizeof(*h->h_aliases));
+ p = h->h_addr_list;
+ while (*p) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *p, h->h_length);
+ ++p;
+ }
+ COMMON_INTERCEPTOR_WRITE_RANGE(
+ ctx, h->h_addr_list, (p - h->h_addr_list + 1) * sizeof(*h->h_addr_list));
+}
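+// Illustrative hostent layout the writes above cover (standard netdb
+// structure; the values shown are examples):
+//
+//   h_name      -> "example.org\0"
+//   h_aliases   -> { "www.example.org", NULL }  // NULL-terminated array
+//   h_length    =  4                            // e.g. an AF_INET address
+//   h_addr_list -> { <h_length-byte addr>, NULL }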
+#endif
+
+#if SANITIZER_INTERCEPT_GETHOSTBYNAME
+INTERCEPTOR(struct __sanitizer_hostent *, gethostbyname, char *name) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, gethostbyname, name);
+ struct __sanitizer_hostent *res = REAL(gethostbyname)(name);
+ if (res) write_hostent(ctx, res);
+ return res;
+}
+
+INTERCEPTOR(struct __sanitizer_hostent *, gethostbyaddr, void *addr, int len,
+ int type) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, gethostbyaddr, addr, len, type);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, addr, len);
+ struct __sanitizer_hostent *res = REAL(gethostbyaddr)(addr, len, type);
+ if (res) write_hostent(ctx, res);
+ return res;
+}
+
+INTERCEPTOR(struct __sanitizer_hostent *, gethostent, int fake) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, gethostent, fake);
+ struct __sanitizer_hostent *res = REAL(gethostent)(fake);
+ if (res) write_hostent(ctx, res);
+ return res;
+}
+#define INIT_GETHOSTBYNAME \
+ COMMON_INTERCEPT_FUNCTION(gethostent); \
+ COMMON_INTERCEPT_FUNCTION(gethostbyaddr); \
+ COMMON_INTERCEPT_FUNCTION(gethostbyname);
+#else
+#define INIT_GETHOSTBYNAME
+#endif // SANITIZER_INTERCEPT_GETHOSTBYNAME
+
+#if SANITIZER_INTERCEPT_GETHOSTBYNAME2
+INTERCEPTOR(struct __sanitizer_hostent *, gethostbyname2, char *name, int af) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, gethostbyname2, name, af);
+ struct __sanitizer_hostent *res = REAL(gethostbyname2)(name, af);
+ if (res) write_hostent(ctx, res);
+ return res;
+}
+#define INIT_GETHOSTBYNAME2 COMMON_INTERCEPT_FUNCTION(gethostbyname2);
+#else
+#define INIT_GETHOSTBYNAME2
+#endif // SANITIZER_INTERCEPT_GETHOSTBYNAME2
+
+#if SANITIZER_INTERCEPT_GETHOSTBYNAME_R
+INTERCEPTOR(int, gethostbyname_r, char *name, struct __sanitizer_hostent *ret,
+ char *buf, SIZE_T buflen, __sanitizer_hostent **result,
+ int *h_errnop) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, gethostbyname_r, name, ret, buf, buflen, result,
+ h_errnop);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(gethostbyname_r)(name, ret, buf, buflen, result, h_errnop);
+ if (result) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
+ if (res == 0 && *result) write_hostent(ctx, *result);
+ }
+ if (h_errnop)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h_errnop, sizeof(*h_errnop));
+ return res;
+}
+#define INIT_GETHOSTBYNAME_R COMMON_INTERCEPT_FUNCTION(gethostbyname_r);
+#else
+#define INIT_GETHOSTBYNAME_R
+#endif
+
+#if SANITIZER_INTERCEPT_GETHOSTENT_R
+INTERCEPTOR(int, gethostent_r, struct __sanitizer_hostent *ret, char *buf,
+ SIZE_T buflen, __sanitizer_hostent **result, int *h_errnop) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, gethostent_r, ret, buf, buflen, result,
+ h_errnop);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(gethostent_r)(ret, buf, buflen, result, h_errnop);
+ if (result) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
+ if (res == 0 && *result) write_hostent(ctx, *result);
+ }
+ if (h_errnop)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h_errnop, sizeof(*h_errnop));
+ return res;
+}
+#define INIT_GETHOSTENT_R \
+ COMMON_INTERCEPT_FUNCTION(gethostent_r);
+#else
+#define INIT_GETHOSTENT_R
+#endif
+
+#if SANITIZER_INTERCEPT_GETHOSTBYADDR_R
+INTERCEPTOR(int, gethostbyaddr_r, void *addr, int len, int type,
+ struct __sanitizer_hostent *ret, char *buf, SIZE_T buflen,
+ __sanitizer_hostent **result, int *h_errnop) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, gethostbyaddr_r, addr, len, type, ret, buf,
+ buflen, result, h_errnop);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, addr, len);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(gethostbyaddr_r)(addr, len, type, ret, buf, buflen, result,
+ h_errnop);
+ if (result) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
+ if (res == 0 && *result) write_hostent(ctx, *result);
+ }
+ if (h_errnop)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h_errnop, sizeof(*h_errnop));
+ return res;
+}
+#define INIT_GETHOSTBYADDR_R \
+ COMMON_INTERCEPT_FUNCTION(gethostbyaddr_r);
+#else
+#define INIT_GETHOSTBYADDR_R
+#endif
+
+#if SANITIZER_INTERCEPT_GETHOSTBYNAME2_R
+INTERCEPTOR(int, gethostbyname2_r, char *name, int af,
+ struct __sanitizer_hostent *ret, char *buf, SIZE_T buflen,
+ __sanitizer_hostent **result, int *h_errnop) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, gethostbyname2_r, name, af, ret, buf, buflen,
+ result, h_errnop);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res =
+ REAL(gethostbyname2_r)(name, af, ret, buf, buflen, result, h_errnop);
+ if (result) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
+ if (res == 0 && *result) write_hostent(ctx, *result);
+ }
+ if (h_errnop)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, h_errnop, sizeof(*h_errnop));
+ return res;
+}
+#define INIT_GETHOSTBYNAME2_R \
+ COMMON_INTERCEPT_FUNCTION(gethostbyname2_r);
+#else
+#define INIT_GETHOSTBYNAME2_R
+#endif
+
+#if SANITIZER_INTERCEPT_GETSOCKOPT
+INTERCEPTOR(int, getsockopt, int sockfd, int level, int optname, void *optval,
+ int *optlen) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getsockopt, sockfd, level, optname, optval,
+ optlen);
+ if (optlen) COMMON_INTERCEPTOR_READ_RANGE(ctx, optlen, sizeof(*optlen));
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(getsockopt)(sockfd, level, optname, optval, optlen);
+ if (res == 0)
+ if (optval && optlen) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, optval, *optlen);
+ return res;
+}
+#define INIT_GETSOCKOPT COMMON_INTERCEPT_FUNCTION(getsockopt);
+#else
+#define INIT_GETSOCKOPT
+#endif
+
+#if SANITIZER_INTERCEPT_ACCEPT
+INTERCEPTOR(int, accept, int fd, void *addr, unsigned *addrlen) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, accept, fd, addr, addrlen);
+ unsigned addrlen0 = 0;
+ if (addrlen) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, addrlen, sizeof(*addrlen));
+ addrlen0 = *addrlen;
+ }
+ int fd2 = REAL(accept)(fd, addr, addrlen);
+ if (fd2 >= 0) {
+ if (fd >= 0) COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, fd2);
+ if (addr && addrlen)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, addr, Min(*addrlen, addrlen0));
+ }
+ return fd2;
+}
+#define INIT_ACCEPT COMMON_INTERCEPT_FUNCTION(accept);
+#else
+#define INIT_ACCEPT
+#endif
+
+#if SANITIZER_INTERCEPT_ACCEPT4
+INTERCEPTOR(int, accept4, int fd, void *addr, unsigned *addrlen, int f) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, accept4, fd, addr, addrlen, f);
+ unsigned addrlen0 = 0;
+ if (addrlen) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, addrlen, sizeof(*addrlen));
+ addrlen0 = *addrlen;
+ }
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int fd2 = REAL(accept4)(fd, addr, addrlen, f);
+ if (fd2 >= 0) {
+ if (fd >= 0) COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, fd2);
+ if (addr && addrlen)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, addr, Min(*addrlen, addrlen0));
+ }
+ return fd2;
+}
+#define INIT_ACCEPT4 COMMON_INTERCEPT_FUNCTION(accept4);
+#else
+#define INIT_ACCEPT4
+#endif
+
+#if SANITIZER_INTERCEPT_PACCEPT
+INTERCEPTOR(int, paccept, int fd, void *addr, unsigned *addrlen,
+ __sanitizer_sigset_t *set, int f) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, paccept, fd, addr, addrlen, set, f);
+ unsigned addrlen0 = 0;
+ if (addrlen) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, addrlen, sizeof(*addrlen));
+ addrlen0 = *addrlen;
+ }
+ if (set) COMMON_INTERCEPTOR_READ_RANGE(ctx, set, sizeof(*set));
+ int fd2 = REAL(paccept)(fd, addr, addrlen, set, f);
+ if (fd2 >= 0) {
+ if (fd >= 0) COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, fd2);
+ if (addr && addrlen)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, addr, Min(*addrlen, addrlen0));
+ }
+ return fd2;
+}
+#define INIT_PACCEPT COMMON_INTERCEPT_FUNCTION(paccept);
+#else
+#define INIT_PACCEPT
+#endif
+
+#if SANITIZER_INTERCEPT_MODF
+INTERCEPTOR(double, modf, double x, double *iptr) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, modf, x, iptr);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ double res = REAL(modf)(x, iptr);
+ if (iptr) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, iptr, sizeof(*iptr));
+ }
+ return res;
+}
+INTERCEPTOR(float, modff, float x, float *iptr) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, modff, x, iptr);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ float res = REAL(modff)(x, iptr);
+ if (iptr) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, iptr, sizeof(*iptr));
+ }
+ return res;
+}
+INTERCEPTOR(long double, modfl, long double x, long double *iptr) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, modfl, x, iptr);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ long double res = REAL(modfl)(x, iptr);
+ if (iptr) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, iptr, sizeof(*iptr));
+ }
+ return res;
+}
+#define INIT_MODF \
+ COMMON_INTERCEPT_FUNCTION(modf); \
+ COMMON_INTERCEPT_FUNCTION(modff); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(modfl);
+#else
+#define INIT_MODF
+#endif
+
+#if SANITIZER_INTERCEPT_RECVMSG || SANITIZER_INTERCEPT_RECVMMSG
+static void write_msghdr(void *ctx, struct __sanitizer_msghdr *msg,
+ SSIZE_T maxlen) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, msg, sizeof(*msg));
+ if (msg->msg_name && msg->msg_namelen)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, msg->msg_name, msg->msg_namelen);
+ if (msg->msg_iov && msg->msg_iovlen)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, msg->msg_iov,
+ sizeof(*msg->msg_iov) * msg->msg_iovlen);
+ write_iovec(ctx, msg->msg_iov, msg->msg_iovlen, maxlen);
+ if (msg->msg_control && msg->msg_controllen)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, msg->msg_control, msg->msg_controllen);
+}
+#endif
+
+#if SANITIZER_INTERCEPT_RECVMSG
+INTERCEPTOR(SSIZE_T, recvmsg, int fd, struct __sanitizer_msghdr *msg,
+ int flags) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, recvmsg, fd, msg, flags);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ SSIZE_T res = REAL(recvmsg)(fd, msg, flags);
+ if (res >= 0) {
+ if (fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
+ if (msg) {
+ write_msghdr(ctx, msg, res);
+ COMMON_INTERCEPTOR_HANDLE_RECVMSG(ctx, msg);
+ }
+ }
+ return res;
+}
+#define INIT_RECVMSG COMMON_INTERCEPT_FUNCTION(recvmsg);
+#else
+#define INIT_RECVMSG
+#endif
+
+#if SANITIZER_INTERCEPT_RECVMMSG
+INTERCEPTOR(int, recvmmsg, int fd, struct __sanitizer_mmsghdr *msgvec,
+ unsigned int vlen, int flags, void *timeout) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, recvmmsg, fd, msgvec, vlen, flags, timeout);
+ if (timeout) COMMON_INTERCEPTOR_READ_RANGE(ctx, timeout, struct_timespec_sz);
+ int res = REAL(recvmmsg)(fd, msgvec, vlen, flags, timeout);
+ if (res >= 0) {
+ if (fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
+ for (int i = 0; i < res; ++i) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, &msgvec[i].msg_len,
+ sizeof(msgvec[i].msg_len));
+ write_msghdr(ctx, &msgvec[i].msg_hdr, msgvec[i].msg_len);
+ COMMON_INTERCEPTOR_HANDLE_RECVMSG(ctx, &msgvec[i].msg_hdr);
+ }
+ }
+ return res;
+}
+#define INIT_RECVMMSG COMMON_INTERCEPT_FUNCTION(recvmmsg);
+#else
+#define INIT_RECVMMSG
+#endif
+
+#if SANITIZER_INTERCEPT_SENDMSG || SANITIZER_INTERCEPT_SENDMMSG
+static void read_msghdr_control(void *ctx, void *control, uptr controllen) {
+ const unsigned kCmsgDataOffset =
+ RoundUpTo(sizeof(__sanitizer_cmsghdr), sizeof(uptr));
+
+ char *p = (char *)control;
+ char *const control_end = p + controllen;
+ while (true) {
+ if (p + sizeof(__sanitizer_cmsghdr) > control_end) break;
+ __sanitizer_cmsghdr *cmsg = (__sanitizer_cmsghdr *)p;
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, &cmsg->cmsg_len, sizeof(cmsg->cmsg_len));
+
+ if (p + RoundUpTo(cmsg->cmsg_len, sizeof(uptr)) > control_end) break;
+
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, &cmsg->cmsg_level,
+ sizeof(cmsg->cmsg_level));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, &cmsg->cmsg_type,
+ sizeof(cmsg->cmsg_type));
+
+ if (cmsg->cmsg_len > kCmsgDataOffset) {
+ char *data = p + kCmsgDataOffset;
+ unsigned data_len = cmsg->cmsg_len - kCmsgDataOffset;
+ if (data_len > 0) COMMON_INTERCEPTOR_READ_RANGE(ctx, data, data_len);
+ }
+
+ p += RoundUpTo(cmsg->cmsg_len, sizeof(uptr));
+ }
+}
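+// Illustrative control-buffer layout walked above. The pointer arithmetic
+// mirrors what libc's CMSG_FIRSTHDR/CMSG_NXTHDR macros compute; aligning
+// to sizeof(uptr) matches the RoundUpTo calls here, not necessarily every
+// platform's CMSG_ALIGN:
+//
+//   [ cmsghdr | data ... pad ][ cmsghdr | data ... pad ] ...
+//     ^ cmsg_len covers the header plus data, excluding trailing padding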
+
+static void read_msghdr(void *ctx, struct __sanitizer_msghdr *msg,
+ SSIZE_T maxlen) {
+#define R(f) \
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, &msg->msg_##f, sizeof(msg->msg_##f))
+ R(name);
+ R(namelen);
+ R(iov);
+ R(iovlen);
+ R(control);
+ R(controllen);
+ R(flags);
+#undef R
+ if (msg->msg_name && msg->msg_namelen)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, msg->msg_name, msg->msg_namelen);
+ if (msg->msg_iov && msg->msg_iovlen)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, msg->msg_iov,
+ sizeof(*msg->msg_iov) * msg->msg_iovlen);
+ read_iovec(ctx, msg->msg_iov, msg->msg_iovlen, maxlen);
+ if (msg->msg_control && msg->msg_controllen)
+ read_msghdr_control(ctx, msg->msg_control, msg->msg_controllen);
+}
+#endif
+
+#if SANITIZER_INTERCEPT_SENDMSG
+INTERCEPTOR(SSIZE_T, sendmsg, int fd, struct __sanitizer_msghdr *msg,
+ int flags) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sendmsg, fd, msg, flags);
+ if (fd >= 0) {
+ COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);
+ }
+ SSIZE_T res = REAL(sendmsg)(fd, msg, flags);
+ if (common_flags()->intercept_send && res >= 0 && msg)
+ read_msghdr(ctx, msg, res);
+ return res;
+}
+#define INIT_SENDMSG COMMON_INTERCEPT_FUNCTION(sendmsg);
+#else
+#define INIT_SENDMSG
+#endif
+
+#if SANITIZER_INTERCEPT_SENDMMSG
+INTERCEPTOR(int, sendmmsg, int fd, struct __sanitizer_mmsghdr *msgvec,
+ unsigned vlen, int flags) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sendmmsg, fd, msgvec, vlen, flags);
+ if (fd >= 0) {
+ COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);
+ }
+ int res = REAL(sendmmsg)(fd, msgvec, vlen, flags);
+ if (res >= 0 && msgvec)
+ for (int i = 0; i < res; ++i) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, &msgvec[i].msg_len,
+ sizeof(msgvec[i].msg_len));
+ if (common_flags()->intercept_send)
+ read_msghdr(ctx, &msgvec[i].msg_hdr, msgvec[i].msg_len);
+ }
+ return res;
+}
+#define INIT_SENDMMSG COMMON_INTERCEPT_FUNCTION(sendmmsg);
+#else
+#define INIT_SENDMMSG
+#endif
+
+#if SANITIZER_INTERCEPT_GETPEERNAME
+INTERCEPTOR(int, getpeername, int sockfd, void *addr, unsigned *addrlen) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getpeername, sockfd, addr, addrlen);
+ unsigned addr_sz = 0;
+ if (addrlen) addr_sz = *addrlen;
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(getpeername)(sockfd, addr, addrlen);
+ if (!res && addr && addrlen)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, addr, Min(addr_sz, *addrlen));
+ return res;
+}
+#define INIT_GETPEERNAME COMMON_INTERCEPT_FUNCTION(getpeername);
+#else
+#define INIT_GETPEERNAME
+#endif
+
+#if SANITIZER_INTERCEPT_SYSINFO
+INTERCEPTOR(int, sysinfo, void *info) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sysinfo, info);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(sysinfo)(info);
+ if (!res && info)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, info, struct_sysinfo_sz);
+ return res;
+}
+#define INIT_SYSINFO COMMON_INTERCEPT_FUNCTION(sysinfo);
+#else
+#define INIT_SYSINFO
+#endif
+
+#if SANITIZER_INTERCEPT_READDIR
+INTERCEPTOR(__sanitizer_dirent *, opendir, const char *path) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, opendir, path);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ __sanitizer_dirent *res = REAL(opendir)(path);
+ if (res)
+ COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path);
+ return res;
+}
+
+INTERCEPTOR(__sanitizer_dirent *, readdir, void *dirp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, readdir, dirp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ __sanitizer_dirent *res = REAL(readdir)(dirp);
+ if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, res->d_reclen);
+ return res;
+}
+
+INTERCEPTOR(int, readdir_r, void *dirp, __sanitizer_dirent *entry,
+ __sanitizer_dirent **result) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, readdir_r, dirp, entry, result);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(readdir_r)(dirp, entry, result);
+ if (!res) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
+ if (*result)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *result, (*result)->d_reclen);
+ }
+ return res;
+}
+
+#define INIT_READDIR \
+ COMMON_INTERCEPT_FUNCTION(opendir); \
+ COMMON_INTERCEPT_FUNCTION(readdir); \
+ COMMON_INTERCEPT_FUNCTION(readdir_r);
+#else
+#define INIT_READDIR
+#endif
+
+#if SANITIZER_INTERCEPT_READDIR64
+INTERCEPTOR(__sanitizer_dirent64 *, readdir64, void *dirp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, readdir64, dirp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ __sanitizer_dirent64 *res = REAL(readdir64)(dirp);
+ if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, res->d_reclen);
+ return res;
+}
+
+INTERCEPTOR(int, readdir64_r, void *dirp, __sanitizer_dirent64 *entry,
+ __sanitizer_dirent64 **result) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, readdir64_r, dirp, entry, result);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(readdir64_r)(dirp, entry, result);
+ if (!res) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
+ if (*result)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *result, (*result)->d_reclen);
+ }
+ return res;
+}
+#define INIT_READDIR64 \
+ COMMON_INTERCEPT_FUNCTION(readdir64); \
+ COMMON_INTERCEPT_FUNCTION(readdir64_r);
+#else
+#define INIT_READDIR64
+#endif
+
+#if SANITIZER_INTERCEPT_PTRACE
+INTERCEPTOR(uptr, ptrace, int request, int pid, void *addr, void *data) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, ptrace, request, pid, addr, data);
+ __sanitizer_iovec local_iovec;
+
+ if (data) {
+ if (request == ptrace_setregs)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, data, struct_user_regs_struct_sz);
+ else if (request == ptrace_setfpregs)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, data, struct_user_fpregs_struct_sz);
+ else if (request == ptrace_setfpxregs)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, data, struct_user_fpxregs_struct_sz);
+ else if (request == ptrace_setvfpregs)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, data, struct_user_vfpregs_struct_sz);
+ else if (request == ptrace_setsiginfo)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, data, siginfo_t_sz);
+ // Some kernels zero iovec::iov_base on an invalid write access, so save
+ // a copy of the iovec here; the original address then stays available
+ // for inspection after the call.
+ else if (request == ptrace_setregset || request == ptrace_getregset) {
+ __sanitizer_iovec *iovec = (__sanitizer_iovec*)data;
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, iovec, sizeof(*iovec));
+ local_iovec = *iovec;
+ if (request == ptrace_setregset)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, iovec->iov_base, iovec->iov_len);
+ }
+ }
+
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ uptr res = REAL(ptrace)(request, pid, addr, data);
+
+ if (!res && data) {
+ // Note that PEEK* requests assign different meaning to the return value.
+ // This function does not handle them (nor does it need to).
+ if (request == ptrace_getregs)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, data, struct_user_regs_struct_sz);
+ else if (request == ptrace_getfpregs)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, data, struct_user_fpregs_struct_sz);
+ else if (request == ptrace_getfpxregs)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, data, struct_user_fpxregs_struct_sz);
+ else if (request == ptrace_getvfpregs)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, data, struct_user_vfpregs_struct_sz);
+ else if (request == ptrace_getsiginfo)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, data, siginfo_t_sz);
+ else if (request == ptrace_geteventmsg)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, data, sizeof(unsigned long));
+ else if (request == ptrace_getregset) {
+ __sanitizer_iovec *iovec = (__sanitizer_iovec*)data;
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, iovec, sizeof(*iovec));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, local_iovec.iov_base,
+ local_iovec.iov_len);
+ }
+ }
+ return res;
+}
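+// Illustrative PTRACE_GETREGSET usage showing why local_iovec is saved
+// before the call (Linux-specific; NT_PRSTATUS is just an example regset):
+//
+//   struct iovec iov = { &regs, sizeof(regs) };
+//   ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov);
+//
+// The kernel may update iov.iov_len (and, per the comment above, zero
+// iov_base on a bad write), so the buffer is unpoisoned through the saved
+// copy rather than the possibly-modified iovec.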
+
+#define INIT_PTRACE COMMON_INTERCEPT_FUNCTION(ptrace);
+#else
+#define INIT_PTRACE
+#endif
+
+#if SANITIZER_INTERCEPT_SETLOCALE
+static void unpoison_ctype_arrays(void *ctx) {
+#if SANITIZER_NETBSD
+ // These arrays hold 257 entries: 256 for the unsigned char range plus
+ // one for EOF.
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, _ctype_tab_, 257 * sizeof(short));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, _toupper_tab_, 257 * sizeof(short));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, _tolower_tab_, 257 * sizeof(short));
+#endif
+}
+
+INTERCEPTOR(char *, setlocale, int category, char *locale) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, setlocale, category, locale);
+ if (locale)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, locale, REAL(strlen)(locale) + 1);
+ char *res = REAL(setlocale)(category, locale);
+ if (res) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ unpoison_ctype_arrays(ctx);
+ }
+ return res;
+}
+
+#define INIT_SETLOCALE COMMON_INTERCEPT_FUNCTION(setlocale);
+#else
+#define INIT_SETLOCALE
+#endif
+
+#if SANITIZER_INTERCEPT_GETCWD
+INTERCEPTOR(char *, getcwd, char *buf, SIZE_T size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getcwd, buf, size);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ char *res = REAL(getcwd)(buf, size);
+ if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ return res;
+}
+#define INIT_GETCWD COMMON_INTERCEPT_FUNCTION(getcwd);
+#else
+#define INIT_GETCWD
+#endif
+
+#if SANITIZER_INTERCEPT_GET_CURRENT_DIR_NAME
+INTERCEPTOR(char *, get_current_dir_name, int fake) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, get_current_dir_name, fake);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ char *res = REAL(get_current_dir_name)(fake);
+ if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ return res;
+}
+
+#define INIT_GET_CURRENT_DIR_NAME \
+ COMMON_INTERCEPT_FUNCTION(get_current_dir_name);
+#else
+#define INIT_GET_CURRENT_DIR_NAME
+#endif
+
+UNUSED static inline void FixRealStrtolEndptr(const char *nptr, char **endptr) {
+ CHECK(endptr);
+ if (nptr == *endptr) {
+ // No digits were consumed by the strtol call, so find the last symbol
+ // it accessed ourselves by skipping leading blanks and an optional
+ // +/- sign.
+ while (IsSpace(*nptr)) nptr++;
+ if (*nptr == '+' || *nptr == '-') nptr++;
+ *endptr = const_cast<char *>(nptr);
+ }
+ CHECK(*endptr >= nptr);
+}
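+// Worked example (illustrative): for strtol("  +xyz", &end, 10) no digits
+// are consumed, so libc sets end == nptr even though the blanks and the
+// sign were read. The fixup above advances *endptr past "  +" so the
+// READ_STRING check in StrtolFixAndCheck covers every byte actually
+// accessed.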
+
+UNUSED static inline void StrtolFixAndCheck(void *ctx, const char *nptr,
+ char **endptr, char *real_endptr, int base) {
+ if (endptr) {
+ *endptr = real_endptr;
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, endptr, sizeof(*endptr));
+ }
+ // If base has an unsupported value, strtol may fail with EINVAL without
+ // reading any characters, so do the additional checks only when base is
+ // valid.
+ bool is_valid_base = (base == 0) || (2 <= base && base <= 36);
+ if (is_valid_base) {
+ FixRealStrtolEndptr(nptr, &real_endptr);
+ }
+ COMMON_INTERCEPTOR_READ_STRING(ctx, nptr, is_valid_base ?
+ (real_endptr - nptr) + 1 : 0);
+}
+
+#if SANITIZER_INTERCEPT_STRTOIMAX
+INTERCEPTOR(INTMAX_T, strtoimax, const char *nptr, char **endptr, int base) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strtoimax, nptr, endptr, base);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ char *real_endptr;
+ INTMAX_T res = REAL(strtoimax)(nptr, &real_endptr, base);
+ StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base);
+ return res;
+}
+
+INTERCEPTOR(UINTMAX_T, strtoumax, const char *nptr, char **endptr, int base) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strtoumax, nptr, endptr, base);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ char *real_endptr;
+ UINTMAX_T res = REAL(strtoumax)(nptr, &real_endptr, base);
+ StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base);
+ return res;
+}
+
+#define INIT_STRTOIMAX \
+ COMMON_INTERCEPT_FUNCTION(strtoimax); \
+ COMMON_INTERCEPT_FUNCTION(strtoumax);
+#else
+#define INIT_STRTOIMAX
+#endif
+
+#if SANITIZER_INTERCEPT_MBSTOWCS
+INTERCEPTOR(SIZE_T, mbstowcs, wchar_t *dest, const char *src, SIZE_T len) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, mbstowcs, dest, src, len);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ SIZE_T res = REAL(mbstowcs)(dest, src, len);
+ if (res != (SIZE_T)(-1) && dest) {
+ // res < len means the terminating L'\0' was also written; count it.
+ SIZE_T write_cnt = res + (res < len);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dest, write_cnt * sizeof(wchar_t));
+ }
+ return res;
+}
+
+INTERCEPTOR(SIZE_T, mbsrtowcs, wchar_t *dest, const char **src, SIZE_T len,
+ void *ps) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, mbsrtowcs, dest, src, len, ps);
+ if (src) COMMON_INTERCEPTOR_READ_RANGE(ctx, src, sizeof(*src));
+ if (ps) COMMON_INTERCEPTOR_READ_RANGE(ctx, ps, mbstate_t_sz);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ SIZE_T res = REAL(mbsrtowcs)(dest, src, len, ps);
+ if (res != (SIZE_T)(-1) && dest && src) {
+ // This function, and several others, may or may not write the terminating
+ // \0 character. They write it iff they clear *src.
+ SIZE_T write_cnt = res + !*src;
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dest, write_cnt * sizeof(wchar_t));
+ }
+ return res;
+}
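+// Illustrative example of the terminator rule above (standard mbsrtowcs
+// semantics): converting "ab" with len >= 3 consumes the whole string,
+// sets *src to NULL and stores L"ab\0" (res == 2, write_cnt == 3); with
+// len == 1 it stops early, leaves *src pointing at 'b' and stores only
+// L'a' (res == 1, write_cnt == 1).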
+
+#define INIT_MBSTOWCS \
+ COMMON_INTERCEPT_FUNCTION(mbstowcs); \
+ COMMON_INTERCEPT_FUNCTION(mbsrtowcs);
+#else
+#define INIT_MBSTOWCS
+#endif
+
+#if SANITIZER_INTERCEPT_MBSNRTOWCS
+INTERCEPTOR(SIZE_T, mbsnrtowcs, wchar_t *dest, const char **src, SIZE_T nms,
+ SIZE_T len, void *ps) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, mbsnrtowcs, dest, src, nms, len, ps);
+ if (src) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, sizeof(*src));
+ if (nms) COMMON_INTERCEPTOR_READ_RANGE(ctx, *src, nms);
+ }
+ if (ps) COMMON_INTERCEPTOR_READ_RANGE(ctx, ps, mbstate_t_sz);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ SIZE_T res = REAL(mbsnrtowcs)(dest, src, nms, len, ps);
+ if (res != (SIZE_T)(-1) && dest && src) {
+ SIZE_T write_cnt = res + !*src;
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dest, write_cnt * sizeof(wchar_t));
+ }
+ return res;
+}
+
+#define INIT_MBSNRTOWCS COMMON_INTERCEPT_FUNCTION(mbsnrtowcs);
+#else
+#define INIT_MBSNRTOWCS
+#endif
+
+#if SANITIZER_INTERCEPT_WCSTOMBS
+INTERCEPTOR(SIZE_T, wcstombs, char *dest, const wchar_t *src, SIZE_T len) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, wcstombs, dest, src, len);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ SIZE_T res = REAL(wcstombs)(dest, src, len);
+ if (res != (SIZE_T)(-1) && dest) {
+ // res < len means the terminating '\0' was also written; count it.
+ SIZE_T write_cnt = res + (res < len);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dest, write_cnt);
+ }
+ return res;
+}
+
+INTERCEPTOR(SIZE_T, wcsrtombs, char *dest, const wchar_t **src, SIZE_T len,
+ void *ps) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, wcsrtombs, dest, src, len, ps);
+ if (src) COMMON_INTERCEPTOR_READ_RANGE(ctx, src, sizeof(*src));
+ if (ps) COMMON_INTERCEPTOR_READ_RANGE(ctx, ps, mbstate_t_sz);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ SIZE_T res = REAL(wcsrtombs)(dest, src, len, ps);
+ if (res != (SIZE_T)(-1) && dest && src) {
+ // As with mbsrtowcs above: the terminator is written iff *src was cleared.
+ SIZE_T write_cnt = res + !*src;
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dest, write_cnt);
+ }
+ return res;
+}
+
+#define INIT_WCSTOMBS \
+ COMMON_INTERCEPT_FUNCTION(wcstombs); \
+ COMMON_INTERCEPT_FUNCTION(wcsrtombs);
+#else
+#define INIT_WCSTOMBS
+#endif
+
+#if SANITIZER_INTERCEPT_WCSNRTOMBS
+INTERCEPTOR(SIZE_T, wcsnrtombs, char *dest, const wchar_t **src, SIZE_T nms,
+ SIZE_T len, void *ps) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, wcsnrtombs, dest, src, nms, len, ps);
+ if (src) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, sizeof(*src));
+ if (nms) COMMON_INTERCEPTOR_READ_RANGE(ctx, *src, nms);
+ }
+ if (ps) COMMON_INTERCEPTOR_READ_RANGE(ctx, ps, mbstate_t_sz);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ SIZE_T res = REAL(wcsnrtombs)(dest, src, nms, len, ps);
+ if (res != ((SIZE_T)-1) && dest && src) {
+ SIZE_T write_cnt = res + !*src;
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dest, write_cnt);
+ }
+ return res;
+}
+
+#define INIT_WCSNRTOMBS COMMON_INTERCEPT_FUNCTION(wcsnrtombs);
+#else
+#define INIT_WCSNRTOMBS
+#endif
+
+#if SANITIZER_INTERCEPT_WCRTOMB
+INTERCEPTOR(SIZE_T, wcrtomb, char *dest, wchar_t src, void *ps) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, wcrtomb, dest, src, ps);
+ if (ps) COMMON_INTERCEPTOR_READ_RANGE(ctx, ps, mbstate_t_sz);
+
+ if (!dest)
+ return REAL(wcrtomb)(dest, src, ps);
+
+ char local_dest[32];
+ SIZE_T res = REAL(wcrtomb)(local_dest, src, ps);
+ if (res != ((SIZE_T)-1)) {
+ CHECK_LE(res, sizeof(local_dest));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dest, res);
+ REAL(memcpy)(dest, local_dest, res);
+ }
+ return res;
+}
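+// Design note: the bounce buffer above lets the interceptor learn exactly
+// how many bytes wcrtomb produced before touching the user buffer, so
+// only `res` bytes are unpoisoned and copied. A minimal sketch of the
+// pattern (illustrative; 32 comfortably exceeds MB_LEN_MAX in practice):
+//
+//   char tmp[32];
+//   SIZE_T n = REAL(wcrtomb)(tmp, wc, ps);
+//   if (n != (SIZE_T)-1) REAL(memcpy)(dest, tmp, n);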
+
+#define INIT_WCRTOMB COMMON_INTERCEPT_FUNCTION(wcrtomb);
+#else
+#define INIT_WCRTOMB
+#endif
+
+#if SANITIZER_INTERCEPT_WCTOMB
+INTERCEPTOR(int, wctomb, char *dest, wchar_t src) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, wctomb, dest, src);
+ if (!dest)
+ return REAL(wctomb)(dest, src);
+
+ char local_dest[32];
+ int res = REAL(wctomb)(local_dest, src);
+ if (res != -1) {
+ CHECK_LE(res, sizeof(local_dest));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dest, res);
+ REAL(memcpy)(dest, local_dest, res);
+ }
+ return res;
+}
+
+#define INIT_WCTOMB COMMON_INTERCEPT_FUNCTION(wctomb);
+#else
+#define INIT_WCTOMB
+#endif
+
+#if SANITIZER_INTERCEPT_TCGETATTR
+INTERCEPTOR(int, tcgetattr, int fd, void *termios_p) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, tcgetattr, fd, termios_p);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(tcgetattr)(fd, termios_p);
+ if (!res && termios_p)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, termios_p, struct_termios_sz);
+ return res;
+}
+
+#define INIT_TCGETATTR COMMON_INTERCEPT_FUNCTION(tcgetattr);
+#else
+#define INIT_TCGETATTR
+#endif
+
+#if SANITIZER_INTERCEPT_REALPATH
+INTERCEPTOR(char *, realpath, const char *path, char *resolved_path) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, realpath, path, resolved_path);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+
+ // Work around a bug in glibc where dlsym(RTLD_NEXT, ...) returns the
+ // oldest version of a versioned symbol. For realpath(), this yields
+ // something (called __old_realpath) that does not handle NULL as the
+ // second argument, so handle that case in the interceptor itself.
+ char *allocated_path = nullptr;
+ if (!resolved_path)
+ allocated_path = resolved_path = (char *)WRAP(malloc)(path_max + 1);
+
+ char *res = REAL(realpath)(path, resolved_path);
+ if (allocated_path && !res) WRAP(free)(allocated_path);
+ if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ return res;
+}
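+// Illustrative use of the NULL-resolved_path extension that the malloc
+// above emulates for the old glibc symbol (the caller frees the result):
+//
+//   char *p = realpath("/etc/../etc/passwd", nullptr);
+//   if (p) { /* p == "/etc/passwd" */ free(p); }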
+#define INIT_REALPATH COMMON_INTERCEPT_FUNCTION(realpath);
+#else
+#define INIT_REALPATH
+#endif
+
+#if SANITIZER_INTERCEPT_CANONICALIZE_FILE_NAME
+INTERCEPTOR(char *, canonicalize_file_name, const char *path) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, canonicalize_file_name, path);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ char *res = REAL(canonicalize_file_name)(path);
+ if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ return res;
+}
+#define INIT_CANONICALIZE_FILE_NAME \
+ COMMON_INTERCEPT_FUNCTION(canonicalize_file_name);
+#else
+#define INIT_CANONICALIZE_FILE_NAME
+#endif
+
+#if SANITIZER_INTERCEPT_CONFSTR
+INTERCEPTOR(SIZE_T, confstr, int name, char *buf, SIZE_T len) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, confstr, name, buf, len);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ SIZE_T res = REAL(confstr)(name, buf, len);
+ if (buf && res)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, res < len ? res : len);
+ return res;
+}
+#define INIT_CONFSTR COMMON_INTERCEPT_FUNCTION(confstr);
+#else
+#define INIT_CONFSTR
+#endif
+
+#if SANITIZER_INTERCEPT_SCHED_GETAFFINITY
+INTERCEPTOR(int, sched_getaffinity, int pid, SIZE_T cpusetsize, void *mask) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sched_getaffinity, pid, cpusetsize, mask);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(sched_getaffinity)(pid, cpusetsize, mask);
+ if (mask && !res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, mask, cpusetsize);
+ return res;
+}
+#define INIT_SCHED_GETAFFINITY COMMON_INTERCEPT_FUNCTION(sched_getaffinity);
+#else
+#define INIT_SCHED_GETAFFINITY
+#endif
+
+#if SANITIZER_INTERCEPT_SCHED_GETPARAM
+INTERCEPTOR(int, sched_getparam, int pid, void *param) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sched_getparam, pid, param);
+ int res = REAL(sched_getparam)(pid, param);
+ if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, param, struct_sched_param_sz);
+ return res;
+}
+#define INIT_SCHED_GETPARAM COMMON_INTERCEPT_FUNCTION(sched_getparam);
+#else
+#define INIT_SCHED_GETPARAM
+#endif
+
+#if SANITIZER_INTERCEPT_STRERROR
+INTERCEPTOR(char *, strerror, int errnum) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strerror, errnum);
+ char *res = REAL(strerror)(errnum);
+ if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
+ return res;
+}
+#define INIT_STRERROR COMMON_INTERCEPT_FUNCTION(strerror);
+#else
+#define INIT_STRERROR
+#endif
+
+#if SANITIZER_INTERCEPT_STRERROR_R
+// There are two versions of strerror_r:
+// * The POSIX version returns 0 on success and an error number on failure
+//   (older implementations may return -1 and set errno); it writes the
+//   message to buf.
+// * The GNU version returns a message pointer, which points either to buf
+//   or to static storage.
+#if ((_POSIX_C_SOURCE >= 200112L || _XOPEN_SOURCE >= 600) && !_GNU_SOURCE) || \
+ SANITIZER_MAC || SANITIZER_ANDROID || SANITIZER_NETBSD || \
+ SANITIZER_FREEBSD || SANITIZER_OPENBSD
+// POSIX version. The spec is unclear on whether buf is null-terminated.
+// At least on OS X, buf contents are valid even when the call fails.
+INTERCEPTOR(int, strerror_r, int errnum, char *buf, SIZE_T buflen) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strerror_r, errnum, buf, buflen);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(strerror_r)(errnum, buf, buflen);
+
+ SIZE_T sz = internal_strnlen(buf, buflen);
+ if (sz < buflen) ++sz;
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, sz);
+ return res;
+}
+#else
+// GNU version.
+INTERCEPTOR(char *, strerror_r, int errnum, char *buf, SIZE_T buflen) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strerror_r, errnum, buf, buflen);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ char *res = REAL(strerror_r)(errnum, buf, buflen);
+ if (res == buf)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ else
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
+ return res;
+}
+#endif  // (_POSIX_C_SOURCE >= 200112L || _XOPEN_SOURCE >= 600) &&
+        // !_GNU_SOURCE || SANITIZER_MAC
+#define INIT_STRERROR_R COMMON_INTERCEPT_FUNCTION(strerror_r);
+#else
+#define INIT_STRERROR_R
+#endif
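+// Illustrative contrast of the two flavors dispatched above (standard
+// semantics; the message text itself varies by libc):
+//
+//   char buf[64];
+//   // POSIX flavor:
+//   int rc = strerror_r(EINVAL, buf, sizeof(buf));    // 0, message in buf
+//   // GNU flavor:
+//   char *msg = strerror_r(EINVAL, buf, sizeof(buf)); // msg may be buf or
+//                                                     // static storage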
+
+#if SANITIZER_INTERCEPT_XPG_STRERROR_R
+INTERCEPTOR(int, __xpg_strerror_r, int errnum, char *buf, SIZE_T buflen) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __xpg_strerror_r, errnum, buf, buflen);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(__xpg_strerror_r)(errnum, buf, buflen);
+ // This version always returns a null-terminated string.
+ if (buf && buflen)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, REAL(strlen)(buf) + 1);
+ return res;
+}
+#define INIT_XPG_STRERROR_R COMMON_INTERCEPT_FUNCTION(__xpg_strerror_r);
+#else
+#define INIT_XPG_STRERROR_R
+#endif
+
+#if SANITIZER_INTERCEPT_SCANDIR
+typedef int (*scandir_filter_f)(const struct __sanitizer_dirent *);
+typedef int (*scandir_compar_f)(const struct __sanitizer_dirent **,
+ const struct __sanitizer_dirent **);
+
+static THREADLOCAL scandir_filter_f scandir_filter;
+static THREADLOCAL scandir_compar_f scandir_compar;
+
+static int wrapped_scandir_filter(const struct __sanitizer_dirent *dir) {
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(1);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(dir, dir->d_reclen);
+ return scandir_filter(dir);
+}
+
+static int wrapped_scandir_compar(const struct __sanitizer_dirent **a,
+ const struct __sanitizer_dirent **b) {
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(2);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(a, sizeof(*a));
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(*a, (*a)->d_reclen);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(b, sizeof(*b));
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(*b, (*b)->d_reclen);
+ return scandir_compar(a, b);
+}
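+// Design note: scandir() takes bare function pointers, so there is no way
+// to thread the real user callbacks through to the wrappers. The
+// THREADLOCAL slots above serve as per-thread closure state: the
+// interceptor stashes the user's filter/compar there, hands these
+// trampolines to REAL(scandir), and each trampoline unpoisons its dirent
+// arguments before forwarding. THREADLOCAL keeps concurrent scandir calls
+// on different threads from clobbering each other's callbacks.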
+
+INTERCEPTOR(int, scandir, char *dirp, __sanitizer_dirent ***namelist,
+ scandir_filter_f filter, scandir_compar_f compar) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, scandir, dirp, namelist, filter, compar);
+ if (dirp) COMMON_INTERCEPTOR_READ_RANGE(ctx, dirp, REAL(strlen)(dirp) + 1);
+ scandir_filter = filter;
+ scandir_compar = compar;
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(scandir)(dirp, namelist,
+ filter ? wrapped_scandir_filter : nullptr,
+ compar ? wrapped_scandir_compar : nullptr);
+ scandir_filter = nullptr;
+ scandir_compar = nullptr;
+ if (namelist && res > 0) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, namelist, sizeof(*namelist));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *namelist, sizeof(**namelist) * res);
+ for (int i = 0; i < res; ++i)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, (*namelist)[i],
+ (*namelist)[i]->d_reclen);
+ }
+ return res;
+}
+#define INIT_SCANDIR COMMON_INTERCEPT_FUNCTION(scandir);
+#else
+#define INIT_SCANDIR
+#endif
+
+#if SANITIZER_INTERCEPT_SCANDIR64
+typedef int (*scandir64_filter_f)(const struct __sanitizer_dirent64 *);
+typedef int (*scandir64_compar_f)(const struct __sanitizer_dirent64 **,
+ const struct __sanitizer_dirent64 **);
+
+static THREADLOCAL scandir64_filter_f scandir64_filter;
+static THREADLOCAL scandir64_compar_f scandir64_compar;
+
+static int wrapped_scandir64_filter(const struct __sanitizer_dirent64 *dir) {
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(1);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(dir, dir->d_reclen);
+ return scandir64_filter(dir);
+}
+
+static int wrapped_scandir64_compar(const struct __sanitizer_dirent64 **a,
+ const struct __sanitizer_dirent64 **b) {
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(2);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(a, sizeof(*a));
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(*a, (*a)->d_reclen);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(b, sizeof(*b));
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(*b, (*b)->d_reclen);
+ return scandir64_compar(a, b);
+}
+
+INTERCEPTOR(int, scandir64, char *dirp, __sanitizer_dirent64 ***namelist,
+ scandir64_filter_f filter, scandir64_compar_f compar) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, scandir64, dirp, namelist, filter, compar);
+ if (dirp) COMMON_INTERCEPTOR_READ_RANGE(ctx, dirp, REAL(strlen)(dirp) + 1);
+ scandir64_filter = filter;
+ scandir64_compar = compar;
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res =
+ REAL(scandir64)(dirp, namelist,
+ filter ? wrapped_scandir64_filter : nullptr,
+ compar ? wrapped_scandir64_compar : nullptr);
+ scandir64_filter = nullptr;
+ scandir64_compar = nullptr;
+ if (namelist && res > 0) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, namelist, sizeof(*namelist));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *namelist, sizeof(**namelist) * res);
+ for (int i = 0; i < res; ++i)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, (*namelist)[i],
+ (*namelist)[i]->d_reclen);
+ }
+ return res;
+}
+#define INIT_SCANDIR64 COMMON_INTERCEPT_FUNCTION(scandir64);
+#else
+#define INIT_SCANDIR64
+#endif
+
+#if SANITIZER_INTERCEPT_GETGROUPS
+INTERCEPTOR(int, getgroups, int size, u32 *lst) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getgroups, size, lst);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(getgroups)(size, lst);
+ if (res >= 0 && lst && size > 0)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, lst, res * sizeof(*lst));
+ return res;
+}
+#define INIT_GETGROUPS COMMON_INTERCEPT_FUNCTION(getgroups);
+#else
+#define INIT_GETGROUPS
+#endif
+
+#if SANITIZER_INTERCEPT_POLL
+static void read_pollfd(void *ctx, __sanitizer_pollfd *fds,
+ __sanitizer_nfds_t nfds) {
+ for (unsigned i = 0; i < nfds; ++i) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, &fds[i].fd, sizeof(fds[i].fd));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, &fds[i].events, sizeof(fds[i].events));
+ }
+}
+
+static void write_pollfd(void *ctx, __sanitizer_pollfd *fds,
+ __sanitizer_nfds_t nfds) {
+ for (unsigned i = 0; i < nfds; ++i)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, &fds[i].revents,
+ sizeof(fds[i].revents));
+}
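+// Illustrative pollfd flow matching the helpers above: the caller fills
+// fd/events (covered by read_pollfd) and the kernel fills revents
+// (covered by write_pollfd). `sock` is a hypothetical descriptor:
+//
+//   struct pollfd pfd = { sock, POLLIN, 0 };
+//   if (poll(&pfd, 1, 1000) > 0 && (pfd.revents & POLLIN))
+//     ;  // socket is readable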
+
+INTERCEPTOR(int, poll, __sanitizer_pollfd *fds, __sanitizer_nfds_t nfds,
+ int timeout) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, poll, fds, nfds, timeout);
+ if (fds && nfds) read_pollfd(ctx, fds, nfds);
+ int res = COMMON_INTERCEPTOR_BLOCK_REAL(poll)(fds, nfds, timeout);
+ if (fds && nfds) write_pollfd(ctx, fds, nfds);
+ return res;
+}
+#define INIT_POLL COMMON_INTERCEPT_FUNCTION(poll);
+#else
+#define INIT_POLL
+#endif
+
+#if SANITIZER_INTERCEPT_PPOLL
+INTERCEPTOR(int, ppoll, __sanitizer_pollfd *fds, __sanitizer_nfds_t nfds,
+ void *timeout_ts, __sanitizer_sigset_t *sigmask) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, ppoll, fds, nfds, timeout_ts, sigmask);
+ if (fds && nfds) read_pollfd(ctx, fds, nfds);
+ if (timeout_ts)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, timeout_ts, struct_timespec_sz);
+ if (sigmask) COMMON_INTERCEPTOR_READ_RANGE(ctx, sigmask, sizeof(*sigmask));
+ int res =
+ COMMON_INTERCEPTOR_BLOCK_REAL(ppoll)(fds, nfds, timeout_ts, sigmask);
+ if (fds && nfds) write_pollfd(ctx, fds, nfds);
+ return res;
+}
+#define INIT_PPOLL COMMON_INTERCEPT_FUNCTION(ppoll);
+#else
+#define INIT_PPOLL
+#endif
+
+#if SANITIZER_INTERCEPT_WORDEXP
+INTERCEPTOR(int, wordexp, char *s, __sanitizer_wordexp_t *p, int flags) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, wordexp, s, p, flags);
+ if (s) COMMON_INTERCEPTOR_READ_RANGE(ctx, s, REAL(strlen)(s) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(wordexp)(s, p, flags);
+ if (!res && p) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p));
+ if (p->we_wordc)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->we_wordv,
+ sizeof(*p->we_wordv) * p->we_wordc);
+ for (uptr i = 0; i < p->we_wordc; ++i) {
+ char *w = p->we_wordv[i];
+ if (w) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, w, REAL(strlen)(w) + 1);
+ }
+ }
+ return res;
+}
+#define INIT_WORDEXP COMMON_INTERCEPT_FUNCTION(wordexp);
+#else
+#define INIT_WORDEXP
+#endif
+
+#if SANITIZER_INTERCEPT_SIGWAIT
+INTERCEPTOR(int, sigwait, __sanitizer_sigset_t *set, int *sig) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sigwait, set, sig);
+ if (set) COMMON_INTERCEPTOR_READ_RANGE(ctx, set, sizeof(*set));
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(sigwait)(set, sig);
+ if (!res && sig) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sig, sizeof(*sig));
+ return res;
+}
+#define INIT_SIGWAIT COMMON_INTERCEPT_FUNCTION(sigwait);
+#else
+#define INIT_SIGWAIT
+#endif
+
+#if SANITIZER_INTERCEPT_SIGWAITINFO
+INTERCEPTOR(int, sigwaitinfo, __sanitizer_sigset_t *set, void *info) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sigwaitinfo, set, info);
+ if (set) COMMON_INTERCEPTOR_READ_RANGE(ctx, set, sizeof(*set));
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(sigwaitinfo)(set, info);
+ if (res > 0 && info) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, info, siginfo_t_sz);
+ return res;
+}
+#define INIT_SIGWAITINFO COMMON_INTERCEPT_FUNCTION(sigwaitinfo);
+#else
+#define INIT_SIGWAITINFO
+#endif
+
+#if SANITIZER_INTERCEPT_SIGTIMEDWAIT
+INTERCEPTOR(int, sigtimedwait, __sanitizer_sigset_t *set, void *info,
+ void *timeout) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sigtimedwait, set, info, timeout);
+ if (timeout) COMMON_INTERCEPTOR_READ_RANGE(ctx, timeout, struct_timespec_sz);
+ if (set) COMMON_INTERCEPTOR_READ_RANGE(ctx, set, sizeof(*set));
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(sigtimedwait)(set, info, timeout);
+ if (res > 0 && info) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, info, siginfo_t_sz);
+ return res;
+}
+#define INIT_SIGTIMEDWAIT COMMON_INTERCEPT_FUNCTION(sigtimedwait);
+#else
+#define INIT_SIGTIMEDWAIT
+#endif
+
+#if SANITIZER_INTERCEPT_SIGSETOPS
+INTERCEPTOR(int, sigemptyset, __sanitizer_sigset_t *set) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sigemptyset, set);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(sigemptyset)(set);
+ if (!res && set) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, set, sizeof(*set));
+ return res;
+}
+
+INTERCEPTOR(int, sigfillset, __sanitizer_sigset_t *set) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sigfillset, set);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(sigfillset)(set);
+ if (!res && set) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, set, sizeof(*set));
+ return res;
+}
+#define INIT_SIGSETOPS \
+ COMMON_INTERCEPT_FUNCTION(sigemptyset); \
+ COMMON_INTERCEPT_FUNCTION(sigfillset);
+#else
+#define INIT_SIGSETOPS
+#endif
+
+#if SANITIZER_INTERCEPT_SIGPENDING
+INTERCEPTOR(int, sigpending, __sanitizer_sigset_t *set) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sigpending, set);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(sigpending)(set);
+ if (!res && set) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, set, sizeof(*set));
+ return res;
+}
+#define INIT_SIGPENDING COMMON_INTERCEPT_FUNCTION(sigpending);
+#else
+#define INIT_SIGPENDING
+#endif
+
+#if SANITIZER_INTERCEPT_SIGPROCMASK
+INTERCEPTOR(int, sigprocmask, int how, __sanitizer_sigset_t *set,
+ __sanitizer_sigset_t *oldset) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sigprocmask, how, set, oldset);
+ if (set) COMMON_INTERCEPTOR_READ_RANGE(ctx, set, sizeof(*set));
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(sigprocmask)(how, set, oldset);
+ if (!res && oldset)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, oldset, sizeof(*oldset));
+ return res;
+}
+#define INIT_SIGPROCMASK COMMON_INTERCEPT_FUNCTION(sigprocmask);
+#else
+#define INIT_SIGPROCMASK
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_SIGMASK
+INTERCEPTOR(int, pthread_sigmask, int how, __sanitizer_sigset_t *set,
+ __sanitizer_sigset_t *oldset) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, pthread_sigmask, how, set, oldset);
+ if (set) COMMON_INTERCEPTOR_READ_RANGE(ctx, set, sizeof(*set));
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(pthread_sigmask)(how, set, oldset);
+ if (!res && oldset)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, oldset, sizeof(*oldset));
+ return res;
+}
+#define INIT_PTHREAD_SIGMASK COMMON_INTERCEPT_FUNCTION(pthread_sigmask);
+#else
+#define INIT_PTHREAD_SIGMASK
+#endif
+
+#if SANITIZER_INTERCEPT_BACKTRACE
+INTERCEPTOR(int, backtrace, void **buffer, int size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, backtrace, buffer, size);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(backtrace)(buffer, size);
+ if (res && buffer)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buffer, res * sizeof(*buffer));
+ return res;
+}
+
+INTERCEPTOR(char **, backtrace_symbols, void **buffer, int size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, backtrace_symbols, buffer, size);
+ if (buffer && size)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, buffer, size * sizeof(*buffer));
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ char **res = REAL(backtrace_symbols)(buffer, size);
+ if (res && size) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, size * sizeof(*res));
+ for (int i = 0; i < size; ++i)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res[i], REAL(strlen)(res[i]) + 1);
+ }
+ return res;
+}
+#define INIT_BACKTRACE \
+ COMMON_INTERCEPT_FUNCTION(backtrace); \
+ COMMON_INTERCEPT_FUNCTION(backtrace_symbols);
+#else
+#define INIT_BACKTRACE
+#endif
+
+#if SANITIZER_INTERCEPT__EXIT
+INTERCEPTOR(void, _exit, int status) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, _exit, status);
+ COMMON_INTERCEPTOR_USER_CALLBACK_START();
+ int status1 = COMMON_INTERCEPTOR_ON_EXIT(ctx);
+ COMMON_INTERCEPTOR_USER_CALLBACK_END();
+ if (status == 0) status = status1;
+ REAL(_exit)(status);
+}
+#define INIT__EXIT COMMON_INTERCEPT_FUNCTION(_exit);
+#else
+#define INIT__EXIT
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_MUTEX
+INTERCEPTOR(int, pthread_mutex_lock, void *m) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, pthread_mutex_lock, m);
+ COMMON_INTERCEPTOR_MUTEX_PRE_LOCK(ctx, m);
+ int res = REAL(pthread_mutex_lock)(m);
+ if (res == errno_EOWNERDEAD)
+ COMMON_INTERCEPTOR_MUTEX_REPAIR(ctx, m);
+ if (res == 0 || res == errno_EOWNERDEAD)
+ COMMON_INTERCEPTOR_MUTEX_POST_LOCK(ctx, m);
+ if (res == errno_EINVAL)
+ COMMON_INTERCEPTOR_MUTEX_INVALID(ctx, m);
+ return res;
+}
+
+INTERCEPTOR(int, pthread_mutex_unlock, void *m) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, pthread_mutex_unlock, m);
+ COMMON_INTERCEPTOR_MUTEX_UNLOCK(ctx, m);
+ int res = REAL(pthread_mutex_unlock)(m);
+ if (res == errno_EINVAL)
+ COMMON_INTERCEPTOR_MUTEX_INVALID(ctx, m);
+ return res;
+}
+
+#define INIT_PTHREAD_MUTEX_LOCK COMMON_INTERCEPT_FUNCTION(pthread_mutex_lock)
+#define INIT_PTHREAD_MUTEX_UNLOCK \
+ COMMON_INTERCEPT_FUNCTION(pthread_mutex_unlock)
+#else
+#define INIT_PTHREAD_MUTEX_LOCK
+#define INIT_PTHREAD_MUTEX_UNLOCK
+#endif
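+
+// errno_EOWNERDEAD above is the robust-mutex case: the lock is acquired even
+// though the previous owner died, so the race detector must still record a
+// lock event (hence MUTEX_POST_LOCK fires for both 0 and errno_EOWNERDEAD).
+// A hypothetical caller (comment-only sketch):
+//   int rc = pthread_mutex_lock(&m);
+//   if (rc == EOWNERDEAD) {          // we own m; shared state is suspect
+//     recover_state();               // hypothetical recovery step
+//     pthread_mutex_consistent(&m);  // mark repaired before unlocking
+//   }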
+
+#if SANITIZER_INTERCEPT___PTHREAD_MUTEX
+INTERCEPTOR(int, __pthread_mutex_lock, void *m) {
+ return WRAP(pthread_mutex_lock)(m);
+}
+
+INTERCEPTOR(int, __pthread_mutex_unlock, void *m) {
+ return WRAP(pthread_mutex_unlock)(m);
+}
+
+#define INIT___PTHREAD_MUTEX_LOCK \
+ COMMON_INTERCEPT_FUNCTION(__pthread_mutex_lock)
+#define INIT___PTHREAD_MUTEX_UNLOCK \
+ COMMON_INTERCEPT_FUNCTION(__pthread_mutex_unlock)
+#else
+#define INIT___PTHREAD_MUTEX_LOCK
+#define INIT___PTHREAD_MUTEX_UNLOCK
+#endif
+
+#if SANITIZER_INTERCEPT___LIBC_MUTEX
+INTERCEPTOR(int, __libc_mutex_lock, void *m)
+ALIAS(WRAPPER_NAME(pthread_mutex_lock));
+
+INTERCEPTOR(int, __libc_mutex_unlock, void *m)
+ALIAS(WRAPPER_NAME(pthread_mutex_unlock));
+
+INTERCEPTOR(int, __libc_thr_setcancelstate, int state, int *oldstate)
+ALIAS(WRAPPER_NAME(pthread_setcancelstate));
+
+#define INIT___LIBC_MUTEX_LOCK COMMON_INTERCEPT_FUNCTION(__libc_mutex_lock)
+#define INIT___LIBC_MUTEX_UNLOCK COMMON_INTERCEPT_FUNCTION(__libc_mutex_unlock)
+#define INIT___LIBC_THR_SETCANCELSTATE \
+ COMMON_INTERCEPT_FUNCTION(__libc_thr_setcancelstate)
+#else
+#define INIT___LIBC_MUTEX_LOCK
+#define INIT___LIBC_MUTEX_UNLOCK
+#define INIT___LIBC_THR_SETCANCELSTATE
+#endif
+
+#if SANITIZER_INTERCEPT_GETMNTENT || SANITIZER_INTERCEPT_GETMNTENT_R
+static void write_mntent(void *ctx, __sanitizer_mntent *mnt) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, mnt, sizeof(*mnt));
+ if (mnt->mnt_fsname)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, mnt->mnt_fsname,
+ REAL(strlen)(mnt->mnt_fsname) + 1);
+ if (mnt->mnt_dir)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, mnt->mnt_dir,
+ REAL(strlen)(mnt->mnt_dir) + 1);
+ if (mnt->mnt_type)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, mnt->mnt_type,
+ REAL(strlen)(mnt->mnt_type) + 1);
+ if (mnt->mnt_opts)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, mnt->mnt_opts,
+ REAL(strlen)(mnt->mnt_opts) + 1);
+}
+#endif
+
+#if SANITIZER_INTERCEPT_GETMNTENT
+INTERCEPTOR(__sanitizer_mntent *, getmntent, void *fp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getmntent, fp);
+ __sanitizer_mntent *res = REAL(getmntent)(fp);
+ if (res) write_mntent(ctx, res);
+ return res;
+}
+#define INIT_GETMNTENT COMMON_INTERCEPT_FUNCTION(getmntent);
+#else
+#define INIT_GETMNTENT
+#endif
+
+#if SANITIZER_INTERCEPT_GETMNTENT_R
+INTERCEPTOR(__sanitizer_mntent *, getmntent_r, void *fp,
+ __sanitizer_mntent *mntbuf, char *buf, int buflen) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getmntent_r, fp, mntbuf, buf, buflen);
+ __sanitizer_mntent *res = REAL(getmntent_r)(fp, mntbuf, buf, buflen);
+ if (res) write_mntent(ctx, res);
+ return res;
+}
+#define INIT_GETMNTENT_R COMMON_INTERCEPT_FUNCTION(getmntent_r);
+#else
+#define INIT_GETMNTENT_R
+#endif
+
+#if SANITIZER_INTERCEPT_STATFS
+INTERCEPTOR(int, statfs, char *path, void *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, statfs, path, buf);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(statfs)(path, buf);
+ if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statfs_sz);
+ return res;
+}
+INTERCEPTOR(int, fstatfs, int fd, void *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fstatfs, fd, buf);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(fstatfs)(fd, buf);
+ if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statfs_sz);
+ return res;
+}
+#define INIT_STATFS \
+ COMMON_INTERCEPT_FUNCTION(statfs); \
+ COMMON_INTERCEPT_FUNCTION(fstatfs);
+#else
+#define INIT_STATFS
+#endif
+
+#if SANITIZER_INTERCEPT_STATFS64
+INTERCEPTOR(int, statfs64, char *path, void *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, statfs64, path, buf);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(statfs64)(path, buf);
+ if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statfs64_sz);
+ return res;
+}
+INTERCEPTOR(int, fstatfs64, int fd, void *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fstatfs64, fd, buf);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(fstatfs64)(fd, buf);
+ if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statfs64_sz);
+ return res;
+}
+#define INIT_STATFS64 \
+ COMMON_INTERCEPT_FUNCTION(statfs64); \
+ COMMON_INTERCEPT_FUNCTION(fstatfs64);
+#else
+#define INIT_STATFS64
+#endif
+
+#if SANITIZER_INTERCEPT_STATVFS
+INTERCEPTOR(int, statvfs, char *path, void *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, statvfs, path, buf);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(statvfs)(path, buf);
+ if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statvfs_sz);
+ return res;
+}
+INTERCEPTOR(int, fstatvfs, int fd, void *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fstatvfs, fd, buf);
+ COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(fstatvfs)(fd, buf);
+ if (!res) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statvfs_sz);
+ if (fd >= 0)
+ COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
+ }
+ return res;
+}
+#define INIT_STATVFS \
+ COMMON_INTERCEPT_FUNCTION(statvfs); \
+ COMMON_INTERCEPT_FUNCTION(fstatvfs);
+#else
+#define INIT_STATVFS
+#endif
+
+#if SANITIZER_INTERCEPT_STATVFS64
+INTERCEPTOR(int, statvfs64, char *path, void *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, statvfs64, path, buf);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(statvfs64)(path, buf);
+ if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statvfs64_sz);
+ return res;
+}
+INTERCEPTOR(int, fstatvfs64, int fd, void *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fstatvfs64, fd, buf);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(fstatvfs64)(fd, buf);
+ if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statvfs64_sz);
+ return res;
+}
+#define INIT_STATVFS64 \
+ COMMON_INTERCEPT_FUNCTION(statvfs64); \
+ COMMON_INTERCEPT_FUNCTION(fstatvfs64);
+#else
+#define INIT_STATVFS64
+#endif
+
+#if SANITIZER_INTERCEPT_INITGROUPS
+INTERCEPTOR(int, initgroups, char *user, u32 group) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, initgroups, user, group);
+ if (user) COMMON_INTERCEPTOR_READ_RANGE(ctx, user, REAL(strlen)(user) + 1);
+ int res = REAL(initgroups)(user, group);
+ return res;
+}
+#define INIT_INITGROUPS COMMON_INTERCEPT_FUNCTION(initgroups);
+#else
+#define INIT_INITGROUPS
+#endif
+
+#if SANITIZER_INTERCEPT_ETHER_NTOA_ATON
+INTERCEPTOR(char *, ether_ntoa, __sanitizer_ether_addr *addr) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, ether_ntoa, addr);
+ if (addr) COMMON_INTERCEPTOR_READ_RANGE(ctx, addr, sizeof(*addr));
+ char *res = REAL(ether_ntoa)(addr);
+ if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
+ return res;
+}
+INTERCEPTOR(__sanitizer_ether_addr *, ether_aton, char *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, ether_aton, buf);
+ if (buf) COMMON_INTERCEPTOR_READ_RANGE(ctx, buf, REAL(strlen)(buf) + 1);
+ __sanitizer_ether_addr *res = REAL(ether_aton)(buf);
+ if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, sizeof(*res));
+ return res;
+}
+#define INIT_ETHER_NTOA_ATON \
+ COMMON_INTERCEPT_FUNCTION(ether_ntoa); \
+ COMMON_INTERCEPT_FUNCTION(ether_aton);
+#else
+#define INIT_ETHER_NTOA_ATON
+#endif
+
+#if SANITIZER_INTERCEPT_ETHER_HOST
+INTERCEPTOR(int, ether_ntohost, char *hostname, __sanitizer_ether_addr *addr) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, ether_ntohost, hostname, addr);
+ if (addr) COMMON_INTERCEPTOR_READ_RANGE(ctx, addr, sizeof(*addr));
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(ether_ntohost)(hostname, addr);
+ if (!res && hostname)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, hostname, REAL(strlen)(hostname) + 1);
+ return res;
+}
+INTERCEPTOR(int, ether_hostton, char *hostname, __sanitizer_ether_addr *addr) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, ether_hostton, hostname, addr);
+ if (hostname)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, hostname, REAL(strlen)(hostname) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(ether_hostton)(hostname, addr);
+ if (!res && addr) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, addr, sizeof(*addr));
+ return res;
+}
+INTERCEPTOR(int, ether_line, char *line, __sanitizer_ether_addr *addr,
+ char *hostname) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, ether_line, line, addr, hostname);
+ if (line) COMMON_INTERCEPTOR_READ_RANGE(ctx, line, REAL(strlen)(line) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(ether_line)(line, addr, hostname);
+ if (!res) {
+ if (addr) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, addr, sizeof(*addr));
+ if (hostname)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, hostname, REAL(strlen)(hostname) + 1);
+ }
+ return res;
+}
+#define INIT_ETHER_HOST \
+ COMMON_INTERCEPT_FUNCTION(ether_ntohost); \
+ COMMON_INTERCEPT_FUNCTION(ether_hostton); \
+ COMMON_INTERCEPT_FUNCTION(ether_line);
+#else
+#define INIT_ETHER_HOST
+#endif
+
+#if SANITIZER_INTERCEPT_ETHER_R
+INTERCEPTOR(char *, ether_ntoa_r, __sanitizer_ether_addr *addr, char *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, ether_ntoa_r, addr, buf);
+ if (addr) COMMON_INTERCEPTOR_READ_RANGE(ctx, addr, sizeof(*addr));
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ char *res = REAL(ether_ntoa_r)(addr, buf);
+ if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ return res;
+}
+INTERCEPTOR(__sanitizer_ether_addr *, ether_aton_r, char *buf,
+ __sanitizer_ether_addr *addr) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, ether_aton_r, buf, addr);
+ if (buf) COMMON_INTERCEPTOR_READ_RANGE(ctx, buf, REAL(strlen)(buf) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ __sanitizer_ether_addr *res = REAL(ether_aton_r)(buf, addr);
+ if (res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, sizeof(*res));
+ return res;
+}
+#define INIT_ETHER_R \
+ COMMON_INTERCEPT_FUNCTION(ether_ntoa_r); \
+ COMMON_INTERCEPT_FUNCTION(ether_aton_r);
+#else
+#define INIT_ETHER_R
+#endif
+
+#if SANITIZER_INTERCEPT_SHMCTL
+INTERCEPTOR(int, shmctl, int shmid, int cmd, void *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, shmctl, shmid, cmd, buf);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(shmctl)(shmid, cmd, buf);
+ if (res >= 0) {
+ unsigned sz = 0;
+ if (cmd == shmctl_ipc_stat || cmd == shmctl_shm_stat)
+ sz = sizeof(__sanitizer_shmid_ds);
+ else if (cmd == shmctl_ipc_info)
+ sz = struct_shminfo_sz;
+ else if (cmd == shmctl_shm_info)
+ sz = struct_shm_info_sz;
+ if (sz) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, sz);
+ }
+ return res;
+}
+#define INIT_SHMCTL COMMON_INTERCEPT_FUNCTION(shmctl);
+#else
+#define INIT_SHMCTL
+#endif
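+
+// The cmd -> size mapping above matches what the kernel writes through buf:
+// IPC_STAT/SHM_STAT fill a shmid_ds, IPC_INFO a shminfo, and SHM_INFO a
+// shm_info. A hypothetical caller (comment-only sketch):
+//   struct shmid_ds ds;
+//   if (shmctl(id, IPC_STAT, &ds) == 0)
+//     use(ds.shm_segsz);  // ds was unpoisoned with sizeof(shmid_ds) above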
+
+#if SANITIZER_INTERCEPT_RANDOM_R
+INTERCEPTOR(int, random_r, void *buf, u32 *result) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, random_r, buf, result);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(random_r)(buf, result);
+ if (!res && result)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
+ return res;
+}
+#define INIT_RANDOM_R COMMON_INTERCEPT_FUNCTION(random_r);
+#else
+#define INIT_RANDOM_R
+#endif
+
+// FIXME: under ASan the REAL() call below may write to freed memory and corrupt
+// its metadata. See
+// https://github.com/google/sanitizers/issues/321.
+#if SANITIZER_INTERCEPT_PTHREAD_ATTR_GET || \
+ SANITIZER_INTERCEPT_PTHREAD_ATTR_GET_SCHED || \
+ SANITIZER_INTERCEPT_PTHREAD_ATTR_GETINHERITSCHED || \
+ SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GET || \
+ SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GET || \
+ SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GET || \
+ SANITIZER_INTERCEPT_PTHREAD_BARRIERATTR_GET
+#define INTERCEPTOR_PTHREAD_OBJECT_ATTR_GET(fn, sz) \
+ INTERCEPTOR(int, fn, void *attr, void *r) { \
+ void *ctx; \
+ COMMON_INTERCEPTOR_ENTER(ctx, fn, attr, r); \
+ int res = REAL(fn)(attr, r); \
+ if (!res && r) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, r, sz); \
+ return res; \
+ }
+#define INTERCEPTOR_PTHREAD_ATTR_GET(what, sz) \
+ INTERCEPTOR_PTHREAD_OBJECT_ATTR_GET(pthread_attr_get##what, sz)
+#define INTERCEPTOR_PTHREAD_MUTEXATTR_GET(what, sz) \
+ INTERCEPTOR_PTHREAD_OBJECT_ATTR_GET(pthread_mutexattr_get##what, sz)
+#define INTERCEPTOR_PTHREAD_RWLOCKATTR_GET(what, sz) \
+ INTERCEPTOR_PTHREAD_OBJECT_ATTR_GET(pthread_rwlockattr_get##what, sz)
+#define INTERCEPTOR_PTHREAD_CONDATTR_GET(what, sz) \
+ INTERCEPTOR_PTHREAD_OBJECT_ATTR_GET(pthread_condattr_get##what, sz)
+#define INTERCEPTOR_PTHREAD_BARRIERATTR_GET(what, sz) \
+ INTERCEPTOR_PTHREAD_OBJECT_ATTR_GET(pthread_barrierattr_get##what, sz)
+#endif
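+
+// For reference, INTERCEPTOR_PTHREAD_ATTR_GET(detachstate, sizeof(int))
+// below expands to (approximately):
+//   INTERCEPTOR(int, pthread_attr_getdetachstate, void *attr, void *r) {
+//     void *ctx;
+//     COMMON_INTERCEPTOR_ENTER(ctx, pthread_attr_getdetachstate, attr, r);
+//     int res = REAL(pthread_attr_getdetachstate)(attr, r);
+//     if (!res && r) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, r, sizeof(int));
+//     return res;
+//   }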
+
+#if SANITIZER_INTERCEPT_PTHREAD_ATTR_GET
+INTERCEPTOR_PTHREAD_ATTR_GET(detachstate, sizeof(int))
+INTERCEPTOR_PTHREAD_ATTR_GET(guardsize, sizeof(SIZE_T))
+INTERCEPTOR_PTHREAD_ATTR_GET(scope, sizeof(int))
+INTERCEPTOR_PTHREAD_ATTR_GET(stacksize, sizeof(SIZE_T))
+INTERCEPTOR(int, pthread_attr_getstack, void *attr, void **addr, SIZE_T *size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, pthread_attr_getstack, attr, addr, size);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(pthread_attr_getstack)(attr, addr, size);
+ if (!res) {
+ if (addr) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, addr, sizeof(*addr));
+ if (size) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, size, sizeof(*size));
+ }
+ return res;
+}
+
+// We may need to call the real pthread_attr_getstack from the run-time
+// in sanitizer_common, but we don't want to include the interception headers
+// there. So, just define this function here.
+namespace __sanitizer {
+extern "C" {
+int real_pthread_attr_getstack(void *attr, void **addr, SIZE_T *size) {
+ return REAL(pthread_attr_getstack)(attr, addr, size);
+}
+} // extern "C"
+} // namespace __sanitizer
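+
+// A sketch of how the run-time reaches it (the exact declaration site in
+// sanitizer_common may differ):
+//   namespace __sanitizer {
+//   extern "C" int real_pthread_attr_getstack(void *attr, void **addr,
+//                                             SIZE_T *size);
+//   } // namespace __sanitizer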
+
+#define INIT_PTHREAD_ATTR_GET \
+ COMMON_INTERCEPT_FUNCTION(pthread_attr_getdetachstate); \
+ COMMON_INTERCEPT_FUNCTION(pthread_attr_getguardsize); \
+ COMMON_INTERCEPT_FUNCTION(pthread_attr_getscope); \
+ COMMON_INTERCEPT_FUNCTION(pthread_attr_getstacksize); \
+ COMMON_INTERCEPT_FUNCTION(pthread_attr_getstack);
+#else
+#define INIT_PTHREAD_ATTR_GET
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_ATTR_GET_SCHED
+INTERCEPTOR_PTHREAD_ATTR_GET(schedparam, struct_sched_param_sz)
+INTERCEPTOR_PTHREAD_ATTR_GET(schedpolicy, sizeof(int))
+
+#define INIT_PTHREAD_ATTR_GET_SCHED \
+ COMMON_INTERCEPT_FUNCTION(pthread_attr_getschedparam); \
+ COMMON_INTERCEPT_FUNCTION(pthread_attr_getschedpolicy);
+#else
+#define INIT_PTHREAD_ATTR_GET_SCHED
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_ATTR_GETINHERITSCHED
+INTERCEPTOR_PTHREAD_ATTR_GET(inheritsched, sizeof(int))
+
+#define INIT_PTHREAD_ATTR_GETINHERITSCHED \
+ COMMON_INTERCEPT_FUNCTION(pthread_attr_getinheritsched);
+#else
+#define INIT_PTHREAD_ATTR_GETINHERITSCHED
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_ATTR_GETAFFINITY_NP
+INTERCEPTOR(int, pthread_attr_getaffinity_np, void *attr, SIZE_T cpusetsize,
+ void *cpuset) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, pthread_attr_getaffinity_np, attr, cpusetsize,
+ cpuset);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(pthread_attr_getaffinity_np)(attr, cpusetsize, cpuset);
+ if (!res && cpusetsize && cpuset)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cpuset, cpusetsize);
+ return res;
+}
+
+#define INIT_PTHREAD_ATTR_GETAFFINITY_NP \
+ COMMON_INTERCEPT_FUNCTION(pthread_attr_getaffinity_np);
+#else
+#define INIT_PTHREAD_ATTR_GETAFFINITY_NP
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPSHARED
+INTERCEPTOR_PTHREAD_MUTEXATTR_GET(pshared, sizeof(int))
+#define INIT_PTHREAD_MUTEXATTR_GETPSHARED \
+ COMMON_INTERCEPT_FUNCTION(pthread_mutexattr_getpshared);
+#else
+#define INIT_PTHREAD_MUTEXATTR_GETPSHARED
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETTYPE
+INTERCEPTOR_PTHREAD_MUTEXATTR_GET(type, sizeof(int))
+#define INIT_PTHREAD_MUTEXATTR_GETTYPE \
+ COMMON_INTERCEPT_FUNCTION(pthread_mutexattr_gettype);
+#else
+#define INIT_PTHREAD_MUTEXATTR_GETTYPE
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPROTOCOL
+INTERCEPTOR_PTHREAD_MUTEXATTR_GET(protocol, sizeof(int))
+#define INIT_PTHREAD_MUTEXATTR_GETPROTOCOL \
+ COMMON_INTERCEPT_FUNCTION(pthread_mutexattr_getprotocol);
+#else
+#define INIT_PTHREAD_MUTEXATTR_GETPROTOCOL
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPRIOCEILING
+INTERCEPTOR_PTHREAD_MUTEXATTR_GET(prioceiling, sizeof(int))
+#define INIT_PTHREAD_MUTEXATTR_GETPRIOCEILING \
+ COMMON_INTERCEPT_FUNCTION(pthread_mutexattr_getprioceiling);
+#else
+#define INIT_PTHREAD_MUTEXATTR_GETPRIOCEILING
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETROBUST
+INTERCEPTOR_PTHREAD_MUTEXATTR_GET(robust, sizeof(int))
+#define INIT_PTHREAD_MUTEXATTR_GETROBUST \
+ COMMON_INTERCEPT_FUNCTION(pthread_mutexattr_getrobust);
+#else
+#define INIT_PTHREAD_MUTEXATTR_GETROBUST
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETROBUST_NP
+INTERCEPTOR_PTHREAD_MUTEXATTR_GET(robust_np, sizeof(int))
+#define INIT_PTHREAD_MUTEXATTR_GETROBUST_NP \
+ COMMON_INTERCEPT_FUNCTION(pthread_mutexattr_getrobust_np);
+#else
+#define INIT_PTHREAD_MUTEXATTR_GETROBUST_NP
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GETPSHARED
+INTERCEPTOR_PTHREAD_RWLOCKATTR_GET(pshared, sizeof(int))
+#define INIT_PTHREAD_RWLOCKATTR_GETPSHARED \
+ COMMON_INTERCEPT_FUNCTION(pthread_rwlockattr_getpshared);
+#else
+#define INIT_PTHREAD_RWLOCKATTR_GETPSHARED
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GETKIND_NP
+INTERCEPTOR_PTHREAD_RWLOCKATTR_GET(kind_np, sizeof(int))
+#define INIT_PTHREAD_RWLOCKATTR_GETKIND_NP \
+ COMMON_INTERCEPT_FUNCTION(pthread_rwlockattr_getkind_np);
+#else
+#define INIT_PTHREAD_RWLOCKATTR_GETKIND_NP
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GETPSHARED
+INTERCEPTOR_PTHREAD_CONDATTR_GET(pshared, sizeof(int))
+#define INIT_PTHREAD_CONDATTR_GETPSHARED \
+ COMMON_INTERCEPT_FUNCTION(pthread_condattr_getpshared);
+#else
+#define INIT_PTHREAD_CONDATTR_GETPSHARED
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GETCLOCK
+INTERCEPTOR_PTHREAD_CONDATTR_GET(clock, sizeof(int))
+#define INIT_PTHREAD_CONDATTR_GETCLOCK \
+ COMMON_INTERCEPT_FUNCTION(pthread_condattr_getclock);
+#else
+#define INIT_PTHREAD_CONDATTR_GETCLOCK
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_BARRIERATTR_GETPSHARED
+INTERCEPTOR_PTHREAD_BARRIERATTR_GET(pshared, sizeof(int)) // not on Mac or Android
+#define INIT_PTHREAD_BARRIERATTR_GETPSHARED \
+ COMMON_INTERCEPT_FUNCTION(pthread_barrierattr_getpshared);
+#else
+#define INIT_PTHREAD_BARRIERATTR_GETPSHARED
+#endif
+
+#if SANITIZER_INTERCEPT_TMPNAM
+INTERCEPTOR(char *, tmpnam, char *s) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, tmpnam, s);
+ char *res = REAL(tmpnam)(s);
+ if (res) {
+ if (s)
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, s, REAL(strlen)(s) + 1);
+ else
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
+ }
+ return res;
+}
+#define INIT_TMPNAM COMMON_INTERCEPT_FUNCTION(tmpnam);
+#else
+#define INIT_TMPNAM
+#endif
+
+#if SANITIZER_INTERCEPT_TMPNAM_R
+INTERCEPTOR(char *, tmpnam_r, char *s) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, tmpnam_r, s);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ char *res = REAL(tmpnam_r)(s);
+ if (res && s) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, s, REAL(strlen)(s) + 1);
+ return res;
+}
+#define INIT_TMPNAM_R COMMON_INTERCEPT_FUNCTION(tmpnam_r);
+#else
+#define INIT_TMPNAM_R
+#endif
+
+#if SANITIZER_INTERCEPT_TTYNAME
+INTERCEPTOR(char *, ttyname, int fd) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, ttyname, fd);
+ char *res = REAL(ttyname)(fd);
+ if (res != nullptr)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
+ return res;
+}
+#define INIT_TTYNAME COMMON_INTERCEPT_FUNCTION(ttyname);
+#else
+#define INIT_TTYNAME
+#endif
+
+#if SANITIZER_INTERCEPT_TTYNAME_R
+INTERCEPTOR(int, ttyname_r, int fd, char *name, SIZE_T namesize) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, ttyname_r, fd, name, namesize);
+ int res = REAL(ttyname_r)(fd, name, namesize);
+ if (res == 0)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ return res;
+}
+#define INIT_TTYNAME_R COMMON_INTERCEPT_FUNCTION(ttyname_r);
+#else
+#define INIT_TTYNAME_R
+#endif
+
+#if SANITIZER_INTERCEPT_TEMPNAM
+INTERCEPTOR(char *, tempnam, char *dir, char *pfx) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, tempnam, dir, pfx);
+ if (dir) COMMON_INTERCEPTOR_READ_RANGE(ctx, dir, REAL(strlen)(dir) + 1);
+ if (pfx) COMMON_INTERCEPTOR_READ_RANGE(ctx, pfx, REAL(strlen)(pfx) + 1);
+ char *res = REAL(tempnam)(dir, pfx);
+ if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
+ return res;
+}
+#define INIT_TEMPNAM COMMON_INTERCEPT_FUNCTION(tempnam);
+#else
+#define INIT_TEMPNAM
+#endif
+
+#if SANITIZER_INTERCEPT_PTHREAD_SETNAME_NP && !SANITIZER_NETBSD
+INTERCEPTOR(int, pthread_setname_np, uptr thread, const char *name) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, pthread_setname_np, thread, name);
+ COMMON_INTERCEPTOR_READ_STRING(ctx, name, 0);
+ COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name);
+ return REAL(pthread_setname_np)(thread, name);
+}
+#define INIT_PTHREAD_SETNAME_NP COMMON_INTERCEPT_FUNCTION(pthread_setname_np);
+#elif SANITIZER_INTERCEPT_PTHREAD_SETNAME_NP && SANITIZER_NETBSD
+INTERCEPTOR(int, pthread_setname_np, uptr thread, const char *name, void *arg) {
+ void *ctx;
+ char newname[32]; // PTHREAD_MAX_NAMELEN_NP=32
+ COMMON_INTERCEPTOR_ENTER(ctx, pthread_setname_np, thread, name, arg);
+ COMMON_INTERCEPTOR_READ_STRING(ctx, name, 0);
+ internal_snprintf(newname, sizeof(newname), name, arg);
+ COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, newname);
+ return REAL(pthread_setname_np)(thread, name, arg);
+}
+#define INIT_PTHREAD_SETNAME_NP COMMON_INTERCEPT_FUNCTION(pthread_setname_np);
+#else
+#define INIT_PTHREAD_SETNAME_NP
+#endif
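+
+// On NetBSD pthread_setname_np() treats the name as a printf-like format
+// with arg as its single argument, which is why the interceptor above
+// expands it with internal_snprintf() before registering the thread name.
+// Hypothetical callers (comment-only sketch):
+//   pthread_setname_np(thr, "worker-%p", arg);  // NetBSD: name is a format
+//   pthread_setname_np(thr, "worker");          // Linux: name used verbatim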
+
+#if SANITIZER_INTERCEPT_PTHREAD_GETNAME_NP
+INTERCEPTOR(int, pthread_getname_np, uptr thread, char *name, SIZE_T len) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, pthread_getname_np, thread, name, len);
+ int res = REAL(pthread_getname_np)(thread, name, len);
+ if (!res)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, name, internal_strnlen(name, len) + 1);
+ return res;
+}
+#define INIT_PTHREAD_GETNAME_NP COMMON_INTERCEPT_FUNCTION(pthread_getname_np);
+#else
+#define INIT_PTHREAD_GETNAME_NP
+#endif
+
+#if SANITIZER_INTERCEPT_SINCOS
+INTERCEPTOR(void, sincos, double x, double *sin, double *cos) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sincos, x, sin, cos);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ REAL(sincos)(x, sin, cos);
+ if (sin) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sin, sizeof(*sin));
+ if (cos) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cos, sizeof(*cos));
+}
+INTERCEPTOR(void, sincosf, float x, float *sin, float *cos) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sincosf, x, sin, cos);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ REAL(sincosf)(x, sin, cos);
+ if (sin) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sin, sizeof(*sin));
+ if (cos) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cos, sizeof(*cos));
+}
+INTERCEPTOR(void, sincosl, long double x, long double *sin, long double *cos) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sincosl, x, sin, cos);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ REAL(sincosl)(x, sin, cos);
+ if (sin) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sin, sizeof(*sin));
+ if (cos) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cos, sizeof(*cos));
+}
+#define INIT_SINCOS \
+ COMMON_INTERCEPT_FUNCTION(sincos); \
+ COMMON_INTERCEPT_FUNCTION(sincosf); \
+ COMMON_INTERCEPT_FUNCTION_LDBL(sincosl);
+#else
+#define INIT_SINCOS
+#endif
+
+#if SANITIZER_INTERCEPT_REMQUO
+INTERCEPTOR(double, remquo, double x, double y, int *quo) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, remquo, x, y, quo);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ double res = REAL(remquo)(x, y, quo);
+ if (quo) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, quo, sizeof(*quo));
+ return res;
+}
+INTERCEPTOR(float, remquof, float x, float y, int *quo) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, remquof, x, y, quo);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ float res = REAL(remquof)(x, y, quo);
+ if (quo) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, quo, sizeof(*quo));
+ return res;
+}
+#define INIT_REMQUO \
+ COMMON_INTERCEPT_FUNCTION(remquo); \
+ COMMON_INTERCEPT_FUNCTION(remquof);
+#else
+#define INIT_REMQUO
+#endif
+
+#if SANITIZER_INTERCEPT_REMQUOL
+INTERCEPTOR(long double, remquol, long double x, long double y, int *quo) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, remquol, x, y, quo);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ long double res = REAL(remquol)(x, y, quo);
+ if (quo) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, quo, sizeof(*quo));
+ return res;
+}
+#define INIT_REMQUOL \
+ COMMON_INTERCEPT_FUNCTION_LDBL(remquol);
+#else
+#define INIT_REMQUOL
+#endif
+
+#if SANITIZER_INTERCEPT_LGAMMA
+extern int signgam;
+INTERCEPTOR(double, lgamma, double x) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, lgamma, x);
+ double res = REAL(lgamma)(x);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, &signgam, sizeof(signgam));
+ return res;
+}
+INTERCEPTOR(float, lgammaf, float x) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, lgammaf, x);
+ float res = REAL(lgammaf)(x);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, &signgam, sizeof(signgam));
+ return res;
+}
+#define INIT_LGAMMA \
+ COMMON_INTERCEPT_FUNCTION(lgamma); \
+ COMMON_INTERCEPT_FUNCTION(lgammaf);
+#else
+#define INIT_LGAMMA
+#endif
+
+#if SANITIZER_INTERCEPT_LGAMMAL
+INTERCEPTOR(long double, lgammal, long double x) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, lgammal, x);
+ long double res = REAL(lgammal)(x);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, &signgam, sizeof(signgam));
+ return res;
+}
+#define INIT_LGAMMAL \
+ COMMON_INTERCEPT_FUNCTION_LDBL(lgammal);
+#else
+#define INIT_LGAMMAL
+#endif
+
+#if SANITIZER_INTERCEPT_LGAMMA_R
+INTERCEPTOR(double, lgamma_r, double x, int *signp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, lgamma_r, x, signp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ double res = REAL(lgamma_r)(x, signp);
+ if (signp) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, signp, sizeof(*signp));
+ return res;
+}
+INTERCEPTOR(float, lgammaf_r, float x, int *signp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, lgammaf_r, x, signp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ float res = REAL(lgammaf_r)(x, signp);
+ if (signp) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, signp, sizeof(*signp));
+ return res;
+}
+#define INIT_LGAMMA_R \
+ COMMON_INTERCEPT_FUNCTION(lgamma_r); \
+ COMMON_INTERCEPT_FUNCTION(lgammaf_r);
+#else
+#define INIT_LGAMMA_R
+#endif
+
+#if SANITIZER_INTERCEPT_LGAMMAL_R
+INTERCEPTOR(long double, lgammal_r, long double x, int *signp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, lgammal_r, x, signp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ long double res = REAL(lgammal_r)(x, signp);
+ if (signp) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, signp, sizeof(*signp));
+ return res;
+}
+#define INIT_LGAMMAL_R COMMON_INTERCEPT_FUNCTION_LDBL(lgammal_r);
+#else
+#define INIT_LGAMMAL_R
+#endif
+
+#if SANITIZER_INTERCEPT_DRAND48_R
+INTERCEPTOR(int, drand48_r, void *buffer, double *result) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, drand48_r, buffer, result);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(drand48_r)(buffer, result);
+ if (result) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
+ return res;
+}
+INTERCEPTOR(int, lrand48_r, void *buffer, long *result) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, lrand48_r, buffer, result);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(lrand48_r)(buffer, result);
+ if (result) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(*result));
+ return res;
+}
+#define INIT_DRAND48_R \
+ COMMON_INTERCEPT_FUNCTION(drand48_r); \
+ COMMON_INTERCEPT_FUNCTION(lrand48_r);
+#else
+#define INIT_DRAND48_R
+#endif
+
+#if SANITIZER_INTERCEPT_RAND_R
+INTERCEPTOR(int, rand_r, unsigned *seedp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, rand_r, seedp);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, seedp, sizeof(*seedp));
+ return REAL(rand_r)(seedp);
+}
+#define INIT_RAND_R COMMON_INTERCEPT_FUNCTION(rand_r);
+#else
+#define INIT_RAND_R
+#endif
+
+#if SANITIZER_INTERCEPT_GETLINE
+INTERCEPTOR(SSIZE_T, getline, char **lineptr, SIZE_T *n, void *stream) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getline, lineptr, n, stream);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ SSIZE_T res = REAL(getline)(lineptr, n, stream);
+ if (res > 0) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, lineptr, sizeof(*lineptr));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n, sizeof(*n));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *lineptr, res + 1);
+ }
+ return res;
+}
+
+// FIXME: under ASan the call below may write to freed memory and corrupt its
+// metadata. See
+// https://github.com/google/sanitizers/issues/321.
+#define GETDELIM_INTERCEPTOR_IMPL(vname) \
+ { \
+ void *ctx; \
+ COMMON_INTERCEPTOR_ENTER(ctx, vname, lineptr, n, delim, stream); \
+ SSIZE_T res = REAL(vname)(lineptr, n, delim, stream); \
+ if (res > 0) { \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, lineptr, sizeof(*lineptr)); \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n, sizeof(*n)); \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *lineptr, res + 1); \
+ } \
+ return res; \
+ }
+
+INTERCEPTOR(SSIZE_T, __getdelim, char **lineptr, SIZE_T *n, int delim,
+ void *stream)
+GETDELIM_INTERCEPTOR_IMPL(__getdelim)
+
+// There's no __getdelim() on FreeBSD, so we supply the getdelim()
+// interceptor with its own body.
+INTERCEPTOR(SSIZE_T, getdelim, char **lineptr, SIZE_T *n, int delim,
+ void *stream)
+GETDELIM_INTERCEPTOR_IMPL(getdelim)
+
+#define INIT_GETLINE \
+ COMMON_INTERCEPT_FUNCTION(getline); \
+ COMMON_INTERCEPT_FUNCTION(__getdelim); \
+ COMMON_INTERCEPT_FUNCTION(getdelim);
+#else
+#define INIT_GETLINE
+#endif
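+
+// In all three bodies above, res is the number of bytes read excluding the
+// terminating NUL, hence the `res + 1` when unpoisoning *lineptr. A
+// hypothetical caller (comment-only sketch):
+//   char *line = nullptr;
+//   size_t cap = 0;
+//   ssize_t n;
+//   while ((n = getline(&line, &cap, f)) != -1)
+//     process(line, n);  // line[0..n] (including the NUL) is marked written
+//   free(line);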
+
+#if SANITIZER_INTERCEPT_ICONV
+INTERCEPTOR(SIZE_T, iconv, void *cd, char **inbuf, SIZE_T *inbytesleft,
+ char **outbuf, SIZE_T *outbytesleft) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, iconv, cd, inbuf, inbytesleft, outbuf,
+ outbytesleft);
+ if (inbytesleft)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, inbytesleft, sizeof(*inbytesleft));
+ if (inbuf && inbytesleft)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, *inbuf, *inbytesleft);
+ if (outbytesleft)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, outbytesleft, sizeof(*outbytesleft));
+ void *outbuf_orig = outbuf ? *outbuf : nullptr;
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ SIZE_T res = REAL(iconv)(cd, inbuf, inbytesleft, outbuf, outbytesleft);
+ if (outbuf && *outbuf > outbuf_orig) {
+ SIZE_T sz = (char *)*outbuf - (char *)outbuf_orig;
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, outbuf_orig, sz);
+ }
+ return res;
+}
+#define INIT_ICONV COMMON_INTERCEPT_FUNCTION(iconv);
+#else
+#define INIT_ICONV
+#endif
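+
+// The write-range size above is derived from how far iconv() advanced
+// *outbuf, because the return value is a conversion count (or (SIZE_T)-1 on
+// error), not a byte count. A hypothetical caller (comment-only sketch):
+//   char *in = src, *out = dst;
+//   size_t inleft = srclen, outleft = dstlen;
+//   iconv(cd, &in, &inleft, &out, &outleft);
+//   // bytes produced == out - dst, exactly the range unpoisoned above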
+
+#if SANITIZER_INTERCEPT_TIMES
+INTERCEPTOR(__sanitizer_clock_t, times, void *tms) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, times, tms);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ __sanitizer_clock_t res = REAL(times)(tms);
+ if (res != (__sanitizer_clock_t)-1 && tms)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, tms, struct_tms_sz);
+ return res;
+}
+#define INIT_TIMES COMMON_INTERCEPT_FUNCTION(times);
+#else
+#define INIT_TIMES
+#endif
+
+#if SANITIZER_INTERCEPT_TLS_GET_ADDR
+#if !SANITIZER_S390
+#define INIT_TLS_GET_ADDR COMMON_INTERCEPT_FUNCTION(__tls_get_addr)
+// If you see any crashes around this function, there are two known issues
+// with it:
+// 1. __tls_get_addr can be called with a misaligned stack due to
+//    https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58066
+// 2. It can be called recursively if sanitizer code itself uses
+//    __tls_get_addr to access thread-local variables (this should not
+//    happen normally, because sanitizers use the initial-exec TLS model).
+INTERCEPTOR(void *, __tls_get_addr, void *arg) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __tls_get_addr, arg);
+ void *res = REAL(__tls_get_addr)(arg);
+ uptr tls_begin, tls_end;
+ COMMON_INTERCEPTOR_GET_TLS_RANGE(&tls_begin, &tls_end);
+ DTLS::DTV *dtv = DTLS_on_tls_get_addr(arg, res, tls_begin, tls_end);
+ if (dtv) {
+ // New DTLS block has been allocated.
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE((void *)dtv->beg, dtv->size);
+ }
+ return res;
+}
+#if SANITIZER_PPC
+// On PowerPC, we also need to intercept __tls_get_addr_opt, which has
+// mostly the same semantics as __tls_get_addr, but whose presence enables
+// some linker optimizations (which are safe to ignore here).
+extern "C" __attribute__((alias("__interceptor___tls_get_addr"),
+ visibility("default")))
+void *__tls_get_addr_opt(void *arg);
+#endif
+#else // SANITIZER_S390
+// On s390, we have to intercept two functions here:
+// - __tls_get_addr_internal, which is a glibc-internal function that is like
+// the usual __tls_get_addr, but returns a TP-relative offset instead of
+// a proper pointer. It is used by dlsym for TLS symbols.
+// - __tls_get_offset, which is like the above, but takes a GOT-relative
+//   descriptor offset as an argument instead of a pointer. The GOT address
+//   is passed in %r12, so the interceptor has to be written in assembly.
+//   This is the function used by the compiler.
+extern "C" uptr __tls_get_offset_wrapper(void *arg, uptr (*fn)(void *arg));
+#define INIT_TLS_GET_ADDR COMMON_INTERCEPT_FUNCTION(__tls_get_offset)
+DEFINE_REAL(uptr, __tls_get_offset, void *arg)
+extern "C" uptr __tls_get_offset(void *arg);
+extern "C" uptr __interceptor___tls_get_offset(void *arg);
+INTERCEPTOR(uptr, __tls_get_addr_internal, void *arg) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __tls_get_addr_internal, arg);
+ uptr res = __tls_get_offset_wrapper(arg, REAL(__tls_get_offset));
+ uptr tp = reinterpret_cast<uptr>(__builtin_thread_pointer());
+ void *ptr = reinterpret_cast<void *>(res + tp);
+ uptr tls_begin, tls_end;
+ COMMON_INTERCEPTOR_GET_TLS_RANGE(&tls_begin, &tls_end);
+ DTLS::DTV *dtv = DTLS_on_tls_get_addr(arg, ptr, tls_begin, tls_end);
+ if (dtv) {
+ // New DTLS block has been allocated.
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE((void *)dtv->beg, dtv->size);
+ }
+ return res;
+}
+// We need a hidden symbol aliasing the above, so that we can jump
+// directly to it from the assembly below.
+extern "C" __attribute__((alias("__interceptor___tls_get_addr_internal"),
+ visibility("hidden")))
+uptr __tls_get_addr_hidden(void *arg);
+// Now carefully intercept __tls_get_offset.
+asm(
+ ".text\n"
+// The __interceptor_ version has to exist, so that gen_dynamic_list.py
+// exports our symbol.
+ ".weak __tls_get_offset\n"
+ ".type __tls_get_offset, @function\n"
+ "__tls_get_offset:\n"
+ ".global __interceptor___tls_get_offset\n"
+ ".type __interceptor___tls_get_offset, @function\n"
+ "__interceptor___tls_get_offset:\n"
+#ifdef __s390x__
+ "la %r2, 0(%r2,%r12)\n"
+ "jg __tls_get_addr_hidden\n"
+#else
+ "basr %r3,0\n"
+ "0: la %r2,0(%r2,%r12)\n"
+ "l %r4,1f-0b(%r3)\n"
+ "b 0(%r4,%r3)\n"
+ "1: .long __tls_get_addr_hidden - 0b\n"
+#endif
+ ".size __interceptor___tls_get_offset, .-__interceptor___tls_get_offset\n"
+// Assembly wrapper to call REAL(__tls_get_offset)(arg)
+ ".type __tls_get_offset_wrapper, @function\n"
+ "__tls_get_offset_wrapper:\n"
+#ifdef __s390x__
+ "sgr %r2,%r12\n"
+#else
+ "sr %r2,%r12\n"
+#endif
+ "br %r3\n"
+ ".size __tls_get_offset_wrapper, .-__tls_get_offset_wrapper\n"
+);
+#endif // SANITIZER_S390
+#else
+#define INIT_TLS_GET_ADDR
+#endif
+
+#if SANITIZER_INTERCEPT_LISTXATTR
+INTERCEPTOR(SSIZE_T, listxattr, const char *path, char *list, SIZE_T size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, listxattr, path, list, size);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ SSIZE_T res = REAL(listxattr)(path, list, size);
+ // Here and below, size == 0 is a special case where nothing is written to the
+ // buffer, and res contains the desired buffer size.
+ if (size && res > 0 && list) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, list, res);
+ return res;
+}
+INTERCEPTOR(SSIZE_T, llistxattr, const char *path, char *list, SIZE_T size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, llistxattr, path, list, size);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ SSIZE_T res = REAL(llistxattr)(path, list, size);
+ if (size && res > 0 && list) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, list, res);
+ return res;
+}
+INTERCEPTOR(SSIZE_T, flistxattr, int fd, char *list, SIZE_T size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, flistxattr, fd, list, size);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ SSIZE_T res = REAL(flistxattr)(fd, list, size);
+ if (size && res > 0 && list) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, list, res);
+ return res;
+}
+#define INIT_LISTXATTR \
+ COMMON_INTERCEPT_FUNCTION(listxattr); \
+ COMMON_INTERCEPT_FUNCTION(llistxattr); \
+ COMMON_INTERCEPT_FUNCTION(flistxattr);
+#else
+#define INIT_LISTXATTR
+#endif
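+
+// The size == 0 convention above supports the usual two-call pattern
+// (Linux-flavored, hypothetical, comment-only sketch):
+//   ssize_t need = listxattr(path, nullptr, 0);   // query only; no write
+//   std::vector<char> names(need);
+//   listxattr(path, names.data(), names.size());  // res bytes unpoisoned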
+
+#if SANITIZER_INTERCEPT_GETXATTR
+INTERCEPTOR(SSIZE_T, getxattr, const char *path, const char *name, char *value,
+ SIZE_T size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getxattr, path, name, value, size);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ if (name) COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ SSIZE_T res = REAL(getxattr)(path, name, value, size);
+ if (size && res > 0 && value) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, value, res);
+ return res;
+}
+INTERCEPTOR(SSIZE_T, lgetxattr, const char *path, const char *name, char *value,
+ SIZE_T size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, lgetxattr, path, name, value, size);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ if (name) COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ SSIZE_T res = REAL(lgetxattr)(path, name, value, size);
+ if (size && res > 0 && value) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, value, res);
+ return res;
+}
+INTERCEPTOR(SSIZE_T, fgetxattr, int fd, const char *name, char *value,
+ SIZE_T size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fgetxattr, fd, name, value, size);
+ if (name) COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ SSIZE_T res = REAL(fgetxattr)(fd, name, value, size);
+ if (size && res > 0 && value) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, value, res);
+ return res;
+}
+#define INIT_GETXATTR \
+ COMMON_INTERCEPT_FUNCTION(getxattr); \
+ COMMON_INTERCEPT_FUNCTION(lgetxattr); \
+ COMMON_INTERCEPT_FUNCTION(fgetxattr);
+#else
+#define INIT_GETXATTR
+#endif
+
+#if SANITIZER_INTERCEPT_GETRESID
+INTERCEPTOR(int, getresuid, void *ruid, void *euid, void *suid) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getresuid, ruid, euid, suid);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(getresuid)(ruid, euid, suid);
+ if (res >= 0) {
+ if (ruid) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ruid, uid_t_sz);
+ if (euid) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, euid, uid_t_sz);
+ if (suid) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, suid, uid_t_sz);
+ }
+ return res;
+}
+INTERCEPTOR(int, getresgid, void *rgid, void *egid, void *sgid) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getresgid, rgid, egid, sgid);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(getresgid)(rgid, egid, sgid);
+ if (res >= 0) {
+ if (rgid) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, rgid, gid_t_sz);
+ if (egid) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, egid, gid_t_sz);
+ if (sgid) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sgid, gid_t_sz);
+ }
+ return res;
+}
+#define INIT_GETRESID \
+ COMMON_INTERCEPT_FUNCTION(getresuid); \
+ COMMON_INTERCEPT_FUNCTION(getresgid);
+#else
+#define INIT_GETRESID
+#endif
+
+#if SANITIZER_INTERCEPT_GETIFADDRS
+// As long as getifaddrs()/freeifaddrs() use calloc()/free(), we don't need to
+// intercept freeifaddrs(). If that ceases to be the case, we might need to
+// intercept it to poison the memory again.
+INTERCEPTOR(int, getifaddrs, __sanitizer_ifaddrs **ifap) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getifaddrs, ifap);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(getifaddrs)(ifap);
+ if (res == 0 && ifap) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ifap, sizeof(void *));
+ __sanitizer_ifaddrs *p = *ifap;
+ while (p) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(__sanitizer_ifaddrs));
+ if (p->ifa_name)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->ifa_name,
+ REAL(strlen)(p->ifa_name) + 1);
+ if (p->ifa_addr)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->ifa_addr, struct_sockaddr_sz);
+ if (p->ifa_netmask)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->ifa_netmask, struct_sockaddr_sz);
+ // On Linux this is a union, but the other member also points to a
+ // struct sockaddr, so the following is sufficient.
+ if (p->ifa_dstaddr)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->ifa_dstaddr, struct_sockaddr_sz);
+ // FIXME(smatveev): Unpoison p->ifa_data as well.
+ p = p->ifa_next;
+ }
+ }
+ return res;
+}
+#define INIT_GETIFADDRS \
+ COMMON_INTERCEPT_FUNCTION(getifaddrs);
+#else
+#define INIT_GETIFADDRS
+#endif
+
+#if SANITIZER_INTERCEPT_IF_INDEXTONAME
+INTERCEPTOR(char *, if_indextoname, unsigned int ifindex, char* ifname) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, if_indextoname, ifindex, ifname);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ char *res = REAL(if_indextoname)(ifindex, ifname);
+ if (res && ifname)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ifname, REAL(strlen)(ifname) + 1);
+ return res;
+}
+INTERCEPTOR(unsigned int, if_nametoindex, const char* ifname) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, if_nametoindex, ifname);
+ if (ifname)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, ifname, REAL(strlen)(ifname) + 1);
+ return REAL(if_nametoindex)(ifname);
+}
+#define INIT_IF_INDEXTONAME \
+ COMMON_INTERCEPT_FUNCTION(if_indextoname); \
+ COMMON_INTERCEPT_FUNCTION(if_nametoindex);
+#else
+#define INIT_IF_INDEXTONAME
+#endif
+
+#if SANITIZER_INTERCEPT_CAPGET
+INTERCEPTOR(int, capget, void *hdrp, void *datap) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, capget, hdrp, datap);
+ if (hdrp)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, hdrp, __user_cap_header_struct_sz);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(capget)(hdrp, datap);
+ if (res == 0 && datap)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, datap, __user_cap_data_struct_sz);
+ // capget() can also return -1 and write to hdrp->version when the version
+ // passed in hdrp->version is unsupported. But that's not a trivial
+ // condition to check, and in any case COMMON_INTERCEPTOR_READ_RANGE
+ // protects us to some extent.
+ return res;
+}
+INTERCEPTOR(int, capset, void *hdrp, const void *datap) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, capset, hdrp, datap);
+ if (hdrp)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, hdrp, __user_cap_header_struct_sz);
+ if (datap)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, datap, __user_cap_data_struct_sz);
+ return REAL(capset)(hdrp, datap);
+}
+#define INIT_CAPGET \
+ COMMON_INTERCEPT_FUNCTION(capget); \
+ COMMON_INTERCEPT_FUNCTION(capset);
+#else
+#define INIT_CAPGET
+#endif
+
+#if SANITIZER_INTERCEPT_AEABI_MEM
+INTERCEPTOR(void *, __aeabi_memmove, void *to, const void *from, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size);
+}
+
+INTERCEPTOR(void *, __aeabi_memmove4, void *to, const void *from, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size);
+}
+
+INTERCEPTOR(void *, __aeabi_memmove8, void *to, const void *from, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMMOVE_IMPL(ctx, to, from, size);
+}
+
+INTERCEPTOR(void *, __aeabi_memcpy, void *to, const void *from, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size);
+}
+
+INTERCEPTOR(void *, __aeabi_memcpy4, void *to, const void *from, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size);
+}
+
+INTERCEPTOR(void *, __aeabi_memcpy8, void *to, const void *from, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMCPY_IMPL(ctx, to, from, size);
+}
+
+// Note the argument order.
+INTERCEPTOR(void *, __aeabi_memset, void *block, uptr size, int c) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size);
+}
+
+INTERCEPTOR(void *, __aeabi_memset4, void *block, uptr size, int c) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size);
+}
+
+INTERCEPTOR(void *, __aeabi_memset8, void *block, uptr size, int c) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, c, size);
+}
+
+INTERCEPTOR(void *, __aeabi_memclr, void *block, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
+}
+
+INTERCEPTOR(void *, __aeabi_memclr4, void *block, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
+}
+
+INTERCEPTOR(void *, __aeabi_memclr8, void *block, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
+}
+
+#define INIT_AEABI_MEM \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memmove); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memmove4); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memmove8); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memcpy); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memcpy4); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memcpy8); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memset); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memset4); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memset8); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memclr); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memclr4); \
+ COMMON_INTERCEPT_FUNCTION(__aeabi_memclr8);
+#else
+#define INIT_AEABI_MEM
+#endif // SANITIZER_INTERCEPT_AEABI_MEM
+
+#if SANITIZER_INTERCEPT___BZERO
+INTERCEPTOR(void *, __bzero, void *block, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
+}
+#define INIT___BZERO COMMON_INTERCEPT_FUNCTION(__bzero);
+#else
+#define INIT___BZERO
+#endif // SANITIZER_INTERCEPT___BZERO
+
+#if SANITIZER_INTERCEPT_BZERO
+INTERCEPTOR(void *, bzero, void *block, uptr size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_MEMSET_IMPL(ctx, block, 0, size);
+}
+#define INIT_BZERO COMMON_INTERCEPT_FUNCTION(bzero);
+#else
+#define INIT_BZERO
+#endif // SANITIZER_INTERCEPT_BZERO
+
+#if SANITIZER_INTERCEPT_FTIME
+INTERCEPTOR(int, ftime, __sanitizer_timeb *tp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, ftime, tp);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(ftime)(tp);
+ if (tp)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, tp, sizeof(*tp));
+ return res;
+}
+#define INIT_FTIME COMMON_INTERCEPT_FUNCTION(ftime);
+#else
+#define INIT_FTIME
+#endif // SANITIZER_INTERCEPT_FTIME
+
+#if SANITIZER_INTERCEPT_XDR
+INTERCEPTOR(void, xdrmem_create, __sanitizer_XDR *xdrs, uptr addr,
+ unsigned size, int op) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, xdrmem_create, xdrs, addr, size, op);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ REAL(xdrmem_create)(xdrs, addr, size, op);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, xdrs, sizeof(*xdrs));
+ if (op == __sanitizer_XDR_ENCODE) {
+ // It's not obvious how much data individual xdr_ routines write.
+ // Simply unpoison the entire target buffer in advance.
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, (void *)addr, size);
+ }
+}
+
+INTERCEPTOR(void, xdrstdio_create, __sanitizer_XDR *xdrs, void *file, int op) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, xdrstdio_create, xdrs, file, op);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ REAL(xdrstdio_create)(xdrs, file, op);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, xdrs, sizeof(*xdrs));
+}
+
+// FIXME: under ASan the call below may write to freed memory and corrupt
+// its metadata. See
+// https://github.com/google/sanitizers/issues/321.
+#define XDR_INTERCEPTOR(F, T) \
+ INTERCEPTOR(int, F, __sanitizer_XDR *xdrs, T *p) { \
+ void *ctx; \
+ COMMON_INTERCEPTOR_ENTER(ctx, F, xdrs, p); \
+ if (p && xdrs->x_op == __sanitizer_XDR_ENCODE) \
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, p, sizeof(*p)); \
+ int res = REAL(F)(xdrs, p); \
+ if (res && p && xdrs->x_op == __sanitizer_XDR_DECODE) \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p)); \
+ return res; \
+ }
+
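+// For illustration, XDR_INTERCEPTOR(xdr_int, int) expands to roughly:
+//   INTERCEPTOR(int, xdr_int, __sanitizer_XDR *xdrs, int *p) {
+//     if (encoding) check that *p is initialized;    // READ_RANGE
+//     int res = REAL(xdr_int)(xdrs, p);
+//     if (res && decoding) mark *p as written;       // WRITE_RANGE
+//     return res;
+//   }
+// The same pattern applies to every instantiation below.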
+XDR_INTERCEPTOR(xdr_short, short)
+XDR_INTERCEPTOR(xdr_u_short, unsigned short)
+XDR_INTERCEPTOR(xdr_int, int)
+XDR_INTERCEPTOR(xdr_u_int, unsigned)
+XDR_INTERCEPTOR(xdr_long, long)
+XDR_INTERCEPTOR(xdr_u_long, unsigned long)
+XDR_INTERCEPTOR(xdr_hyper, long long)
+XDR_INTERCEPTOR(xdr_u_hyper, unsigned long long)
+XDR_INTERCEPTOR(xdr_longlong_t, long long)
+XDR_INTERCEPTOR(xdr_u_longlong_t, unsigned long long)
+XDR_INTERCEPTOR(xdr_int8_t, u8)
+XDR_INTERCEPTOR(xdr_uint8_t, u8)
+XDR_INTERCEPTOR(xdr_int16_t, u16)
+XDR_INTERCEPTOR(xdr_uint16_t, u16)
+XDR_INTERCEPTOR(xdr_int32_t, u32)
+XDR_INTERCEPTOR(xdr_uint32_t, u32)
+XDR_INTERCEPTOR(xdr_int64_t, u64)
+XDR_INTERCEPTOR(xdr_uint64_t, u64)
+XDR_INTERCEPTOR(xdr_quad_t, long long)
+XDR_INTERCEPTOR(xdr_u_quad_t, unsigned long long)
+XDR_INTERCEPTOR(xdr_bool, bool)
+XDR_INTERCEPTOR(xdr_enum, int)
+XDR_INTERCEPTOR(xdr_char, char)
+XDR_INTERCEPTOR(xdr_u_char, unsigned char)
+XDR_INTERCEPTOR(xdr_float, float)
+XDR_INTERCEPTOR(xdr_double, double)
+
+// FIXME: intercept xdr_array, opaque, union, vector, reference, pointer,
+// wrapstring, sizeof
+
+INTERCEPTOR(int, xdr_bytes, __sanitizer_XDR *xdrs, char **p, unsigned *sizep,
+ unsigned maxsize) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, xdr_bytes, xdrs, p, sizep, maxsize);
+ if (p && sizep && xdrs->x_op == __sanitizer_XDR_ENCODE) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, p, sizeof(*p));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, sizep, sizeof(*sizep));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, *p, *sizep);
+ }
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(xdr_bytes)(xdrs, p, sizep, maxsize);
+ if (p && sizep && xdrs->x_op == __sanitizer_XDR_DECODE) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sizep, sizeof(*sizep));
+ if (res && *p && *sizep) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *p, *sizep);
+ }
+ return res;
+}
+
+INTERCEPTOR(int, xdr_string, __sanitizer_XDR *xdrs, char **p,
+ unsigned maxsize) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, xdr_string, xdrs, p, maxsize);
+ if (p && xdrs->x_op == __sanitizer_XDR_ENCODE) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, p, sizeof(*p));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, *p, REAL(strlen)(*p) + 1);
+ }
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ int res = REAL(xdr_string)(xdrs, p, maxsize);
+ if (p && xdrs->x_op == __sanitizer_XDR_DECODE) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p));
+ if (res && *p)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *p, REAL(strlen)(*p) + 1);
+ }
+ return res;
+}
+
+#define INIT_XDR \
+ COMMON_INTERCEPT_FUNCTION(xdrmem_create); \
+ COMMON_INTERCEPT_FUNCTION(xdrstdio_create); \
+ COMMON_INTERCEPT_FUNCTION(xdr_short); \
+ COMMON_INTERCEPT_FUNCTION(xdr_u_short); \
+ COMMON_INTERCEPT_FUNCTION(xdr_int); \
+ COMMON_INTERCEPT_FUNCTION(xdr_u_int); \
+ COMMON_INTERCEPT_FUNCTION(xdr_long); \
+ COMMON_INTERCEPT_FUNCTION(xdr_u_long); \
+ COMMON_INTERCEPT_FUNCTION(xdr_hyper); \
+ COMMON_INTERCEPT_FUNCTION(xdr_u_hyper); \
+ COMMON_INTERCEPT_FUNCTION(xdr_longlong_t); \
+ COMMON_INTERCEPT_FUNCTION(xdr_u_longlong_t); \
+ COMMON_INTERCEPT_FUNCTION(xdr_int8_t); \
+ COMMON_INTERCEPT_FUNCTION(xdr_uint8_t); \
+ COMMON_INTERCEPT_FUNCTION(xdr_int16_t); \
+ COMMON_INTERCEPT_FUNCTION(xdr_uint16_t); \
+ COMMON_INTERCEPT_FUNCTION(xdr_int32_t); \
+ COMMON_INTERCEPT_FUNCTION(xdr_uint32_t); \
+ COMMON_INTERCEPT_FUNCTION(xdr_int64_t); \
+ COMMON_INTERCEPT_FUNCTION(xdr_uint64_t); \
+ COMMON_INTERCEPT_FUNCTION(xdr_quad_t); \
+ COMMON_INTERCEPT_FUNCTION(xdr_u_quad_t); \
+ COMMON_INTERCEPT_FUNCTION(xdr_bool); \
+ COMMON_INTERCEPT_FUNCTION(xdr_enum); \
+ COMMON_INTERCEPT_FUNCTION(xdr_char); \
+ COMMON_INTERCEPT_FUNCTION(xdr_u_char); \
+ COMMON_INTERCEPT_FUNCTION(xdr_float); \
+ COMMON_INTERCEPT_FUNCTION(xdr_double); \
+ COMMON_INTERCEPT_FUNCTION(xdr_bytes); \
+ COMMON_INTERCEPT_FUNCTION(xdr_string);
+#else
+#define INIT_XDR
+#endif // SANITIZER_INTERCEPT_XDR
+
+#if SANITIZER_INTERCEPT_TSEARCH
+INTERCEPTOR(void *, tsearch, void *key, void **rootp,
+ int (*compar)(const void *, const void *)) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, tsearch, key, rootp, compar);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ void *res = REAL(tsearch)(key, rootp, compar);
+ if (res && *(void **)res == key)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, sizeof(void *));
+ return res;
+}
+#define INIT_TSEARCH COMMON_INTERCEPT_FUNCTION(tsearch);
+#else
+#define INIT_TSEARCH
+#endif
+
+#if SANITIZER_INTERCEPT_LIBIO_INTERNALS || SANITIZER_INTERCEPT_FOPEN || \
+ SANITIZER_INTERCEPT_OPEN_MEMSTREAM
+void unpoison_file(__sanitizer_FILE *fp) {
+#if SANITIZER_HAS_STRUCT_FILE
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(fp, sizeof(*fp));
+#if SANITIZER_NETBSD
+ if (fp->_bf._base && fp->_bf._size > 0)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(fp->_bf._base,
+ fp->_bf._size);
+#else
+ if (fp->_IO_read_base && fp->_IO_read_base < fp->_IO_read_end)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(fp->_IO_read_base,
+ fp->_IO_read_end - fp->_IO_read_base);
+#endif
+#endif // SANITIZER_HAS_STRUCT_FILE
+}
+#endif
+
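+// A sketch of why the following matter (not glibc's exact macro): with -O2,
+// getc(fp) may expand inline to something like
+//   (fp->_IO_read_ptr >= fp->_IO_read_end ? __uflow(fp) : *fp->_IO_read_ptr++)
+// so the buffer that __uflow refills must be unpoisoned, or the inline
+// fast-path reads would look uninitialized to MSan.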
+#if SANITIZER_INTERCEPT_LIBIO_INTERNALS
+// These functions are called when a .c source is built with -O2.
+INTERCEPTOR(int, __uflow, __sanitizer_FILE *fp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __uflow, fp);
+ int res = REAL(__uflow)(fp);
+ unpoison_file(fp);
+ return res;
+}
+INTERCEPTOR(int, __underflow, __sanitizer_FILE *fp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __underflow, fp);
+ int res = REAL(__underflow)(fp);
+ unpoison_file(fp);
+ return res;
+}
+INTERCEPTOR(int, __overflow, __sanitizer_FILE *fp, int ch) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __overflow, fp, ch);
+ int res = REAL(__overflow)(fp, ch);
+ unpoison_file(fp);
+ return res;
+}
+INTERCEPTOR(int, __wuflow, __sanitizer_FILE *fp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __wuflow, fp);
+ int res = REAL(__wuflow)(fp);
+ unpoison_file(fp);
+ return res;
+}
+INTERCEPTOR(int, __wunderflow, __sanitizer_FILE *fp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __wunderflow, fp);
+ int res = REAL(__wunderflow)(fp);
+ unpoison_file(fp);
+ return res;
+}
+INTERCEPTOR(int, __woverflow, __sanitizer_FILE *fp, int ch) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __woverflow, fp, ch);
+ int res = REAL(__woverflow)(fp, ch);
+ unpoison_file(fp);
+ return res;
+}
+#define INIT_LIBIO_INTERNALS \
+ COMMON_INTERCEPT_FUNCTION(__uflow); \
+ COMMON_INTERCEPT_FUNCTION(__underflow); \
+ COMMON_INTERCEPT_FUNCTION(__overflow); \
+ COMMON_INTERCEPT_FUNCTION(__wuflow); \
+ COMMON_INTERCEPT_FUNCTION(__wunderflow); \
+ COMMON_INTERCEPT_FUNCTION(__woverflow);
+#else
+#define INIT_LIBIO_INTERNALS
+#endif
+
+#if SANITIZER_INTERCEPT_FOPEN
+INTERCEPTOR(__sanitizer_FILE *, fopen, const char *path, const char *mode) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fopen, path, mode);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, REAL(strlen)(mode) + 1);
+ __sanitizer_FILE *res = REAL(fopen)(path, mode);
+ COMMON_INTERCEPTOR_FILE_OPEN(ctx, res, path);
+ if (res) unpoison_file(res);
+ return res;
+}
+INTERCEPTOR(__sanitizer_FILE *, fdopen, int fd, const char *mode) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fdopen, fd, mode);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, REAL(strlen)(mode) + 1);
+ __sanitizer_FILE *res = REAL(fdopen)(fd, mode);
+ if (res) unpoison_file(res);
+ return res;
+}
+INTERCEPTOR(__sanitizer_FILE *, freopen, const char *path, const char *mode,
+ __sanitizer_FILE *fp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, freopen, path, mode, fp);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, REAL(strlen)(mode) + 1);
+ COMMON_INTERCEPTOR_FILE_CLOSE(ctx, fp);
+ __sanitizer_FILE *res = REAL(freopen)(path, mode, fp);
+ COMMON_INTERCEPTOR_FILE_OPEN(ctx, res, path);
+ if (res) unpoison_file(res);
+ return res;
+}
+#define INIT_FOPEN \
+ COMMON_INTERCEPT_FUNCTION(fopen); \
+ COMMON_INTERCEPT_FUNCTION(fdopen); \
+ COMMON_INTERCEPT_FUNCTION(freopen);
+#else
+#define INIT_FOPEN
+#endif
+
+#if SANITIZER_INTERCEPT_FOPEN64
+INTERCEPTOR(__sanitizer_FILE *, fopen64, const char *path, const char *mode) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fopen64, path, mode);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, REAL(strlen)(mode) + 1);
+ __sanitizer_FILE *res = REAL(fopen64)(path, mode);
+ COMMON_INTERCEPTOR_FILE_OPEN(ctx, res, path);
+ if (res) unpoison_file(res);
+ return res;
+}
+INTERCEPTOR(__sanitizer_FILE *, freopen64, const char *path, const char *mode,
+ __sanitizer_FILE *fp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, freopen64, path, mode, fp);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, mode, REAL(strlen)(mode) + 1);
+ COMMON_INTERCEPTOR_FILE_CLOSE(ctx, fp);
+ __sanitizer_FILE *res = REAL(freopen64)(path, mode, fp);
+ COMMON_INTERCEPTOR_FILE_OPEN(ctx, res, path);
+ if (res) unpoison_file(res);
+ return res;
+}
+#define INIT_FOPEN64 \
+ COMMON_INTERCEPT_FUNCTION(fopen64); \
+ COMMON_INTERCEPT_FUNCTION(freopen64);
+#else
+#define INIT_FOPEN64
+#endif
+
+#if SANITIZER_INTERCEPT_OPEN_MEMSTREAM
+INTERCEPTOR(__sanitizer_FILE *, open_memstream, char **ptr, SIZE_T *sizeloc) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, open_memstream, ptr, sizeloc);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ __sanitizer_FILE *res = REAL(open_memstream)(ptr, sizeloc);
+ if (res) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, sizeof(*ptr));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sizeloc, sizeof(*sizeloc));
+ unpoison_file(res);
+ FileMetadata file = {ptr, sizeloc};
+ SetInterceptorMetadata(res, file);
+ }
+ return res;
+}
+INTERCEPTOR(__sanitizer_FILE *, open_wmemstream, wchar_t **ptr,
+ SIZE_T *sizeloc) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, open_wmemstream, ptr, sizeloc);
+ __sanitizer_FILE *res = REAL(open_wmemstream)(ptr, sizeloc);
+ if (res) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, sizeof(*ptr));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sizeloc, sizeof(*sizeloc));
+ unpoison_file(res);
+ FileMetadata file = {(char **)ptr, sizeloc};
+ SetInterceptorMetadata(res, file);
+ }
+ return res;
+}
+INTERCEPTOR(__sanitizer_FILE *, fmemopen, void *buf, SIZE_T size,
+ const char *mode) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fmemopen, buf, size, mode);
+ // FIXME: under ASan the call below may write to freed memory and corrupt
+ // its metadata. See
+ // https://github.com/google/sanitizers/issues/321.
+ __sanitizer_FILE *res = REAL(fmemopen)(buf, size, mode);
+ if (res) unpoison_file(res);
+ return res;
+}
+#define INIT_OPEN_MEMSTREAM \
+ COMMON_INTERCEPT_FUNCTION(open_memstream); \
+ COMMON_INTERCEPT_FUNCTION(open_wmemstream); \
+ COMMON_INTERCEPT_FUNCTION(fmemopen);
+#else
+#define INIT_OPEN_MEMSTREAM
+#endif
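+// open_memstream()/open_wmemstream() hand the caller a buffer/size pair that
+// libc rewrites on every flush and close, so the (ptr, sizeloc) locations
+// captured above are remembered as FileMetadata and re-unpoisoned from the
+// fflush/fclose interceptors further below.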
+
+#if SANITIZER_INTERCEPT_OBSTACK
+static void initialize_obstack(__sanitizer_obstack *obstack) {
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(obstack, sizeof(*obstack));
+ if (obstack->chunk)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(obstack->chunk,
+ sizeof(*obstack->chunk));
+}
+
+INTERCEPTOR(int, _obstack_begin_1, __sanitizer_obstack *obstack, int sz,
+ int align, void *(*alloc_fn)(uptr arg, uptr sz),
+ void (*free_fn)(uptr arg, void *p)) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, _obstack_begin_1, obstack, sz, align, alloc_fn,
+ free_fn);
+ int res = REAL(_obstack_begin_1)(obstack, sz, align, alloc_fn, free_fn);
+ if (res) initialize_obstack(obstack);
+ return res;
+}
+INTERCEPTOR(int, _obstack_begin, __sanitizer_obstack *obstack, int sz,
+ int align, void *(*alloc_fn)(uptr sz), void (*free_fn)(void *p)) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, _obstack_begin, obstack, sz, align, alloc_fn,
+ free_fn);
+ int res = REAL(_obstack_begin)(obstack, sz, align, alloc_fn, free_fn);
+ if (res) initialize_obstack(obstack);
+ return res;
+}
+INTERCEPTOR(void, _obstack_newchunk, __sanitizer_obstack *obstack, int length) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, _obstack_newchunk, obstack, length);
+ REAL(_obstack_newchunk)(obstack, length);
+ if (obstack->chunk)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(
+ obstack->chunk, obstack->next_free - (char *)obstack->chunk);
+}
+#define INIT_OBSTACK \
+ COMMON_INTERCEPT_FUNCTION(_obstack_begin_1); \
+ COMMON_INTERCEPT_FUNCTION(_obstack_begin); \
+ COMMON_INTERCEPT_FUNCTION(_obstack_newchunk);
+#else
+#define INIT_OBSTACK
+#endif
+
+#if SANITIZER_INTERCEPT_FFLUSH
+INTERCEPTOR(int, fflush, __sanitizer_FILE *fp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fflush, fp);
+ int res = REAL(fflush)(fp);
+ // FIXME: handle fp == NULL
+ if (fp) {
+ const FileMetadata *m = GetInterceptorMetadata(fp);
+ if (m) COMMON_INTERCEPTOR_INITIALIZE_RANGE(*m->addr, *m->size);
+ }
+ return res;
+}
+#define INIT_FFLUSH COMMON_INTERCEPT_FUNCTION(fflush);
+#else
+#define INIT_FFLUSH
+#endif
+
+#if SANITIZER_INTERCEPT_FCLOSE
+INTERCEPTOR(int, fclose, __sanitizer_FILE *fp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fclose, fp);
+ COMMON_INTERCEPTOR_FILE_CLOSE(ctx, fp);
+ const FileMetadata *m = GetInterceptorMetadata(fp);
+ int res = REAL(fclose)(fp);
+ if (m) {
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(*m->addr, *m->size);
+ DeleteInterceptorMetadata(fp);
+ }
+ return res;
+}
+#define INIT_FCLOSE COMMON_INTERCEPT_FUNCTION(fclose);
+#else
+#define INIT_FCLOSE
+#endif
+
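+// Note on the dlopen/dlclose pair below: loading or unloading a shared
+// object changes the set of modules the symbolizer knows about, so the
+// module list is invalidated on both paths; stale module info would
+// mis-attribute subsequent reports.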
+#if SANITIZER_INTERCEPT_DLOPEN_DLCLOSE
+INTERCEPTOR(void*, dlopen, const char *filename, int flag) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, dlopen, filename, flag);
+ if (filename) COMMON_INTERCEPTOR_READ_STRING(ctx, filename, 0);
+ COMMON_INTERCEPTOR_ON_DLOPEN(filename, flag);
+ void *res = REAL(dlopen)(filename, flag);
+ Symbolizer::GetOrInit()->InvalidateModuleList();
+ COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, res);
+ return res;
+}
+
+INTERCEPTOR(int, dlclose, void *handle) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, dlclose, handle);
+ int res = REAL(dlclose)(handle);
+ Symbolizer::GetOrInit()->InvalidateModuleList();
+ COMMON_INTERCEPTOR_LIBRARY_UNLOADED();
+ return res;
+}
+#define INIT_DLOPEN_DLCLOSE \
+ COMMON_INTERCEPT_FUNCTION(dlopen); \
+ COMMON_INTERCEPT_FUNCTION(dlclose);
+#else
+#define INIT_DLOPEN_DLCLOSE
+#endif
+
+#if SANITIZER_INTERCEPT_GETPASS
+INTERCEPTOR(char *, getpass, const char *prompt) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getpass, prompt);
+ if (prompt)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, prompt, REAL(strlen)(prompt)+1);
+ char *res = REAL(getpass)(prompt);
+ if (res) COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res)+1);
+ return res;
+}
+
+#define INIT_GETPASS COMMON_INTERCEPT_FUNCTION(getpass);
+#else
+#define INIT_GETPASS
+#endif
+
+#if SANITIZER_INTERCEPT_TIMERFD
+INTERCEPTOR(int, timerfd_settime, int fd, int flags, void *new_value,
+ void *old_value) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, timerfd_settime, fd, flags, new_value,
+ old_value);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, new_value, struct_itimerspec_sz);
+ int res = REAL(timerfd_settime)(fd, flags, new_value, old_value);
+ if (res != -1 && old_value)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, old_value, struct_itimerspec_sz);
+ return res;
+}
+
+INTERCEPTOR(int, timerfd_gettime, int fd, void *curr_value) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, timerfd_gettime, fd, curr_value);
+ int res = REAL(timerfd_gettime)(fd, curr_value);
+ if (res != -1 && curr_value)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, curr_value, struct_itimerspec_sz);
+ return res;
+}
+#define INIT_TIMERFD \
+ COMMON_INTERCEPT_FUNCTION(timerfd_settime); \
+ COMMON_INTERCEPT_FUNCTION(timerfd_gettime);
+#else
+#define INIT_TIMERFD
+#endif
+
+#if SANITIZER_INTERCEPT_MLOCKX
+// The Linux kernel has a bug that can deadlock the kernel if a process
+// maps terabytes of memory and then calls mlock().
+static void MlockIsUnsupported() {
+ static atomic_uint8_t printed;
+ if (atomic_exchange(&printed, 1, memory_order_relaxed))
+ return;
+ VPrintf(1, "%s ignores mlock/mlockall/munlock/munlockall\n",
+ SanitizerToolName);
+}
+
+INTERCEPTOR(int, mlock, const void *addr, uptr len) {
+ MlockIsUnsupported();
+ return 0;
+}
+
+INTERCEPTOR(int, munlock, const void *addr, uptr len) {
+ MlockIsUnsupported();
+ return 0;
+}
+
+INTERCEPTOR(int, mlockall, int flags) {
+ MlockIsUnsupported();
+ return 0;
+}
+
+INTERCEPTOR(int, munlockall, void) {
+ MlockIsUnsupported();
+ return 0;
+}
+
+#define INIT_MLOCKX \
+ COMMON_INTERCEPT_FUNCTION(mlock); \
+ COMMON_INTERCEPT_FUNCTION(munlock); \
+ COMMON_INTERCEPT_FUNCTION(mlockall); \
+ COMMON_INTERCEPT_FUNCTION(munlockall);
+
+#else
+#define INIT_MLOCKX
+#endif // SANITIZER_INTERCEPT_MLOCKX
+
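+// The cookie wrappers below exist because libc calls the user's
+// read/write/seek/close callbacks from uninstrumented code, so their
+// arguments would otherwise look uninitialized to MSan. A usage sketch
+// (my_read and my_state are illustrative, not part of this file):
+//   cookie_io_functions_t io = {my_read, nullptr, nullptr, nullptr};
+//   FILE *f = fopencookie(my_state, "r", io);
+// my_read then runs via wrapped_read, which unpoisons its parameters first.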
+#if SANITIZER_INTERCEPT_FOPENCOOKIE
+struct WrappedCookie {
+ void *real_cookie;
+ __sanitizer_cookie_io_functions_t real_io_funcs;
+};
+
+static uptr wrapped_read(void *cookie, char *buf, uptr size) {
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(3);
+ WrappedCookie *wrapped_cookie = (WrappedCookie *)cookie;
+ __sanitizer_cookie_io_read real_read = wrapped_cookie->real_io_funcs.read;
+ return real_read ? real_read(wrapped_cookie->real_cookie, buf, size) : 0;
+}
+
+static uptr wrapped_write(void *cookie, const char *buf, uptr size) {
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(3);
+ WrappedCookie *wrapped_cookie = (WrappedCookie *)cookie;
+ __sanitizer_cookie_io_write real_write = wrapped_cookie->real_io_funcs.write;
+ return real_write ? real_write(wrapped_cookie->real_cookie, buf, size) : size;
+}
+
+static int wrapped_seek(void *cookie, u64 *offset, int whence) {
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(3);
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(offset, sizeof(*offset));
+ WrappedCookie *wrapped_cookie = (WrappedCookie *)cookie;
+ __sanitizer_cookie_io_seek real_seek = wrapped_cookie->real_io_funcs.seek;
+ return real_seek ? real_seek(wrapped_cookie->real_cookie, offset, whence)
+ : -1;
+}
+
+static int wrapped_close(void *cookie) {
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(1);
+ WrappedCookie *wrapped_cookie = (WrappedCookie *)cookie;
+ __sanitizer_cookie_io_close real_close = wrapped_cookie->real_io_funcs.close;
+ int res = real_close ? real_close(wrapped_cookie->real_cookie) : 0;
+ InternalFree(wrapped_cookie);
+ return res;
+}
+
+INTERCEPTOR(__sanitizer_FILE *, fopencookie, void *cookie, const char *mode,
+ __sanitizer_cookie_io_functions_t io_funcs) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fopencookie, cookie, mode, io_funcs);
+ WrappedCookie *wrapped_cookie =
+ (WrappedCookie *)InternalAlloc(sizeof(WrappedCookie));
+ wrapped_cookie->real_cookie = cookie;
+ wrapped_cookie->real_io_funcs = io_funcs;
+ __sanitizer_FILE *res =
+ REAL(fopencookie)(wrapped_cookie, mode, {wrapped_read, wrapped_write,
+ wrapped_seek, wrapped_close});
+ return res;
+}
+
+#define INIT_FOPENCOOKIE COMMON_INTERCEPT_FUNCTION(fopencookie);
+#else
+#define INIT_FOPENCOOKIE
+#endif // SANITIZER_INTERCEPT_FOPENCOOKIE
+
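+// In the semaphore interceptors below, the ACQUIRE after a successful wait
+// pairs with the RELEASE before sem_post to give TSan a happens-before
+// edge. A minimal sketch of the pattern this makes race-free:
+//   // thread 1               // thread 2
+//   shared = 42;              sem_wait(&s);  // ACQUIRE on &s
+//   sem_post(&s);             use(shared);   // ordered after the store
+//   // RELEASE on &s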
+#if SANITIZER_INTERCEPT_SEM
+INTERCEPTOR(int, sem_init, __sanitizer_sem_t *s, int pshared, unsigned value) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sem_init, s, pshared, value);
+  // Work around a bug in glibc's "old" semaphore implementation by
+  // zero-initializing the sem_t contents. This has to be done here because
+  // interceptors bind to the lowest symbol version by default, hitting the
+  // buggy code path, while the non-sanitized build of the same code works
+  // fine.
+ REAL(memset)(s, 0, sizeof(*s));
+ int res = REAL(sem_init)(s, pshared, value);
+ return res;
+}
+
+INTERCEPTOR(int, sem_destroy, __sanitizer_sem_t *s) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sem_destroy, s);
+ int res = REAL(sem_destroy)(s);
+ return res;
+}
+
+INTERCEPTOR(int, sem_wait, __sanitizer_sem_t *s) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sem_wait, s);
+ int res = COMMON_INTERCEPTOR_BLOCK_REAL(sem_wait)(s);
+ if (res == 0) {
+ COMMON_INTERCEPTOR_ACQUIRE(ctx, (uptr)s);
+ }
+ return res;
+}
+
+INTERCEPTOR(int, sem_trywait, __sanitizer_sem_t *s) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sem_trywait, s);
+ int res = COMMON_INTERCEPTOR_BLOCK_REAL(sem_trywait)(s);
+ if (res == 0) {
+ COMMON_INTERCEPTOR_ACQUIRE(ctx, (uptr)s);
+ }
+ return res;
+}
+
+INTERCEPTOR(int, sem_timedwait, __sanitizer_sem_t *s, void *abstime) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sem_timedwait, s, abstime);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, abstime, struct_timespec_sz);
+ int res = COMMON_INTERCEPTOR_BLOCK_REAL(sem_timedwait)(s, abstime);
+ if (res == 0) {
+ COMMON_INTERCEPTOR_ACQUIRE(ctx, (uptr)s);
+ }
+ return res;
+}
+
+INTERCEPTOR(int, sem_post, __sanitizer_sem_t *s) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sem_post, s);
+ COMMON_INTERCEPTOR_RELEASE(ctx, (uptr)s);
+ int res = REAL(sem_post)(s);
+ return res;
+}
+
+INTERCEPTOR(int, sem_getvalue, __sanitizer_sem_t *s, int *sval) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sem_getvalue, s, sval);
+ int res = REAL(sem_getvalue)(s, sval);
+ if (res == 0) {
+ COMMON_INTERCEPTOR_ACQUIRE(ctx, (uptr)s);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sval, sizeof(*sval));
+ }
+ return res;
+}
+#define INIT_SEM \
+ COMMON_INTERCEPT_FUNCTION(sem_init); \
+ COMMON_INTERCEPT_FUNCTION(sem_destroy); \
+ COMMON_INTERCEPT_FUNCTION(sem_wait); \
+ COMMON_INTERCEPT_FUNCTION(sem_trywait); \
+ COMMON_INTERCEPT_FUNCTION(sem_timedwait); \
+ COMMON_INTERCEPT_FUNCTION(sem_post); \
+ COMMON_INTERCEPT_FUNCTION(sem_getvalue);
+#else
+#define INIT_SEM
+#endif // SANITIZER_INTERCEPT_SEM
+
+#if SANITIZER_INTERCEPT_PTHREAD_SETCANCEL
+INTERCEPTOR(int, pthread_setcancelstate, int state, int *oldstate) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, pthread_setcancelstate, state, oldstate);
+ int res = REAL(pthread_setcancelstate)(state, oldstate);
+ if (res == 0 && oldstate != nullptr)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, oldstate, sizeof(*oldstate));
+ return res;
+}
+
+INTERCEPTOR(int, pthread_setcanceltype, int type, int *oldtype) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, pthread_setcanceltype, type, oldtype);
+ int res = REAL(pthread_setcanceltype)(type, oldtype);
+ if (res == 0 && oldtype != nullptr)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, oldtype, sizeof(*oldtype));
+ return res;
+}
+#define INIT_PTHREAD_SETCANCEL \
+ COMMON_INTERCEPT_FUNCTION(pthread_setcancelstate); \
+ COMMON_INTERCEPT_FUNCTION(pthread_setcanceltype);
+#else
+#define INIT_PTHREAD_SETCANCEL
+#endif
+
+#if SANITIZER_INTERCEPT_MINCORE
+INTERCEPTOR(int, mincore, void *addr, uptr length, unsigned char *vec) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, mincore, addr, length, vec);
+ int res = REAL(mincore)(addr, length, vec);
+ if (res == 0) {
+ uptr page_size = GetPageSizeCached();
+ uptr vec_size = ((length + page_size - 1) & (~(page_size - 1))) / page_size;
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, vec, vec_size);
+ }
+ return res;
+}
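+// vec holds one byte per page of [addr, addr + length), so the rounded-up
+// page count above is exactly the number of bytes the kernel wrote; e.g.
+// length = 5000 with 4096-byte pages gives vec_size = 2.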
+#define INIT_MINCORE COMMON_INTERCEPT_FUNCTION(mincore);
+#else
+#define INIT_MINCORE
+#endif
+
+#if SANITIZER_INTERCEPT_PROCESS_VM_READV
+INTERCEPTOR(SSIZE_T, process_vm_readv, int pid, __sanitizer_iovec *local_iov,
+ uptr liovcnt, __sanitizer_iovec *remote_iov, uptr riovcnt,
+ uptr flags) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, process_vm_readv, pid, local_iov, liovcnt,
+ remote_iov, riovcnt, flags);
+ SSIZE_T res = REAL(process_vm_readv)(pid, local_iov, liovcnt, remote_iov,
+ riovcnt, flags);
+ if (res > 0)
+ write_iovec(ctx, local_iov, liovcnt, res);
+ return res;
+}
+
+INTERCEPTOR(SSIZE_T, process_vm_writev, int pid, __sanitizer_iovec *local_iov,
+ uptr liovcnt, __sanitizer_iovec *remote_iov, uptr riovcnt,
+ uptr flags) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, process_vm_writev, pid, local_iov, liovcnt,
+ remote_iov, riovcnt, flags);
+ SSIZE_T res = REAL(process_vm_writev)(pid, local_iov, liovcnt, remote_iov,
+ riovcnt, flags);
+ if (res > 0)
+ read_iovec(ctx, local_iov, liovcnt, res);
+ return res;
+}
+#define INIT_PROCESS_VM_READV \
+ COMMON_INTERCEPT_FUNCTION(process_vm_readv); \
+ COMMON_INTERCEPT_FUNCTION(process_vm_writev);
+#else
+#define INIT_PROCESS_VM_READV
+#endif
+
+#if SANITIZER_INTERCEPT_CTERMID
+INTERCEPTOR(char *, ctermid, char *s) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, ctermid, s);
+ char *res = REAL(ctermid)(s);
+ if (res) {
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
+ }
+ return res;
+}
+#define INIT_CTERMID COMMON_INTERCEPT_FUNCTION(ctermid);
+#else
+#define INIT_CTERMID
+#endif
+
+#if SANITIZER_INTERCEPT_CTERMID_R
+INTERCEPTOR(char *, ctermid_r, char *s) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, ctermid_r, s);
+ char *res = REAL(ctermid_r)(s);
+ if (res) {
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, REAL(strlen)(res) + 1);
+ }
+ return res;
+}
+#define INIT_CTERMID_R COMMON_INTERCEPT_FUNCTION(ctermid_r);
+#else
+#define INIT_CTERMID_R
+#endif
+
+#if SANITIZER_INTERCEPT_RECV_RECVFROM
+INTERCEPTOR(SSIZE_T, recv, int fd, void *buf, SIZE_T len, int flags) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, recv, fd, buf, len, flags);
+ COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ SSIZE_T res = REAL(recv)(fd, buf, len, flags);
+ if (res > 0) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, Min((SIZE_T)res, len));
+ }
+ if (res >= 0 && fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
+ return res;
+}
+
+INTERCEPTOR(SSIZE_T, recvfrom, int fd, void *buf, SIZE_T len, int flags,
+ void *srcaddr, int *addrlen) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, recvfrom, fd, buf, len, flags, srcaddr,
+ addrlen);
+ COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ SIZE_T srcaddr_sz;
+ if (srcaddr) srcaddr_sz = *addrlen;
+ (void)srcaddr_sz; // prevent "set but not used" warning
+ SSIZE_T res = REAL(recvfrom)(fd, buf, len, flags, srcaddr, addrlen);
+ if (res > 0) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, Min((SIZE_T)res, len));
+ if (srcaddr)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(srcaddr,
+ Min((SIZE_T)*addrlen, srcaddr_sz));
+ }
+ return res;
+}
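+// recvfrom() may update *addrlen to the full source-address size even when
+// it truncated the address to the srcaddr_sz bytes that fit, hence the
+// Min() above when unpoisoning srcaddr.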
+#define INIT_RECV_RECVFROM \
+ COMMON_INTERCEPT_FUNCTION(recv); \
+ COMMON_INTERCEPT_FUNCTION(recvfrom);
+#else
+#define INIT_RECV_RECVFROM
+#endif
+
+#if SANITIZER_INTERCEPT_SEND_SENDTO
+INTERCEPTOR(SSIZE_T, send, int fd, void *buf, SIZE_T len, int flags) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, send, fd, buf, len, flags);
+ if (fd >= 0) {
+ COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);
+ }
+ SSIZE_T res = REAL(send)(fd, buf, len, flags);
+ if (common_flags()->intercept_send && res > 0)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, buf, Min((SIZE_T)res, len));
+ return res;
+}
+
+INTERCEPTOR(SSIZE_T, sendto, int fd, void *buf, SIZE_T len, int flags,
+ void *dstaddr, int addrlen) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sendto, fd, buf, len, flags, dstaddr, addrlen);
+ if (fd >= 0) {
+ COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);
+ }
+ // Can't check dstaddr as it may have uninitialized padding at the end.
+ SSIZE_T res = REAL(sendto)(fd, buf, len, flags, dstaddr, addrlen);
+ if (common_flags()->intercept_send && res > 0)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, buf, Min((SIZE_T)res, len));
+ return res;
+}
+#define INIT_SEND_SENDTO \
+ COMMON_INTERCEPT_FUNCTION(send); \
+ COMMON_INTERCEPT_FUNCTION(sendto);
+#else
+#define INIT_SEND_SENDTO
+#endif
+
+#if SANITIZER_INTERCEPT_EVENTFD_READ_WRITE
+INTERCEPTOR(int, eventfd_read, int fd, u64 *value) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, eventfd_read, fd, value);
+ COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ int res = REAL(eventfd_read)(fd, value);
+ if (res == 0) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, value, sizeof(*value));
+ if (fd >= 0) COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
+ }
+ return res;
+}
+INTERCEPTOR(int, eventfd_write, int fd, u64 value) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, eventfd_write, fd, value);
+ if (fd >= 0) {
+ COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd);
+ }
+ int res = REAL(eventfd_write)(fd, value);
+ return res;
+}
+#define INIT_EVENTFD_READ_WRITE \
+ COMMON_INTERCEPT_FUNCTION(eventfd_read); \
+ COMMON_INTERCEPT_FUNCTION(eventfd_write)
+#else
+#define INIT_EVENTFD_READ_WRITE
+#endif
+
+#if SANITIZER_INTERCEPT_STAT
+INTERCEPTOR(int, stat, const char *path, void *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, stat, path, buf);
+ if (common_flags()->intercept_stat)
+ COMMON_INTERCEPTOR_READ_STRING(ctx, path, 0);
+ int res = REAL(stat)(path, buf);
+ if (!res)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, __sanitizer::struct_stat_sz);
+ return res;
+}
+#define INIT_STAT COMMON_INTERCEPT_FUNCTION(stat)
+#else
+#define INIT_STAT
+#endif
+
+#if SANITIZER_INTERCEPT_LSTAT
+INTERCEPTOR(int, lstat, const char *path, void *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, lstat, path, buf);
+ if (common_flags()->intercept_stat)
+ COMMON_INTERCEPTOR_READ_STRING(ctx, path, 0);
+ int res = REAL(lstat)(path, buf);
+ if (!res)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, __sanitizer::struct_stat_sz);
+ return res;
+}
+#define INIT_LSTAT COMMON_INTERCEPT_FUNCTION(lstat)
+#else
+#define INIT_LSTAT
+#endif
+
+#if SANITIZER_INTERCEPT___XSTAT
+INTERCEPTOR(int, __xstat, int version, const char *path, void *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __xstat, version, path, buf);
+ if (common_flags()->intercept_stat)
+ COMMON_INTERCEPTOR_READ_STRING(ctx, path, 0);
+ int res = REAL(__xstat)(version, path, buf);
+ if (!res)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, __sanitizer::struct_stat_sz);
+ return res;
+}
+#define INIT___XSTAT COMMON_INTERCEPT_FUNCTION(__xstat)
+#else
+#define INIT___XSTAT
+#endif
+
+#if SANITIZER_INTERCEPT___XSTAT64
+INTERCEPTOR(int, __xstat64, int version, const char *path, void *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __xstat64, version, path, buf);
+ if (common_flags()->intercept_stat)
+ COMMON_INTERCEPTOR_READ_STRING(ctx, path, 0);
+ int res = REAL(__xstat64)(version, path, buf);
+ if (!res)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, __sanitizer::struct_stat64_sz);
+ return res;
+}
+#define INIT___XSTAT64 COMMON_INTERCEPT_FUNCTION(__xstat64)
+#else
+#define INIT___XSTAT64
+#endif
+
+#if SANITIZER_INTERCEPT___LXSTAT
+INTERCEPTOR(int, __lxstat, int version, const char *path, void *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __lxstat, version, path, buf);
+ if (common_flags()->intercept_stat)
+ COMMON_INTERCEPTOR_READ_STRING(ctx, path, 0);
+ int res = REAL(__lxstat)(version, path, buf);
+ if (!res)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, __sanitizer::struct_stat_sz);
+ return res;
+}
+#define INIT___LXSTAT COMMON_INTERCEPT_FUNCTION(__lxstat)
+#else
+#define INIT___LXSTAT
+#endif
+
+#if SANITIZER_INTERCEPT___LXSTAT64
+INTERCEPTOR(int, __lxstat64, int version, const char *path, void *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, __lxstat64, version, path, buf);
+ if (common_flags()->intercept_stat)
+ COMMON_INTERCEPTOR_READ_STRING(ctx, path, 0);
+ int res = REAL(__lxstat64)(version, path, buf);
+ if (!res)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, __sanitizer::struct_stat64_sz);
+ return res;
+}
+#define INIT___LXSTAT64 COMMON_INTERCEPT_FUNCTION(__lxstat64)
+#else
+#define INIT___LXSTAT64
+#endif
+
+// FIXME: add other *stat interceptors
+
+#if SANITIZER_INTERCEPT_UTMP
+INTERCEPTOR(void *, getutent, int dummy) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getutent, dummy);
+ void *res = REAL(getutent)(dummy);
+ if (res)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, __sanitizer::struct_utmp_sz);
+ return res;
+}
+INTERCEPTOR(void *, getutid, void *ut) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getutid, ut);
+ void *res = REAL(getutid)(ut);
+ if (res)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, __sanitizer::struct_utmp_sz);
+ return res;
+}
+INTERCEPTOR(void *, getutline, void *ut) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getutline, ut);
+ void *res = REAL(getutline)(ut);
+ if (res)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, __sanitizer::struct_utmp_sz);
+ return res;
+}
+#define INIT_UTMP \
+ COMMON_INTERCEPT_FUNCTION(getutent); \
+ COMMON_INTERCEPT_FUNCTION(getutid); \
+ COMMON_INTERCEPT_FUNCTION(getutline);
+#else
+#define INIT_UTMP
+#endif
+
+#if SANITIZER_INTERCEPT_UTMPX
+INTERCEPTOR(void *, getutxent, int dummy) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getutxent, dummy);
+ void *res = REAL(getutxent)(dummy);
+ if (res)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, __sanitizer::struct_utmpx_sz);
+ return res;
+}
+INTERCEPTOR(void *, getutxid, void *ut) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getutxid, ut);
+ void *res = REAL(getutxid)(ut);
+ if (res)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, __sanitizer::struct_utmpx_sz);
+ return res;
+}
+INTERCEPTOR(void *, getutxline, void *ut) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getutxline, ut);
+ void *res = REAL(getutxline)(ut);
+ if (res)
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(res, __sanitizer::struct_utmpx_sz);
+ return res;
+}
+INTERCEPTOR(void *, pututxline, const void *ut) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, pututxline, ut);
+ if (ut)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, ut, __sanitizer::struct_utmpx_sz);
+ void *res = REAL(pututxline)(ut);
+ if (res)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, __sanitizer::struct_utmpx_sz);
+ return res;
+}
+#define INIT_UTMPX \
+ COMMON_INTERCEPT_FUNCTION(getutxent); \
+ COMMON_INTERCEPT_FUNCTION(getutxid); \
+ COMMON_INTERCEPT_FUNCTION(getutxline); \
+ COMMON_INTERCEPT_FUNCTION(pututxline);
+#else
+#define INIT_UTMPX
+#endif
+
+#if SANITIZER_INTERCEPT_GETLOADAVG
+INTERCEPTOR(int, getloadavg, double *loadavg, int nelem) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getloadavg, loadavg, nelem);
+ int res = REAL(getloadavg)(loadavg, nelem);
+ if (res > 0)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, loadavg, res * sizeof(*loadavg));
+ return res;
+}
+#define INIT_GETLOADAVG \
+ COMMON_INTERCEPT_FUNCTION(getloadavg);
+#else
+#define INIT_GETLOADAVG
+#endif
+
+#if SANITIZER_INTERCEPT_MCHECK_MPROBE
+INTERCEPTOR(int, mcheck, void (*abortfunc)(int mstatus)) {
+ return 0;
+}
+
+INTERCEPTOR(int, mcheck_pedantic, void (*abortfunc)(int mstatus)) {
+ return 0;
+}
+
+INTERCEPTOR(int, mprobe, void *ptr) {
+ return 0;
+}
+#endif
+
+INTERCEPTOR(SIZE_T, wcslen, const wchar_t *s) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, wcslen, s);
+ SIZE_T res = REAL(wcslen)(s);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s, sizeof(wchar_t) * (res + 1));
+ return res;
+}
+
+INTERCEPTOR(SIZE_T, wcsnlen, const wchar_t *s, SIZE_T n) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, wcsnlen, s, n);
+ SIZE_T res = REAL(wcsnlen)(s, n);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s, sizeof(wchar_t) * Min(res + 1, n));
+ return res;
+}
+#define INIT_WCSLEN \
+ COMMON_INTERCEPT_FUNCTION(wcslen); \
+ COMMON_INTERCEPT_FUNCTION(wcsnlen);
+
+#if SANITIZER_INTERCEPT_WCSCAT
+INTERCEPTOR(wchar_t *, wcscat, wchar_t *dst, const wchar_t *src) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, wcscat, dst, src);
+ SIZE_T src_size = REAL(wcslen)(src);
+ SIZE_T dst_size = REAL(wcslen)(dst);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, (src_size + 1) * sizeof(wchar_t));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, dst, (dst_size + 1) * sizeof(wchar_t));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst + dst_size,
+ (src_size + 1) * sizeof(wchar_t));
+ return REAL(wcscat)(dst, src); // NOLINT
+}
+
+INTERCEPTOR(wchar_t *, wcsncat, wchar_t *dst, const wchar_t *src, SIZE_T n) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, wcsncat, dst, src, n);
+ SIZE_T src_size = REAL(wcsnlen)(src, n);
+ SIZE_T dst_size = REAL(wcslen)(dst);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src,
+ Min(src_size + 1, n) * sizeof(wchar_t));
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, dst, (dst_size + 1) * sizeof(wchar_t));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst + dst_size,
+ (src_size + 1) * sizeof(wchar_t));
+ return REAL(wcsncat)(dst, src, n); // NOLINT
+}
+#define INIT_WCSCAT \
+ COMMON_INTERCEPT_FUNCTION(wcscat); \
+ COMMON_INTERCEPT_FUNCTION(wcsncat);
+#else
+#define INIT_WCSCAT
+#endif
+
+#if SANITIZER_INTERCEPT_WCSDUP
+INTERCEPTOR(wchar_t *, wcsdup, wchar_t *s) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, wcsdup, s);
+ SIZE_T len = REAL(wcslen)(s);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, s, sizeof(wchar_t) * (len + 1));
+ wchar_t *result = REAL(wcsdup)(s);
+ if (result)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, result, sizeof(wchar_t) * (len + 1));
+ return result;
+}
+
+#define INIT_WCSDUP COMMON_INTERCEPT_FUNCTION(wcsdup);
+#else
+#define INIT_WCSDUP
+#endif
+
+#if SANITIZER_INTERCEPT_STRXFRM
+static SIZE_T RealStrLen(const char *str) { return REAL(strlen)(str); }
+
+static SIZE_T RealStrLen(const wchar_t *str) { return REAL(wcslen)(str); }
+
+#define STRXFRM_INTERCEPTOR_IMPL(strxfrm, dest, src, len, ...) \
+ { \
+ void *ctx; \
+ COMMON_INTERCEPTOR_ENTER(ctx, strxfrm, dest, src, len, ##__VA_ARGS__); \
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, \
+ sizeof(*src) * (RealStrLen(src) + 1)); \
+ SIZE_T res = REAL(strxfrm)(dest, src, len, ##__VA_ARGS__); \
+ if (res < len) \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dest, sizeof(*src) * (res + 1)); \
+ return res; \
+ }
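+// strxfrm() returns the length the transformed string requires, not the
+// number of bytes written; dest (including its NUL terminator) is only
+// guaranteed valid when the result is < len, which is why the write is
+// recorded conditionally above.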
+
+INTERCEPTOR(SIZE_T, strxfrm, char *dest, const char *src, SIZE_T len) {
+ STRXFRM_INTERCEPTOR_IMPL(strxfrm, dest, src, len);
+}
+
+INTERCEPTOR(SIZE_T, strxfrm_l, char *dest, const char *src, SIZE_T len,
+ void *locale) {
+ STRXFRM_INTERCEPTOR_IMPL(strxfrm_l, dest, src, len, locale);
+}
+
+#define INIT_STRXFRM \
+ COMMON_INTERCEPT_FUNCTION(strxfrm); \
+ COMMON_INTERCEPT_FUNCTION(strxfrm_l);
+#else
+#define INIT_STRXFRM
+#endif
+
+#if SANITIZER_INTERCEPT___STRXFRM_L
+INTERCEPTOR(SIZE_T, __strxfrm_l, char *dest, const char *src, SIZE_T len,
+ void *locale) {
+ STRXFRM_INTERCEPTOR_IMPL(__strxfrm_l, dest, src, len, locale);
+}
+
+#define INIT___STRXFRM_L COMMON_INTERCEPT_FUNCTION(__strxfrm_l);
+#else
+#define INIT___STRXFRM_L
+#endif
+
+#if SANITIZER_INTERCEPT_WCSXFRM
+INTERCEPTOR(SIZE_T, wcsxfrm, wchar_t *dest, const wchar_t *src, SIZE_T len) {
+ STRXFRM_INTERCEPTOR_IMPL(wcsxfrm, dest, src, len);
+}
+
+INTERCEPTOR(SIZE_T, wcsxfrm_l, wchar_t *dest, const wchar_t *src, SIZE_T len,
+ void *locale) {
+ STRXFRM_INTERCEPTOR_IMPL(wcsxfrm_l, dest, src, len, locale);
+}
+
+#define INIT_WCSXFRM \
+ COMMON_INTERCEPT_FUNCTION(wcsxfrm); \
+ COMMON_INTERCEPT_FUNCTION(wcsxfrm_l);
+#else
+#define INIT_WCSXFRM
+#endif
+
+#if SANITIZER_INTERCEPT___WCSXFRM_L
+INTERCEPTOR(SIZE_T, __wcsxfrm_l, wchar_t *dest, const wchar_t *src, SIZE_T len,
+ void *locale) {
+ STRXFRM_INTERCEPTOR_IMPL(__wcsxfrm_l, dest, src, len, locale);
+}
+
+#define INIT___WCSXFRM_L COMMON_INTERCEPT_FUNCTION(__wcsxfrm_l);
+#else
+#define INIT___WCSXFRM_L
+#endif
+
+#if SANITIZER_INTERCEPT_ACCT
+INTERCEPTOR(int, acct, const char *file) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, acct, file);
+ if (file)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, file, REAL(strlen)(file) + 1);
+ return REAL(acct)(file);
+}
+#define INIT_ACCT COMMON_INTERCEPT_FUNCTION(acct)
+#else
+#define INIT_ACCT
+#endif
+
+#if SANITIZER_INTERCEPT_USER_FROM_UID
+INTERCEPTOR(const char *, user_from_uid, u32 uid, int nouser) {
+ void *ctx;
+ const char *user;
+ COMMON_INTERCEPTOR_ENTER(ctx, user_from_uid, uid, nouser);
+ user = REAL(user_from_uid)(uid, nouser);
+ if (user)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, user, REAL(strlen)(user) + 1);
+ return user;
+}
+#define INIT_USER_FROM_UID COMMON_INTERCEPT_FUNCTION(user_from_uid)
+#else
+#define INIT_USER_FROM_UID
+#endif
+
+#if SANITIZER_INTERCEPT_UID_FROM_USER
+INTERCEPTOR(int, uid_from_user, const char *name, u32 *uid) {
+ void *ctx;
+ int res;
+ COMMON_INTERCEPTOR_ENTER(ctx, uid_from_user, name, uid);
+ if (name)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ res = REAL(uid_from_user)(name, uid);
+ if (uid)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, uid, sizeof(*uid));
+ return res;
+}
+#define INIT_UID_FROM_USER COMMON_INTERCEPT_FUNCTION(uid_from_user)
+#else
+#define INIT_UID_FROM_USER
+#endif
+
+#if SANITIZER_INTERCEPT_GROUP_FROM_GID
+INTERCEPTOR(const char *, group_from_gid, u32 gid, int nogroup) {
+ void *ctx;
+ const char *group;
+ COMMON_INTERCEPTOR_ENTER(ctx, group_from_gid, gid, nogroup);
+ group = REAL(group_from_gid)(gid, nogroup);
+ if (group)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, group, REAL(strlen)(group) + 1);
+ return group;
+}
+#define INIT_GROUP_FROM_GID COMMON_INTERCEPT_FUNCTION(group_from_gid)
+#else
+#define INIT_GROUP_FROM_GID
+#endif
+
+#if SANITIZER_INTERCEPT_GID_FROM_GROUP
+INTERCEPTOR(int, gid_from_group, const char *group, u32 *gid) {
+ void *ctx;
+ int res;
+ COMMON_INTERCEPTOR_ENTER(ctx, gid_from_group, group, gid);
+ if (group)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, group, REAL(strlen)(group) + 1);
+ res = REAL(gid_from_group)(group, gid);
+ if (gid)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, gid, sizeof(*gid));
+ return res;
+}
+#define INIT_GID_FROM_GROUP COMMON_INTERCEPT_FUNCTION(gid_from_group)
+#else
+#define INIT_GID_FROM_GROUP
+#endif
+
+#if SANITIZER_INTERCEPT_ACCESS
+INTERCEPTOR(int, access, const char *path, int mode) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, access, path, mode);
+ if (path)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ return REAL(access)(path, mode);
+}
+#define INIT_ACCESS COMMON_INTERCEPT_FUNCTION(access)
+#else
+#define INIT_ACCESS
+#endif
+
+#if SANITIZER_INTERCEPT_FACCESSAT
+INTERCEPTOR(int, faccessat, int fd, const char *path, int mode, int flags) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, faccessat, fd, path, mode, flags);
+ if (path)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ return REAL(faccessat)(fd, path, mode, flags);
+}
+#define INIT_FACCESSAT COMMON_INTERCEPT_FUNCTION(faccessat)
+#else
+#define INIT_FACCESSAT
+#endif
+
+#if SANITIZER_INTERCEPT_GETGROUPLIST
+INTERCEPTOR(int, getgrouplist, const char *name, u32 basegid, u32 *groups,
+ int *ngroups) {
+ void *ctx;
+ int res;
+ COMMON_INTERCEPTOR_ENTER(ctx, getgrouplist, name, basegid, groups, ngroups);
+ if (name)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ if (ngroups)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, ngroups, sizeof(*ngroups));
+ res = REAL(getgrouplist)(name, basegid, groups, ngroups);
+ if (!res && groups && ngroups) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, groups, sizeof(*groups) * (*ngroups));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ngroups, sizeof(*ngroups));
+ }
+ return res;
+}
+
+#define INIT_GETGROUPLIST COMMON_INTERCEPT_FUNCTION(getgrouplist);
+#else
+#define INIT_GETGROUPLIST
+#endif
+
+#if SANITIZER_INTERCEPT_GETGROUPMEMBERSHIP
+INTERCEPTOR(int, getgroupmembership, const char *name, u32 basegid, u32 *groups,
+ int maxgrp, int *ngroups) {
+ void *ctx;
+ int res;
+ COMMON_INTERCEPTOR_ENTER(ctx, getgroupmembership, name, basegid, groups,
+ maxgrp, ngroups);
+ if (name)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ res = REAL(getgroupmembership)(name, basegid, groups, maxgrp, ngroups);
+ if (!res && groups && ngroups) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, groups, sizeof(*groups) * (*ngroups));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ngroups, sizeof(*ngroups));
+ }
+ return res;
+}
+
+#define INIT_GETGROUPMEMBERSHIP COMMON_INTERCEPT_FUNCTION(getgroupmembership);
+#else
+#define INIT_GETGROUPMEMBERSHIP
+#endif
+
+#if SANITIZER_INTERCEPT_READLINK
+INTERCEPTOR(SSIZE_T, readlink, const char *path, char *buf, SIZE_T bufsiz) {
+ void* ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, readlink, path, buf, bufsiz);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ SSIZE_T res = REAL(readlink)(path, buf, bufsiz);
+ if (res > 0)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, res);
+ return res;
+}
+
+#define INIT_READLINK COMMON_INTERCEPT_FUNCTION(readlink)
+#else
+#define INIT_READLINK
+#endif
+
+#if SANITIZER_INTERCEPT_READLINKAT
+INTERCEPTOR(SSIZE_T, readlinkat, int dirfd, const char *path, char *buf,
+ SIZE_T bufsiz) {
+ void* ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, readlinkat, dirfd, path, buf, bufsiz);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ SSIZE_T res = REAL(readlinkat)(dirfd, path, buf, bufsiz);
+ if (res > 0)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, res);
+ return res;
+}
+
+#define INIT_READLINKAT COMMON_INTERCEPT_FUNCTION(readlinkat)
+#else
+#define INIT_READLINKAT
+#endif
+
+#if SANITIZER_INTERCEPT_NAME_TO_HANDLE_AT
+INTERCEPTOR(int, name_to_handle_at, int dirfd, const char *pathname,
+ struct file_handle *handle, int *mount_id, int flags) {
+ void* ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, name_to_handle_at, dirfd, pathname, handle,
+ mount_id, flags);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, pathname, REAL(strlen)(pathname) + 1);
+
+ __sanitizer_file_handle *sanitizer_handle =
+ reinterpret_cast<__sanitizer_file_handle*>(handle);
+ COMMON_INTERCEPTOR_READ_RANGE(
+ ctx, &sanitizer_handle->handle_bytes,
+ sizeof(sanitizer_handle->handle_bytes));
+
+ int res = REAL(name_to_handle_at)(dirfd, pathname, handle, mount_id, flags);
+ if (!res) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(
+ ctx, &sanitizer_handle->handle_bytes,
+ sizeof(sanitizer_handle->handle_bytes));
+ COMMON_INTERCEPTOR_WRITE_RANGE(
+ ctx, &sanitizer_handle->handle_type,
+ sizeof(sanitizer_handle->handle_type));
+ COMMON_INTERCEPTOR_WRITE_RANGE(
+ ctx, &sanitizer_handle->f_handle, sanitizer_handle->handle_bytes);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, mount_id, sizeof(*mount_id));
+ }
+ return res;
+}
+
+#define INIT_NAME_TO_HANDLE_AT COMMON_INTERCEPT_FUNCTION(name_to_handle_at)
+#else
+#define INIT_NAME_TO_HANDLE_AT
+#endif
+
+#if SANITIZER_INTERCEPT_OPEN_BY_HANDLE_AT
+INTERCEPTOR(int, open_by_handle_at, int mount_fd, struct file_handle* handle,
+ int flags) {
+ void* ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, open_by_handle_at, mount_fd, handle, flags);
+
+ __sanitizer_file_handle *sanitizer_handle =
+ reinterpret_cast<__sanitizer_file_handle*>(handle);
+ COMMON_INTERCEPTOR_READ_RANGE(
+ ctx, &sanitizer_handle->handle_bytes,
+ sizeof(sanitizer_handle->handle_bytes));
+ COMMON_INTERCEPTOR_READ_RANGE(
+ ctx, &sanitizer_handle->handle_type,
+ sizeof(sanitizer_handle->handle_type));
+ COMMON_INTERCEPTOR_READ_RANGE(
+ ctx, &sanitizer_handle->f_handle, sanitizer_handle->handle_bytes);
+
+ return REAL(open_by_handle_at)(mount_fd, handle, flags);
+}
+
+#define INIT_OPEN_BY_HANDLE_AT COMMON_INTERCEPT_FUNCTION(open_by_handle_at)
+#else
+#define INIT_OPEN_BY_HANDLE_AT
+#endif
+
+#if SANITIZER_INTERCEPT_STRLCPY
+INTERCEPTOR(SIZE_T, strlcpy, char *dst, char *src, SIZE_T size) {
+ void *ctx;
+ SIZE_T res;
+ COMMON_INTERCEPTOR_ENTER(ctx, strlcpy, dst, src, size);
+ if (src) {
+    // Keep strnlen as a macro argument, as the macro may ignore it.
+ COMMON_INTERCEPTOR_READ_STRING(
+ ctx, src, Min(internal_strnlen(src, size), size - 1) + 1);
+ }
+ res = REAL(strlcpy)(dst, src, size);
+ COMMON_INTERCEPTOR_COPY_STRING(ctx, dst, src, REAL(strlen)(dst) + 1);
+ return res;
+}
+
+INTERCEPTOR(SIZE_T, strlcat, char *dst, char *src, SIZE_T size) {
+ void *ctx;
+ SIZE_T len = 0;
+ COMMON_INTERCEPTOR_ENTER(ctx, strlcat, dst, src, size);
+ // src is checked in the strlcpy() interceptor
+ if (dst) {
+ len = internal_strnlen(dst, size);
+ COMMON_INTERCEPTOR_READ_STRING(ctx, dst, Min(len, size - 1) + 1);
+ }
+ // Reuse the rest of the code in the strlcpy() interceptor
+ return WRAP(strlcpy)(dst + len, src, size - len) + len;
+}
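+// Sketch of the arithmetic: with dst = "ab", src = "cd", size = 8, len is 2
+// and WRAP(strlcpy)(dst + 2, "cd", 6) returns 2, so strlcat() reports 4,
+// the length of the string it tried to create, matching the BSD contract.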
+#define INIT_STRLCPY \
+ COMMON_INTERCEPT_FUNCTION(strlcpy); \
+ COMMON_INTERCEPT_FUNCTION(strlcat);
+#else
+#define INIT_STRLCPY
+#endif
+
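+// In the mmap/mprotect interceptors below, the NOTHING_IS_INITIALIZED check
+// matters because these calls can arrive (e.g. from the dynamic loader)
+// before the runtime is fully set up; falling back to the internal_*
+// wrappers avoids touching interceptor state that may not exist yet.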
+#if SANITIZER_INTERCEPT_MMAP
+INTERCEPTOR(void *, mmap, void *addr, SIZE_T sz, int prot, int flags, int fd,
+ OFF_T off) {
+ void *ctx;
+ if (common_flags()->detect_write_exec)
+ ReportMmapWriteExec(prot);
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
+ return (void *)internal_mmap(addr, sz, prot, flags, fd, off);
+ COMMON_INTERCEPTOR_ENTER(ctx, mmap, addr, sz, prot, flags, fd, off);
+ COMMON_INTERCEPTOR_MMAP_IMPL(ctx, mmap, addr, sz, prot, flags, fd, off);
+}
+
+INTERCEPTOR(int, mprotect, void *addr, SIZE_T sz, int prot) {
+ void *ctx;
+ if (common_flags()->detect_write_exec)
+ ReportMmapWriteExec(prot);
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
+ return (int)internal_mprotect(addr, sz, prot);
+ COMMON_INTERCEPTOR_ENTER(ctx, mprotect, addr, sz, prot);
+ MprotectMallocZones(addr, prot);
+ return REAL(mprotect)(addr, sz, prot);
+}
+#define INIT_MMAP \
+ COMMON_INTERCEPT_FUNCTION(mmap); \
+ COMMON_INTERCEPT_FUNCTION(mprotect);
+#else
+#define INIT_MMAP
+#endif
+
+#if SANITIZER_INTERCEPT_MMAP64
+INTERCEPTOR(void *, mmap64, void *addr, SIZE_T sz, int prot, int flags, int fd,
+ OFF64_T off) {
+ void *ctx;
+ if (common_flags()->detect_write_exec)
+ ReportMmapWriteExec(prot);
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
+ return (void *)internal_mmap(addr, sz, prot, flags, fd, off);
+ COMMON_INTERCEPTOR_ENTER(ctx, mmap64, addr, sz, prot, flags, fd, off);
+ COMMON_INTERCEPTOR_MMAP_IMPL(ctx, mmap64, addr, sz, prot, flags, fd, off);
+}
+#define INIT_MMAP64 COMMON_INTERCEPT_FUNCTION(mmap64);
+#else
+#define INIT_MMAP64
+#endif
+
+#if SANITIZER_INTERCEPT_DEVNAME
+INTERCEPTOR(char *, devname, u64 dev, u32 type) {
+ void *ctx;
+ char *name;
+ COMMON_INTERCEPTOR_ENTER(ctx, devname, dev, type);
+ name = REAL(devname)(dev, type);
+ if (name)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ return name;
+}
+#define INIT_DEVNAME COMMON_INTERCEPT_FUNCTION(devname);
+#else
+#define INIT_DEVNAME
+#endif
+
+#if SANITIZER_INTERCEPT_DEVNAME_R
+#if SANITIZER_NETBSD
+#define DEVNAME_R_RETTYPE int
+#define DEVNAME_R_SUCCESS(x) (!(x))
+#else
+#define DEVNAME_R_RETTYPE char*
+#define DEVNAME_R_SUCCESS(x) (x)
+#endif
+INTERCEPTOR(DEVNAME_R_RETTYPE, devname_r, u64 dev, u32 type, char *path,
+ uptr len) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, devname_r, dev, type, path, len);
+ DEVNAME_R_RETTYPE res = REAL(devname_r)(dev, type, path, len);
+ if (DEVNAME_R_SUCCESS(res))
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ return res;
+}
+#define INIT_DEVNAME_R COMMON_INTERCEPT_FUNCTION(devname_r);
+#else
+#define INIT_DEVNAME_R
+#endif
+
+#if SANITIZER_INTERCEPT_FGETLN
+INTERCEPTOR(char *, fgetln, __sanitizer_FILE *stream, SIZE_T *len) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fgetln, stream, len);
+ char *str = REAL(fgetln)(stream, len);
+ if (str && len) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, len, sizeof(*len));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, str, *len);
+ }
+ return str;
+}
+#define INIT_FGETLN COMMON_INTERCEPT_FUNCTION(fgetln)
+#else
+#define INIT_FGETLN
+#endif
+
+#if SANITIZER_INTERCEPT_STRMODE
+INTERCEPTOR(void, strmode, u32 mode, char *bp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strmode, mode, bp);
+ REAL(strmode)(mode, bp);
+ if (bp)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, bp, REAL(strlen)(bp) + 1);
+}
+#define INIT_STRMODE COMMON_INTERCEPT_FUNCTION(strmode)
+#else
+#define INIT_STRMODE
+#endif
+
+#if SANITIZER_INTERCEPT_TTYENT
+INTERCEPTOR(struct __sanitizer_ttyent *, getttyent, void) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getttyent);
+ struct __sanitizer_ttyent *ttyent = REAL(getttyent)();
+ if (ttyent)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ttyent, struct_ttyent_sz);
+ return ttyent;
+}
+INTERCEPTOR(struct __sanitizer_ttyent *, getttynam, char *name) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getttynam, name);
+ if (name)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ struct __sanitizer_ttyent *ttyent = REAL(getttynam)(name);
+ if (ttyent)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ttyent, struct_ttyent_sz);
+ return ttyent;
+}
+INTERCEPTOR(int, setttyentpath, char *path) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, setttyentpath, path);
+ if (path)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ return REAL(setttyentpath)(path);
+}
+#define INIT_TTYENT \
+ COMMON_INTERCEPT_FUNCTION(getttyent); \
+ COMMON_INTERCEPT_FUNCTION(getttynam); \
+ COMMON_INTERCEPT_FUNCTION(setttyentpath)
+#else
+#define INIT_TTYENT
+#endif
+
+#if SANITIZER_INTERCEPT_PROTOENT
+INTERCEPTOR(struct __sanitizer_protoent *, getprotoent) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getprotoent);
+ struct __sanitizer_protoent *p = REAL(getprotoent)();
+ if (p) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p));
+
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->p_name, REAL(strlen)(p->p_name) + 1);
+
+    SIZE_T pp_size = 1; // The extra 1 counts the trailing NULL pointer
+
+ for (char **pp = p->p_aliases; *pp; ++pp, ++pp_size)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *pp, REAL(strlen)(*pp) + 1);
+
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->p_aliases,
+ pp_size * sizeof(char **));
+ }
+ return p;
+}
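+// For p_aliases = {"x", "y", NULL} the loop above unpoisons both alias
+// strings and leaves pp_size at 3, so the final WRITE_RANGE covers all
+// three pointer slots, including the NULL terminator.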
+
+INTERCEPTOR(struct __sanitizer_protoent *, getprotobyname, const char *name) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getprotobyname, name);
+ if (name)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ struct __sanitizer_protoent *p = REAL(getprotobyname)(name);
+ if (p) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p));
+
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->p_name, REAL(strlen)(p->p_name) + 1);
+
+    SIZE_T pp_size = 1; // Start at one for the terminating NULL pointer
+
+ for (char **pp = p->p_aliases; *pp; ++pp, ++pp_size)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *pp, REAL(strlen)(*pp) + 1);
+
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->p_aliases,
+ pp_size * sizeof(char **));
+ }
+ return p;
+}
+
+INTERCEPTOR(struct __sanitizer_protoent *, getprotobynumber, int proto) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getprotobynumber, proto);
+ struct __sanitizer_protoent *p = REAL(getprotobynumber)(proto);
+ if (p) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p, sizeof(*p));
+
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->p_name, REAL(strlen)(p->p_name) + 1);
+
+    SIZE_T pp_size = 1; // Start at one for the terminating NULL pointer
+
+ for (char **pp = p->p_aliases; *pp; ++pp, ++pp_size)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *pp, REAL(strlen)(*pp) + 1);
+
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, p->p_aliases,
+ pp_size * sizeof(char **));
+ }
+ return p;
+}
+#define INIT_PROTOENT \
+ COMMON_INTERCEPT_FUNCTION(getprotoent); \
+ COMMON_INTERCEPT_FUNCTION(getprotobyname); \
+ COMMON_INTERCEPT_FUNCTION(getprotobynumber)
+#else
+#define INIT_PROTOENT
+#endif
+
+#if SANITIZER_INTERCEPT_NETENT
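+// Same pattern as the protoent accessors above, applied to struct netent
+// and its n_aliases vector.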
+INTERCEPTOR(struct __sanitizer_netent *, getnetent) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getnetent);
+ struct __sanitizer_netent *n = REAL(getnetent)();
+ if (n) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n, sizeof(*n));
+
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_name, REAL(strlen)(n->n_name) + 1);
+
+    SIZE_T nn_size = 1; // Start at one for the terminating NULL pointer
+
+ for (char **nn = n->n_aliases; *nn; ++nn, ++nn_size)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *nn, REAL(strlen)(*nn) + 1);
+
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_aliases,
+ nn_size * sizeof(char **));
+ }
+ return n;
+}
+
+INTERCEPTOR(struct __sanitizer_netent *, getnetbyname, const char *name) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getnetbyname, name);
+ if (name)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ struct __sanitizer_netent *n = REAL(getnetbyname)(name);
+ if (n) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n, sizeof(*n));
+
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_name, REAL(strlen)(n->n_name) + 1);
+
+    SIZE_T nn_size = 1; // Start at one for the terminating NULL pointer
+
+ for (char **nn = n->n_aliases; *nn; ++nn, ++nn_size)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *nn, REAL(strlen)(*nn) + 1);
+
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_aliases,
+ nn_size * sizeof(char **));
+ }
+ return n;
+}
+
+INTERCEPTOR(struct __sanitizer_netent *, getnetbyaddr, u32 net, int type) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getnetbyaddr, net, type);
+ struct __sanitizer_netent *n = REAL(getnetbyaddr)(net, type);
+ if (n) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n, sizeof(*n));
+
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_name, REAL(strlen)(n->n_name) + 1);
+
+    SIZE_T nn_size = 1; // Start at one for the terminating NULL pointer
+
+ for (char **nn = n->n_aliases; *nn; ++nn, ++nn_size)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *nn, REAL(strlen)(*nn) + 1);
+
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, n->n_aliases,
+ nn_size * sizeof(char **));
+ }
+ return n;
+}
+#define INIT_NETENT \
+ COMMON_INTERCEPT_FUNCTION(getnetent); \
+ COMMON_INTERCEPT_FUNCTION(getnetbyname); \
+ COMMON_INTERCEPT_FUNCTION(getnetbyaddr)
+#else
+#define INIT_NETENT
+#endif
+
+#if SANITIZER_INTERCEPT_GETMNTINFO
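+// getmntinfo stores a pointer to an array of cnt filesystem records
+// through mntbufp: struct statvfs on NetBSD, struct statfs elsewhere.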
+INTERCEPTOR(int, getmntinfo, void **mntbufp, int flags) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getmntinfo, mntbufp, flags);
+ int cnt = REAL(getmntinfo)(mntbufp, flags);
+ if (cnt > 0 && mntbufp) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, mntbufp, sizeof(void *));
+ if (*mntbufp)
+#if SANITIZER_NETBSD
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *mntbufp, cnt * struct_statvfs_sz);
+#else
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *mntbufp, cnt * struct_statfs_sz);
+#endif
+ }
+ return cnt;
+}
+#define INIT_GETMNTINFO COMMON_INTERCEPT_FUNCTION(getmntinfo)
+#else
+#define INIT_GETMNTINFO
+#endif
+
+#if SANITIZER_INTERCEPT_MI_VECTOR_HASH
+INTERCEPTOR(void, mi_vector_hash, const void *key, SIZE_T len, u32 seed,
+ u32 hashes[3]) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, mi_vector_hash, key, len, seed, hashes);
+ if (key)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, key, len);
+ REAL(mi_vector_hash)(key, len, seed, hashes);
+ if (hashes)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, hashes, sizeof(hashes[0]) * 3);
+}
+#define INIT_MI_VECTOR_HASH COMMON_INTERCEPT_FUNCTION(mi_vector_hash)
+#else
+#define INIT_MI_VECTOR_HASH
+#endif
+
+#if SANITIZER_INTERCEPT_SETVBUF
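+// The stdio buffer-control functions may replace a FILE's internal buffer
+// pointers, so the FILE object is unpoisoned after each call.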
+INTERCEPTOR(int, setvbuf, __sanitizer_FILE *stream, char *buf, int mode,
+ SIZE_T size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, setvbuf, stream, buf, mode, size);
+ int ret = REAL(setvbuf)(stream, buf, mode, size);
+ if (buf)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, size);
+ if (stream)
+ unpoison_file(stream);
+ return ret;
+}
+
+INTERCEPTOR(void, setbuf, __sanitizer_FILE *stream, char *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, setbuf, stream, buf);
+ REAL(setbuf)(stream, buf);
+ if (buf) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, __sanitizer_bufsiz);
+ }
+ if (stream)
+ unpoison_file(stream);
+}
+
+INTERCEPTOR(void, setbuffer, __sanitizer_FILE *stream, char *buf, int mode) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, setbuffer, stream, buf, mode);
+ REAL(setbuffer)(stream, buf, mode);
+ if (buf) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, __sanitizer_bufsiz);
+ }
+ if (stream)
+ unpoison_file(stream);
+}
+
+INTERCEPTOR(void, setlinebuf, __sanitizer_FILE *stream) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, setlinebuf, stream);
+ REAL(setlinebuf)(stream);
+ if (stream)
+ unpoison_file(stream);
+}
+#define INIT_SETVBUF COMMON_INTERCEPT_FUNCTION(setvbuf); \
+ COMMON_INTERCEPT_FUNCTION(setbuf); \
+ COMMON_INTERCEPT_FUNCTION(setbuffer); \
+ COMMON_INTERCEPT_FUNCTION(setlinebuf)
+#else
+#define INIT_SETVBUF
+#endif
+
+#if SANITIZER_INTERCEPT_GETVFSSTAT
+INTERCEPTOR(int, getvfsstat, void *buf, SIZE_T bufsize, int flags) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getvfsstat, buf, bufsize, flags);
+ int ret = REAL(getvfsstat)(buf, bufsize, flags);
+ if (buf && ret > 0)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, ret * struct_statvfs_sz);
+ return ret;
+}
+#define INIT_GETVFSSTAT COMMON_INTERCEPT_FUNCTION(getvfsstat)
+#else
+#define INIT_GETVFSSTAT
+#endif
+
+#if SANITIZER_INTERCEPT_REGEX
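+// The regex_t object is treated as an opaque blob of struct_regex_sz
+// bytes; a successful regexec fills nmatch regmatch_t records.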
+INTERCEPTOR(int, regcomp, void *preg, const char *pattern, int cflags) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, regcomp, preg, pattern, cflags);
+ if (pattern)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, pattern, REAL(strlen)(pattern) + 1);
+ int res = REAL(regcomp)(preg, pattern, cflags);
+ if (!res)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, preg, struct_regex_sz);
+ return res;
+}
+INTERCEPTOR(int, regexec, const void *preg, const char *string, SIZE_T nmatch,
+ struct __sanitizer_regmatch *pmatch[], int eflags) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, regexec, preg, string, nmatch, pmatch, eflags);
+ if (preg)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, preg, struct_regex_sz);
+ if (string)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, string, REAL(strlen)(string) + 1);
+ int res = REAL(regexec)(preg, string, nmatch, pmatch, eflags);
+ if (!res && pmatch)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pmatch, nmatch * struct_regmatch_sz);
+ return res;
+}
+INTERCEPTOR(SIZE_T, regerror, int errcode, const void *preg, char *errbuf,
+ SIZE_T errbuf_size) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, regerror, errcode, preg, errbuf, errbuf_size);
+ if (preg)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, preg, struct_regex_sz);
+ SIZE_T res = REAL(regerror)(errcode, preg, errbuf, errbuf_size);
+  // With errbuf_size == 0 the call only reports the required size and
+  // does not write to errbuf.
+  if (errbuf && errbuf_size)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, errbuf, REAL(strlen)(errbuf) + 1);
+ return res;
+}
+INTERCEPTOR(void, regfree, const void *preg) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, regfree, preg);
+ if (preg)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, preg, struct_regex_sz);
+ REAL(regfree)(preg);
+}
+#define INIT_REGEX \
+ COMMON_INTERCEPT_FUNCTION(regcomp); \
+ COMMON_INTERCEPT_FUNCTION(regexec); \
+ COMMON_INTERCEPT_FUNCTION(regerror); \
+ COMMON_INTERCEPT_FUNCTION(regfree);
+#else
+#define INIT_REGEX
+#endif
+
+#if SANITIZER_INTERCEPT_REGEXSUB
+INTERCEPTOR(SSIZE_T, regnsub, char *buf, SIZE_T bufsiz, const char *sub,
+ const struct __sanitizer_regmatch *rm, const char *str) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, regnsub, buf, bufsiz, sub, rm, str);
+ if (sub)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, sub, REAL(strlen)(sub) + 1);
+ // The implementation demands and hardcodes 10 elements
+ if (rm)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, rm, 10 * struct_regmatch_sz);
+ if (str)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, str, REAL(strlen)(str) + 1);
+ SSIZE_T res = REAL(regnsub)(buf, bufsiz, sub, rm, str);
+ if (res > 0 && buf)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, REAL(strlen)(buf) + 1);
+ return res;
+}
+INTERCEPTOR(SSIZE_T, regasub, char **buf, const char *sub,
+ const struct __sanitizer_regmatch *rm, const char *sstr) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, regasub, buf, sub, rm, sstr);
+ if (sub)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, sub, REAL(strlen)(sub) + 1);
+  // The implementation demands and hardcodes 10 elements
+ if (rm)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, rm, 10 * struct_regmatch_sz);
+ if (sstr)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, sstr, REAL(strlen)(sstr) + 1);
+ SSIZE_T res = REAL(regasub)(buf, sub, rm, sstr);
+ if (res > 0 && buf) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, sizeof(char *));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *buf, REAL(strlen)(*buf) + 1);
+ }
+ return res;
+}
+
+#define INIT_REGEXSUB \
+ COMMON_INTERCEPT_FUNCTION(regnsub); \
+ COMMON_INTERCEPT_FUNCTION(regasub);
+#else
+#define INIT_REGEXSUB
+#endif
+
+#if SANITIZER_INTERCEPT_FTS
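+// fts(3) walks a file hierarchy. FTS and FTSENT are treated as opaque
+// blobs of struct_FTS_sz and struct_FTSENT_sz bytes respectively.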
+INTERCEPTOR(void *, fts_open, char *const *path_argv, int options,
+ int (*compar)(void **, void **)) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fts_open, path_argv, options, compar);
+ if (path_argv) {
+ for (char *const *pa = path_argv; ; ++pa) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, pa, sizeof(char **));
+ if (!*pa)
+ break;
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, *pa, REAL(strlen)(*pa) + 1);
+ }
+ }
+ // TODO(kamil): handle compar callback
+ void *fts = REAL(fts_open)(path_argv, options, compar);
+ if (fts)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, fts, struct_FTS_sz);
+ return fts;
+}
+
+INTERCEPTOR(void *, fts_read, void *ftsp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fts_read, ftsp);
+ if (ftsp)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, ftsp, struct_FTS_sz);
+ void *ftsent = REAL(fts_read)(ftsp);
+ if (ftsent)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ftsent, struct_FTSENT_sz);
+ return ftsent;
+}
+
+INTERCEPTOR(void *, fts_children, void *ftsp, int options) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fts_children, ftsp, options);
+ if (ftsp)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, ftsp, struct_FTS_sz);
+ void *ftsent = REAL(fts_children)(ftsp, options);
+ if (ftsent)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ftsent, struct_FTSENT_sz);
+ return ftsent;
+}
+
+INTERCEPTOR(int, fts_set, void *ftsp, void *f, int options) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fts_set, ftsp, f, options);
+ if (ftsp)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, ftsp, struct_FTS_sz);
+ if (f)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, f, struct_FTSENT_sz);
+ return REAL(fts_set)(ftsp, f, options);
+}
+
+INTERCEPTOR(int, fts_close, void *ftsp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fts_close, ftsp);
+ if (ftsp)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, ftsp, struct_FTS_sz);
+ return REAL(fts_close)(ftsp);
+}
+#define INIT_FTS \
+ COMMON_INTERCEPT_FUNCTION(fts_open); \
+ COMMON_INTERCEPT_FUNCTION(fts_read); \
+ COMMON_INTERCEPT_FUNCTION(fts_children); \
+ COMMON_INTERCEPT_FUNCTION(fts_set); \
+ COMMON_INTERCEPT_FUNCTION(fts_close);
+#else
+#define INIT_FTS
+#endif
+
+#if SANITIZER_INTERCEPT_SYSCTL
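+// sysctl can be reached before the interceptors are initialized, in which
+// case the call is routed to the internal syscall wrapper. On success the
+// kernel writes up to *oldlenp bytes through oldp and updates *oldlenp.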
+INTERCEPTOR(int, sysctl, int *name, unsigned int namelen, void *oldp,
+ SIZE_T *oldlenp, void *newp, SIZE_T newlen) {
+ void *ctx;
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
+ return internal_sysctl(name, namelen, oldp, oldlenp, newp, newlen);
+ COMMON_INTERCEPTOR_ENTER(ctx, sysctl, name, namelen, oldp, oldlenp, newp,
+ newlen);
+ if (name)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, name, namelen * sizeof(*name));
+ if (oldlenp)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, oldlenp, sizeof(*oldlenp));
+ if (newp && newlen)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, newp, newlen);
+ int res = REAL(sysctl)(name, namelen, oldp, oldlenp, newp, newlen);
+ if (!res) {
+ if (oldlenp) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, oldlenp, sizeof(*oldlenp));
+ if (oldp)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, oldp, *oldlenp);
+ }
+ }
+ return res;
+}
+
+INTERCEPTOR(int, sysctlbyname, char *sname, void *oldp, SIZE_T *oldlenp,
+ void *newp, SIZE_T newlen) {
+ void *ctx;
+ if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
+ return internal_sysctlbyname(sname, oldp, oldlenp, newp, newlen);
+ COMMON_INTERCEPTOR_ENTER(ctx, sysctlbyname, sname, oldp, oldlenp, newp,
+ newlen);
+ if (sname)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, sname, REAL(strlen)(sname) + 1);
+ if (oldlenp)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, oldlenp, sizeof(*oldlenp));
+ if (newp && newlen)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, newp, newlen);
+ int res = REAL(sysctlbyname)(sname, oldp, oldlenp, newp, newlen);
+ if (!res) {
+ if (oldlenp) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, oldlenp, sizeof(*oldlenp));
+ if (oldp)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, oldp, *oldlenp);
+ }
+ }
+ return res;
+}
+
+INTERCEPTOR(int, sysctlnametomib, const char *sname, int *name,
+ SIZE_T *namelenp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sysctlnametomib, sname, name, namelenp);
+ if (sname)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, sname, REAL(strlen)(sname) + 1);
+ if (namelenp)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, namelenp, sizeof(*namelenp));
+ int res = REAL(sysctlnametomib)(sname, name, namelenp);
+ if (!res) {
+ if (namelenp) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, namelenp, sizeof(*namelenp));
+ if (name)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, name, *namelenp * sizeof(*name));
+ }
+ }
+ return res;
+}
+
+#define INIT_SYSCTL \
+ COMMON_INTERCEPT_FUNCTION(sysctl); \
+ COMMON_INTERCEPT_FUNCTION(sysctlbyname); \
+ COMMON_INTERCEPT_FUNCTION(sysctlnametomib);
+#else
+#define INIT_SYSCTL
+#endif
+
+#if SANITIZER_INTERCEPT_ASYSCTL
+INTERCEPTOR(void *, asysctl, const int *name, SIZE_T namelen, SIZE_T *len) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, asysctl, name, namelen, len);
+ if (name)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, name, sizeof(*name) * namelen);
+ void *res = REAL(asysctl)(name, namelen, len);
+ if (res && len) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, len, sizeof(*len));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, *len);
+ }
+ return res;
+}
+
+INTERCEPTOR(void *, asysctlbyname, const char *sname, SIZE_T *len) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, asysctlbyname, sname, len);
+ if (sname)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, sname, REAL(strlen)(sname) + 1);
+ void *res = REAL(asysctlbyname)(sname, len);
+ if (res && len) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, len, sizeof(*len));
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, *len);
+ }
+ return res;
+}
+#define INIT_ASYSCTL \
+ COMMON_INTERCEPT_FUNCTION(asysctl); \
+ COMMON_INTERCEPT_FUNCTION(asysctlbyname);
+#else
+#define INIT_ASYSCTL
+#endif
+
+#if SANITIZER_INTERCEPT_SYSCTLGETMIBINFO
+INTERCEPTOR(int, sysctlgetmibinfo, char *sname, int *name,
+ unsigned int *namelenp, char *cname, SIZE_T *csz, void **rnode,
+ int v) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sysctlgetmibinfo, sname, name, namelenp, cname,
+ csz, rnode, v);
+ if (sname)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, sname, REAL(strlen)(sname) + 1);
+ if (namelenp)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, namelenp, sizeof(*namelenp));
+ if (csz)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, csz, sizeof(*csz));
+  // Skip rnode: it is mostly used internally and is not trivial to
+  // sanitize.
+ int res = REAL(sysctlgetmibinfo)(sname, name, namelenp, cname, csz, rnode, v);
+ if (!res) {
+ if (namelenp) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, namelenp, sizeof(*namelenp));
+ if (name)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, name, *namelenp * sizeof(*name));
+ }
+ if (csz) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, csz, sizeof(*csz));
+ if (cname)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cname, *csz);
+ }
+ }
+ return res;
+}
+#define INIT_SYSCTLGETMIBINFO \
+ COMMON_INTERCEPT_FUNCTION(sysctlgetmibinfo);
+#else
+#define INIT_SYSCTLGETMIBINFO
+#endif
+
+#if SANITIZER_INTERCEPT_NL_LANGINFO
+INTERCEPTOR(char *, nl_langinfo, long item) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, nl_langinfo, item);
+ char *ret = REAL(nl_langinfo)(item);
+ if (ret)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, REAL(strlen)(ret) + 1);
+ return ret;
+}
+#define INIT_NL_LANGINFO COMMON_INTERCEPT_FUNCTION(nl_langinfo)
+#else
+#define INIT_NL_LANGINFO
+#endif
+
+#if SANITIZER_INTERCEPT_MODCTL
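+// modctl(2) dispatches on operation; each branch below validates the
+// argument shape that the operation expects before calling through.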
+INTERCEPTOR(int, modctl, int operation, void *argp) {
+ void *ctx;
+ int ret;
+ COMMON_INTERCEPTOR_ENTER(ctx, modctl, operation, argp);
+
+ if (operation == modctl_load) {
+ if (argp) {
+ __sanitizer_modctl_load_t *ml = (__sanitizer_modctl_load_t *)argp;
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, ml, sizeof(*ml));
+ if (ml->ml_filename)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, ml->ml_filename,
+ REAL(strlen)(ml->ml_filename) + 1);
+ if (ml->ml_props)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, ml->ml_props, ml->ml_propslen);
+ }
+ ret = REAL(modctl)(operation, argp);
+ } else if (operation == modctl_unload) {
+ if (argp) {
+ const char *name = (const char *)argp;
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ }
+ ret = REAL(modctl)(operation, argp);
+ } else if (operation == modctl_stat) {
+    uptr iov_len = 0;
+ struct __sanitizer_iovec *iov = (struct __sanitizer_iovec *)argp;
+ if (iov) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, iov, sizeof(*iov));
+ iov_len = iov->iov_len;
+ }
+ ret = REAL(modctl)(operation, argp);
+ if (iov)
+ COMMON_INTERCEPTOR_WRITE_RANGE(
+ ctx, iov->iov_base, Min(iov_len, iov->iov_len));
+  } else {
+    // modctl_exists and any other operation take no pointer arguments
+    // that need checking.
+    ret = REAL(modctl)(operation, argp);
+  }
+
+ return ret;
+}
+#define INIT_MODCTL COMMON_INTERCEPT_FUNCTION(modctl)
+#else
+#define INIT_MODCTL
+#endif
+
+#if SANITIZER_INTERCEPT_STRTONUM
+INTERCEPTOR(long long, strtonum, const char *nptr, long long minval,
+ long long maxval, const char **errstr) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strtonum, nptr, minval, maxval, errstr);
+
+  // TODO(kamil): Implement strtoll as a common interceptor
+  // Parse with strtoimax first so that the usual checks on nptr and the
+  // bytes it actually consumed are performed; the value is discarded below.
+  char *real_endptr;
+  long long ret = (long long)REAL(strtoimax)(nptr, &real_endptr, 10);
+  StrtolFixAndCheck(ctx, nptr, nullptr, real_endptr, 10);
+
+ ret = REAL(strtonum)(nptr, minval, maxval, errstr);
+ if (errstr) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, errstr, sizeof(const char *));
+ if (*errstr)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *errstr, REAL(strlen)(*errstr) + 1);
+ }
+ return ret;
+}
+#define INIT_STRTONUM COMMON_INTERCEPT_FUNCTION(strtonum)
+#else
+#define INIT_STRTONUM
+#endif
+
+#if SANITIZER_INTERCEPT_FPARSELN
+INTERCEPTOR(char *, fparseln, __sanitizer_FILE *stream, SIZE_T *len,
+ SIZE_T *lineno, const char delim[3], int flags) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fparseln, stream, len, lineno, delim, flags);
+ if (lineno)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, lineno, sizeof(*lineno));
+ if (delim)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, delim, sizeof(delim[0]) * 3);
+ char *ret = REAL(fparseln)(stream, len, lineno, delim, flags);
+ if (ret) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, REAL(strlen)(ret) + 1);
+ if (len)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, len, sizeof(*len));
+ if (lineno)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, lineno, sizeof(*lineno));
+ }
+ return ret;
+}
+#define INIT_FPARSELN COMMON_INTERCEPT_FUNCTION(fparseln)
+#else
+#define INIT_FPARSELN
+#endif
+
+#if SANITIZER_INTERCEPT_STATVFS1
+INTERCEPTOR(int, statvfs1, const char *path, void *buf, int flags) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, statvfs1, path, buf, flags);
+ if (path) COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ int res = REAL(statvfs1)(path, buf, flags);
+ if (!res) COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statvfs_sz);
+ return res;
+}
+INTERCEPTOR(int, fstatvfs1, int fd, void *buf, int flags) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fstatvfs1, fd, buf, flags);
+ COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ int res = REAL(fstatvfs1)(fd, buf, flags);
+ if (!res) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, struct_statvfs_sz);
+ if (fd >= 0)
+ COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
+ }
+ return res;
+}
+#define INIT_STATVFS1 \
+ COMMON_INTERCEPT_FUNCTION(statvfs1); \
+ COMMON_INTERCEPT_FUNCTION(fstatvfs1);
+#else
+#define INIT_STATVFS1
+#endif
+
+#if SANITIZER_INTERCEPT_STRTOI
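+// strtoi(3)/strtou(3) are NetBSD's range-checked strtol/strtoul variants;
+// the endptr fixup reuses the common strtol machinery.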
+INTERCEPTOR(INTMAX_T, strtoi, const char *nptr, char **endptr, int base,
+ INTMAX_T low, INTMAX_T high, int *rstatus) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strtoi, nptr, endptr, base, low, high, rstatus);
+ char *real_endptr;
+ INTMAX_T ret = REAL(strtoi)(nptr, &real_endptr, base, low, high, rstatus);
+ StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base);
+ if (rstatus)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, rstatus, sizeof(*rstatus));
+ return ret;
+}
+
+INTERCEPTOR(UINTMAX_T, strtou, const char *nptr, char **endptr, int base,
+ UINTMAX_T low, UINTMAX_T high, int *rstatus) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strtou, nptr, endptr, base, low, high, rstatus);
+ char *real_endptr;
+ UINTMAX_T ret = REAL(strtou)(nptr, &real_endptr, base, low, high, rstatus);
+ StrtolFixAndCheck(ctx, nptr, endptr, real_endptr, base);
+ if (rstatus)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, rstatus, sizeof(*rstatus));
+ return ret;
+}
+#define INIT_STRTOI \
+ COMMON_INTERCEPT_FUNCTION(strtoi); \
+ COMMON_INTERCEPT_FUNCTION(strtou)
+#else
+#define INIT_STRTOI
+#endif
+
+#if SANITIZER_INTERCEPT_CAPSICUM
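+// cap_rights_init/set/clear and cap_rights_is_set are variadic, so their
+// bodies are generated by the macros below, which forward __VA_ARGS__ to
+// the real functions.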
+#define CAP_RIGHTS_INIT_INTERCEPTOR(cap_rights_init, rights, ...) \
+ { \
+ void *ctx; \
+ COMMON_INTERCEPTOR_ENTER(ctx, cap_rights_init, rights, ##__VA_ARGS__); \
+ if (rights) \
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, rights, sizeof(*rights)); \
+ __sanitizer_cap_rights_t *ret = \
+ REAL(cap_rights_init)(rights, ##__VA_ARGS__); \
+ if (ret) \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, sizeof(*ret)); \
+ return ret; \
+ }
+
+#define CAP_RIGHTS_SET_INTERCEPTOR(cap_rights_set, rights, ...) \
+ { \
+ void *ctx; \
+ COMMON_INTERCEPTOR_ENTER(ctx, cap_rights_set, rights, ##__VA_ARGS__); \
+ if (rights) \
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, rights, sizeof(*rights)); \
+ __sanitizer_cap_rights_t *ret = \
+ REAL(cap_rights_set)(rights, ##__VA_ARGS__); \
+ if (ret) \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, sizeof(*ret)); \
+ return ret; \
+ }
+
+#define CAP_RIGHTS_CLEAR_INTERCEPTOR(cap_rights_clear, rights, ...) \
+ { \
+ void *ctx; \
+ COMMON_INTERCEPTOR_ENTER(ctx, cap_rights_clear, rights, ##__VA_ARGS__); \
+ if (rights) \
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, rights, sizeof(*rights)); \
+ __sanitizer_cap_rights_t *ret = \
+ REAL(cap_rights_clear)(rights, ##__VA_ARGS__); \
+ if (ret) \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, sizeof(*ret)); \
+ return ret; \
+ }
+
+#define CAP_RIGHTS_IS_SET_INTERCEPTOR(cap_rights_is_set, rights, ...) \
+ { \
+ void *ctx; \
+ COMMON_INTERCEPTOR_ENTER(ctx, cap_rights_is_set, rights, ##__VA_ARGS__); \
+ if (rights) \
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, rights, sizeof(*rights)); \
+ return REAL(cap_rights_is_set)(rights, ##__VA_ARGS__); \
+ }
+
+INTERCEPTOR(__sanitizer_cap_rights_t *, cap_rights_init,
+ __sanitizer_cap_rights_t *rights) {
+ CAP_RIGHTS_INIT_INTERCEPTOR(cap_rights_init, rights);
+}
+
+INTERCEPTOR(__sanitizer_cap_rights_t *, cap_rights_set,
+ __sanitizer_cap_rights_t *rights) {
+ CAP_RIGHTS_SET_INTERCEPTOR(cap_rights_set, rights);
+}
+
+INTERCEPTOR(__sanitizer_cap_rights_t *, cap_rights_clear,
+ __sanitizer_cap_rights_t *rights) {
+ CAP_RIGHTS_CLEAR_INTERCEPTOR(cap_rights_clear, rights);
+}
+
+INTERCEPTOR(bool, cap_rights_is_set,
+ __sanitizer_cap_rights_t *rights) {
+ CAP_RIGHTS_IS_SET_INTERCEPTOR(cap_rights_is_set, rights);
+}
+
+INTERCEPTOR(int, cap_rights_limit, int fd,
+ const __sanitizer_cap_rights_t *rights) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, cap_rights_limit, fd, rights);
+ if (rights)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, rights, sizeof(*rights));
+
+ return REAL(cap_rights_limit)(fd, rights);
+}
+
+INTERCEPTOR(int, cap_rights_get, int fd, __sanitizer_cap_rights_t *rights) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, cap_rights_get, fd, rights);
+ int ret = REAL(cap_rights_get)(fd, rights);
+ if (!ret && rights)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, rights, sizeof(*rights));
+
+ return ret;
+}
+
+INTERCEPTOR(bool, cap_rights_is_valid, const __sanitizer_cap_rights_t *rights) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, cap_rights_is_valid, rights);
+ if (rights)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, rights, sizeof(*rights));
+
+  return REAL(cap_rights_is_valid)(rights);
+}
+
+INTERCEPTOR(__sanitizer_cap_rights_t *, cap_rights_merge,
+            __sanitizer_cap_rights_t *dst,
+            const __sanitizer_cap_rights_t *src) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, cap_rights_merge, dst, src);
+ if (src)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, sizeof(*src));
+
+  __sanitizer_cap_rights_t *ret = REAL(cap_rights_merge)(dst, src);
+ if (dst)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, sizeof(*dst));
+
+ return ret;
+}
+
+INTERCEPTOR(__sanitizer_cap_rights_t *, cap_rights_remove,
+            __sanitizer_cap_rights_t *dst,
+            const __sanitizer_cap_rights_t *src) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, cap_rights_remove, dst, src);
+ if (src)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, sizeof(*src));
+
+  __sanitizer_cap_rights_t *ret = REAL(cap_rights_remove)(dst, src);
+ if (dst)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, sizeof(*dst));
+
+ return ret;
+}
+
+INTERCEPTOR(bool, cap_rights_contains, const __sanitizer_cap_rights_t *big,
+            const __sanitizer_cap_rights_t *little) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, cap_rights_contains, big, little);
+ if (little)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, little, sizeof(*little));
+ if (big)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, big, sizeof(*big));
+
+ return REAL(cap_rights_contains)(big, little);
+}
+
+INTERCEPTOR(int, cap_ioctls_limit, int fd, const uptr *cmds, SIZE_T ncmds) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, cap_ioctls_limit, fd, cmds, ncmds);
+ if (cmds)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, cmds, sizeof(*cmds) * ncmds);
+
+ return REAL(cap_ioctls_limit)(fd, cmds, ncmds);
+}
+
+INTERCEPTOR(int, cap_ioctls_get, int fd, uptr *cmds, SIZE_T maxcmds) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, cap_ioctls_get, fd, cmds, maxcmds);
+ int ret = REAL(cap_ioctls_get)(fd, cmds, maxcmds);
+ if (!ret && cmds)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cmds, sizeof(*cmds) * maxcmds);
+
+ return ret;
+}
+#define INIT_CAPSICUM \
+ COMMON_INTERCEPT_FUNCTION(cap_rights_init); \
+ COMMON_INTERCEPT_FUNCTION(cap_rights_set); \
+ COMMON_INTERCEPT_FUNCTION(cap_rights_clear); \
+ COMMON_INTERCEPT_FUNCTION(cap_rights_is_set); \
+ COMMON_INTERCEPT_FUNCTION(cap_rights_get); \
+ COMMON_INTERCEPT_FUNCTION(cap_rights_limit); \
+ COMMON_INTERCEPT_FUNCTION(cap_rights_contains); \
+ COMMON_INTERCEPT_FUNCTION(cap_rights_remove); \
+ COMMON_INTERCEPT_FUNCTION(cap_rights_merge); \
+ COMMON_INTERCEPT_FUNCTION(cap_rights_is_valid); \
+ COMMON_INTERCEPT_FUNCTION(cap_ioctls_get); \
+ COMMON_INTERCEPT_FUNCTION(cap_ioctls_limit)
+#else
+#define INIT_CAPSICUM
+#endif
+
+#if SANITIZER_INTERCEPT_SHA1
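+// The digest interceptors share one pattern: the context is read before
+// and unpoisoned after each call, and the *End/*File/*Data results are
+// unpoisoned up to the fixed textual return length.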
+INTERCEPTOR(void, SHA1Init, void *context) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, SHA1Init, context);
+ REAL(SHA1Init)(context);
+ if (context)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, context, SHA1_CTX_sz);
+}
+INTERCEPTOR(void, SHA1Update, void *context, const u8 *data, unsigned len) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, SHA1Update, context, data, len);
+ if (data && len > 0)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, data, len);
+ if (context)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, context, SHA1_CTX_sz);
+ REAL(SHA1Update)(context, data, len);
+ if (context)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, context, SHA1_CTX_sz);
+}
+INTERCEPTOR(void, SHA1Final, u8 digest[20], void *context) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, SHA1Final, digest, context);
+ if (context)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, context, SHA1_CTX_sz);
+ REAL(SHA1Final)(digest, context);
+ if (digest)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, digest, sizeof(u8) * 20);
+}
+INTERCEPTOR(void, SHA1Transform, u32 state[5], u8 buffer[64]) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, SHA1Transform, state, buffer);
+ if (state)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, state, sizeof(u32) * 5);
+ if (buffer)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, buffer, sizeof(u8) * 64);
+ REAL(SHA1Transform)(state, buffer);
+ if (state)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, state, sizeof(u32) * 5);
+}
+INTERCEPTOR(char *, SHA1End, void *context, char *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, SHA1End, context, buf);
+ if (context)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, context, SHA1_CTX_sz);
+ char *ret = REAL(SHA1End)(context, buf);
+ if (ret)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, SHA1_return_length);
+ return ret;
+}
+INTERCEPTOR(char *, SHA1File, char *filename, char *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, SHA1File, filename, buf);
+ if (filename)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, REAL(strlen)(filename) + 1);
+ char *ret = REAL(SHA1File)(filename, buf);
+ if (ret)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, SHA1_return_length);
+ return ret;
+}
+INTERCEPTOR(char *, SHA1FileChunk, char *filename, char *buf, OFF_T offset,
+ OFF_T length) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, SHA1FileChunk, filename, buf, offset, length);
+ if (filename)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, REAL(strlen)(filename) + 1);
+ char *ret = REAL(SHA1FileChunk)(filename, buf, offset, length);
+ if (ret)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, SHA1_return_length);
+ return ret;
+}
+INTERCEPTOR(char *, SHA1Data, u8 *data, SIZE_T len, char *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, SHA1Data, data, len, buf);
+ if (data)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, data, len);
+ char *ret = REAL(SHA1Data)(data, len, buf);
+ if (ret)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, SHA1_return_length);
+ return ret;
+}
+#define INIT_SHA1 \
+ COMMON_INTERCEPT_FUNCTION(SHA1Init); \
+ COMMON_INTERCEPT_FUNCTION(SHA1Update); \
+ COMMON_INTERCEPT_FUNCTION(SHA1Final); \
+ COMMON_INTERCEPT_FUNCTION(SHA1Transform); \
+ COMMON_INTERCEPT_FUNCTION(SHA1End); \
+ COMMON_INTERCEPT_FUNCTION(SHA1File); \
+ COMMON_INTERCEPT_FUNCTION(SHA1FileChunk); \
+ COMMON_INTERCEPT_FUNCTION(SHA1Data)
+#else
+#define INIT_SHA1
+#endif
+
+#if SANITIZER_INTERCEPT_MD4
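+// MD4 follows the same digest-interceptor pattern as SHA1 above.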
+INTERCEPTOR(void, MD4Init, void *context) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, MD4Init, context);
+ REAL(MD4Init)(context);
+ if (context)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, context, MD4_CTX_sz);
+}
+
+INTERCEPTOR(void, MD4Update, void *context, const unsigned char *data,
+ unsigned int len) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, MD4Update, context, data, len);
+ if (data && len > 0)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, data, len);
+ if (context)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, context, MD4_CTX_sz);
+ REAL(MD4Update)(context, data, len);
+ if (context)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, context, MD4_CTX_sz);
+}
+
+INTERCEPTOR(void, MD4Final, unsigned char digest[16], void *context) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, MD4Final, digest, context);
+ if (context)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, context, MD4_CTX_sz);
+ REAL(MD4Final)(digest, context);
+ if (digest)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, digest, sizeof(unsigned char) * 16);
+}
+
+INTERCEPTOR(char *, MD4End, void *context, char *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, MD4End, context, buf);
+ if (context)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, context, MD4_CTX_sz);
+ char *ret = REAL(MD4End)(context, buf);
+ if (ret)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, MD4_return_length);
+ return ret;
+}
+
+INTERCEPTOR(char *, MD4File, const char *filename, char *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, MD4File, filename, buf);
+ if (filename)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, REAL(strlen)(filename) + 1);
+ char *ret = REAL(MD4File)(filename, buf);
+ if (ret)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, MD4_return_length);
+ return ret;
+}
+
+INTERCEPTOR(char *, MD4Data, const unsigned char *data, unsigned int len,
+ char *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, MD4Data, data, len, buf);
+ if (data && len > 0)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, data, len);
+ char *ret = REAL(MD4Data)(data, len, buf);
+ if (ret)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, MD4_return_length);
+ return ret;
+}
+
+#define INIT_MD4 \
+ COMMON_INTERCEPT_FUNCTION(MD4Init); \
+ COMMON_INTERCEPT_FUNCTION(MD4Update); \
+ COMMON_INTERCEPT_FUNCTION(MD4Final); \
+ COMMON_INTERCEPT_FUNCTION(MD4End); \
+ COMMON_INTERCEPT_FUNCTION(MD4File); \
+ COMMON_INTERCEPT_FUNCTION(MD4Data)
+#else
+#define INIT_MD4
+#endif
+
+#if SANITIZER_INTERCEPT_RMD160
+INTERCEPTOR(void, RMD160Init, void *context) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, RMD160Init, context);
+ REAL(RMD160Init)(context);
+ if (context)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, context, RMD160_CTX_sz);
+}
+INTERCEPTOR(void, RMD160Update, void *context, const u8 *data, unsigned len) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, RMD160Update, context, data, len);
+ if (data && len > 0)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, data, len);
+ if (context)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, context, RMD160_CTX_sz);
+ REAL(RMD160Update)(context, data, len);
+ if (context)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, context, RMD160_CTX_sz);
+}
+INTERCEPTOR(void, RMD160Final, u8 digest[20], void *context) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, RMD160Final, digest, context);
+ if (context)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, context, RMD160_CTX_sz);
+ REAL(RMD160Final)(digest, context);
+ if (digest)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, digest, sizeof(u8) * 20);
+}
+// The real function consumes a 64-byte block, i.e. 16 u32 words, which
+// matches the read range below.
+INTERCEPTOR(void, RMD160Transform, u32 state[5], u32 buffer[16]) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, RMD160Transform, state, buffer);
+ if (state)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, state, sizeof(u32) * 5);
+ if (buffer)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, buffer, sizeof(u32) * 16);
+ REAL(RMD160Transform)(state, buffer);
+ if (state)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, state, sizeof(u32) * 5);
+}
+INTERCEPTOR(char *, RMD160End, void *context, char *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, RMD160End, context, buf);
+ if (context)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, context, RMD160_CTX_sz);
+ char *ret = REAL(RMD160End)(context, buf);
+ if (ret)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, RMD160_return_length);
+ return ret;
+}
+INTERCEPTOR(char *, RMD160File, char *filename, char *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, RMD160File, filename, buf);
+ if (filename)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, REAL(strlen)(filename) + 1);
+ char *ret = REAL(RMD160File)(filename, buf);
+ if (ret)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, RMD160_return_length);
+ return ret;
+}
+INTERCEPTOR(char *, RMD160FileChunk, char *filename, char *buf, OFF_T offset,
+ OFF_T length) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, RMD160FileChunk, filename, buf, offset, length);
+ if (filename)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, REAL(strlen)(filename) + 1);
+ char *ret = REAL(RMD160FileChunk)(filename, buf, offset, length);
+ if (ret)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, RMD160_return_length);
+ return ret;
+}
+INTERCEPTOR(char *, RMD160Data, u8 *data, SIZE_T len, char *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, RMD160Data, data, len, buf);
+ if (data && len > 0)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, data, len);
+ char *ret = REAL(RMD160Data)(data, len, buf);
+ if (ret)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, RMD160_return_length);
+ return ret;
+}
+#define INIT_RMD160 \
+ COMMON_INTERCEPT_FUNCTION(RMD160Init); \
+ COMMON_INTERCEPT_FUNCTION(RMD160Update); \
+ COMMON_INTERCEPT_FUNCTION(RMD160Final); \
+ COMMON_INTERCEPT_FUNCTION(RMD160Transform); \
+ COMMON_INTERCEPT_FUNCTION(RMD160End); \
+ COMMON_INTERCEPT_FUNCTION(RMD160File); \
+ COMMON_INTERCEPT_FUNCTION(RMD160FileChunk); \
+ COMMON_INTERCEPT_FUNCTION(RMD160Data)
+#else
+#define INIT_RMD160
+#endif
+
+#if SANITIZER_INTERCEPT_MD5
+INTERCEPTOR(void, MD5Init, void *context) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, MD5Init, context);
+ REAL(MD5Init)(context);
+ if (context)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, context, MD5_CTX_sz);
+}
+
+INTERCEPTOR(void, MD5Update, void *context, const unsigned char *data,
+ unsigned int len) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, MD5Update, context, data, len);
+ if (data && len > 0)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, data, len);
+ if (context)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, context, MD5_CTX_sz);
+ REAL(MD5Update)(context, data, len);
+ if (context)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, context, MD5_CTX_sz);
+}
+
+INTERCEPTOR(void, MD5Final, unsigned char digest[16], void *context) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, MD5Final, digest, context);
+ if (context)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, context, MD5_CTX_sz);
+ REAL(MD5Final)(digest, context);
+ if (digest)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, digest, sizeof(unsigned char) * 16);
+}
+
+INTERCEPTOR(char *, MD5End, void *context, char *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, MD5End, context, buf);
+ if (context)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, context, MD5_CTX_sz);
+ char *ret = REAL(MD5End)(context, buf);
+ if (ret)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, MD5_return_length);
+ return ret;
+}
+
+INTERCEPTOR(char *, MD5File, const char *filename, char *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, MD5File, filename, buf);
+ if (filename)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, REAL(strlen)(filename) + 1);
+ char *ret = REAL(MD5File)(filename, buf);
+ if (ret)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, MD5_return_length);
+ return ret;
+}
+
+INTERCEPTOR(char *, MD5Data, const unsigned char *data, unsigned int len,
+ char *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, MD5Data, data, len, buf);
+ if (data && len > 0)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, data, len);
+ char *ret = REAL(MD5Data)(data, len, buf);
+ if (ret)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, MD5_return_length);
+ return ret;
+}
+
+#define INIT_MD5 \
+ COMMON_INTERCEPT_FUNCTION(MD5Init); \
+ COMMON_INTERCEPT_FUNCTION(MD5Update); \
+ COMMON_INTERCEPT_FUNCTION(MD5Final); \
+ COMMON_INTERCEPT_FUNCTION(MD5End); \
+ COMMON_INTERCEPT_FUNCTION(MD5File); \
+ COMMON_INTERCEPT_FUNCTION(MD5Data)
+#else
+#define INIT_MD5
+#endif
+
+#if SANITIZER_INTERCEPT_FSEEK
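+// The seek/tell family needs only ENTER bookkeeping; fgetpos/fsetpos
+// additionally track the opaque fpos_t object.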
+INTERCEPTOR(int, fseek, __sanitizer_FILE *stream, long int offset, int whence) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fseek, stream, offset, whence);
+ return REAL(fseek)(stream, offset, whence);
+}
+INTERCEPTOR(int, fseeko, __sanitizer_FILE *stream, OFF_T offset, int whence) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fseeko, stream, offset, whence);
+ return REAL(fseeko)(stream, offset, whence);
+}
+INTERCEPTOR(long int, ftell, __sanitizer_FILE *stream) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, ftell, stream);
+ return REAL(ftell)(stream);
+}
+INTERCEPTOR(OFF_T, ftello, __sanitizer_FILE *stream) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, ftello, stream);
+ return REAL(ftello)(stream);
+}
+INTERCEPTOR(void, rewind, __sanitizer_FILE *stream) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, rewind, stream);
+ return REAL(rewind)(stream);
+}
+INTERCEPTOR(int, fgetpos, __sanitizer_FILE *stream, void *pos) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fgetpos, stream, pos);
+ int ret = REAL(fgetpos)(stream, pos);
+ if (pos && !ret)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, pos, fpos_t_sz);
+ return ret;
+}
+INTERCEPTOR(int, fsetpos, __sanitizer_FILE *stream, const void *pos) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fsetpos, stream, pos);
+ if (pos)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, pos, fpos_t_sz);
+ return REAL(fsetpos)(stream, pos);
+}
+#define INIT_FSEEK \
+ COMMON_INTERCEPT_FUNCTION(fseek); \
+ COMMON_INTERCEPT_FUNCTION(fseeko); \
+ COMMON_INTERCEPT_FUNCTION(ftell); \
+ COMMON_INTERCEPT_FUNCTION(ftello); \
+ COMMON_INTERCEPT_FUNCTION(rewind); \
+ COMMON_INTERCEPT_FUNCTION(fgetpos); \
+ COMMON_INTERCEPT_FUNCTION(fsetpos)
+#else
+#define INIT_FSEEK
+#endif
+
+#if SANITIZER_INTERCEPT_MD2
+INTERCEPTOR(void, MD2Init, void *context) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, MD2Init, context);
+ REAL(MD2Init)(context);
+ if (context)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, context, MD2_CTX_sz);
+}
+
+INTERCEPTOR(void, MD2Update, void *context, const unsigned char *data,
+ unsigned int len) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, MD2Update, context, data, len);
+ if (data && len > 0)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, data, len);
+ if (context)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, context, MD2_CTX_sz);
+ REAL(MD2Update)(context, data, len);
+ if (context)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, context, MD2_CTX_sz);
+}
+
+INTERCEPTOR(void, MD2Final, unsigned char digest[16], void *context) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, MD2Final, digest, context);
+ if (context)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, context, MD2_CTX_sz);
+ REAL(MD2Final)(digest, context);
+ if (digest)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, digest, sizeof(unsigned char) * 16);
+}
+
+INTERCEPTOR(char *, MD2End, void *context, char *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, MD2End, context, buf);
+ if (context)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, context, MD2_CTX_sz);
+ char *ret = REAL(MD2End)(context, buf);
+ if (ret)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, MD2_return_length);
+ return ret;
+}
+
+INTERCEPTOR(char *, MD2File, const char *filename, char *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, MD2File, filename, buf);
+ if (filename)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, REAL(strlen)(filename) + 1);
+ char *ret = REAL(MD2File)(filename, buf);
+ if (ret)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, MD2_return_length);
+ return ret;
+}
+
+INTERCEPTOR(char *, MD2Data, const unsigned char *data, unsigned int len,
+ char *buf) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, MD2Data, data, len, buf);
+ if (data && len > 0)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, data, len);
+ char *ret = REAL(MD2Data)(data, len, buf);
+ if (ret)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, MD2_return_length);
+ return ret;
+}
+
+#define INIT_MD2 \
+ COMMON_INTERCEPT_FUNCTION(MD2Init); \
+ COMMON_INTERCEPT_FUNCTION(MD2Update); \
+ COMMON_INTERCEPT_FUNCTION(MD2Final); \
+ COMMON_INTERCEPT_FUNCTION(MD2End); \
+ COMMON_INTERCEPT_FUNCTION(MD2File); \
+ COMMON_INTERCEPT_FUNCTION(MD2Data)
+#else
+#define INIT_MD2
+#endif
+
+#if SANITIZER_INTERCEPT_SHA2
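+// One macro instantiation per digest width generates the seven SHA-2
+// interceptors (Init, Update, Final, End, File, FileChunk, Data).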
+#define SHA2_INTERCEPTORS(LEN) \
+ INTERCEPTOR(void, SHA##LEN##_Init, void *context) { \
+ void *ctx; \
+ COMMON_INTERCEPTOR_ENTER(ctx, SHA##LEN##_Init, context); \
+ REAL(SHA##LEN##_Init)(context); \
+ if (context) \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, context, SHA##LEN##_CTX_sz); \
+ } \
+ INTERCEPTOR(void, SHA##LEN##_Update, void *context, \
+ const u8 *data, SIZE_T len) { \
+ void *ctx; \
+ COMMON_INTERCEPTOR_ENTER(ctx, SHA##LEN##_Update, context, data, len); \
+ if (data && len > 0) \
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, data, len); \
+ if (context) \
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, context, SHA##LEN##_CTX_sz); \
+ REAL(SHA##LEN##_Update)(context, data, len); \
+ if (context) \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, context, SHA##LEN##_CTX_sz); \
+ } \
+ INTERCEPTOR(void, SHA##LEN##_Final, u8 digest[LEN/8], \
+ void *context) { \
+ void *ctx; \
+ CHECK_EQ(SHA##LEN##_digest_length, LEN/8); \
+ COMMON_INTERCEPTOR_ENTER(ctx, SHA##LEN##_Final, digest, context); \
+ if (context) \
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, context, SHA##LEN##_CTX_sz); \
+ REAL(SHA##LEN##_Final)(digest, context); \
+ if (digest) \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, digest, \
+ sizeof(digest[0]) * \
+ SHA##LEN##_digest_length); \
+ } \
+ INTERCEPTOR(char *, SHA##LEN##_End, void *context, char *buf) { \
+ void *ctx; \
+ COMMON_INTERCEPTOR_ENTER(ctx, SHA##LEN##_End, context, buf); \
+ if (context) \
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, context, SHA##LEN##_CTX_sz); \
+ char *ret = REAL(SHA##LEN##_End)(context, buf); \
+ if (ret) \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, SHA##LEN##_return_length); \
+ return ret; \
+ } \
+ INTERCEPTOR(char *, SHA##LEN##_File, const char *filename, char *buf) { \
+ void *ctx; \
+ COMMON_INTERCEPTOR_ENTER(ctx, SHA##LEN##_File, filename, buf); \
+ if (filename) \
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, REAL(strlen)(filename) + 1);\
+ char *ret = REAL(SHA##LEN##_File)(filename, buf); \
+ if (ret) \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, SHA##LEN##_return_length); \
+ return ret; \
+ } \
+ INTERCEPTOR(char *, SHA##LEN##_FileChunk, const char *filename, char *buf, \
+ OFF_T offset, OFF_T length) { \
+ void *ctx; \
+ COMMON_INTERCEPTOR_ENTER(ctx, SHA##LEN##_FileChunk, filename, buf, offset, \
+ length); \
+ if (filename) \
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, filename, REAL(strlen)(filename) + 1);\
+ char *ret = REAL(SHA##LEN##_FileChunk)(filename, buf, offset, length); \
+ if (ret) \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, SHA##LEN##_return_length); \
+ return ret; \
+ } \
+ INTERCEPTOR(char *, SHA##LEN##_Data, u8 *data, SIZE_T len, char *buf) { \
+ void *ctx; \
+ COMMON_INTERCEPTOR_ENTER(ctx, SHA##LEN##_Data, data, len, buf); \
+ if (data && len > 0) \
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, data, len); \
+ char *ret = REAL(SHA##LEN##_Data)(data, len, buf); \
+ if (ret) \
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, SHA##LEN##_return_length); \
+ return ret; \
+ }
+
+SHA2_INTERCEPTORS(224);
+SHA2_INTERCEPTORS(256);
+SHA2_INTERCEPTORS(384);
+SHA2_INTERCEPTORS(512);
+
+#define INIT_SHA2_INTERCEPTORS(LEN) \
+  COMMON_INTERCEPT_FUNCTION(SHA##LEN##_Init); \
+  COMMON_INTERCEPT_FUNCTION(SHA##LEN##_Update); \
+  COMMON_INTERCEPT_FUNCTION(SHA##LEN##_Final); \
+  COMMON_INTERCEPT_FUNCTION(SHA##LEN##_End); \
+  COMMON_INTERCEPT_FUNCTION(SHA##LEN##_File); \
+  COMMON_INTERCEPT_FUNCTION(SHA##LEN##_FileChunk); \
+  COMMON_INTERCEPT_FUNCTION(SHA##LEN##_Data)
+
+#define INIT_SHA2 \
+  INIT_SHA2_INTERCEPTORS(224); \
+  INIT_SHA2_INTERCEPTORS(256); \
+  INIT_SHA2_INTERCEPTORS(384); \
+  INIT_SHA2_INTERCEPTORS(512)
+#undef SHA2_INTERCEPTORS
+#else
+#define INIT_SHA2
+#endif
+
+#if SANITIZER_INTERCEPT_VIS
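+// vis(3)/unvis(3) encode and decode strings. The single-character variants
+// return a pointer into dst; the string variants return the length of the
+// resulting dst string.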
+INTERCEPTOR(char *, vis, char *dst, int c, int flag, int nextc) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, vis, dst, c, flag, nextc);
+ char *end = REAL(vis)(dst, c, flag, nextc);
+  // dst is NUL-terminated and end points to the terminating NUL
+ if (dst && end)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, end - dst + 1);
+ return end;
+}
+INTERCEPTOR(char *, nvis, char *dst, SIZE_T dlen, int c, int flag, int nextc) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, nvis, dst, dlen, c, flag, nextc);
+ char *end = REAL(nvis)(dst, dlen, c, flag, nextc);
+  // nvis does not guarantee that dst is NUL-terminated
+ if (dst && end)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, end - dst + 1);
+ return end;
+}
+INTERCEPTOR(int, strvis, char *dst, const char *src, int flag) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strvis, dst, src, flag);
+ if (src)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, REAL(strlen)(src) + 1);
+ int len = REAL(strvis)(dst, src, flag);
+ if (dst)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, len + 1);
+ return len;
+}
+INTERCEPTOR(int, stravis, char **dst, const char *src, int flag) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, stravis, dst, src, flag);
+ if (src)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, REAL(strlen)(src) + 1);
+ int len = REAL(stravis)(dst, src, flag);
+ if (dst) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, sizeof(char *));
+ if (*dst)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *dst, len + 1);
+ }
+ return len;
+}
+INTERCEPTOR(int, strnvis, char *dst, SIZE_T dlen, const char *src, int flag) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strnvis, dst, dlen, src, flag);
+ if (src)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, REAL(strlen)(src) + 1);
+ int len = REAL(strnvis)(dst, dlen, src, flag);
+  // The call can succeed even when there is no room for the terminating NUL
+ if (dst && len > 0)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, len + 1);
+ return len;
+}
+INTERCEPTOR(int, strvisx, char *dst, const char *src, SIZE_T len, int flag) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strvisx, dst, src, len, flag);
+ if (src)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, len);
+ int ret = REAL(strvisx)(dst, src, len, flag);
+ if (dst)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, ret + 1);
+ return ret;
+}
+INTERCEPTOR(int, strnvisx, char *dst, SIZE_T dlen, const char *src, SIZE_T len,
+ int flag) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strnvisx, dst, dlen, src, len, flag);
+ if (src)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, len);
+ int ret = REAL(strnvisx)(dst, dlen, src, len, flag);
+ if (dst && ret >= 0)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, ret + 1);
+ return ret;
+}
+INTERCEPTOR(int, strenvisx, char *dst, SIZE_T dlen, const char *src, SIZE_T len,
+ int flag, int *cerr_ptr) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strenvisx, dst, dlen, src, len, flag, cerr_ptr);
+ if (src)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, len);
+  // FIXME: cerr_ptr only needs to be checked when (flag & VIS_NOLOCALE)
+  // is not set, according to the implementation
+ if (cerr_ptr)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, cerr_ptr, sizeof(int));
+ int ret = REAL(strenvisx)(dst, dlen, src, len, flag, cerr_ptr);
+ if (dst && ret >= 0)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, ret + 1);
+ if (cerr_ptr)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cerr_ptr, sizeof(int));
+ return ret;
+}
+INTERCEPTOR(char *, svis, char *dst, int c, int flag, int nextc,
+ const char *extra) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, svis, dst, c, flag, nextc, extra);
+ if (extra)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, REAL(strlen)(extra) + 1);
+ char *end = REAL(svis)(dst, c, flag, nextc, extra);
+ if (dst && end)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, end - dst + 1);
+ return end;
+}
+INTERCEPTOR(char *, snvis, char *dst, SIZE_T dlen, int c, int flag, int nextc,
+ const char *extra) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, snvis, dst, dlen, c, flag, nextc, extra);
+ if (extra)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, REAL(strlen)(extra) + 1);
+ char *end = REAL(snvis)(dst, dlen, c, flag, nextc, extra);
+ if (dst && end)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst,
+ Min((SIZE_T)(end - dst + 1), dlen));
+ return end;
+}
+INTERCEPTOR(int, strsvis, char *dst, const char *src, int flag,
+ const char *extra) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strsvis, dst, src, flag, extra);
+ if (src)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, REAL(strlen)(src) + 1);
+ if (extra)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, REAL(strlen)(extra) + 1);
+ int len = REAL(strsvis)(dst, src, flag, extra);
+ if (dst)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, len + 1);
+ return len;
+}
+INTERCEPTOR(int, strsnvis, char *dst, SIZE_T dlen, const char *src, int flag,
+ const char *extra) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strsnvis, dst, dlen, src, flag, extra);
+ if (src)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, REAL(strlen)(src) + 1);
+ if (extra)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, REAL(strlen)(extra) + 1);
+ int len = REAL(strsnvis)(dst, dlen, src, flag, extra);
+  // The call can succeed even when there is no room for the terminating NUL
+ if (dst && len >= 0)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, len + 1);
+ return len;
+}
+INTERCEPTOR(int, strsvisx, char *dst, const char *src, SIZE_T len, int flag,
+ const char *extra) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strsvisx, dst, src, len, flag, extra);
+ if (src)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, len);
+ if (extra)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, REAL(strlen)(extra) + 1);
+ int ret = REAL(strsvisx)(dst, src, len, flag, extra);
+ if (dst)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, ret + 1);
+ return ret;
+}
+INTERCEPTOR(int, strsnvisx, char *dst, SIZE_T dlen, const char *src, SIZE_T len,
+ int flag, const char *extra) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strsnvisx, dst, dlen, src, len, flag, extra);
+ if (src)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, len);
+ if (extra)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, REAL(strlen)(extra) + 1);
+ int ret = REAL(strsnvisx)(dst, dlen, src, len, flag, extra);
+ if (dst && ret >= 0)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, ret + 1);
+ return ret;
+}
+INTERCEPTOR(int, strsenvisx, char *dst, SIZE_T dlen, const char *src,
+ SIZE_T len, int flag, const char *extra, int *cerr_ptr) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strsenvisx, dst, dlen, src, len, flag, extra,
+ cerr_ptr);
+ if (src)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, len);
+ if (extra)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, extra, REAL(strlen)(extra) + 1);
+  // FIXME: cerr_ptr only needs to be checked when (flag & VIS_NOLOCALE)
+  // is not set, according to the implementation
+ if (cerr_ptr)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, cerr_ptr, sizeof(int));
+ int ret = REAL(strsenvisx)(dst, dlen, src, len, flag, extra, cerr_ptr);
+ if (dst && ret >= 0)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, ret + 1);
+ if (cerr_ptr)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cerr_ptr, sizeof(int));
+ return ret;
+}
+INTERCEPTOR(int, unvis, char *cp, int c, int *astate, int flag) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, unvis, cp, c, astate, flag);
+ if (astate)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, astate, sizeof(*astate));
+ int ret = REAL(unvis)(cp, c, astate, flag);
+ if (ret == unvis_valid || ret == unvis_validpush) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cp, sizeof(*cp));
+ }
+ return ret;
+}
+INTERCEPTOR(int, strunvis, char *dst, const char *src) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strunvis, dst, src);
+ if (src)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, REAL(strlen)(src) + 1);
+ int ret = REAL(strunvis)(dst, src);
+ if (ret != -1)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, ret + 1);
+ return ret;
+}
+INTERCEPTOR(int, strnunvis, char *dst, SIZE_T dlen, const char *src) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strnunvis, dst, dlen, src);
+ if (src)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, REAL(strlen)(src) + 1);
+ int ret = REAL(strnunvis)(dst, dlen, src);
+ if (ret != -1)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, ret + 1);
+ return ret;
+}
+INTERCEPTOR(int, strunvisx, char *dst, const char *src, int flag) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strunvisx, dst, src, flag);
+ if (src)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, REAL(strlen)(src) + 1);
+ int ret = REAL(strunvisx)(dst, src, flag);
+ if (ret != -1)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, ret + 1);
+ return ret;
+}
+INTERCEPTOR(int, strnunvisx, char *dst, SIZE_T dlen, const char *src,
+ int flag) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, strnunvisx, dst, dlen, src, flag);
+ if (src)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, src, REAL(strlen)(src) + 1);
+ int ret = REAL(strnunvisx)(dst, dlen, src, flag);
+ if (ret != -1)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, dst, ret + 1);
+ return ret;
+}
+#define INIT_VIS \
+ COMMON_INTERCEPT_FUNCTION(vis); \
+ COMMON_INTERCEPT_FUNCTION(nvis); \
+ COMMON_INTERCEPT_FUNCTION(strvis); \
+ COMMON_INTERCEPT_FUNCTION(stravis); \
+ COMMON_INTERCEPT_FUNCTION(strnvis); \
+ COMMON_INTERCEPT_FUNCTION(strvisx); \
+ COMMON_INTERCEPT_FUNCTION(strnvisx); \
+ COMMON_INTERCEPT_FUNCTION(strenvisx); \
+ COMMON_INTERCEPT_FUNCTION(svis); \
+ COMMON_INTERCEPT_FUNCTION(snvis); \
+ COMMON_INTERCEPT_FUNCTION(strsvis); \
+ COMMON_INTERCEPT_FUNCTION(strsnvis); \
+ COMMON_INTERCEPT_FUNCTION(strsvisx); \
+ COMMON_INTERCEPT_FUNCTION(strsnvisx); \
+ COMMON_INTERCEPT_FUNCTION(strsenvisx); \
+ COMMON_INTERCEPT_FUNCTION(unvis); \
+ COMMON_INTERCEPT_FUNCTION(strunvis); \
+ COMMON_INTERCEPT_FUNCTION(strnunvis); \
+ COMMON_INTERCEPT_FUNCTION(strunvisx); \
+ COMMON_INTERCEPT_FUNCTION(strnunvisx)
+#else
+#define INIT_VIS
+#endif
+
+#if SANITIZER_INTERCEPT_CDB
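+// The cdbr(3) read and cdbw(3) write handles for NetBSD constant databases
+// are unpoisoned as whole structs; successful lookups also unpoison the
+// out-pointers and the data they reference.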
+INTERCEPTOR(struct __sanitizer_cdbr *, cdbr_open, const char *path, int flags) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, cdbr_open, path, flags);
+ if (path)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ struct __sanitizer_cdbr *cdbr = REAL(cdbr_open)(path, flags);
+ if (cdbr)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cdbr, sizeof(*cdbr));
+ return cdbr;
+}
+
+INTERCEPTOR(struct __sanitizer_cdbr *, cdbr_open_mem, void *base, SIZE_T size,
+ int flags, void (*unmap)(void *, void *, SIZE_T), void *cookie) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, cdbr_open_mem, base, size, flags, unmap,
+ cookie);
+ if (base && size)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, base, size);
+ struct __sanitizer_cdbr *cdbr =
+ REAL(cdbr_open_mem)(base, size, flags, unmap, cookie);
+ if (cdbr)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cdbr, sizeof(*cdbr));
+ return cdbr;
+}
+
+INTERCEPTOR(u32, cdbr_entries, struct __sanitizer_cdbr *cdbr) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, cdbr_entries, cdbr);
+ if (cdbr)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, cdbr, sizeof(*cdbr));
+ return REAL(cdbr_entries)(cdbr);
+}
+
+INTERCEPTOR(int, cdbr_get, struct __sanitizer_cdbr *cdbr, u32 index,
+ const void **data, SIZE_T *datalen) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, cdbr_get, cdbr, index, data, datalen);
+ if (cdbr)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, cdbr, sizeof(*cdbr));
+ int ret = REAL(cdbr_get)(cdbr, index, data, datalen);
+ if (!ret) {
+ if (data)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, data, sizeof(*data));
+ if (datalen)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, datalen, sizeof(*datalen));
+ if (data && datalen)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *data, *datalen);
+ }
+ return ret;
+}
+
+INTERCEPTOR(int, cdbr_find, struct __sanitizer_cdbr *cdbr, const void *key,
+ SIZE_T keylen, const void **data, SIZE_T *datalen) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, cdbr_find, cdbr, key, keylen, data, datalen);
+ if (cdbr)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, cdbr, sizeof(*cdbr));
+ if (key)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, key, keylen);
+ int ret = REAL(cdbr_find)(cdbr, key, keylen, data, datalen);
+ if (!ret) {
+ if (data)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, data, sizeof(*data));
+ if (datalen)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, datalen, sizeof(*datalen));
+ if (data && datalen)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, *data, *datalen);
+ }
+ return ret;
+}
+
+INTERCEPTOR(void, cdbr_close, struct __sanitizer_cdbr *cdbr) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, cdbr_close, cdbr);
+ if (cdbr)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, cdbr, sizeof(*cdbr));
+ REAL(cdbr_close)(cdbr);
+}
+
+INTERCEPTOR(struct __sanitizer_cdbw *, cdbw_open) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, cdbw_open);
+ struct __sanitizer_cdbw *ret = REAL(cdbw_open)();
+ if (ret)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, sizeof(*ret));
+ return ret;
+}
+
+INTERCEPTOR(int, cdbw_put, struct __sanitizer_cdbw *cdbw, const void *key,
+ SIZE_T keylen, const void *data, SIZE_T datalen) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, cdbw_put, cdbw, key, keylen, data, datalen);
+ if (cdbw)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, cdbw, sizeof(*cdbw));
+ if (data && datalen)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, data, datalen);
+ if (key && keylen)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, key, keylen);
+ int ret = REAL(cdbw_put)(cdbw, key, keylen, data, datalen);
+ if (!ret && cdbw)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cdbw, sizeof(*cdbw));
+ return ret;
+}
+
+INTERCEPTOR(int, cdbw_put_data, struct __sanitizer_cdbw *cdbw, const void *data,
+ SIZE_T datalen, u32 *index) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, cdbw_put_data, cdbw, data, datalen, index);
+ if (cdbw)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, cdbw, sizeof(*cdbw));
+ if (data && datalen)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, data, datalen);
+ int ret = REAL(cdbw_put_data)(cdbw, data, datalen, index);
+ if (!ret) {
+ if (index)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, index, sizeof(*index));
+ if (cdbw)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cdbw, sizeof(*cdbw));
+ }
+ return ret;
+}
+
+INTERCEPTOR(int, cdbw_put_key, struct __sanitizer_cdbw *cdbw, const void *key,
+ SIZE_T keylen, u32 index) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, cdbw_put_key, cdbw, key, keylen, index);
+ if (cdbw)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, cdbw, sizeof(*cdbw));
+ if (key && keylen)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, key, keylen);
+ int ret = REAL(cdbw_put_key)(cdbw, key, keylen, index);
+ if (!ret && cdbw)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cdbw, sizeof(*cdbw));
+ return ret;
+}
+
+INTERCEPTOR(int, cdbw_output, struct __sanitizer_cdbw *cdbw, int output,
+ const char descr[16], u32 (*seedgen)(void)) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, cdbw_output, cdbw, output, descr, seedgen);
+ COMMON_INTERCEPTOR_FD_ACCESS(ctx, output);
+ if (cdbw)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, cdbw, sizeof(*cdbw));
+ if (descr)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, descr, internal_strnlen(descr, 16));
+ if (seedgen)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, (void *)seedgen, sizeof(seedgen));
+ int ret = REAL(cdbw_output)(cdbw, output, descr, seedgen);
+ if (!ret) {
+ if (cdbw)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, cdbw, sizeof(*cdbw));
+ if (output >= 0)
+ COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, output);
+ }
+ return ret;
+}
+
+INTERCEPTOR(void, cdbw_close, struct __sanitizer_cdbw *cdbw) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, cdbw_close, cdbw);
+ if (cdbw)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, cdbw, sizeof(*cdbw));
+ REAL(cdbw_close)(cdbw);
+}
+
+#define INIT_CDB \
+ COMMON_INTERCEPT_FUNCTION(cdbr_open); \
+ COMMON_INTERCEPT_FUNCTION(cdbr_open_mem); \
+ COMMON_INTERCEPT_FUNCTION(cdbr_entries); \
+ COMMON_INTERCEPT_FUNCTION(cdbr_get); \
+ COMMON_INTERCEPT_FUNCTION(cdbr_find); \
+ COMMON_INTERCEPT_FUNCTION(cdbr_close); \
+ COMMON_INTERCEPT_FUNCTION(cdbw_open); \
+ COMMON_INTERCEPT_FUNCTION(cdbw_put); \
+ COMMON_INTERCEPT_FUNCTION(cdbw_put_data); \
+ COMMON_INTERCEPT_FUNCTION(cdbw_put_key); \
+ COMMON_INTERCEPT_FUNCTION(cdbw_output); \
+ COMMON_INTERCEPT_FUNCTION(cdbw_close)
+#else
+#define INIT_CDB
+#endif
+
+#if SANITIZER_INTERCEPT_GETFSENT
+INTERCEPTOR(void *, getfsent) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getfsent);
+ void *ret = REAL(getfsent)();
+ if (ret)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, struct_fstab_sz);
+ return ret;
+}
+
+INTERCEPTOR(void *, getfsspec, const char *spec) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getfsspec, spec);
+ if (spec)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, spec, REAL(strlen)(spec) + 1);
+ void *ret = REAL(getfsspec)(spec);
+ if (ret)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, struct_fstab_sz);
+ return ret;
+}
+
+INTERCEPTOR(void *, getfsfile, const char *file) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getfsfile, file);
+ if (file)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, file, REAL(strlen)(file) + 1);
+ void *ret = REAL(getfsfile)(file);
+ if (ret)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ret, struct_fstab_sz);
+ return ret;
+}
+
+#define INIT_GETFSENT \
+ COMMON_INTERCEPT_FUNCTION(getfsent); \
+ COMMON_INTERCEPT_FUNCTION(getfsspec); \
+ COMMON_INTERCEPT_FUNCTION(getfsfile);
+#else
+#define INIT_GETFSENT
+#endif
+
+#if SANITIZER_INTERCEPT_ARC4RANDOM
+INTERCEPTOR(void, arc4random_buf, void *buf, SIZE_T len) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, arc4random_buf, buf, len);
+ REAL(arc4random_buf)(buf, len);
+ if (buf && len)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, len);
+}
+
+INTERCEPTOR(void, arc4random_addrandom, u8 *dat, int datlen) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, arc4random_addrandom, dat, datlen);
+ if (dat && datlen)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, dat, datlen);
+ REAL(arc4random_addrandom)(dat, datlen);
+}
+
+#define INIT_ARC4RANDOM \
+ COMMON_INTERCEPT_FUNCTION(arc4random_buf); \
+ COMMON_INTERCEPT_FUNCTION(arc4random_addrandom);
+#else
+#define INIT_ARC4RANDOM
+#endif
+
+#if SANITIZER_INTERCEPT_POPEN
+INTERCEPTOR(__sanitizer_FILE *, popen, const char *command, const char *type) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, popen, command, type);
+ if (command)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, command, REAL(strlen)(command) + 1);
+ if (type)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, type, REAL(strlen)(type) + 1);
+ __sanitizer_FILE *res = REAL(popen)(command, type);
+ COMMON_INTERCEPTOR_FILE_OPEN(ctx, res, nullptr);
+ if (res) unpoison_file(res);
+ return res;
+}
+#define INIT_POPEN COMMON_INTERCEPT_FUNCTION(popen)
+#else
+#define INIT_POPEN
+#endif
+
+#if SANITIZER_INTERCEPT_POPENVE
+INTERCEPTOR(__sanitizer_FILE *, popenve, const char *path,
+ char *const *argv, char *const *envp, const char *type) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, popenve, path, argv, envp, type);
+ if (path)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, path, REAL(strlen)(path) + 1);
+ if (argv) {
+ for (char *const *pa = argv; ; ++pa) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, pa, sizeof(char **));
+ if (!*pa)
+ break;
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, *pa, REAL(strlen)(*pa) + 1);
+ }
+ }
+ if (envp) {
+ for (char *const *pa = envp; ; ++pa) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, pa, sizeof(char **));
+ if (!*pa)
+ break;
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, *pa, REAL(strlen)(*pa) + 1);
+ }
+ }
+ if (type)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, type, REAL(strlen)(type) + 1);
+ __sanitizer_FILE *res = REAL(popenve)(path, argv, envp, type);
+ COMMON_INTERCEPTOR_FILE_OPEN(ctx, res, nullptr);
+ if (res) unpoison_file(res);
+ return res;
+}
+#define INIT_POPENVE COMMON_INTERCEPT_FUNCTION(popenve)
+#else
+#define INIT_POPENVE
+#endif
+
+#if SANITIZER_INTERCEPT_PCLOSE
+INTERCEPTOR(int, pclose, __sanitizer_FILE *fp) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, pclose, fp);
+ COMMON_INTERCEPTOR_FILE_CLOSE(ctx, fp);
+ const FileMetadata *m = GetInterceptorMetadata(fp);
+ int res = REAL(pclose)(fp);
+ if (m) {
+ COMMON_INTERCEPTOR_INITIALIZE_RANGE(*m->addr, *m->size);
+ DeleteInterceptorMetadata(fp);
+ }
+ return res;
+}
+#define INIT_PCLOSE COMMON_INTERCEPT_FUNCTION(pclose);
+#else
+#define INIT_PCLOSE
+#endif
+
+#if SANITIZER_INTERCEPT_FUNOPEN
+typedef int (*funopen_readfn)(void *cookie, char *buf, int len);
+typedef int (*funopen_writefn)(void *cookie, const char *buf, int len);
+typedef OFF_T (*funopen_seekfn)(void *cookie, OFF_T offset, int whence);
+typedef int (*funopen_closefn)(void *cookie);
+
+struct WrappedFunopenCookie {
+ void *real_cookie;
+ funopen_readfn real_read;
+ funopen_writefn real_write;
+ funopen_seekfn real_seek;
+ funopen_closefn real_close;
+};
+
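+// The interceptor swaps the caller's cookie and callbacks for this
+// wrapper: REAL(funopen) receives the WrappedFunopenCookie together with
+// the wrapped_funopen_* shims below, and each shim unpoisons its
+// parameters before forwarding to the user's callback with the original
+// cookie. The wrapper is freed in wrapped_funopen_close().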
+static int wrapped_funopen_read(void *cookie, char *buf, int len) {
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(3);
+ WrappedFunopenCookie *wrapped_cookie = (WrappedFunopenCookie *)cookie;
+ funopen_readfn real_read = wrapped_cookie->real_read;
+ return real_read(wrapped_cookie->real_cookie, buf, len);
+}
+
+static int wrapped_funopen_write(void *cookie, const char *buf, int len) {
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(3);
+ WrappedFunopenCookie *wrapped_cookie = (WrappedFunopenCookie *)cookie;
+ funopen_writefn real_write = wrapped_cookie->real_write;
+ return real_write(wrapped_cookie->real_cookie, buf, len);
+}
+
+static OFF_T wrapped_funopen_seek(void *cookie, OFF_T offset, int whence) {
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(3);
+ WrappedFunopenCookie *wrapped_cookie = (WrappedFunopenCookie *)cookie;
+ funopen_seekfn real_seek = wrapped_cookie->real_seek;
+ return real_seek(wrapped_cookie->real_cookie, offset, whence);
+}
+
+static int wrapped_funopen_close(void *cookie) {
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(1);
+ WrappedFunopenCookie *wrapped_cookie = (WrappedFunopenCookie *)cookie;
+ funopen_closefn real_close = wrapped_cookie->real_close;
+ int res = real_close(wrapped_cookie->real_cookie);
+ InternalFree(wrapped_cookie);
+ return res;
+}
+
+INTERCEPTOR(__sanitizer_FILE *, funopen, void *cookie, funopen_readfn readfn,
+ funopen_writefn writefn, funopen_seekfn seekfn,
+ funopen_closefn closefn) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, funopen, cookie, readfn, writefn, seekfn,
+ closefn);
+
+ WrappedFunopenCookie *wrapped_cookie =
+ (WrappedFunopenCookie *)InternalAlloc(sizeof(WrappedFunopenCookie));
+ wrapped_cookie->real_cookie = cookie;
+ wrapped_cookie->real_read = readfn;
+ wrapped_cookie->real_write = writefn;
+ wrapped_cookie->real_seek = seekfn;
+ wrapped_cookie->real_close = closefn;
+
+ __sanitizer_FILE *res =
+ REAL(funopen)(wrapped_cookie,
+ readfn ? wrapped_funopen_read : nullptr,
+ writefn ? wrapped_funopen_write : nullptr,
+ seekfn ? wrapped_funopen_seek : nullptr,
+ closefn ? wrapped_funopen_close : nullptr);
+ if (res)
+ unpoison_file(res);
+ return res;
+}
+#define INIT_FUNOPEN COMMON_INTERCEPT_FUNCTION(funopen)
+#else
+#define INIT_FUNOPEN
+#endif
+
+#if SANITIZER_INTERCEPT_FUNOPEN2
+typedef SSIZE_T (*funopen2_readfn)(void *cookie, void *buf, SIZE_T len);
+typedef SSIZE_T (*funopen2_writefn)(void *cookie, const void *buf, SIZE_T len);
+typedef OFF_T (*funopen2_seekfn)(void *cookie, OFF_T offset, int whence);
+typedef int (*funopen2_flushfn)(void *cookie);
+typedef int (*funopen2_closefn)(void *cookie);
+
+struct WrappedFunopen2Cookie {
+ void *real_cookie;
+ funopen2_readfn real_read;
+ funopen2_writefn real_write;
+ funopen2_seekfn real_seek;
+ funopen2_flushfn real_flush;
+ funopen2_closefn real_close;
+};
+
+static SSIZE_T wrapped_funopen2_read(void *cookie, void *buf, SIZE_T len) {
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(3);
+ WrappedFunopen2Cookie *wrapped_cookie = (WrappedFunopen2Cookie *)cookie;
+ funopen2_readfn real_read = wrapped_cookie->real_read;
+ return real_read(wrapped_cookie->real_cookie, buf, len);
+}
+
+static SSIZE_T wrapped_funopen2_write(void *cookie, const void *buf,
+ SIZE_T len) {
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(3);
+ WrappedFunopen2Cookie *wrapped_cookie = (WrappedFunopen2Cookie *)cookie;
+ funopen2_writefn real_write = wrapped_cookie->real_write;
+ return real_write(wrapped_cookie->real_cookie, buf, len);
+}
+
+static OFF_T wrapped_funopen2_seek(void *cookie, OFF_T offset, int whence) {
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(3);
+ WrappedFunopen2Cookie *wrapped_cookie = (WrappedFunopen2Cookie *)cookie;
+ funopen2_seekfn real_seek = wrapped_cookie->real_seek;
+ return real_seek(wrapped_cookie->real_cookie, offset, whence);
+}
+
+static int wrapped_funopen2_flush(void *cookie) {
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(1);
+ WrappedFunopen2Cookie *wrapped_cookie = (WrappedFunopen2Cookie *)cookie;
+ funopen2_flushfn real_flush = wrapped_cookie->real_flush;
+ return real_flush(wrapped_cookie->real_cookie);
+}
+
+static int wrapped_funopen2_close(void *cookie) {
+ COMMON_INTERCEPTOR_UNPOISON_PARAM(1);
+ WrappedFunopen2Cookie *wrapped_cookie = (WrappedFunopen2Cookie *)cookie;
+ funopen2_closefn real_close = wrapped_cookie->real_close;
+ int res = real_close(wrapped_cookie->real_cookie);
+ InternalFree(wrapped_cookie);
+ return res;
+}
+
+INTERCEPTOR(__sanitizer_FILE *, funopen2, void *cookie, funopen2_readfn readfn,
+ funopen2_writefn writefn, funopen2_seekfn seekfn,
+ funopen2_flushfn flushfn, funopen2_closefn closefn) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, funopen2, cookie, readfn, writefn, seekfn,
+ flushfn, closefn);
+
+ WrappedFunopen2Cookie *wrapped_cookie =
+ (WrappedFunopen2Cookie *)InternalAlloc(sizeof(WrappedFunopen2Cookie));
+ wrapped_cookie->real_cookie = cookie;
+ wrapped_cookie->real_read = readfn;
+ wrapped_cookie->real_write = writefn;
+ wrapped_cookie->real_seek = seekfn;
+ wrapped_cookie->real_flush = flushfn;
+ wrapped_cookie->real_close = closefn;
+
+ __sanitizer_FILE *res =
+ REAL(funopen2)(wrapped_cookie,
+ readfn ? wrapped_funopen2_read : nullptr,
+ writefn ? wrapped_funopen2_write : nullptr,
+ seekfn ? wrapped_funopen2_seek : nullptr,
+ flushfn ? wrapped_funopen2_flush : nullptr,
+ closefn ? wrapped_funopen2_close : nullptr);
+ if (res)
+ unpoison_file(res);
+ return res;
+}
+#define INIT_FUNOPEN2 COMMON_INTERCEPT_FUNCTION(funopen2)
+#else
+#define INIT_FUNOPEN2
+#endif
+
+#if SANITIZER_INTERCEPT_FDEVNAME
+INTERCEPTOR(char *, fdevname, int fd) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fdevname, fd);
+ COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ char *name = REAL(fdevname)(fd);
+ if (name) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, name, REAL(strlen)(name) + 1);
+ if (fd > 0)
+ COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
+ }
+ return name;
+}
+
+INTERCEPTOR(char *, fdevname_r, int fd, char *buf, SIZE_T len) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, fdevname_r, fd, buf, len);
+ COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd);
+ char *name = REAL(fdevname_r)(fd, buf, len);
+ if (name && buf && len > 0) {
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, buf, REAL(strlen)(buf) + 1);
+ if (fd > 0)
+ COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd);
+ }
+ return name;
+}
+
+#define INIT_FDEVNAME \
+ COMMON_INTERCEPT_FUNCTION(fdevname); \
+ COMMON_INTERCEPT_FUNCTION(fdevname_r);
+#else
+#define INIT_FDEVNAME
+#endif
+
+#if SANITIZER_INTERCEPT_GETUSERSHELL
+INTERCEPTOR(char *, getusershell) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, getusershell);
+ char *res = REAL(getusershell)();
+ if (res)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ return res;
+}
+
+#define INIT_GETUSERSHELL COMMON_INTERCEPT_FUNCTION(getusershell);
+#else
+#define INIT_GETUSERSHELL
+#endif
+
+#if SANITIZER_INTERCEPT_SL_INIT
+INTERCEPTOR(void *, sl_init) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sl_init);
+ void *res = REAL(sl_init)();
+ if (res)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, __sanitizer::struct_StringList_sz);
+ return res;
+}
+
+INTERCEPTOR(int, sl_add, void *sl, char *item) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sl_add, sl, item);
+ if (sl)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, sl, __sanitizer::struct_StringList_sz);
+ if (item)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, item, REAL(strlen)(item) + 1);
+ int res = REAL(sl_add)(sl, item);
+ if (!res)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, sl, __sanitizer::struct_StringList_sz);
+ return res;
+}
+
+INTERCEPTOR(char *, sl_find, void *sl, const char *item) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sl_find, sl, item);
+ if (sl)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, sl, __sanitizer::struct_StringList_sz);
+ if (item)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, item, REAL(strlen)(item) + 1);
+ char *res = REAL(sl_find)(sl, item);
+ if (res)
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, res, REAL(strlen)(res) + 1);
+ return res;
+}
+
+INTERCEPTOR(void, sl_free, void *sl, int freeall) {
+ void *ctx;
+ COMMON_INTERCEPTOR_ENTER(ctx, sl_free, sl, freeall);
+ if (sl)
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, sl, __sanitizer::struct_StringList_sz);
+ REAL(sl_free)(sl, freeall);
+}
+
+#define INIT_SL_INIT \
+ COMMON_INTERCEPT_FUNCTION(sl_init); \
+ COMMON_INTERCEPT_FUNCTION(sl_add); \
+ COMMON_INTERCEPT_FUNCTION(sl_find); \
+ COMMON_INTERCEPT_FUNCTION(sl_free);
+#else
+#define INIT_SL_INIT
+#endif
+
+static void InitializeCommonInterceptors() {
+ static u64 metadata_mem[sizeof(MetadataHashMap) / sizeof(u64) + 1];
+ interceptor_metadata_map =
+ new ((void *)&metadata_mem) MetadataHashMap(); // NOLINT
+
+ INIT_MMAP;
+ INIT_MMAP64;
+ INIT_TEXTDOMAIN;
+ INIT_STRLEN;
+ INIT_STRNLEN;
+ INIT_STRNDUP;
+ INIT___STRNDUP;
+ INIT_STRCMP;
+ INIT_STRNCMP;
+ INIT_STRCASECMP;
+ INIT_STRNCASECMP;
+ INIT_STRSTR;
+ INIT_STRCASESTR;
+ INIT_STRCHR;
+ INIT_STRCHRNUL;
+ INIT_STRRCHR;
+ INIT_STRSPN;
+ INIT_STRTOK;
+ INIT_STRPBRK;
+ INIT_STRXFRM;
+ INIT___STRXFRM_L;
+ INIT_MEMSET;
+ INIT_MEMMOVE;
+ INIT_MEMCPY;
+ INIT_MEMCHR;
+ INIT_MEMCMP;
+ INIT_BCMP;
+ INIT_MEMRCHR;
+ INIT_MEMMEM;
+ INIT_READ;
+ INIT_FREAD;
+ INIT_PREAD;
+ INIT_PREAD64;
+ INIT_READV;
+ INIT_PREADV;
+ INIT_PREADV64;
+ INIT_WRITE;
+ INIT_FWRITE;
+ INIT_PWRITE;
+ INIT_PWRITE64;
+ INIT_WRITEV;
+ INIT_PWRITEV;
+ INIT_PWRITEV64;
+ INIT_FGETS;
+ INIT_FPUTS;
+ INIT_PUTS;
+ INIT_PRCTL;
+ INIT_LOCALTIME_AND_FRIENDS;
+ INIT_STRPTIME;
+ INIT_SCANF;
+ INIT_ISOC99_SCANF;
+ INIT_PRINTF;
+ INIT_PRINTF_L;
+ INIT_ISOC99_PRINTF;
+ INIT_FREXP;
+ INIT_FREXPF_FREXPL;
+ INIT_GETPWNAM_AND_FRIENDS;
+ INIT_GETPWNAM_R_AND_FRIENDS;
+ INIT_GETPWENT;
+ INIT_FGETPWENT;
+ INIT_GETPWENT_R;
+ INIT_FGETPWENT_R;
+ INIT_FGETGRENT_R;
+ INIT_SETPWENT;
+ INIT_CLOCK_GETTIME;
+ INIT_GETITIMER;
+ INIT_TIME;
+ INIT_GLOB;
+ INIT_GLOB64;
+ INIT_WAIT;
+ INIT_WAIT4;
+ INIT_INET;
+ INIT_PTHREAD_GETSCHEDPARAM;
+ INIT_GETADDRINFO;
+ INIT_GETNAMEINFO;
+ INIT_GETSOCKNAME;
+ INIT_GETHOSTBYNAME;
+ INIT_GETHOSTBYNAME2;
+ INIT_GETHOSTBYNAME_R;
+ INIT_GETHOSTBYNAME2_R;
+ INIT_GETHOSTBYADDR_R;
+ INIT_GETHOSTENT_R;
+ INIT_GETSOCKOPT;
+ INIT_ACCEPT;
+ INIT_ACCEPT4;
+ INIT_PACCEPT;
+ INIT_MODF;
+ INIT_RECVMSG;
+ INIT_SENDMSG;
+ INIT_RECVMMSG;
+ INIT_SENDMMSG;
+ INIT_GETPEERNAME;
+ INIT_IOCTL;
+ INIT_INET_ATON;
+ INIT_SYSINFO;
+ INIT_READDIR;
+ INIT_READDIR64;
+ INIT_PTRACE;
+ INIT_SETLOCALE;
+ INIT_GETCWD;
+ INIT_GET_CURRENT_DIR_NAME;
+ INIT_STRTOIMAX;
+ INIT_MBSTOWCS;
+ INIT_MBSNRTOWCS;
+ INIT_WCSTOMBS;
+ INIT_WCSNRTOMBS;
+ INIT_WCRTOMB;
+ INIT_WCTOMB;
+ INIT_TCGETATTR;
+ INIT_REALPATH;
+ INIT_CANONICALIZE_FILE_NAME;
+ INIT_CONFSTR;
+ INIT_SCHED_GETAFFINITY;
+ INIT_SCHED_GETPARAM;
+ INIT_STRERROR;
+ INIT_STRERROR_R;
+ INIT_XPG_STRERROR_R;
+ INIT_SCANDIR;
+ INIT_SCANDIR64;
+ INIT_GETGROUPS;
+ INIT_POLL;
+ INIT_PPOLL;
+ INIT_WORDEXP;
+ INIT_SIGWAIT;
+ INIT_SIGWAITINFO;
+ INIT_SIGTIMEDWAIT;
+ INIT_SIGSETOPS;
+ INIT_SIGPENDING;
+ INIT_SIGPROCMASK;
+ INIT_PTHREAD_SIGMASK;
+ INIT_BACKTRACE;
+ INIT__EXIT;
+ INIT_PTHREAD_MUTEX_LOCK;
+ INIT_PTHREAD_MUTEX_UNLOCK;
+ INIT___PTHREAD_MUTEX_LOCK;
+ INIT___PTHREAD_MUTEX_UNLOCK;
+ INIT___LIBC_MUTEX_LOCK;
+ INIT___LIBC_MUTEX_UNLOCK;
+ INIT___LIBC_THR_SETCANCELSTATE;
+ INIT_GETMNTENT;
+ INIT_GETMNTENT_R;
+ INIT_STATFS;
+ INIT_STATFS64;
+ INIT_STATVFS;
+ INIT_STATVFS64;
+ INIT_INITGROUPS;
+ INIT_ETHER_NTOA_ATON;
+ INIT_ETHER_HOST;
+ INIT_ETHER_R;
+ INIT_SHMCTL;
+ INIT_RANDOM_R;
+ INIT_PTHREAD_ATTR_GET;
+ INIT_PTHREAD_ATTR_GET_SCHED;
+ INIT_PTHREAD_ATTR_GETINHERITSCHED;
+ INIT_PTHREAD_ATTR_GETAFFINITY_NP;
+ INIT_PTHREAD_MUTEXATTR_GETPSHARED;
+ INIT_PTHREAD_MUTEXATTR_GETTYPE;
+ INIT_PTHREAD_MUTEXATTR_GETPROTOCOL;
+ INIT_PTHREAD_MUTEXATTR_GETPRIOCEILING;
+ INIT_PTHREAD_MUTEXATTR_GETROBUST;
+ INIT_PTHREAD_MUTEXATTR_GETROBUST_NP;
+ INIT_PTHREAD_RWLOCKATTR_GETPSHARED;
+ INIT_PTHREAD_RWLOCKATTR_GETKIND_NP;
+ INIT_PTHREAD_CONDATTR_GETPSHARED;
+ INIT_PTHREAD_CONDATTR_GETCLOCK;
+ INIT_PTHREAD_BARRIERATTR_GETPSHARED;
+ INIT_TMPNAM;
+ INIT_TMPNAM_R;
+ INIT_TTYNAME;
+ INIT_TTYNAME_R;
+ INIT_TEMPNAM;
+ INIT_PTHREAD_SETNAME_NP;
+ INIT_PTHREAD_GETNAME_NP;
+ INIT_SINCOS;
+ INIT_REMQUO;
+ INIT_REMQUOL;
+ INIT_LGAMMA;
+ INIT_LGAMMAL;
+ INIT_LGAMMA_R;
+ INIT_LGAMMAL_R;
+ INIT_DRAND48_R;
+ INIT_RAND_R;
+ INIT_GETLINE;
+ INIT_ICONV;
+ INIT_TIMES;
+ INIT_TLS_GET_ADDR;
+ INIT_LISTXATTR;
+ INIT_GETXATTR;
+ INIT_GETRESID;
+ INIT_GETIFADDRS;
+ INIT_IF_INDEXTONAME;
+ INIT_CAPGET;
+ INIT_AEABI_MEM;
+ INIT___BZERO;
+ INIT_BZERO;
+ INIT_FTIME;
+ INIT_XDR;
+ INIT_TSEARCH;
+ INIT_LIBIO_INTERNALS;
+ INIT_FOPEN;
+ INIT_FOPEN64;
+ INIT_OPEN_MEMSTREAM;
+ INIT_OBSTACK;
+ INIT_FFLUSH;
+ INIT_FCLOSE;
+ INIT_DLOPEN_DLCLOSE;
+ INIT_GETPASS;
+ INIT_TIMERFD;
+ INIT_MLOCKX;
+ INIT_FOPENCOOKIE;
+ INIT_SEM;
+ INIT_PTHREAD_SETCANCEL;
+ INIT_MINCORE;
+ INIT_PROCESS_VM_READV;
+ INIT_CTERMID;
+ INIT_CTERMID_R;
+ INIT_RECV_RECVFROM;
+ INIT_SEND_SENDTO;
+ INIT_STAT;
+ INIT_EVENTFD_READ_WRITE;
+ INIT_LSTAT;
+ INIT___XSTAT;
+ INIT___XSTAT64;
+ INIT___LXSTAT;
+ INIT___LXSTAT64;
+ // FIXME: add other *stat interceptors.
+ INIT_UTMP;
+ INIT_UTMPX;
+ INIT_GETLOADAVG;
+ INIT_WCSLEN;
+ INIT_WCSCAT;
+ INIT_WCSDUP;
+ INIT_WCSXFRM;
+ INIT___WCSXFRM_L;
+ INIT_ACCT;
+ INIT_USER_FROM_UID;
+ INIT_UID_FROM_USER;
+ INIT_GROUP_FROM_GID;
+ INIT_GID_FROM_GROUP;
+ INIT_ACCESS;
+ INIT_FACCESSAT;
+ INIT_GETGROUPLIST;
+ INIT_GETGROUPMEMBERSHIP;
+ INIT_READLINK;
+ INIT_READLINKAT;
+ INIT_NAME_TO_HANDLE_AT;
+ INIT_OPEN_BY_HANDLE_AT;
+ INIT_STRLCPY;
+ INIT_DEVNAME;
+ INIT_DEVNAME_R;
+ INIT_FGETLN;
+ INIT_STRMODE;
+ INIT_TTYENT;
+ INIT_PROTOENT;
+ INIT_NETENT;
+ INIT_GETMNTINFO;
+ INIT_MI_VECTOR_HASH;
+ INIT_SETVBUF;
+ INIT_GETVFSSTAT;
+ INIT_REGEX;
+ INIT_REGEXSUB;
+ INIT_FTS;
+ INIT_SYSCTL;
+ INIT_ASYSCTL;
+ INIT_SYSCTLGETMIBINFO;
+ INIT_NL_LANGINFO;
+ INIT_MODCTL;
+ INIT_STRTONUM;
+ INIT_FPARSELN;
+ INIT_STATVFS1;
+ INIT_STRTOI;
+ INIT_CAPSICUM;
+ INIT_SHA1;
+ INIT_MD4;
+ INIT_RMD160;
+ INIT_MD5;
+ INIT_FSEEK;
+ INIT_MD2;
+ INIT_SHA2;
+ INIT_VIS;
+ INIT_CDB;
+ INIT_GETFSENT;
+ INIT_ARC4RANDOM;
+ INIT_POPEN;
+ INIT_POPENVE;
+ INIT_PCLOSE;
+ INIT_FUNOPEN;
+ INIT_FUNOPEN2;
+ INIT_FDEVNAME;
+ INIT_GETUSERSHELL;
+ INIT_SL_INIT;
+
+ INIT___PRINTF_CHK;
+}
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_common_interceptors_format.inc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_common_interceptors_format.inc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_common_interceptors_format.inc (revision 351984)
@@ -0,0 +1,562 @@
+//===-- sanitizer_common_interceptors_format.inc ----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Scanf/printf implementation for use in *Sanitizer interceptors.
+// Follows http://pubs.opengroup.org/onlinepubs/9699919799/functions/fscanf.html
+// and http://pubs.opengroup.org/onlinepubs/9699919799/functions/fprintf.html
+// with a few common GNU extensions.
+//
+//===----------------------------------------------------------------------===//
+
+#include <stdarg.h>
+
+static const char *parse_number(const char *p, int *out) {
+ *out = internal_atoll(p);
+ while (*p >= '0' && *p <= '9')
+ ++p;
+ return p;
+}
+
+static const char *maybe_parse_param_index(const char *p, int *out) {
+ // n$
+ if (*p >= '0' && *p <= '9') {
+ int number;
+ const char *q = parse_number(p, &number);
+ CHECK(q);
+ if (*q == '$') {
+ *out = number;
+ p = q + 1;
+ }
+ }
+
+ // Otherwise, do not change p. This will be re-parsed later as the field
+ // width.
+ return p;
+}
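+// Editor's sketch (not part of the upstream source): given the POSIX
+// positional-argument syntax, maybe_parse_param_index("2$d", &idx) sets
+// idx = 2 and returns a pointer to "d"; for plain "2d" (no '$') it
+// leaves idx untouched and returns "2d" unchanged, so the digits are
+// re-parsed as a field width.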
+
+static bool char_is_one_of(char c, const char *s) {
+ return !!internal_strchr(s, c);
+}
+
+static const char *maybe_parse_length_modifier(const char *p, char ll[2]) {
+ if (char_is_one_of(*p, "jztLq")) {
+ ll[0] = *p;
+ ++p;
+ } else if (*p == 'h') {
+ ll[0] = 'h';
+ ++p;
+ if (*p == 'h') {
+ ll[1] = 'h';
+ ++p;
+ }
+ } else if (*p == 'l') {
+ ll[0] = 'l';
+ ++p;
+ if (*p == 'l') {
+ ll[1] = 'l';
+ ++p;
+ }
+ }
+ return p;
+}
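+// Editor's sketch (not part of the upstream source): for "lld" this
+// fills ll = {'l', 'l'} and returns a pointer to "d"; for "hhn" it
+// fills {'h', 'h'}; for "zu" it fills {'z', 0}.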
+
+// Returns true if the character is an integer conversion specifier.
+static bool format_is_integer_conv(char c) {
+ return char_is_one_of(c, "diouxXn");
+}
+
+// Returns true if the character is a floating-point conversion specifier.
+static bool format_is_float_conv(char c) {
+ return char_is_one_of(c, "aAeEfFgG");
+}
+
+// Returns the output character size for string-like conversions, or 0 if
+// the conversion is invalid.
+static int format_get_char_size(char convSpecifier,
+ const char lengthModifier[2]) {
+ if (char_is_one_of(convSpecifier, "CS")) {
+ return sizeof(wchar_t);
+ }
+
+ if (char_is_one_of(convSpecifier, "cs[")) {
+ if (lengthModifier[0] == 'l' && lengthModifier[1] == '\0')
+ return sizeof(wchar_t);
+ else if (lengthModifier[0] == '\0')
+ return sizeof(char);
+ }
+
+ return 0;
+}
+
+enum FormatStoreSize {
+ // Store size not known in advance; can be calculated as wcslen() of the
+ // destination buffer.
+ FSS_WCSLEN = -2,
+ // Store size not known in advance; can be calculated as strlen() of the
+ // destination buffer.
+ FSS_STRLEN = -1,
+ // Invalid conversion specifier.
+ FSS_INVALID = 0
+};
+
+// Returns the memory size of a format directive's argument (if > 0), or a
+// FormatStoreSize value.
+static int format_get_value_size(char convSpecifier,
+ const char lengthModifier[2],
+ bool promote_float) {
+ if (format_is_integer_conv(convSpecifier)) {
+ switch (lengthModifier[0]) {
+ case 'h':
+ return lengthModifier[1] == 'h' ? sizeof(char) : sizeof(short);
+ case 'l':
+ return lengthModifier[1] == 'l' ? sizeof(long long) : sizeof(long);
+ case 'q':
+ return sizeof(long long);
+ case 'L':
+ return sizeof(long long);
+ case 'j':
+ return sizeof(INTMAX_T);
+ case 'z':
+ return sizeof(SIZE_T);
+ case 't':
+ return sizeof(PTRDIFF_T);
+ case 0:
+ return sizeof(int);
+ default:
+ return FSS_INVALID;
+ }
+ }
+
+ if (format_is_float_conv(convSpecifier)) {
+ switch (lengthModifier[0]) {
+ case 'L':
+ case 'q':
+ return sizeof(long double);
+ case 'l':
+ return lengthModifier[1] == 'l' ? sizeof(long double)
+ : sizeof(double);
+ case 0:
+ // Printf promotes floats to doubles but scanf does not
+ return promote_float ? sizeof(double) : sizeof(float);
+ default:
+ return FSS_INVALID;
+ }
+ }
+
+ if (convSpecifier == 'p') {
+ if (lengthModifier[0] != 0)
+ return FSS_INVALID;
+ return sizeof(void *);
+ }
+
+ return FSS_INVALID;
+}
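+// Editor's sketch (not part of the upstream source) of expected results
+// on a typical LP64 target:
+//   format_get_value_size('d', "\0\0", false) == sizeof(int)       // %d
+//   format_get_value_size('u', "ll",   false) == sizeof(long long) // %llu
+//   format_get_value_size('f', "\0\0", true)  == sizeof(double)    // printf %f
+//   format_get_value_size('f', "\0\0", false) == sizeof(float)     // scanf %f
+//   format_get_value_size('p', "l\0",  false) == FSS_INVALID       // %lp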
+
+struct ScanfDirective {
+ int argIdx; // argument index, or -1 if not specified ("%n$")
+ int fieldWidth;
+ const char *begin;
+ const char *end;
+ bool suppressed; // suppress assignment ("*")
+ bool allocate; // allocate space ("m")
+ char lengthModifier[2];
+ char convSpecifier;
+ bool maybeGnuMalloc;
+};
+
+// Parse scanf format string. If a valid directive is encountered, it is
+// returned in dir. This function returns a pointer to the first
+// unprocessed character, or null in case of error.
+// At the end of the string, a pointer to the terminating \0 is returned.
+static const char *scanf_parse_next(const char *p, bool allowGnuMalloc,
+ ScanfDirective *dir) {
+ internal_memset(dir, 0, sizeof(*dir));
+ dir->argIdx = -1;
+
+ while (*p) {
+ if (*p != '%') {
+ ++p;
+ continue;
+ }
+ dir->begin = p;
+ ++p;
+ // %%
+ if (*p == '%') {
+ ++p;
+ continue;
+ }
+ if (*p == '\0') {
+ return nullptr;
+ }
+ // %n$
+ p = maybe_parse_param_index(p, &dir->argIdx);
+ CHECK(p);
+ // *
+ if (*p == '*') {
+ dir->suppressed = true;
+ ++p;
+ }
+ // Field width
+ if (*p >= '0' && *p <= '9') {
+ p = parse_number(p, &dir->fieldWidth);
+ CHECK(p);
+ if (dir->fieldWidth <= 0) // The width, if specified, must be non-zero.
+ return nullptr;
+ }
+ // m
+ if (*p == 'm') {
+ dir->allocate = true;
+ ++p;
+ }
+ // Length modifier.
+ p = maybe_parse_length_modifier(p, dir->lengthModifier);
+ // Conversion specifier.
+ dir->convSpecifier = *p++;
+ // Consume %[...] expression.
+ if (dir->convSpecifier == '[') {
+ if (*p == '^')
+ ++p;
+ if (*p == ']')
+ ++p;
+ while (*p && *p != ']')
+ ++p;
+ if (*p == 0)
+ return nullptr; // unexpected end of string
+ // Consume the closing ']'.
+ ++p;
+ }
+ // This is unfortunately ambiguous between the old GNU extension of
+ // %as, %aS and %a[...] and the newer POSIX %a followed by the
+ // letters s, S or [.
+ if (allowGnuMalloc && dir->convSpecifier == 'a' &&
+ !dir->lengthModifier[0]) {
+ if (*p == 's' || *p == 'S') {
+ dir->maybeGnuMalloc = true;
+ ++p;
+ } else if (*p == '[') {
+ // Watch for %a[h-j%d]: if % appears inside the [...] range, we
+ // have to give up, because we cannot tell whether scanf will
+ // parse it as POSIX %a followed by [h-j %d ] or as a GNU
+ // allocation of a string with range dh-j plus %.
+ const char *q = p + 1;
+ if (*q == '^')
+ ++q;
+ if (*q == ']')
+ ++q;
+ while (*q && *q != ']' && *q != '%')
+ ++q;
+ if (*q == 0 || *q == '%')
+ return nullptr;
+ p = q + 1; // Consume the closing ']'.
+ dir->maybeGnuMalloc = true;
+ }
+ }
+ dir->end = p;
+ break;
+ }
+ return p;
+}
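+// Editor's sketch (not part of the upstream source): for "%10s rest",
+// scanf_parse_next() fills dir.fieldWidth == 10, dir.convSpecifier ==
+// 's', dir.suppressed == false, and returns a pointer to " rest"; for
+// "%*d" it sets dir.suppressed, so the caller skips the directive
+// without consuming a va_arg slot.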
+
+static int scanf_get_value_size(ScanfDirective *dir) {
+ if (dir->allocate) {
+ if (!char_is_one_of(dir->convSpecifier, "cCsS["))
+ return FSS_INVALID;
+ return sizeof(char *);
+ }
+
+ if (dir->maybeGnuMalloc) {
+ if (dir->convSpecifier != 'a' || dir->lengthModifier[0])
+ return FSS_INVALID;
+ // This is ambiguous, so use the smaller of sizeof(char *) (if it is
+ // the GNU extension %as, %aS or %a[...]) and sizeof(float) (if it is
+ // POSIX %a followed by the letters s, S or [).
+ return sizeof(char *) < sizeof(float) ? sizeof(char *) : sizeof(float);
+ }
+
+ if (char_is_one_of(dir->convSpecifier, "cCsS[")) {
+ bool needsTerminator = char_is_one_of(dir->convSpecifier, "sS[");
+ unsigned charSize =
+ format_get_char_size(dir->convSpecifier, dir->lengthModifier);
+ if (charSize == 0)
+ return FSS_INVALID;
+ if (dir->fieldWidth == 0) {
+ if (!needsTerminator)
+ return charSize;
+ return (charSize == sizeof(char)) ? FSS_STRLEN : FSS_WCSLEN;
+ }
+ return (dir->fieldWidth + needsTerminator) * charSize;
+ }
+
+ return format_get_value_size(dir->convSpecifier, dir->lengthModifier, false);
+}
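+// Editor's sketch (not part of the upstream source): "%10s" stores
+// (10 + 1) * sizeof(char) bytes (field width plus terminator), "%10c"
+// stores exactly 10 bytes (no terminator), and a bare "%s" yields
+// FSS_STRLEN, resolved later via strlen() of the destination buffer.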
+
+// Common part of *scanf interceptors.
+// Process format string and va_list, and report all store ranges.
+// Stops when "consuming" n_inputs input items.
+static void scanf_common(void *ctx, int n_inputs, bool allowGnuMalloc,
+ const char *format, va_list aq) {
+ CHECK_GT(n_inputs, 0);
+ const char *p = format;
+
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, format, internal_strlen(format) + 1);
+
+ while (*p) {
+ ScanfDirective dir;
+ p = scanf_parse_next(p, allowGnuMalloc, &dir);
+ if (!p)
+ break;
+ if (dir.convSpecifier == 0) {
+ // This can only happen at the end of the format string.
+ CHECK_EQ(*p, 0);
+ break;
+ }
+ // Here the directive is valid. Do what it says.
+ if (dir.argIdx != -1) {
+ // Unsupported.
+ break;
+ }
+ if (dir.suppressed)
+ continue;
+ int size = scanf_get_value_size(&dir);
+ if (size == FSS_INVALID) {
+ Report("%s: WARNING: unexpected format specifier in scanf interceptor: "
+ "%.*s\n",
+ SanitizerToolName, static_cast<int>(dir.end - dir.begin), dir.begin);
+ break;
+ }
+ void *argp = va_arg(aq, void *);
+ if (dir.convSpecifier != 'n')
+ --n_inputs;
+ if (n_inputs < 0)
+ break;
+ if (size == FSS_STRLEN) {
+ size = internal_strlen((const char *)argp) + 1;
+ } else if (size == FSS_WCSLEN) {
+ // FIXME: actually use wcslen() to calculate it.
+ size = 0;
+ }
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, argp, size);
+ }
+}
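+// Editor's sketch (not part of the upstream source, variable names
+// illustrative): the v*scanf interceptors are expected to drive this
+// over a va_copy, passing the number of matched items as n_inputs:
+//   va_list aq;
+//   va_copy(aq, ap);
+//   int res = REAL(vscanf)(format, ap);
+//   if (res > 0)
+//     scanf_common(ctx, res, /*allowGnuMalloc=*/true, format, aq);
+//   va_end(aq);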
+
+#if SANITIZER_INTERCEPT_PRINTF
+
+struct PrintfDirective {
+ int fieldWidth;
+ int fieldPrecision;
+ int argIdx; // width argument index, or -1 if not specified ("%*n$")
+ int precisionIdx; // precision argument index, or -1 if not specified (".*n$")
+ const char *begin;
+ const char *end;
+ bool starredWidth;
+ bool starredPrecision;
+ char lengthModifier[2];
+ char convSpecifier;
+};
+
+static const char *maybe_parse_number(const char *p, int *out) {
+ if (*p >= '0' && *p <= '9')
+ p = parse_number(p, out);
+ return p;
+}
+
+static const char *maybe_parse_number_or_star(const char *p, int *out,
+ bool *star) {
+ if (*p == '*') {
+ *star = true;
+ ++p;
+ } else {
+ *star = false;
+ p = maybe_parse_number(p, out);
+ }
+ return p;
+}
+
+// Parse printf format string. Same contract as scanf_parse_next().
+static const char *printf_parse_next(const char *p, PrintfDirective *dir) {
+ internal_memset(dir, 0, sizeof(*dir));
+ dir->argIdx = -1;
+ dir->precisionIdx = -1;
+
+ while (*p) {
+ if (*p != '%') {
+ ++p;
+ continue;
+ }
+ dir->begin = p;
+ ++p;
+ // %%
+ if (*p == '%') {
+ ++p;
+ continue;
+ }
+ if (*p == '\0') {
+ return nullptr;
+ }
+ // %n$
+ p = maybe_parse_param_index(p, &dir->precisionIdx);
+ CHECK(p);
+ // Flags
+ while (char_is_one_of(*p, "'-+ #0")) {
+ ++p;
+ }
+ // Field width
+ p = maybe_parse_number_or_star(p, &dir->fieldWidth,
+ &dir->starredWidth);
+ if (!p)
+ return nullptr;
+ // Precision
+ if (*p == '.') {
+ ++p;
+ // Actual precision is optional (surprise!)
+ p = maybe_parse_number_or_star(p, &dir->fieldPrecision,
+ &dir->starredPrecision);
+ if (!p)
+ return nullptr;
+ // m$
+ if (dir->starredPrecision) {
+ p = maybe_parse_param_index(p, &dir->precisionIdx);
+ CHECK(p);
+ }
+ }
+ // Length modifier.
+ p = maybe_parse_length_modifier(p, dir->lengthModifier);
+ // Conversion specifier.
+ dir->convSpecifier = *p++;
+ dir->end = p;
+ break;
+ }
+ return p;
+}
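+// Editor's sketch (not part of the upstream source): for "%-8.*lf" this
+// fills dir.fieldWidth == 8, dir.starredPrecision == true,
+// dir.lengthModifier[0] == 'l' and dir.convSpecifier == 'f'.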
+
+static int printf_get_value_size(PrintfDirective *dir) {
+ if (char_is_one_of(dir->convSpecifier, "cCsS")) {
+ unsigned charSize =
+ format_get_char_size(dir->convSpecifier, dir->lengthModifier);
+ if (charSize == 0)
+ return FSS_INVALID;
+ if (char_is_one_of(dir->convSpecifier, "sS")) {
+ return (charSize == sizeof(char)) ? FSS_STRLEN : FSS_WCSLEN;
+ }
+ return charSize;
+ }
+
+ return format_get_value_size(dir->convSpecifier, dir->lengthModifier, true);
+}
+
+#define SKIP_SCALAR_ARG(aq, convSpecifier, size) \
+ do { \
+ if (format_is_float_conv(convSpecifier)) { \
+ switch (size) { \
+ case 8: \
+ va_arg(*aq, double); \
+ break; \
+ case 12: \
+ va_arg(*aq, long double); \
+ break; \
+ case 16: \
+ va_arg(*aq, long double); \
+ break; \
+ default: \
+ Report("WARNING: unexpected floating-point arg size" \
+ " in printf interceptor: %d\n", size); \
+ return; \
+ } \
+ } else { \
+ switch (size) { \
+ case 1: \
+ case 2: \
+ case 4: \
+ va_arg(*aq, u32); \
+ break; \
+ case 8: \
+ va_arg(*aq, u64); \
+ break; \
+ default: \
+ Report("WARNING: unexpected arg size" \
+ " in printf interceptor: %d\n", size); \
+ return; \
+ } \
+ } \
+ } while (0)
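+// Editor's note: the u32/u64 buckets above rely on C's default argument
+// promotions -- variadic char and short arguments arrive promoted to
+// int, and float to double -- so consuming one 4- or 8-byte slot keeps
+// the va_list cursor in sync with the real formatting function.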
+
+// Common part of *printf interceptors.
+// Process format string and va_list, and report all load ranges.
+static void printf_common(void *ctx, const char *format, va_list aq) {
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, format, internal_strlen(format) + 1);
+
+ const char *p = format;
+
+ while (*p) {
+ PrintfDirective dir;
+ p = printf_parse_next(p, &dir);
+ if (!p)
+ break;
+ if (dir.convSpecifier == 0) {
+ // This can only happen at the end of the format string.
+ CHECK_EQ(*p, 0);
+ break;
+ }
+ // Here the directive is valid. Do what it says.
+ if (dir.argIdx != -1 || dir.precisionIdx != -1) {
+ // Unsupported.
+ break;
+ }
+ if (dir.starredWidth) {
+ // Dynamic width
+ SKIP_SCALAR_ARG(&aq, 'd', sizeof(int));
+ }
+ if (dir.starredPrecision) {
+ // Dynamic precision
+ SKIP_SCALAR_ARG(&aq, 'd', sizeof(int));
+ }
+ // %m does not consume an argument; it prints strerror(errno).
+ if (dir.convSpecifier == 'm')
+ continue;
+ int size = printf_get_value_size(&dir);
+ if (size == FSS_INVALID) {
+ static int ReportedOnce;
+ if (!ReportedOnce++)
+ Report(
+ "%s: WARNING: unexpected format specifier in printf "
+ "interceptor: %.*s (reported once per process)\n",
+ SanitizerToolName, static_cast<int>(dir.end - dir.begin),
+ dir.begin);
+ break;
+ }
+ if (dir.convSpecifier == 'n') {
+ void *argp = va_arg(aq, void *);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, argp, size);
+ continue;
+ } else if (size == FSS_STRLEN) {
+ if (void *argp = va_arg(aq, void *)) {
+ if (dir.starredPrecision) {
+ // FIXME: properly support starred precision for strings.
+ size = 0;
+ } else if (dir.fieldPrecision > 0) {
+ // Won't read more than "precision" symbols.
+ size = internal_strnlen((const char *)argp, dir.fieldPrecision);
+ if (size < dir.fieldPrecision) size++;
+ } else {
+ // Whole string will be accessed.
+ size = internal_strlen((const char *)argp) + 1;
+ }
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, argp, size);
+ }
+ } else if (size == FSS_WCSLEN) {
+ if (void *argp = va_arg(aq, void *)) {
+ // FIXME: Properly support wide-character strings (via wcsrtombs).
+ size = 0;
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, argp, size);
+ }
+ } else {
+ // Skip non-pointer args
+ SKIP_SCALAR_ARG(&aq, dir.convSpecifier, size);
+ }
+ }
+}
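+// Editor's sketch (not part of the upstream source, variable names
+// illustrative): the v*printf interceptors are expected to run this over
+// a va_copy before invoking the real function, roughly:
+//   va_list aq;
+//   va_copy(aq, ap);
+//   if (common_flags()->check_printf)
+//     printf_common(ctx, format, aq);
+//   va_end(aq);
+//   int res = REAL(vprintf)(format, ap);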
+
+#endif // SANITIZER_INTERCEPT_PRINTF
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_common_interceptors_format.inc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_common_interceptors_ioctl.inc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_common_interceptors_ioctl.inc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_common_interceptors_ioctl.inc (revision 351984)
@@ -0,0 +1,609 @@
+//===-- sanitizer_common_interceptors_ioctl.inc -----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Ioctl handling in common sanitizer interceptors.
+//===----------------------------------------------------------------------===//
+
+#if !SANITIZER_NETBSD
+
+#include "sanitizer_flags.h"
+
+struct ioctl_desc {
+ unsigned req;
+ // FIXME: support read+write arguments. Currently READWRITE and WRITE do the
+ // same thing.
+ // XXX: The declarations below may use WRITE where READWRITE would be
+ // more precise, unless explicitly noted.
+ enum {
+ NONE,
+ READ,
+ WRITE,
+ READWRITE,
+ CUSTOM
+ } type : 3;
+ unsigned size : 29;
+ const char* name;
+};
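+// Note: type records the access the kernel performs on the caller's
+// argument memory, e.g. TCGETS is registered below as WRITE (the kernel
+// fills a struct termios) while TCSETS is READ (the kernel reads one).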
+
+const unsigned ioctl_table_max = 500;
+static ioctl_desc ioctl_table[ioctl_table_max];
+static unsigned ioctl_table_size = 0;
+
+// This cannot be declared as a global: references to struct_*_sz would
+// require a global initializer, and this table must be available before
+// global initializers are run.
+static void ioctl_table_fill() {
+#define _(rq, tp, sz) \
+ if (IOCTL_##rq != IOCTL_NOT_PRESENT) { \
+ CHECK(ioctl_table_size < ioctl_table_max); \
+ ioctl_table[ioctl_table_size].req = IOCTL_##rq; \
+ ioctl_table[ioctl_table_size].type = ioctl_desc::tp; \
+ ioctl_table[ioctl_table_size].size = sz; \
+ ioctl_table[ioctl_table_size].name = #rq; \
+ ++ioctl_table_size; \
+ }
+
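+ // Editor's sketch (not part of the upstream source): each _() line
+ // below expands to a guarded registration, e.g. _(FIOASYNC, READ,
+ // sizeof(int)) appends {IOCTL_FIOASYNC, ioctl_desc::READ, sizeof(int),
+ // "FIOASYNC"} to ioctl_table when IOCTL_FIOASYNC is present on the
+ // target platform.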
+ _(FIOASYNC, READ, sizeof(int));
+ _(FIOCLEX, NONE, 0);
+ _(FIOGETOWN, WRITE, sizeof(int));
+ _(FIONBIO, READ, sizeof(int));
+ _(FIONCLEX, NONE, 0);
+ _(FIOSETOWN, READ, sizeof(int));
+ _(SIOCATMARK, WRITE, sizeof(int));
+ _(SIOCGIFCONF, CUSTOM, 0);
+ _(SIOCGPGRP, WRITE, sizeof(int));
+ _(SIOCSPGRP, READ, sizeof(int));
+#if !SANITIZER_SOLARIS
+ _(TIOCCONS, NONE, 0);
+#endif
+ _(TIOCEXCL, NONE, 0);
+ _(TIOCGETD, WRITE, sizeof(int));
+ _(TIOCGPGRP, WRITE, pid_t_sz);
+ _(TIOCGWINSZ, WRITE, struct_winsize_sz);
+ _(TIOCMBIC, READ, sizeof(int));
+ _(TIOCMBIS, READ, sizeof(int));
+ _(TIOCMGET, WRITE, sizeof(int));
+ _(TIOCMSET, READ, sizeof(int));
+ _(TIOCNOTTY, NONE, 0);
+ _(TIOCNXCL, NONE, 0);
+ _(TIOCOUTQ, WRITE, sizeof(int));
+ _(TIOCPKT, READ, sizeof(int));
+ _(TIOCSCTTY, NONE, 0);
+ _(TIOCSETD, READ, sizeof(int));
+ _(TIOCSPGRP, READ, pid_t_sz);
+ _(TIOCSTI, READ, sizeof(char));
+ _(TIOCSWINSZ, READ, struct_winsize_sz);
+
+#if !SANITIZER_IOS
+ _(SIOCADDMULTI, READ, struct_ifreq_sz);
+ _(SIOCDELMULTI, READ, struct_ifreq_sz);
+ _(SIOCGIFADDR, WRITE, struct_ifreq_sz);
+ _(SIOCGIFBRDADDR, WRITE, struct_ifreq_sz);
+ _(SIOCGIFDSTADDR, WRITE, struct_ifreq_sz);
+ _(SIOCGIFFLAGS, WRITE, struct_ifreq_sz);
+ _(SIOCGIFMETRIC, WRITE, struct_ifreq_sz);
+ _(SIOCGIFMTU, WRITE, struct_ifreq_sz);
+ _(SIOCGIFNETMASK, WRITE, struct_ifreq_sz);
+ _(SIOCSIFADDR, READ, struct_ifreq_sz);
+ _(SIOCSIFBRDADDR, READ, struct_ifreq_sz);
+ _(SIOCSIFDSTADDR, READ, struct_ifreq_sz);
+ _(SIOCSIFFLAGS, READ, struct_ifreq_sz);
+ _(SIOCSIFMETRIC, READ, struct_ifreq_sz);
+ _(SIOCSIFMTU, READ, struct_ifreq_sz);
+ _(SIOCSIFNETMASK, READ, struct_ifreq_sz);
+#endif
+
+#if (SANITIZER_LINUX && !SANITIZER_ANDROID)
+ _(SIOCGETSGCNT, WRITE, struct_sioc_sg_req_sz);
+ _(SIOCGETVIFCNT, WRITE, struct_sioc_vif_req_sz);
+#endif
+
+#if SANITIZER_LINUX
+ // Conflicting request ids.
+ // _(CDROMAUDIOBUFSIZ, NONE, 0);
+ // _(SNDCTL_TMR_CONTINUE, NONE, 0);
+ // _(SNDCTL_TMR_START, NONE, 0);
+ // _(SNDCTL_TMR_STOP, NONE, 0);
+ // _(SOUND_MIXER_READ_LOUD, WRITE, sizeof(int)); // same as ...READ_ENHANCE
+ // _(SOUND_MIXER_READ_MUTE, WRITE, sizeof(int)); // same as ...READ_ENHANCE
+ // _(SOUND_MIXER_WRITE_LOUD, WRITE, sizeof(int)); // same as ...WRITE_ENHANCE
+ // _(SOUND_MIXER_WRITE_MUTE, WRITE, sizeof(int)); // same as ...WRITE_ENHANCE
+ _(BLKFLSBUF, NONE, 0);
+ _(BLKGETSIZE, WRITE, sizeof(uptr));
+ _(BLKRAGET, WRITE, sizeof(int));
+ _(BLKRASET, NONE, 0);
+ _(BLKROGET, WRITE, sizeof(int));
+ _(BLKROSET, READ, sizeof(int));
+ _(BLKRRPART, NONE, 0);
+ _(CDROMEJECT, NONE, 0);
+ _(CDROMEJECT_SW, NONE, 0);
+ _(CDROMMULTISESSION, WRITE, struct_cdrom_multisession_sz);
+ _(CDROMPAUSE, NONE, 0);
+ _(CDROMPLAYMSF, READ, struct_cdrom_msf_sz);
+ _(CDROMPLAYTRKIND, READ, struct_cdrom_ti_sz);
+ _(CDROMREADAUDIO, READ, struct_cdrom_read_audio_sz);
+ _(CDROMREADCOOKED, READ, struct_cdrom_msf_sz);
+ _(CDROMREADMODE1, READ, struct_cdrom_msf_sz);
+ _(CDROMREADMODE2, READ, struct_cdrom_msf_sz);
+ _(CDROMREADRAW, READ, struct_cdrom_msf_sz);
+ _(CDROMREADTOCENTRY, WRITE, struct_cdrom_tocentry_sz);
+ _(CDROMREADTOCHDR, WRITE, struct_cdrom_tochdr_sz);
+ _(CDROMRESET, NONE, 0);
+ _(CDROMRESUME, NONE, 0);
+ _(CDROMSEEK, READ, struct_cdrom_msf_sz);
+ _(CDROMSTART, NONE, 0);
+ _(CDROMSTOP, NONE, 0);
+ _(CDROMSUBCHNL, WRITE, struct_cdrom_subchnl_sz);
+ _(CDROMVOLCTRL, READ, struct_cdrom_volctrl_sz);
+ _(CDROMVOLREAD, WRITE, struct_cdrom_volctrl_sz);
+ _(CDROM_GET_UPC, WRITE, 8);
+ _(EVIOCGABS, WRITE, struct_input_absinfo_sz); // fixup
+ _(EVIOCGBIT, WRITE, struct_input_id_sz); // fixup
+ _(EVIOCGEFFECTS, WRITE, sizeof(int));
+ _(EVIOCGID, WRITE, struct_input_id_sz);
+ _(EVIOCGKEY, WRITE, 0);
+ _(EVIOCGKEYCODE, WRITE, sizeof(int) * 2);
+ _(EVIOCGLED, WRITE, 0);
+ _(EVIOCGNAME, WRITE, 0);
+ _(EVIOCGPHYS, WRITE, 0);
+ _(EVIOCGRAB, READ, sizeof(int));
+ _(EVIOCGREP, WRITE, sizeof(int) * 2);
+ _(EVIOCGSND, WRITE, 0);
+ _(EVIOCGSW, WRITE, 0);
+ _(EVIOCGUNIQ, WRITE, 0);
+ _(EVIOCGVERSION, WRITE, sizeof(int));
+ _(EVIOCRMFF, READ, sizeof(int));
+ _(EVIOCSABS, READ, struct_input_absinfo_sz); // fixup
+ _(EVIOCSFF, READ, struct_ff_effect_sz);
+ _(EVIOCSKEYCODE, READ, sizeof(int) * 2);
+ _(EVIOCSREP, READ, sizeof(int) * 2);
+ _(FDCLRPRM, NONE, 0);
+ _(FDDEFPRM, READ, struct_floppy_struct_sz);
+ _(FDFLUSH, NONE, 0);
+ _(FDFMTBEG, NONE, 0);
+ _(FDFMTEND, NONE, 0);
+ _(FDFMTTRK, READ, struct_format_descr_sz);
+ _(FDGETDRVPRM, WRITE, struct_floppy_drive_params_sz);
+ _(FDGETDRVSTAT, WRITE, struct_floppy_drive_struct_sz);
+ _(FDGETDRVTYP, WRITE, 16);
+ _(FDGETFDCSTAT, WRITE, struct_floppy_fdc_state_sz);
+ _(FDGETMAXERRS, WRITE, struct_floppy_max_errors_sz);
+ _(FDGETPRM, WRITE, struct_floppy_struct_sz);
+ _(FDMSGOFF, NONE, 0);
+ _(FDMSGON, NONE, 0);
+ _(FDPOLLDRVSTAT, WRITE, struct_floppy_drive_struct_sz);
+ _(FDRAWCMD, WRITE, struct_floppy_raw_cmd_sz);
+ _(FDRESET, NONE, 0);
+ _(FDSETDRVPRM, READ, struct_floppy_drive_params_sz);
+ _(FDSETEMSGTRESH, NONE, 0);
+ _(FDSETMAXERRS, READ, struct_floppy_max_errors_sz);
+ _(FDSETPRM, READ, struct_floppy_struct_sz);
+ _(FDTWADDLE, NONE, 0);
+ _(FDWERRORCLR, NONE, 0);
+ _(FDWERRORGET, WRITE, struct_floppy_write_errors_sz);
+ _(HDIO_DRIVE_CMD, WRITE, sizeof(int));
+ _(HDIO_GETGEO, WRITE, struct_hd_geometry_sz);
+ _(HDIO_GET_32BIT, WRITE, sizeof(int));
+ _(HDIO_GET_DMA, WRITE, sizeof(int));
+ _(HDIO_GET_IDENTITY, WRITE, struct_hd_driveid_sz);
+ _(HDIO_GET_KEEPSETTINGS, WRITE, sizeof(int));
+ _(HDIO_GET_MULTCOUNT, WRITE, sizeof(int));
+ _(HDIO_GET_NOWERR, WRITE, sizeof(int));
+ _(HDIO_GET_UNMASKINTR, WRITE, sizeof(int));
+ _(HDIO_SET_32BIT, NONE, 0);
+ _(HDIO_SET_DMA, NONE, 0);
+ _(HDIO_SET_KEEPSETTINGS, NONE, 0);
+ _(HDIO_SET_MULTCOUNT, NONE, 0);
+ _(HDIO_SET_NOWERR, NONE, 0);
+ _(HDIO_SET_UNMASKINTR, NONE, 0);
+ _(MTIOCGET, WRITE, struct_mtget_sz);
+ _(MTIOCPOS, WRITE, struct_mtpos_sz);
+ _(MTIOCTOP, READ, struct_mtop_sz);
+ _(PPPIOCGASYNCMAP, WRITE, sizeof(int));
+ _(PPPIOCGDEBUG, WRITE, sizeof(int));
+ _(PPPIOCGFLAGS, WRITE, sizeof(int));
+ _(PPPIOCGUNIT, WRITE, sizeof(int));
+ _(PPPIOCGXASYNCMAP, WRITE, sizeof(int) * 8);
+ _(PPPIOCSASYNCMAP, READ, sizeof(int));
+ _(PPPIOCSDEBUG, READ, sizeof(int));
+ _(PPPIOCSFLAGS, READ, sizeof(int));
+ _(PPPIOCSMAXCID, READ, sizeof(int));
+ _(PPPIOCSMRU, READ, sizeof(int));
+ _(PPPIOCSXASYNCMAP, READ, sizeof(int) * 8);
+ _(SIOCADDRT, READ, struct_rtentry_sz);
+ _(SIOCDARP, READ, struct_arpreq_sz);
+ _(SIOCDELRT, READ, struct_rtentry_sz);
+ _(SIOCDRARP, READ, struct_arpreq_sz);
+ _(SIOCGARP, WRITE, struct_arpreq_sz);
+ _(SIOCGIFENCAP, WRITE, sizeof(int));
+ _(SIOCGIFHWADDR, WRITE, struct_ifreq_sz);
+ _(SIOCGIFMAP, WRITE, struct_ifreq_sz);
+ _(SIOCGIFMEM, WRITE, struct_ifreq_sz);
+ _(SIOCGIFNAME, NONE, 0);
+ _(SIOCGIFSLAVE, NONE, 0);
+ _(SIOCGRARP, WRITE, struct_arpreq_sz);
+ _(SIOCGSTAMP, WRITE, timeval_sz);
+ _(SIOCSARP, READ, struct_arpreq_sz);
+ _(SIOCSIFENCAP, READ, sizeof(int));
+ _(SIOCSIFHWADDR, READ, struct_ifreq_sz);
+ _(SIOCSIFLINK, NONE, 0);
+ _(SIOCSIFMAP, READ, struct_ifreq_sz);
+ _(SIOCSIFMEM, READ, struct_ifreq_sz);
+ _(SIOCSIFSLAVE, NONE, 0);
+ _(SIOCSRARP, READ, struct_arpreq_sz);
+ _(SNDCTL_COPR_HALT, WRITE, struct_copr_debug_buf_sz);
+ _(SNDCTL_COPR_LOAD, READ, struct_copr_buffer_sz);
+ _(SNDCTL_COPR_RCODE, WRITE, struct_copr_debug_buf_sz);
+ _(SNDCTL_COPR_RCVMSG, WRITE, struct_copr_msg_sz);
+ _(SNDCTL_COPR_RDATA, WRITE, struct_copr_debug_buf_sz);
+ _(SNDCTL_COPR_RESET, NONE, 0);
+ _(SNDCTL_COPR_RUN, WRITE, struct_copr_debug_buf_sz);
+ _(SNDCTL_COPR_SENDMSG, READ, struct_copr_msg_sz);
+ _(SNDCTL_COPR_WCODE, READ, struct_copr_debug_buf_sz);
+ _(SNDCTL_COPR_WDATA, READ, struct_copr_debug_buf_sz);
+ _(SNDCTL_DSP_GETBLKSIZE, WRITE, sizeof(int));
+ _(SNDCTL_DSP_GETFMTS, WRITE, sizeof(int));
+ _(SNDCTL_DSP_NONBLOCK, NONE, 0);
+ _(SNDCTL_DSP_POST, NONE, 0);
+ _(SNDCTL_DSP_RESET, NONE, 0);
+ _(SNDCTL_DSP_SETFMT, WRITE, sizeof(int));
+ _(SNDCTL_DSP_SETFRAGMENT, WRITE, sizeof(int));
+ _(SNDCTL_DSP_SPEED, WRITE, sizeof(int));
+ _(SNDCTL_DSP_STEREO, WRITE, sizeof(int));
+ _(SNDCTL_DSP_SUBDIVIDE, WRITE, sizeof(int));
+ _(SNDCTL_DSP_SYNC, NONE, 0);
+ _(SNDCTL_FM_4OP_ENABLE, READ, sizeof(int));
+ _(SNDCTL_FM_LOAD_INSTR, READ, struct_sbi_instrument_sz);
+ _(SNDCTL_MIDI_INFO, WRITE, struct_midi_info_sz);
+ _(SNDCTL_MIDI_PRETIME, WRITE, sizeof(int));
+ _(SNDCTL_SEQ_CTRLRATE, WRITE, sizeof(int));
+ _(SNDCTL_SEQ_GETINCOUNT, WRITE, sizeof(int));
+ _(SNDCTL_SEQ_GETOUTCOUNT, WRITE, sizeof(int));
+ _(SNDCTL_SEQ_NRMIDIS, WRITE, sizeof(int));
+ _(SNDCTL_SEQ_NRSYNTHS, WRITE, sizeof(int));
+ _(SNDCTL_SEQ_OUTOFBAND, READ, struct_seq_event_rec_sz);
+ _(SNDCTL_SEQ_PANIC, NONE, 0);
+ _(SNDCTL_SEQ_PERCMODE, NONE, 0);
+ _(SNDCTL_SEQ_RESET, NONE, 0);
+ _(SNDCTL_SEQ_RESETSAMPLES, READ, sizeof(int));
+ _(SNDCTL_SEQ_SYNC, NONE, 0);
+ _(SNDCTL_SEQ_TESTMIDI, READ, sizeof(int));
+ _(SNDCTL_SEQ_THRESHOLD, READ, sizeof(int));
+ _(SNDCTL_SYNTH_INFO, WRITE, struct_synth_info_sz);
+ _(SNDCTL_SYNTH_MEMAVL, WRITE, sizeof(int));
+ _(SNDCTL_TMR_METRONOME, READ, sizeof(int));
+ _(SNDCTL_TMR_SELECT, WRITE, sizeof(int));
+ _(SNDCTL_TMR_SOURCE, WRITE, sizeof(int));
+ _(SNDCTL_TMR_TEMPO, WRITE, sizeof(int));
+ _(SNDCTL_TMR_TIMEBASE, WRITE, sizeof(int));
+ _(SOUND_MIXER_READ_ALTPCM, WRITE, sizeof(int));
+ _(SOUND_MIXER_READ_BASS, WRITE, sizeof(int));
+ _(SOUND_MIXER_READ_CAPS, WRITE, sizeof(int));
+ _(SOUND_MIXER_READ_CD, WRITE, sizeof(int));
+ _(SOUND_MIXER_READ_DEVMASK, WRITE, sizeof(int));
+ _(SOUND_MIXER_READ_ENHANCE, WRITE, sizeof(int));
+ _(SOUND_MIXER_READ_IGAIN, WRITE, sizeof(int));
+ _(SOUND_MIXER_READ_IMIX, WRITE, sizeof(int));
+ _(SOUND_MIXER_READ_LINE, WRITE, sizeof(int));
+ _(SOUND_MIXER_READ_LINE1, WRITE, sizeof(int));
+ _(SOUND_MIXER_READ_LINE2, WRITE, sizeof(int));
+ _(SOUND_MIXER_READ_LINE3, WRITE, sizeof(int));
+ _(SOUND_MIXER_READ_MIC, WRITE, sizeof(int));
+ _(SOUND_MIXER_READ_OGAIN, WRITE, sizeof(int));
+ _(SOUND_MIXER_READ_PCM, WRITE, sizeof(int));
+ _(SOUND_MIXER_READ_RECLEV, WRITE, sizeof(int));
+ _(SOUND_MIXER_READ_RECMASK, WRITE, sizeof(int));
+ _(SOUND_MIXER_READ_RECSRC, WRITE, sizeof(int));
+ _(SOUND_MIXER_READ_SPEAKER, WRITE, sizeof(int));
+ _(SOUND_MIXER_READ_STEREODEVS, WRITE, sizeof(int));
+ _(SOUND_MIXER_READ_SYNTH, WRITE, sizeof(int));
+ _(SOUND_MIXER_READ_TREBLE, WRITE, sizeof(int));
+ _(SOUND_MIXER_READ_VOLUME, WRITE, sizeof(int));
+ _(SOUND_MIXER_WRITE_ALTPCM, WRITE, sizeof(int));
+ _(SOUND_MIXER_WRITE_BASS, WRITE, sizeof(int));
+ _(SOUND_MIXER_WRITE_CD, WRITE, sizeof(int));
+ _(SOUND_MIXER_WRITE_ENHANCE, WRITE, sizeof(int));
+ _(SOUND_MIXER_WRITE_IGAIN, WRITE, sizeof(int));
+ _(SOUND_MIXER_WRITE_IMIX, WRITE, sizeof(int));
+ _(SOUND_MIXER_WRITE_LINE, WRITE, sizeof(int));
+ _(SOUND_MIXER_WRITE_LINE1, WRITE, sizeof(int));
+ _(SOUND_MIXER_WRITE_LINE2, WRITE, sizeof(int));
+ _(SOUND_MIXER_WRITE_LINE3, WRITE, sizeof(int));
+ _(SOUND_MIXER_WRITE_MIC, WRITE, sizeof(int));
+ _(SOUND_MIXER_WRITE_OGAIN, WRITE, sizeof(int));
+ _(SOUND_MIXER_WRITE_PCM, WRITE, sizeof(int));
+ _(SOUND_MIXER_WRITE_RECLEV, WRITE, sizeof(int));
+ _(SOUND_MIXER_WRITE_RECSRC, WRITE, sizeof(int));
+ _(SOUND_MIXER_WRITE_SPEAKER, WRITE, sizeof(int));
+ _(SOUND_MIXER_WRITE_SYNTH, WRITE, sizeof(int));
+ _(SOUND_MIXER_WRITE_TREBLE, WRITE, sizeof(int));
+ _(SOUND_MIXER_WRITE_VOLUME, WRITE, sizeof(int));
+ _(SOUND_PCM_READ_BITS, WRITE, sizeof(int));
+ _(SOUND_PCM_READ_CHANNELS, WRITE, sizeof(int));
+ _(SOUND_PCM_READ_FILTER, WRITE, sizeof(int));
+ _(SOUND_PCM_READ_RATE, WRITE, sizeof(int));
+ _(SOUND_PCM_WRITE_CHANNELS, WRITE, sizeof(int));
+ _(SOUND_PCM_WRITE_FILTER, WRITE, sizeof(int));
+ _(TCFLSH, NONE, 0);
+ _(TCGETA, WRITE, struct_termio_sz);
+ _(TCGETS, WRITE, struct_termios_sz);
+ _(TCSBRK, NONE, 0);
+ _(TCSBRKP, NONE, 0);
+ _(TCSETA, READ, struct_termio_sz);
+ _(TCSETAF, READ, struct_termio_sz);
+ _(TCSETAW, READ, struct_termio_sz);
+ _(TCSETS, READ, struct_termios_sz);
+ _(TCSETSF, READ, struct_termios_sz);
+ _(TCSETSW, READ, struct_termios_sz);
+ _(TCXONC, NONE, 0);
+ _(TIOCGLCKTRMIOS, WRITE, struct_termios_sz);
+ _(TIOCGSOFTCAR, WRITE, sizeof(int));
+ _(TIOCINQ, WRITE, sizeof(int));
+ _(TIOCLINUX, READ, sizeof(char));
+ _(TIOCSERCONFIG, NONE, 0);
+ _(TIOCSERGETLSR, WRITE, sizeof(int));
+ _(TIOCSERGWILD, WRITE, sizeof(int));
+ _(TIOCSERSWILD, READ, sizeof(int));
+ _(TIOCSLCKTRMIOS, READ, struct_termios_sz);
+ _(TIOCSSOFTCAR, READ, sizeof(int));
+ _(VT_ACTIVATE, NONE, 0);
+ _(VT_DISALLOCATE, NONE, 0);
+ _(VT_GETMODE, WRITE, struct_vt_mode_sz);
+ _(VT_GETSTATE, WRITE, struct_vt_stat_sz);
+ _(VT_OPENQRY, WRITE, sizeof(int));
+ _(VT_RELDISP, NONE, 0);
+ _(VT_RESIZE, READ, struct_vt_sizes_sz);
+ _(VT_RESIZEX, READ, struct_vt_consize_sz);
+ _(VT_SENDSIG, NONE, 0);
+ _(VT_SETMODE, READ, struct_vt_mode_sz);
+ _(VT_WAITACTIVE, NONE, 0);
+#endif
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+ // _(SIOCDEVPLIP, WRITE, struct_ifreq_sz); // the same as EQL_ENSLAVE
+ _(CYGETDEFTHRESH, WRITE, sizeof(int));
+ _(CYGETDEFTIMEOUT, WRITE, sizeof(int));
+ _(CYGETMON, WRITE, struct_cyclades_monitor_sz);
+ _(CYGETTHRESH, WRITE, sizeof(int));
+ _(CYGETTIMEOUT, WRITE, sizeof(int));
+ _(CYSETDEFTHRESH, NONE, 0);
+ _(CYSETDEFTIMEOUT, NONE, 0);
+ _(CYSETTHRESH, NONE, 0);
+ _(CYSETTIMEOUT, NONE, 0);
+ _(EQL_EMANCIPATE, WRITE, struct_ifreq_sz);
+ _(EQL_ENSLAVE, WRITE, struct_ifreq_sz);
+ _(EQL_GETMASTRCFG, WRITE, struct_ifreq_sz);
+ _(EQL_GETSLAVECFG, WRITE, struct_ifreq_sz);
+ _(EQL_SETMASTRCFG, WRITE, struct_ifreq_sz);
+ _(EQL_SETSLAVECFG, WRITE, struct_ifreq_sz);
+ _(EVIOCGKEYCODE_V2, WRITE, struct_input_keymap_entry_sz);
+ _(EVIOCGPROP, WRITE, 0);
+ _(EVIOCSKEYCODE_V2, READ, struct_input_keymap_entry_sz);
+ _(FS_IOC_GETFLAGS, WRITE, sizeof(int));
+ _(FS_IOC_GETVERSION, WRITE, sizeof(int));
+ _(FS_IOC_SETFLAGS, READ, sizeof(int));
+ _(FS_IOC_SETVERSION, READ, sizeof(int));
+ _(GIO_CMAP, WRITE, 48);
+ _(GIO_FONT, WRITE, 8192);
+ _(GIO_SCRNMAP, WRITE, e_tabsz);
+ _(GIO_UNIMAP, WRITE, struct_unimapdesc_sz);
+ _(GIO_UNISCRNMAP, WRITE, sizeof(short) * e_tabsz);
+ _(KDADDIO, NONE, 0);
+ _(KDDELIO, NONE, 0);
+ _(KDDISABIO, NONE, 0);
+ _(KDENABIO, NONE, 0);
+ _(KDGETKEYCODE, WRITE, struct_kbkeycode_sz);
+ _(KDGETLED, WRITE, 1);
+ _(KDGETMODE, WRITE, sizeof(int));
+ _(KDGKBDIACR, WRITE, struct_kbdiacrs_sz);
+ _(KDGKBENT, WRITE, struct_kbentry_sz);
+ _(KDGKBLED, WRITE, sizeof(int));
+ _(KDGKBMETA, WRITE, sizeof(int));
+ _(KDGKBMODE, WRITE, sizeof(int));
+ _(KDGKBSENT, WRITE, struct_kbsentry_sz);
+ _(KDGKBTYPE, WRITE, 1);
+ _(KDMAPDISP, NONE, 0);
+ _(KDMKTONE, NONE, 0);
+ _(KDSETKEYCODE, READ, struct_kbkeycode_sz);
+ _(KDSETLED, NONE, 0);
+ _(KDSETMODE, NONE, 0);
+ _(KDSIGACCEPT, NONE, 0);
+ _(KDSKBDIACR, READ, struct_kbdiacrs_sz);
+ _(KDSKBENT, READ, struct_kbentry_sz);
+ _(KDSKBLED, NONE, 0);
+ _(KDSKBMETA, NONE, 0);
+ _(KDSKBMODE, NONE, 0);
+ _(KDSKBSENT, READ, struct_kbsentry_sz);
+ _(KDUNMAPDISP, NONE, 0);
+ _(KIOCSOUND, NONE, 0);
+ _(LPABORT, NONE, 0);
+ _(LPABORTOPEN, NONE, 0);
+ _(LPCAREFUL, NONE, 0);
+ _(LPCHAR, NONE, 0);
+ _(LPGETIRQ, WRITE, sizeof(int));
+ _(LPGETSTATUS, WRITE, sizeof(int));
+ _(LPRESET, NONE, 0);
+ _(LPSETIRQ, NONE, 0);
+ _(LPTIME, NONE, 0);
+ _(LPWAIT, NONE, 0);
+ _(MTIOCGETCONFIG, WRITE, struct_mtconfiginfo_sz);
+ _(MTIOCSETCONFIG, READ, struct_mtconfiginfo_sz);
+ _(PIO_CMAP, NONE, 0);
+ _(PIO_FONT, READ, 8192);
+ _(PIO_SCRNMAP, READ, e_tabsz);
+ _(PIO_UNIMAP, READ, struct_unimapdesc_sz);
+ _(PIO_UNIMAPCLR, READ, struct_unimapinit_sz);
+ _(PIO_UNISCRNMAP, READ, sizeof(short) * e_tabsz);
+ _(SCSI_IOCTL_PROBE_HOST, READ, sizeof(int));
+ _(SCSI_IOCTL_TAGGED_DISABLE, NONE, 0);
+ _(SCSI_IOCTL_TAGGED_ENABLE, NONE, 0);
+ _(SNDCTL_DSP_GETISPACE, WRITE, struct_audio_buf_info_sz);
+ _(SNDCTL_DSP_GETOSPACE, WRITE, struct_audio_buf_info_sz);
+ _(TIOCGSERIAL, WRITE, struct_serial_struct_sz);
+ _(TIOCSERGETMULTI, WRITE, struct_serial_multiport_struct_sz);
+ _(TIOCSERSETMULTI, READ, struct_serial_multiport_struct_sz);
+ _(TIOCSSERIAL, READ, struct_serial_struct_sz);
+
+ // The following ioctl requests are shared between AX25, IPX, netrom and
+ // mrouted.
+ // _(SIOCAIPXITFCRT, READ, sizeof(char));
+ // _(SIOCAX25GETUID, READ, struct_sockaddr_ax25_sz);
+ // _(SIOCNRGETPARMS, WRITE, struct_nr_parms_struct_sz);
+ // _(SIOCAIPXPRISLT, READ, sizeof(char));
+ // _(SIOCNRSETPARMS, READ, struct_nr_parms_struct_sz);
+ // _(SIOCAX25ADDUID, READ, struct_sockaddr_ax25_sz);
+ // _(SIOCNRDECOBS, NONE, 0);
+ // _(SIOCAX25DELUID, READ, struct_sockaddr_ax25_sz);
+ // _(SIOCIPXCFGDATA, WRITE, struct_ipx_config_data_sz);
+ // _(SIOCAX25NOUID, READ, sizeof(int));
+ // _(SIOCNRRTCTL, READ, sizeof(int));
+ // _(SIOCAX25DIGCTL, READ, sizeof(int));
+ // _(SIOCAX25GETPARMS, WRITE, struct_ax25_parms_struct_sz);
+ // _(SIOCAX25SETPARMS, READ, struct_ax25_parms_struct_sz);
+#endif
+#undef _
+}
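
The `_(request, type, size)` lines above form an X-macro table: an includer-defined `_` macro (defined earlier in this file, outside this hunk) expands each triple into one ioctl_table record, and `#undef _` retires it once the table is full. A minimal, self-contained sketch of the pattern; the struct layout, table capacity, and request ids below are illustrative assumptions, not the runtime's definitions:

#include <cstdio>

struct ioctl_desc {
  unsigned req;
  enum Type { NONE, READ, WRITE, READWRITE, CUSTOM } type;
  unsigned size;
  const char *name;
};

static ioctl_desc ioctl_table[16];
static unsigned ioctl_table_size = 0;

static void ioctl_table_fill() {
// Each _(...) entry expands to one table record, roughly like this:
#define _(rq, tp, sz) \
  ioctl_table[ioctl_table_size++] = {rq, ioctl_desc::tp, sz, #rq}
  _(0x5401u, WRITE, 60);  // TCGETS-like: kernel writes a termios struct
  _(0x540Bu, NONE, 0);    // TCFLSH-like: no pointer argument at all
#undef _
}

int main() {
  ioctl_table_fill();
  for (unsigned i = 0; i < ioctl_table_size; ++i)
    std::printf("%s: req=0x%x size=%u\n", ioctl_table[i].name,
                ioctl_table[i].req, ioctl_table[i].size);
}
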
+
+static bool ioctl_initialized = false;
+
+struct ioctl_desc_compare {
+ bool operator()(const ioctl_desc& left, const ioctl_desc& right) const {
+ return left.req < right.req;
+ }
+};
+
+static void ioctl_init() {
+ ioctl_table_fill();
+ Sort(ioctl_table, ioctl_table_size, ioctl_desc_compare());
+
+ bool bad = false;
+ for (unsigned i = 0; i < ioctl_table_size - 1; ++i) {
+ if (ioctl_table[i].req >= ioctl_table[i + 1].req) {
+ Printf("Duplicate or unsorted ioctl request id %x >= %x (%s vs %s)\n",
+ ioctl_table[i].req, ioctl_table[i + 1].req, ioctl_table[i].name,
+ ioctl_table[i + 1].name);
+ bad = true;
+ }
+ }
+
+ if (bad) Die();
+
+ ioctl_initialized = true;
+}
+
+// Handle the most evil ioctls that encode argument value as part of request id.
+static unsigned ioctl_request_fixup(unsigned req) {
+#if SANITIZER_LINUX
+ // Strip size and event number.
+ const unsigned kEviocgbitMask =
+ (IOC_SIZEMASK << IOC_SIZESHIFT) | EVIOC_EV_MAX;
+ if ((req & ~kEviocgbitMask) == IOCTL_EVIOCGBIT)
+ return IOCTL_EVIOCGBIT;
+ // Strip absolute axis number.
+ if ((req & ~EVIOC_ABS_MAX) == IOCTL_EVIOCGABS)
+ return IOCTL_EVIOCGABS;
+ if ((req & ~EVIOC_ABS_MAX) == IOCTL_EVIOCSABS)
+ return IOCTL_EVIOCSABS;
+#endif
+ return req;
+}
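
EVIOCGBIT, EVIOCGABS and EVIOCSABS are the "evil" cases because part of the argument lives in the request id itself: the axis (or event) number sits in the low bits of the request number, and EVIOCGBIT additionally encodes the caller's buffer length in the size field, so every axis and every buffer size yields a distinct id. Masking those bits off collapses each family onto one canonical table entry. A standalone sketch of the masking, assuming the usual asm-generic ioctl layout (8 nr bits, 8 type bits, 14 size bits, 2 dir bits); the kernel headers are authoritative:

#include <cstdio>

constexpr unsigned kSizeShift = 16;
constexpr unsigned kSizeMask = (1u << 14) - 1;
constexpr unsigned kAbsMax = 0x3f;  // ABS_MAX from <linux/input.h>
constexpr unsigned kEvMax = 0x1f;   // EV_MAX from <linux/input.h>

// EVIOCGABS(abs): strip the axis number to get one canonical id.
unsigned canon_abs(unsigned req) { return req & ~kAbsMax; }

// EVIOCGBIT(ev, len): strip both the event number and the size field.
unsigned canon_gbit(unsigned req) {
  return req & ~((kSizeMask << kSizeShift) | kEvMax);
}

int main() {
  unsigned base = 0x80184540u;  // hypothetical EVIOCGABS(0)-shaped id
  std::printf("axis 0 -> %#x, axis 2 -> %#x\n", canon_abs(base),
              canon_abs(base + 2));  // both collapse to the same id
  std::printf("gbit -> %#x\n", canon_gbit(0x80204520u));
}
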
+
+static const ioctl_desc *ioctl_table_lookup(unsigned req) {
+ int left = 0;
+ int right = ioctl_table_size;
+ while (left < right) {
+ int mid = (left + right) / 2;
+ if (ioctl_table[mid].req < req)
+ left = mid + 1;
+ else
+ right = mid;
+ }
+  // "left" can equal ioctl_table_size when req is greater than every entry,
+  // so bound the index before dereferencing.
+  if (left < (int)ioctl_table_size && ioctl_table[left].req == req)
+    return ioctl_table + left;
+  else
+    return nullptr;
+}
+
+static bool ioctl_decode(unsigned req, ioctl_desc *desc) {
+ CHECK(desc);
+ desc->req = req;
+ desc->name = "<DECODED_IOCTL>";
+ desc->size = IOC_SIZE(req);
+ // Sanity check.
+ if (desc->size > 0xFFFF) return false;
+ unsigned dir = IOC_DIR(req);
+ switch (dir) {
+ case IOC_NONE:
+ desc->type = ioctl_desc::NONE;
+ break;
+ case IOC_READ | IOC_WRITE:
+ desc->type = ioctl_desc::READWRITE;
+ break;
+ case IOC_READ:
+ desc->type = ioctl_desc::WRITE;
+ break;
+ case IOC_WRITE:
+ desc->type = ioctl_desc::READ;
+ break;
+ default:
+ return false;
+ }
+ // Size can be 0 iff type is NONE.
+  if ((desc->type == ioctl_desc::NONE) != (desc->size == 0)) return false;
+ // Sanity check.
+ if (IOC_TYPE(req) == 0) return false;
+ return true;
+}
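
When a request is absent from the table, the id itself carries enough to act on: IOC_DIR() and IOC_SIZE() recover the transfer direction and argument size. Note the deliberate inversion above: _IOC_READ means userspace reads, i.e. the kernel writes the argument, so it maps to ioctl_desc::WRITE (and vice versa). A standalone sketch under the same asm-generic layout assumption:

#include <cstdio>

int main() {
  const unsigned kRead = 2;  // _IOC_READ: the kernel writes to user memory
  // Hypothetical _IOR('T', 0x13, <8-byte struct>)-shaped request id.
  unsigned req = (kRead << 30) | (8u << 16) | ('T' << 8) | 0x13u;
  unsigned dir = (req >> 30) & 0x3;
  unsigned size = (req >> 16) & 0x3fff;
  std::printf("dir=%u size=%u -> treat the argument as a kernel-written "
              "range of %u bytes\n", dir, size, size);
}
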
+
+static const ioctl_desc *ioctl_lookup(unsigned req) {
+ req = ioctl_request_fixup(req);
+ const ioctl_desc *desc = ioctl_table_lookup(req);
+ if (desc) return desc;
+
+ // Try stripping access size from the request id.
+ desc = ioctl_table_lookup(req & ~(IOC_SIZEMASK << IOC_SIZESHIFT));
+  // Sanity check: requests that encode access size are either read or write
+  // and have a size of 0 in the table.
+ if (desc && desc->size == 0 &&
+ (desc->type == ioctl_desc::READWRITE || desc->type == ioctl_desc::WRITE ||
+ desc->type == ioctl_desc::READ))
+ return desc;
+ return nullptr;
+}
+
+static void ioctl_common_pre(void *ctx, const ioctl_desc *desc, int d,
+ unsigned request, void *arg) {
+ if (desc->type == ioctl_desc::READ || desc->type == ioctl_desc::READWRITE) {
+ unsigned size = desc->size ? desc->size : IOC_SIZE(request);
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, arg, size);
+ }
+ if (desc->type != ioctl_desc::CUSTOM)
+ return;
+ if (request == IOCTL_SIOCGIFCONF) {
+ struct __sanitizer_ifconf *ifc = (__sanitizer_ifconf *)arg;
+ COMMON_INTERCEPTOR_READ_RANGE(ctx, (char*)&ifc->ifc_len,
+ sizeof(ifc->ifc_len));
+ }
+}
+
+static void ioctl_common_post(void *ctx, const ioctl_desc *desc, int res, int d,
+ unsigned request, void *arg) {
+ if (desc->type == ioctl_desc::WRITE || desc->type == ioctl_desc::READWRITE) {
+ // FIXME: add verbose output
+ unsigned size = desc->size ? desc->size : IOC_SIZE(request);
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, arg, size);
+ }
+ if (desc->type != ioctl_desc::CUSTOM)
+ return;
+ if (request == IOCTL_SIOCGIFCONF) {
+ struct __sanitizer_ifconf *ifc = (__sanitizer_ifconf *)arg;
+ COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ifc->ifc_ifcu.ifcu_req, ifc->ifc_len);
+ }
+}
+
+#endif
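
SIOCGIFCONF is handled as CUSTOM because no fixed size describes it: the kernel first reads ifc_len (the caller's buffer capacity) and then writes back up to that many bytes of ifreq records, updating ifc_len to the amount actually filled; the pre hook checks the length field and the post hook marks the returned records as written. A user-side Linux sketch of the calling pattern the hooks model (error handling elided):

#include <cstdio>
#include <cstring>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <unistd.h>

int main() {
  int fd = socket(AF_INET, SOCK_DGRAM, 0);
  char buf[4096];
  struct ifconf ifc;
  std::memset(&ifc, 0, sizeof(ifc));
  ifc.ifc_len = sizeof(buf);  // read by the kernel: must be initialized
  ifc.ifc_buf = buf;
  if (ioctl(fd, SIOCGIFCONF, &ifc) == 0) {
    // Written by the kernel: ifc.ifc_len bytes of ifreq records.
    int n = ifc.ifc_len / (int)sizeof(struct ifreq);
    for (int i = 0; i < n; ++i) std::printf("%s\n", ifc.ifc_req[i].ifr_name);
  }
  close(fd);
  return 0;
}
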
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_common_interceptors_ioctl.inc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:executable
## -0,0 +1 ##
+*
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_common_interceptors_vfork_aarch64.inc.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_common_interceptors_vfork_aarch64.inc.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_common_interceptors_vfork_aarch64.inc.S (revision 351984)
@@ -0,0 +1,43 @@
+#if defined(__aarch64__) && defined(__linux__)
+
+#include "sanitizer_common/sanitizer_asm.h"
+
+ASM_HIDDEN(COMMON_INTERCEPTOR_SPILL_AREA)
+
+.comm _ZN14__interception10real_vforkE,8,8
+.globl ASM_WRAPPER_NAME(vfork)
+ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(vfork))
+ASM_WRAPPER_NAME(vfork):
+ // Save x30 in the off-stack spill area.
+ stp xzr, x30, [sp, #-16]!
+ bl COMMON_INTERCEPTOR_SPILL_AREA
+ ldp xzr, x30, [sp], 16
+ str x30, [x0]
+
+  // Call real vfork. This may return twice. User code that runs between the
+  // first and the second return may clobber the stack frame of the
+  // interceptor; that's why it does not have a frame.
+ adrp x0, _ZN14__interception10real_vforkE
+ ldr x0, [x0, :lo12:_ZN14__interception10real_vforkE]
+ blr x0
+
+ stp x0, xzr, [sp, #-16]!
+ cmp x0, #0
+ b.eq .L_exit
+
+ // x0 != 0 => parent process. Clear stack shadow.
+ add x0, sp, #16
+ bl COMMON_INTERCEPTOR_HANDLE_VFORK
+
+.L_exit:
+ // Restore x30.
+ bl COMMON_INTERCEPTOR_SPILL_AREA
+ ldr x30, [x0]
+ ldp x0, xzr, [sp], 16
+
+ ret
+ASM_SIZE(vfork)
+
+.weak vfork
+.set vfork, ASM_WRAPPER_NAME(vfork)
+
+#endif
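
The reason for the frameless wrapper and the off-stack spill area: after vfork() the child executes on the parent's stack, so anything the wrapper kept in its own frame, including the saved return address, could be overwritten before the parent resumes. A minimal sketch of the only well-defined usage (POSIX limits the child to _exit() or an exec function):

#include <cstdio>
#include <sys/wait.h>
#include <unistd.h>

int main() {
  pid_t pid = vfork();
  if (pid == 0) {
    // Child: borrows the parent's address space AND stack until _exit or
    // execve; its call frames land exactly where the parent's would.
    _exit(0);
  }
  // Parent resumes only after the child is gone. Any stack shadow the
  // child's frames poisoned is now stale; that is what HANDLE_VFORK clears.
  int status;
  waitpid(pid, &status, 0);
  std::printf("child %d reaped\n", (int)pid);
  return 0;
}
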
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_common_interceptors_vfork_arm.inc.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_common_interceptors_vfork_arm.inc.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_common_interceptors_vfork_arm.inc.S (revision 351984)
@@ -0,0 +1,49 @@
+#if defined(__arm__) && defined(__linux__)
+
+#include "sanitizer_common/sanitizer_asm.h"
+
+ASM_HIDDEN(COMMON_INTERCEPTOR_SPILL_AREA)
+
+.comm _ZN14__interception10real_vforkE,4,4
+.globl ASM_WRAPPER_NAME(vfork)
+ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(vfork))
+ASM_WRAPPER_NAME(vfork):
+ // Save LR in the off-stack spill area.
+ push {r4, lr}
+ bl COMMON_INTERCEPTOR_SPILL_AREA
+ pop {r4, lr}
+ str lr, [r0]
+
+  // Call real vfork. This may return twice. User code that runs between the
+  // first and the second return may clobber the stack frame of the
+  // interceptor; that's why it does not have a frame.
+ ldr r0, .LCPI0_0
+.LPC0_0:
+ ldr r0, [pc, r0]
+ mov lr, pc
+ bx r0
+
+ push {r0, r4}
+ cmp r0, #0
+ beq .L_exit
+
+ // r0 != 0 => parent process. Clear stack shadow.
+ add r0, sp, #8
+ bl COMMON_INTERCEPTOR_HANDLE_VFORK
+
+.L_exit:
+ // Restore LR.
+ bl COMMON_INTERCEPTOR_SPILL_AREA
+ ldr lr, [r0]
+ pop {r0, r4}
+
+ mov pc, lr
+
+.LCPI0_0:
+ .long _ZN14__interception10real_vforkE - (.LPC0_0+8)
+
+ASM_SIZE(vfork)
+
+.weak vfork
+.set vfork, ASM_WRAPPER_NAME(vfork)
+
+#endif
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_common_interceptors_vfork_i386.inc.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_common_interceptors_vfork_i386.inc.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_common_interceptors_vfork_i386.inc.S (revision 351984)
@@ -0,0 +1,63 @@
+#if defined(__i386__) && defined(__linux__)
+
+#include "sanitizer_common/sanitizer_asm.h"
+
+.comm _ZN14__interception10real_vforkE,4,4
+.globl ASM_WRAPPER_NAME(vfork)
+ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(vfork))
+ASM_WRAPPER_NAME(vfork):
+ // Store return address in the spill area and tear down the stack frame.
+ sub $12, %esp
+ call COMMON_INTERCEPTOR_SPILL_AREA
+ mov 12(%esp), %ecx
+ mov %ecx, (%eax)
+ add $16, %esp
+
+ call .L0$pb
+.L0$pb:
+ pop %eax
+.Ltmp0:
+ add $_GLOBAL_OFFSET_TABLE_+(.Ltmp0-.L0$pb), %eax
+ call *_ZN14__interception10real_vforkE@GOTOFF(%eax)
+
+ // Restore the stack frame.
+ // 12(%esp) return address
+ // 8(%esp) spill %ebx
+ // 4(%esp) spill REAL(vfork) return value
+ // (%esp) call frame (arg0) for __*_handle_vfork
+ sub $16, %esp
+ mov %ebx, 8(%esp)
+ mov %eax, 4(%esp)
+
+ // Form GOT address in %ebx.
+ call .L1$pb
+.L1$pb:
+ pop %ebx
+.Ltmp1:
+ add $_GLOBAL_OFFSET_TABLE_+(.Ltmp1-.L1$pb), %ebx
+
+ // Restore original return address.
+ call COMMON_INTERCEPTOR_SPILL_AREA
+ mov (%eax), %ecx
+ mov %ecx, 12(%esp)
+ mov 4(%esp), %eax
+
+  // Call handle_vfork in the parent process (%eax != 0).
+ test %eax, %eax
+ je .L_exit
+
+ lea 16(%esp), %ecx
+ mov %ecx, (%esp)
+ call COMMON_INTERCEPTOR_HANDLE_VFORK@PLT
+
+.L_exit:
+ mov 4(%esp), %eax
+ mov 8(%esp), %ebx
+ add $12, %esp
+ ret
+ASM_SIZE(vfork)
+
+.weak vfork
+.set vfork, ASM_WRAPPER_NAME(vfork)
+
+#endif
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_common_interceptors_vfork_x86_64.inc.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_common_interceptors_vfork_x86_64.inc.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_common_interceptors_vfork_x86_64.inc.S (revision 351984)
@@ -0,0 +1,41 @@
+#if defined(__x86_64__) && defined(__linux__)
+
+#include "sanitizer_common/sanitizer_asm.h"
+
+.comm _ZN14__interception10real_vforkE,8,8
+.globl ASM_WRAPPER_NAME(vfork)
+ASM_TYPE_FUNCTION(ASM_WRAPPER_NAME(vfork))
+ASM_WRAPPER_NAME(vfork):
+ // Store return address in the spill area and tear down the stack frame.
+ push %rcx
+ call COMMON_INTERCEPTOR_SPILL_AREA
+ pop %rcx
+ pop %rdi
+ mov %rdi, (%rax)
+
+ call *_ZN14__interception10real_vforkE(%rip)
+
+ // Restore return address from the spill area.
+ push %rcx
+ push %rax
+ call COMMON_INTERCEPTOR_SPILL_AREA
+ mov (%rax), %rdx
+ mov %rdx, 8(%rsp)
+ mov (%rsp), %rax
+
+ // Call handle_vfork in the parent process (%rax != 0).
+ test %rax, %rax
+ je .L_exit
+
+ lea 16(%rsp), %rdi
+ call COMMON_INTERCEPTOR_HANDLE_VFORK@PLT
+
+.L_exit:
+ pop %rax
+ ret
+ASM_SIZE(vfork)
+
+.weak vfork
+.set vfork, ASM_WRAPPER_NAME(vfork)
+
+#endif
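
All four ports above share one shape, easier to read as C-like pseudocode: stash the return address in per-thread off-stack storage, call the real vfork without keeping a frame, and on the parent path pass the current stack pointer to COMMON_INTERCEPTOR_HANDLE_VFORK so the tool can drop stale shadow above it. A sketch with stand-in functions; the real code must stay in assembly, since a C frame would itself be clobbered by the child:

#include <cstdio>

static unsigned long g_spill;  // models COMMON_INTERCEPTOR_SPILL_AREA
static unsigned long *spill_area() { return &g_spill; }
static long real_vfork() { return 1234; }  // stand-in for REAL(vfork)

// Stand-in for COMMON_INTERCEPTOR_HANDLE_VFORK: a real tool unpoisons the
// stack shadow above `sp`, which the vanished child left stale.
static void handle_vfork(void *sp) { std::printf("unpoison above %p\n", sp); }

static long wrapper_vfork() {
  *spill_area() = 0x1234;   // 1. stash the return address off-stack
  long res = real_vfork();  // 2. may "return twice" in the real wrapper
  if (res != 0) {           // 3. parent path only
    int anchor;             //    approximates the post-call stack pointer
    handle_vfork(&anchor);
  }
  (void)*spill_area();      // 4. reload the saved return address
  return res;
}

int main() { std::printf("vfork -> %ld\n", wrapper_vfork()); }
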
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_common_interface.inc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_common_interface.inc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_common_interface.inc (revision 351984)
@@ -0,0 +1,40 @@
+//===-- sanitizer_common_interface.inc ------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// Sanitizer Common interface list.
+//===----------------------------------------------------------------------===//
+INTERFACE_FUNCTION(__sanitizer_acquire_crash_state)
+INTERFACE_FUNCTION(__sanitizer_annotate_contiguous_container)
+INTERFACE_FUNCTION(__sanitizer_contiguous_container_find_bad_address)
+INTERFACE_FUNCTION(__sanitizer_set_death_callback)
+INTERFACE_FUNCTION(__sanitizer_set_report_path)
+INTERFACE_FUNCTION(__sanitizer_set_report_fd)
+INTERFACE_FUNCTION(__sanitizer_verify_contiguous_container)
+INTERFACE_WEAK_FUNCTION(__sanitizer_report_error_summary)
+INTERFACE_WEAK_FUNCTION(__sanitizer_sandbox_on_notify)
+// Sanitizer weak hooks
+INTERFACE_WEAK_FUNCTION(__sanitizer_weak_hook_memcmp)
+INTERFACE_WEAK_FUNCTION(__sanitizer_weak_hook_strcmp)
+INTERFACE_WEAK_FUNCTION(__sanitizer_weak_hook_strncmp)
+INTERFACE_WEAK_FUNCTION(__sanitizer_weak_hook_strstr)
+// Stacktrace interface.
+INTERFACE_FUNCTION(__sanitizer_get_module_and_offset_for_pc)
+INTERFACE_FUNCTION(__sanitizer_symbolize_global)
+INTERFACE_FUNCTION(__sanitizer_symbolize_pc)
+// Allocator interface.
+INTERFACE_FUNCTION(__sanitizer_get_allocated_size)
+INTERFACE_FUNCTION(__sanitizer_get_current_allocated_bytes)
+INTERFACE_FUNCTION(__sanitizer_get_estimated_allocated_size)
+INTERFACE_FUNCTION(__sanitizer_get_free_bytes)
+INTERFACE_FUNCTION(__sanitizer_get_heap_size)
+INTERFACE_FUNCTION(__sanitizer_get_ownership)
+INTERFACE_FUNCTION(__sanitizer_get_unmapped_bytes)
+INTERFACE_FUNCTION(__sanitizer_install_malloc_and_free_hooks)
+INTERFACE_FUNCTION(__sanitizer_purge_allocator)
+INTERFACE_FUNCTION(__sanitizer_print_memory_profile)
+INTERFACE_WEAK_FUNCTION(__sanitizer_free_hook)
+INTERFACE_WEAK_FUNCTION(__sanitizer_malloc_hook)
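
Interface lists like this are expanded by the includer: it defines INTERFACE_FUNCTION and INTERFACE_WEAK_FUNCTION to whatever it needs (export thunks, symbol-presence checks) and then pulls in the .inc. A self-contained sketch of that pattern; the checker body is invented for illustration:

#include <cstdio>

static void check_symbol(const char *name, bool weak) {
  std::printf("expect %s symbol: %s\n", weak ? "weak" : "strong", name);
}

static void check_interface() {
#define INTERFACE_FUNCTION(name) check_symbol(#name, /*weak=*/false);
#define INTERFACE_WEAK_FUNCTION(name) check_symbol(#name, /*weak=*/true);
  // The runtime would #include the .inc here; two entries are inlined so
  // the sketch stands alone:
  INTERFACE_FUNCTION(__sanitizer_set_report_path)
  INTERFACE_WEAK_FUNCTION(__sanitizer_report_error_summary)
#undef INTERFACE_FUNCTION
#undef INTERFACE_WEAK_FUNCTION
}

int main() { check_interface(); }
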
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_common_interface.inc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_common_interface_posix.inc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_common_interface_posix.inc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_common_interface_posix.inc (revision 351984)
@@ -0,0 +1,13 @@
+//===-- sanitizer_common_interface_posix.inc ------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// Sanitizer Common interface list available only on POSIX systems.
+//===----------------------------------------------------------------------===//
+INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_code)
+INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_data)
+INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_demangle)
+INTERFACE_WEAK_FUNCTION(__sanitizer_symbolize_flush)
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_common_interface_posix.inc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_common_libcdep.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_common_libcdep.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_common_libcdep.cc (revision 351984)
@@ -0,0 +1,139 @@
+//===-- sanitizer_common_libcdep.cc ---------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_allocator_interface.h"
+#include "sanitizer_common.h"
+#include "sanitizer_flags.h"
+#include "sanitizer_procmaps.h"
+
+namespace __sanitizer {
+
+static void (*SoftRssLimitExceededCallback)(bool exceeded);
+void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded)) {
+ CHECK_EQ(SoftRssLimitExceededCallback, nullptr);
+ SoftRssLimitExceededCallback = Callback;
+}
+
+#if (SANITIZER_LINUX || SANITIZER_NETBSD) && !SANITIZER_GO
+// Weak default implementation for when sanitizer_stackdepot is not linked in.
+SANITIZER_WEAK_ATTRIBUTE StackDepotStats *StackDepotGetStats() {
+ return nullptr;
+}
+
+void BackgroundThread(void *arg) {
+ const uptr hard_rss_limit_mb = common_flags()->hard_rss_limit_mb;
+ const uptr soft_rss_limit_mb = common_flags()->soft_rss_limit_mb;
+ const bool heap_profile = common_flags()->heap_profile;
+ uptr prev_reported_rss = 0;
+ uptr prev_reported_stack_depot_size = 0;
+ bool reached_soft_rss_limit = false;
+ uptr rss_during_last_reported_profile = 0;
+ while (true) {
+ SleepForMillis(100);
+ const uptr current_rss_mb = GetRSS() >> 20;
+ if (Verbosity()) {
+ // If RSS has grown 10% since last time, print some information.
+ if (prev_reported_rss * 11 / 10 < current_rss_mb) {
+ Printf("%s: RSS: %zdMb\n", SanitizerToolName, current_rss_mb);
+ prev_reported_rss = current_rss_mb;
+ }
+ // If stack depot has grown 10% since last time, print it too.
+ StackDepotStats *stack_depot_stats = StackDepotGetStats();
+ if (stack_depot_stats) {
+ if (prev_reported_stack_depot_size * 11 / 10 <
+ stack_depot_stats->allocated) {
+ Printf("%s: StackDepot: %zd ids; %zdM allocated\n",
+ SanitizerToolName,
+ stack_depot_stats->n_uniq_ids,
+ stack_depot_stats->allocated >> 20);
+ prev_reported_stack_depot_size = stack_depot_stats->allocated;
+ }
+ }
+ }
+ // Check RSS against the limit.
+ if (hard_rss_limit_mb && hard_rss_limit_mb < current_rss_mb) {
+ Report("%s: hard rss limit exhausted (%zdMb vs %zdMb)\n",
+ SanitizerToolName, hard_rss_limit_mb, current_rss_mb);
+ DumpProcessMap();
+ Die();
+ }
+ if (soft_rss_limit_mb) {
+ if (soft_rss_limit_mb < current_rss_mb && !reached_soft_rss_limit) {
+ reached_soft_rss_limit = true;
+ Report("%s: soft rss limit exhausted (%zdMb vs %zdMb)\n",
+ SanitizerToolName, soft_rss_limit_mb, current_rss_mb);
+ if (SoftRssLimitExceededCallback)
+ SoftRssLimitExceededCallback(true);
+ } else if (soft_rss_limit_mb >= current_rss_mb &&
+ reached_soft_rss_limit) {
+ reached_soft_rss_limit = false;
+ if (SoftRssLimitExceededCallback)
+ SoftRssLimitExceededCallback(false);
+ }
+ }
+ if (heap_profile &&
+ current_rss_mb > rss_during_last_reported_profile * 1.1) {
+ Printf("\n\nHEAP PROFILE at RSS %zdMb\n", current_rss_mb);
+ __sanitizer_print_memory_profile(90, 20);
+ rss_during_last_reported_profile = current_rss_mb;
+ }
+ }
+}
+#endif
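
The 10%-growth tests above stay in integer arithmetic on purpose: prev * 11 / 10 < current asks whether current exceeds prev by more than a tenth without pulling floating point into the runtime (only the heap_profile branch compares against a 1.1 double). A tiny demonstration of the predicate's edge cases:

#include <cstdio>

static bool grew_10_percent(unsigned long prev, unsigned long cur) {
  return prev * 11 / 10 < cur;
}

int main() {
  std::printf("%d\n", grew_10_percent(100, 111));  // 1: 11% growth
  std::printf("%d\n", grew_10_percent(100, 110));  // 0: exactly 10%
  std::printf("%d\n", grew_10_percent(0, 1));      // 1: first report
  return 0;
}
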
+
+void WriteToSyslog(const char *msg) {
+ InternalScopedString msg_copy(kErrorMessageBufferSize);
+ msg_copy.append("%s", msg);
+ char *p = msg_copy.data();
+ char *q;
+
+ // Print one line at a time.
+ // syslog, at least on Android, has an implicit message length limit.
+ while ((q = internal_strchr(p, '\n'))) {
+ *q = '\0';
+ WriteOneLineToSyslog(p);
+ p = q + 1;
+ }
+ // Print remaining characters, if there are any.
+ // Note that this will add an extra newline at the end.
+ // FIXME: buffer extra output. This would need a thread-local buffer, which
+  // on Android requires plugging into the tool's (e.g. ASan's) Thread class.
+ if (*p)
+ WriteOneLineToSyslog(p);
+}
+
+void MaybeStartBackgroudThread() {
+#if (SANITIZER_LINUX || SANITIZER_NETBSD) && \
+ !SANITIZER_GO // Need to implement/test on other platforms.
+ // Start the background thread if one of the rss limits is given.
+ if (!common_flags()->hard_rss_limit_mb &&
+ !common_flags()->soft_rss_limit_mb &&
+ !common_flags()->heap_profile) return;
+ if (!&real_pthread_create) return; // Can't spawn the thread anyway.
+ internal_start_thread(BackgroundThread, nullptr);
+#endif
+}
+
+static void (*sandboxing_callback)();
+void SetSandboxingCallback(void (*f)()) {
+ sandboxing_callback = f;
+}
+
+} // namespace __sanitizer
+
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_sandbox_on_notify,
+ __sanitizer_sandbox_arguments *args) {
+ __sanitizer::PlatformPrepareForSandboxing(args);
+ if (__sanitizer::sandboxing_callback)
+ __sanitizer::sandboxing_callback();
+}
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_common_nolibc.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_common_nolibc.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_common_nolibc.cc (revision 351984)
@@ -0,0 +1,34 @@
+//===-- sanitizer_common_nolibc.cc ----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains stubs for libc functions to facilitate optional use of
+// libc in no-libcdep sources.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+#include "sanitizer_common.h"
+#include "sanitizer_libc.h"
+
+namespace __sanitizer {
+
+// The Windows implementations of these functions use the win32 API directly,
+// bypassing libc.
+#if !SANITIZER_WINDOWS
+#if SANITIZER_LINUX
+void LogMessageOnPrintf(const char *str) {}
+#endif
+void WriteToSyslog(const char *buffer) {}
+void Abort() { internal__exit(1); }
+void SleepForSeconds(int seconds) { internal_sleep(seconds); }
+#endif // !SANITIZER_WINDOWS
+
+#if !SANITIZER_WINDOWS && !SANITIZER_MAC
+void ListOfModules::init() {}
+#endif
+
+} // namespace __sanitizer
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_common_nolibc.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_common_syscalls.inc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_common_syscalls.inc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_common_syscalls.inc (revision 351984)
@@ -0,0 +1,2885 @@
+//===-- sanitizer_common_syscalls.inc ---------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Common syscalls handlers for tools like AddressSanitizer,
+// ThreadSanitizer, MemorySanitizer, etc.
+//
+// This file should be included into the tool's interceptor file,
+// which has to define its own macros:
+// COMMON_SYSCALL_PRE_READ_RANGE
+// Called in prehook for regions that will be read by the kernel and
+// must be initialized.
+// COMMON_SYSCALL_PRE_WRITE_RANGE
+// Called in prehook for regions that will be written to by the kernel
+// and must be addressable. The actual write range may be smaller than
+// reported in the prehook. See POST_WRITE_RANGE.
+// COMMON_SYSCALL_POST_READ_RANGE
+//     Called in posthook for regions that were read by the kernel. Rarely
+//     useful, since by then the read has already happened.
+// COMMON_SYSCALL_POST_WRITE_RANGE
+// Called in posthook for regions that were written to by the kernel
+// and are now initialized.
+// COMMON_SYSCALL_ACQUIRE(addr)
+// Acquire memory visibility from addr.
+// COMMON_SYSCALL_RELEASE(addr)
+// Release memory visibility to addr.
+// COMMON_SYSCALL_FD_CLOSE(fd)
+// Called before closing file descriptor fd.
+// COMMON_SYSCALL_FD_ACQUIRE(fd)
+// Acquire memory visibility from fd.
+// COMMON_SYSCALL_FD_RELEASE(fd)
+// Release memory visibility to fd.
+// COMMON_SYSCALL_PRE_FORK()
+// Called before fork syscall.
+// COMMON_SYSCALL_POST_FORK(long res)
+// Called after fork syscall.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+#if SANITIZER_LINUX
+
+#include "sanitizer_libc.h"
+
+#define PRE_SYSCALL(name) \
+ SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_syscall_pre_impl_##name
+#define PRE_READ(p, s) COMMON_SYSCALL_PRE_READ_RANGE(p, s)
+#define PRE_WRITE(p, s) COMMON_SYSCALL_PRE_WRITE_RANGE(p, s)
+
+#define POST_SYSCALL(name) \
+ SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_syscall_post_impl_##name
+#define POST_READ(p, s) COMMON_SYSCALL_POST_READ_RANGE(p, s)
+#define POST_WRITE(p, s) COMMON_SYSCALL_POST_WRITE_RANGE(p, s)
+
+#ifndef COMMON_SYSCALL_ACQUIRE
+# define COMMON_SYSCALL_ACQUIRE(addr) ((void)(addr))
+#endif
+
+#ifndef COMMON_SYSCALL_RELEASE
+# define COMMON_SYSCALL_RELEASE(addr) ((void)(addr))
+#endif
+
+#ifndef COMMON_SYSCALL_FD_CLOSE
+# define COMMON_SYSCALL_FD_CLOSE(fd) ((void)(fd))
+#endif
+
+#ifndef COMMON_SYSCALL_FD_ACQUIRE
+# define COMMON_SYSCALL_FD_ACQUIRE(fd) ((void)(fd))
+#endif
+
+#ifndef COMMON_SYSCALL_FD_RELEASE
+# define COMMON_SYSCALL_FD_RELEASE(fd) ((void)(fd))
+#endif
+
+#ifndef COMMON_SYSCALL_PRE_FORK
+# define COMMON_SYSCALL_PRE_FORK() {}
+#endif
+
+#ifndef COMMON_SYSCALL_POST_FORK
+# define COMMON_SYSCALL_POST_FORK(res) {}
+#endif
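
On the consuming side, a tool defines the four range hooks before including this file (the acquire/release/fd/fork hooks fall back to the no-ops above), and every PRE_/POST_ handler then expands against them. A toy sketch of that wiring, with printouts standing in for real shadow-memory updates:

#include <cstddef>
#include <cstdio>

#define COMMON_SYSCALL_PRE_READ_RANGE(p, s) \
  std::printf("pre-read   %p %zu\n", (const void *)(p), (size_t)(s))
#define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) \
  std::printf("pre-write  %p %zu\n", (const void *)(p), (size_t)(s))
#define COMMON_SYSCALL_POST_READ_RANGE(p, s) ((void)(p), (void)(s))
#define COMMON_SYSCALL_POST_WRITE_RANGE(p, s) \
  std::printf("post-write %p %zu\n", (const void *)(p), (size_t)(s))

// The runtime would now #include "sanitizer_common_syscalls.inc". One
// handler is inlined by hand so the sketch stands alone; it mirrors
// PRE_SYSCALL(read) below, which pre-checks the buffer as writable.
static void syscall_pre_read(long fd, void *buf, unsigned long count) {
  (void)fd;
  if (buf) COMMON_SYSCALL_PRE_WRITE_RANGE(buf, count);
}

int main() {
  char buf[16];
  syscall_pre_read(0, buf, sizeof(buf));
  return 0;
}
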
+
+// FIXME: do some kind of PRE_READ for all syscall arguments (int(s) and such).
+
+extern "C" {
+struct sanitizer_kernel_iovec {
+ void *iov_base;
+ unsigned long iov_len;
+};
+
+struct sanitizer_kernel_msghdr {
+ void *msg_name;
+ int msg_namelen;
+ struct sanitizer_kernel_iovec *msg_iov;
+ unsigned long msg_iovlen;
+ void *msg_control;
+ unsigned long msg_controllen;
+ unsigned msg_flags;
+};
+
+struct sanitizer_kernel_mmsghdr {
+ struct sanitizer_kernel_msghdr msg_hdr;
+ unsigned msg_len;
+};
+
+struct sanitizer_kernel_timespec {
+ long tv_sec;
+ long tv_nsec;
+};
+
+struct sanitizer_kernel_timeval {
+ long tv_sec;
+ long tv_usec;
+};
+
+struct sanitizer_kernel_rusage {
+ struct sanitizer_kernel_timeval ru_timeval[2];
+ long ru_long[14];
+};
+
+struct sanitizer_kernel_sockaddr {
+ unsigned short sa_family;
+ char sa_data[14];
+};
+
+// Real sigset size is always passed as a syscall argument.
+// Declare it "void" to catch sizeof(kernel_sigset_t).
+typedef void kernel_sigset_t;
+
+static void kernel_write_iovec(const __sanitizer_iovec *iovec,
+ SIZE_T iovlen, SIZE_T maxlen) {
+ for (SIZE_T i = 0; i < iovlen && maxlen; ++i) {
+ SSIZE_T sz = Min(iovec[i].iov_len, maxlen);
+ POST_WRITE(iovec[i].iov_base, sz);
+ maxlen -= sz;
+ }
+}
+
+// This function uses POST_READ, because it needs to run after the syscall to
+// know the real read range.
+static void kernel_read_iovec(const __sanitizer_iovec *iovec,
+ SIZE_T iovlen, SIZE_T maxlen) {
+ POST_READ(iovec, sizeof(*iovec) * iovlen);
+ for (SIZE_T i = 0; i < iovlen && maxlen; ++i) {
+ SSIZE_T sz = Min(iovec[i].iov_len, maxlen);
+ POST_READ(iovec[i].iov_base, sz);
+ maxlen -= sz;
+ }
+}
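
Both walkers clamp by maxlen because a vectored syscall may touch only a prefix of the iovec's total capacity: a readv() that returns 11 bytes into two 8-byte buffers filled the first one fully and the second only partially, and shadow updates have to stop exactly there. A standalone sketch of the clamping walk (iovec_s is a local stand-in for the real struct):

#include <cstdio>

struct iovec_s { void *base; unsigned long len; };

static void mark_written(const iovec_s *iov, unsigned long iovlen,
                         unsigned long maxlen /* the syscall's result */) {
  for (unsigned long i = 0; i < iovlen && maxlen; ++i) {
    unsigned long sz = iov[i].len < maxlen ? iov[i].len : maxlen;
    std::printf("buffer %lu: %lu of %lu bytes written\n", i, sz, iov[i].len);
    maxlen -= sz;
  }
}

int main() {
  char a[8], b[8];
  iovec_s iov[2] = {{a, sizeof(a)}, {b, sizeof(b)}};
  mark_written(iov, 2, 11);  // e.g. readv returned 11: 8 + 3 bytes
  return 0;
}
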
+
+PRE_SYSCALL(recvmsg)(long sockfd, sanitizer_kernel_msghdr *msg, long flags) {
+ PRE_READ(msg, sizeof(*msg));
+}
+
+POST_SYSCALL(recvmsg)(long res, long sockfd, sanitizer_kernel_msghdr *msg,
+ long flags) {
+ if (res >= 0) {
+ if (msg) {
+ for (unsigned long i = 0; i < msg->msg_iovlen; ++i) {
+ POST_WRITE(msg->msg_iov[i].iov_base, msg->msg_iov[i].iov_len);
+ }
+ POST_WRITE(msg->msg_control, msg->msg_controllen);
+ }
+ }
+}
+
+PRE_SYSCALL(recvmmsg)(long fd, sanitizer_kernel_mmsghdr *msg, long vlen,
+ long flags, void *timeout) {
+ PRE_READ(msg, vlen * sizeof(*msg));
+}
+
+POST_SYSCALL(recvmmsg)(long res, long fd, sanitizer_kernel_mmsghdr *msg,
+ long vlen, long flags, void *timeout) {
+ if (res >= 0) {
+ if (msg) {
+ for (unsigned long i = 0; i < msg->msg_hdr.msg_iovlen; ++i) {
+ POST_WRITE(msg->msg_hdr.msg_iov[i].iov_base,
+ msg->msg_hdr.msg_iov[i].iov_len);
+ }
+ POST_WRITE(msg->msg_hdr.msg_control, msg->msg_hdr.msg_controllen);
+ POST_WRITE(&msg->msg_len, sizeof(msg->msg_len));
+ }
+ if (timeout) POST_WRITE(timeout, struct_timespec_sz);
+ }
+}
+
+PRE_SYSCALL(read)(long fd, void *buf, uptr count) {
+ if (buf) {
+ PRE_WRITE(buf, count);
+ }
+}
+
+POST_SYSCALL(read)(long res, long fd, void *buf, uptr count) {
+ if (res > 0 && buf) {
+ POST_WRITE(buf, res);
+ }
+}
+
+PRE_SYSCALL(time)(void *tloc) {}
+
+POST_SYSCALL(time)(long res, void *tloc) {
+ if (res >= 0) {
+ if (tloc) POST_WRITE(tloc, sizeof(long));
+ }
+}
+
+PRE_SYSCALL(stime)(void *tptr) {}
+
+POST_SYSCALL(stime)(long res, void *tptr) {
+ if (res >= 0) {
+ if (tptr) POST_WRITE(tptr, sizeof(long));
+ }
+}
+
+PRE_SYSCALL(gettimeofday)(void *tv, void *tz) {}
+
+POST_SYSCALL(gettimeofday)(long res, void *tv, void *tz) {
+ if (res >= 0) {
+ if (tv) POST_WRITE(tv, timeval_sz);
+ if (tz) POST_WRITE(tz, struct_timezone_sz);
+ }
+}
+
+PRE_SYSCALL(settimeofday)(void *tv, void *tz) {}
+
+POST_SYSCALL(settimeofday)(long res, void *tv, void *tz) {
+ if (res >= 0) {
+ if (tv) POST_WRITE(tv, timeval_sz);
+ if (tz) POST_WRITE(tz, struct_timezone_sz);
+ }
+}
+
+#if !SANITIZER_ANDROID
+PRE_SYSCALL(adjtimex)(void *txc_p) {}
+
+POST_SYSCALL(adjtimex)(long res, void *txc_p) {
+ if (res >= 0) {
+ if (txc_p) POST_WRITE(txc_p, struct_timex_sz);
+ }
+}
+#endif
+
+PRE_SYSCALL(times)(void *tbuf) {}
+
+POST_SYSCALL(times)(long res, void *tbuf) {
+ if (res >= 0) {
+ if (tbuf) POST_WRITE(tbuf, struct_tms_sz);
+ }
+}
+
+PRE_SYSCALL(gettid)() {}
+
+POST_SYSCALL(gettid)(long res) {}
+
+PRE_SYSCALL(nanosleep)(void *rqtp, void *rmtp) {}
+
+POST_SYSCALL(nanosleep)(long res, void *rqtp, void *rmtp) {
+ if (res >= 0) {
+ if (rqtp) POST_WRITE(rqtp, struct_timespec_sz);
+ if (rmtp) POST_WRITE(rmtp, struct_timespec_sz);
+ }
+}
+
+PRE_SYSCALL(alarm)(long seconds) {}
+
+POST_SYSCALL(alarm)(long res, long seconds) {}
+
+PRE_SYSCALL(getpid)() {}
+
+POST_SYSCALL(getpid)(long res) {}
+
+PRE_SYSCALL(getppid)() {}
+
+POST_SYSCALL(getppid)(long res) {}
+
+PRE_SYSCALL(getuid)() {}
+
+POST_SYSCALL(getuid)(long res) {}
+
+PRE_SYSCALL(geteuid)() {}
+
+POST_SYSCALL(geteuid)(long res) {}
+
+PRE_SYSCALL(getgid)() {}
+
+POST_SYSCALL(getgid)(long res) {}
+
+PRE_SYSCALL(getegid)() {}
+
+POST_SYSCALL(getegid)(long res) {}
+
+PRE_SYSCALL(getresuid)(void *ruid, void *euid, void *suid) {}
+
+POST_SYSCALL(getresuid)(long res, void *ruid, void *euid, void *suid) {
+ if (res >= 0) {
+ if (ruid) POST_WRITE(ruid, sizeof(unsigned));
+ if (euid) POST_WRITE(euid, sizeof(unsigned));
+ if (suid) POST_WRITE(suid, sizeof(unsigned));
+ }
+}
+
+PRE_SYSCALL(getresgid)(void *rgid, void *egid, void *sgid) {}
+
+POST_SYSCALL(getresgid)(long res, void *rgid, void *egid, void *sgid) {
+ if (res >= 0) {
+ if (rgid) POST_WRITE(rgid, sizeof(unsigned));
+ if (egid) POST_WRITE(egid, sizeof(unsigned));
+ if (sgid) POST_WRITE(sgid, sizeof(unsigned));
+ }
+}
+
+PRE_SYSCALL(getpgid)(long pid) {}
+
+POST_SYSCALL(getpgid)(long res, long pid) {}
+
+PRE_SYSCALL(getpgrp)() {}
+
+POST_SYSCALL(getpgrp)(long res) {}
+
+PRE_SYSCALL(getsid)(long pid) {}
+
+POST_SYSCALL(getsid)(long res, long pid) {}
+
+PRE_SYSCALL(getgroups)(long gidsetsize, void *grouplist) {}
+
+POST_SYSCALL(getgroups)(long res, long gidsetsize,
+ __sanitizer___kernel_gid_t *grouplist) {
+ if (res >= 0) {
+ if (grouplist) POST_WRITE(grouplist, res * sizeof(*grouplist));
+ }
+}
+
+PRE_SYSCALL(setregid)(long rgid, long egid) {}
+
+POST_SYSCALL(setregid)(long res, long rgid, long egid) {}
+
+PRE_SYSCALL(setgid)(long gid) {}
+
+POST_SYSCALL(setgid)(long res, long gid) {}
+
+PRE_SYSCALL(setreuid)(long ruid, long euid) {}
+
+POST_SYSCALL(setreuid)(long res, long ruid, long euid) {}
+
+PRE_SYSCALL(setuid)(long uid) {}
+
+POST_SYSCALL(setuid)(long res, long uid) {}
+
+PRE_SYSCALL(setresuid)(long ruid, long euid, long suid) {}
+
+POST_SYSCALL(setresuid)(long res, long ruid, long euid, long suid) {}
+
+PRE_SYSCALL(setresgid)(long rgid, long egid, long sgid) {}
+
+POST_SYSCALL(setresgid)(long res, long rgid, long egid, long sgid) {}
+
+PRE_SYSCALL(setfsuid)(long uid) {}
+
+POST_SYSCALL(setfsuid)(long res, long uid) {}
+
+PRE_SYSCALL(setfsgid)(long gid) {}
+
+POST_SYSCALL(setfsgid)(long res, long gid) {}
+
+PRE_SYSCALL(setpgid)(long pid, long pgid) {}
+
+POST_SYSCALL(setpgid)(long res, long pid, long pgid) {}
+
+PRE_SYSCALL(setsid)() {}
+
+POST_SYSCALL(setsid)(long res) {}
+
+PRE_SYSCALL(setgroups)(long gidsetsize, __sanitizer___kernel_gid_t *grouplist) {
+  // The kernel reads the group list, so it must be initialized.
+  if (grouplist) PRE_READ(grouplist, gidsetsize * sizeof(*grouplist));
+}
+
+POST_SYSCALL(setgroups)(long res, long gidsetsize,
+ __sanitizer___kernel_gid_t *grouplist) {}
+
+PRE_SYSCALL(acct)(const void *name) {
+ if (name)
+ PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);
+}
+
+POST_SYSCALL(acct)(long res, const void *name) {}
+
+PRE_SYSCALL(capget)(void *header, void *dataptr) {
+ if (header) PRE_READ(header, __user_cap_header_struct_sz);
+}
+
+POST_SYSCALL(capget)(long res, void *header, void *dataptr) {
+ if (res >= 0)
+ if (dataptr) POST_WRITE(dataptr, __user_cap_data_struct_sz);
+}
+
+PRE_SYSCALL(capset)(void *header, const void *data) {
+ if (header) PRE_READ(header, __user_cap_header_struct_sz);
+ if (data) PRE_READ(data, __user_cap_data_struct_sz);
+}
+
+POST_SYSCALL(capset)(long res, void *header, const void *data) {}
+
+PRE_SYSCALL(personality)(long personality) {}
+
+POST_SYSCALL(personality)(long res, long personality) {}
+
+PRE_SYSCALL(sigpending)(void *set) {}
+
+POST_SYSCALL(sigpending)(long res, void *set) {
+ if (res >= 0) {
+ if (set) POST_WRITE(set, old_sigset_t_sz);
+ }
+}
+
+PRE_SYSCALL(sigprocmask)(long how, void *set, void *oset) {}
+
+POST_SYSCALL(sigprocmask)(long res, long how, void *set, void *oset) {
+ if (res >= 0) {
+ if (set) POST_WRITE(set, old_sigset_t_sz);
+ if (oset) POST_WRITE(oset, old_sigset_t_sz);
+ }
+}
+
+PRE_SYSCALL(getitimer)(long which, void *value) {}
+
+POST_SYSCALL(getitimer)(long res, long which, void *value) {
+ if (res >= 0) {
+ if (value) POST_WRITE(value, struct_itimerval_sz);
+ }
+}
+
+PRE_SYSCALL(setitimer)(long which, void *value, void *ovalue) {}
+
+POST_SYSCALL(setitimer)(long res, long which, void *value, void *ovalue) {
+ if (res >= 0) {
+ if (value) POST_WRITE(value, struct_itimerval_sz);
+ if (ovalue) POST_WRITE(ovalue, struct_itimerval_sz);
+ }
+}
+
+PRE_SYSCALL(timer_create)(long which_clock, void *timer_event_spec,
+ void *created_timer_id) {}
+
+POST_SYSCALL(timer_create)(long res, long which_clock, void *timer_event_spec,
+ void *created_timer_id) {
+ if (res >= 0) {
+ if (timer_event_spec) POST_WRITE(timer_event_spec, struct_sigevent_sz);
+ if (created_timer_id) POST_WRITE(created_timer_id, sizeof(long));
+ }
+}
+
+PRE_SYSCALL(timer_gettime)(long timer_id, void *setting) {}
+
+POST_SYSCALL(timer_gettime)(long res, long timer_id, void *setting) {
+ if (res >= 0) {
+ if (setting) POST_WRITE(setting, struct_itimerspec_sz);
+ }
+}
+
+PRE_SYSCALL(timer_getoverrun)(long timer_id) {}
+
+POST_SYSCALL(timer_getoverrun)(long res, long timer_id) {}
+
+PRE_SYSCALL(timer_settime)(long timer_id, long flags, const void *new_setting,
+ void *old_setting) {
+ if (new_setting) PRE_READ(new_setting, struct_itimerspec_sz);
+}
+
+POST_SYSCALL(timer_settime)(long res, long timer_id, long flags,
+ const void *new_setting, void *old_setting) {
+ if (res >= 0) {
+ if (old_setting) POST_WRITE(old_setting, struct_itimerspec_sz);
+ }
+}
+
+PRE_SYSCALL(timer_delete)(long timer_id) {}
+
+POST_SYSCALL(timer_delete)(long res, long timer_id) {}
+
+PRE_SYSCALL(clock_settime)(long which_clock, const void *tp) {
+ if (tp) PRE_READ(tp, struct_timespec_sz);
+}
+
+POST_SYSCALL(clock_settime)(long res, long which_clock, const void *tp) {}
+
+PRE_SYSCALL(clock_gettime)(long which_clock, void *tp) {}
+
+POST_SYSCALL(clock_gettime)(long res, long which_clock, void *tp) {
+ if (res >= 0) {
+ if (tp) POST_WRITE(tp, struct_timespec_sz);
+ }
+}
+
+#if !SANITIZER_ANDROID
+PRE_SYSCALL(clock_adjtime)(long which_clock, void *tx) {}
+
+POST_SYSCALL(clock_adjtime)(long res, long which_clock, void *tx) {
+ if (res >= 0) {
+ if (tx) POST_WRITE(tx, struct_timex_sz);
+ }
+}
+#endif
+
+PRE_SYSCALL(clock_getres)(long which_clock, void *tp) {}
+
+POST_SYSCALL(clock_getres)(long res, long which_clock, void *tp) {
+ if (res >= 0) {
+ if (tp) POST_WRITE(tp, struct_timespec_sz);
+ }
+}
+
+PRE_SYSCALL(clock_nanosleep)(long which_clock, long flags, const void *rqtp,
+ void *rmtp) {
+ if (rqtp) PRE_READ(rqtp, struct_timespec_sz);
+}
+
+POST_SYSCALL(clock_nanosleep)(long res, long which_clock, long flags,
+ const void *rqtp, void *rmtp) {
+ if (res >= 0) {
+ if (rmtp) POST_WRITE(rmtp, struct_timespec_sz);
+ }
+}
+
+PRE_SYSCALL(nice)(long increment) {}
+
+POST_SYSCALL(nice)(long res, long increment) {}
+
+PRE_SYSCALL(sched_setscheduler)(long pid, long policy, void *param) {}
+
+POST_SYSCALL(sched_setscheduler)(long res, long pid, long policy, void *param) {
+ if (res >= 0) {
+ if (param) POST_WRITE(param, struct_sched_param_sz);
+ }
+}
+
+PRE_SYSCALL(sched_setparam)(long pid, void *param) {
+ if (param) PRE_READ(param, struct_sched_param_sz);
+}
+
+POST_SYSCALL(sched_setparam)(long res, long pid, void *param) {}
+
+PRE_SYSCALL(sched_getscheduler)(long pid) {}
+
+POST_SYSCALL(sched_getscheduler)(long res, long pid) {}
+
+PRE_SYSCALL(sched_getparam)(long pid, void *param) {}
+
+POST_SYSCALL(sched_getparam)(long res, long pid, void *param) {
+ if (res >= 0) {
+ if (param) POST_WRITE(param, struct_sched_param_sz);
+ }
+}
+
+PRE_SYSCALL(sched_setaffinity)(long pid, long len, void *user_mask_ptr) {
+ if (user_mask_ptr) PRE_READ(user_mask_ptr, len);
+}
+
+POST_SYSCALL(sched_setaffinity)(long res, long pid, long len,
+ void *user_mask_ptr) {}
+
+PRE_SYSCALL(sched_getaffinity)(long pid, long len, void *user_mask_ptr) {}
+
+POST_SYSCALL(sched_getaffinity)(long res, long pid, long len,
+ void *user_mask_ptr) {
+ if (res >= 0) {
+ if (user_mask_ptr) POST_WRITE(user_mask_ptr, len);
+ }
+}
+
+PRE_SYSCALL(sched_yield)() {}
+
+POST_SYSCALL(sched_yield)(long res) {}
+
+PRE_SYSCALL(sched_get_priority_max)(long policy) {}
+
+POST_SYSCALL(sched_get_priority_max)(long res, long policy) {}
+
+PRE_SYSCALL(sched_get_priority_min)(long policy) {}
+
+POST_SYSCALL(sched_get_priority_min)(long res, long policy) {}
+
+PRE_SYSCALL(sched_rr_get_interval)(long pid, void *interval) {}
+
+POST_SYSCALL(sched_rr_get_interval)(long res, long pid, void *interval) {
+ if (res >= 0) {
+ if (interval) POST_WRITE(interval, struct_timespec_sz);
+ }
+}
+
+PRE_SYSCALL(setpriority)(long which, long who, long niceval) {}
+
+POST_SYSCALL(setpriority)(long res, long which, long who, long niceval) {}
+
+PRE_SYSCALL(getpriority)(long which, long who) {}
+
+POST_SYSCALL(getpriority)(long res, long which, long who) {}
+
+PRE_SYSCALL(shutdown)(long arg0, long arg1) {}
+
+POST_SYSCALL(shutdown)(long res, long arg0, long arg1) {}
+
+PRE_SYSCALL(reboot)(long magic1, long magic2, long cmd, void *arg) {}
+
+POST_SYSCALL(reboot)(long res, long magic1, long magic2, long cmd, void *arg) {}
+
+PRE_SYSCALL(restart_syscall)() {}
+
+POST_SYSCALL(restart_syscall)(long res) {}
+
+PRE_SYSCALL(kexec_load)(long entry, long nr_segments, void *segments,
+ long flags) {}
+
+POST_SYSCALL(kexec_load)(long res, long entry, long nr_segments, void *segments,
+ long flags) {
+ if (res >= 0) {
+ if (segments) POST_WRITE(segments, struct_kexec_segment_sz);
+ }
+}
+
+PRE_SYSCALL(exit)(long error_code) {}
+
+POST_SYSCALL(exit)(long res, long error_code) {}
+
+PRE_SYSCALL(exit_group)(long error_code) {}
+
+POST_SYSCALL(exit_group)(long res, long error_code) {}
+
+PRE_SYSCALL(wait4)(long pid, void *stat_addr, long options, void *ru) {}
+
+POST_SYSCALL(wait4)(long res, long pid, void *stat_addr, long options,
+ void *ru) {
+ if (res >= 0) {
+ if (stat_addr) POST_WRITE(stat_addr, sizeof(int));
+ if (ru) POST_WRITE(ru, struct_rusage_sz);
+ }
+}
+
+PRE_SYSCALL(waitid)(long which, long pid, void *infop, long options, void *ru) {
+}
+
+POST_SYSCALL(waitid)(long res, long which, long pid, void *infop, long options,
+ void *ru) {
+ if (res >= 0) {
+ if (infop) POST_WRITE(infop, siginfo_t_sz);
+ if (ru) POST_WRITE(ru, struct_rusage_sz);
+ }
+}
+
+PRE_SYSCALL(waitpid)(long pid, void *stat_addr, long options) {}
+
+POST_SYSCALL(waitpid)(long res, long pid, void *stat_addr, long options) {
+ if (res >= 0) {
+ if (stat_addr) POST_WRITE(stat_addr, sizeof(int));
+ }
+}
+
+PRE_SYSCALL(set_tid_address)(void *tidptr) {}
+
+POST_SYSCALL(set_tid_address)(long res, void *tidptr) {
+ if (res >= 0) {
+ if (tidptr) POST_WRITE(tidptr, sizeof(int));
+ }
+}
+
+PRE_SYSCALL(init_module)(void *umod, long len, const void *uargs) {
+ if (uargs)
+ PRE_READ(uargs, __sanitizer::internal_strlen((const char *)uargs) + 1);
+}
+
+POST_SYSCALL(init_module)(long res, void *umod, long len, const void *uargs) {}
+
+PRE_SYSCALL(delete_module)(const void *name_user, long flags) {
+ if (name_user)
+ PRE_READ(name_user,
+ __sanitizer::internal_strlen((const char *)name_user) + 1);
+}
+
+POST_SYSCALL(delete_module)(long res, const void *name_user, long flags) {}
+
+PRE_SYSCALL(rt_sigprocmask)(long how, void *set, void *oset, long sigsetsize) {}
+
+POST_SYSCALL(rt_sigprocmask)(long res, long how, kernel_sigset_t *set,
+ kernel_sigset_t *oset, long sigsetsize) {
+ if (res >= 0) {
+ if (set) POST_WRITE(set, sigsetsize);
+ if (oset) POST_WRITE(oset, sigsetsize);
+ }
+}
+
+PRE_SYSCALL(rt_sigpending)(void *set, long sigsetsize) {}
+
+POST_SYSCALL(rt_sigpending)(long res, kernel_sigset_t *set, long sigsetsize) {
+ if (res >= 0) {
+ if (set) POST_WRITE(set, sigsetsize);
+ }
+}
+
+PRE_SYSCALL(rt_sigtimedwait)(const kernel_sigset_t *uthese, void *uinfo,
+ const void *uts, long sigsetsize) {
+ if (uthese) PRE_READ(uthese, sigsetsize);
+ if (uts) PRE_READ(uts, struct_timespec_sz);
+}
+
+POST_SYSCALL(rt_sigtimedwait)(long res, const void *uthese, void *uinfo,
+ const void *uts, long sigsetsize) {
+ if (res >= 0) {
+ if (uinfo) POST_WRITE(uinfo, siginfo_t_sz);
+ }
+}
+
+PRE_SYSCALL(rt_tgsigqueueinfo)(long tgid, long pid, long sig, void *uinfo) {}
+
+POST_SYSCALL(rt_tgsigqueueinfo)(long res, long tgid, long pid, long sig,
+ void *uinfo) {
+ if (res >= 0) {
+ if (uinfo) POST_WRITE(uinfo, siginfo_t_sz);
+ }
+}
+
+PRE_SYSCALL(kill)(long pid, long sig) {}
+
+POST_SYSCALL(kill)(long res, long pid, long sig) {}
+
+PRE_SYSCALL(tgkill)(long tgid, long pid, long sig) {}
+
+POST_SYSCALL(tgkill)(long res, long tgid, long pid, long sig) {}
+
+PRE_SYSCALL(tkill)(long pid, long sig) {}
+
+POST_SYSCALL(tkill)(long res, long pid, long sig) {}
+
+PRE_SYSCALL(rt_sigqueueinfo)(long pid, long sig, void *uinfo) {}
+
+POST_SYSCALL(rt_sigqueueinfo)(long res, long pid, long sig, void *uinfo) {
+ if (res >= 0) {
+ if (uinfo) POST_WRITE(uinfo, siginfo_t_sz);
+ }
+}
+
+PRE_SYSCALL(sgetmask)() {}
+
+POST_SYSCALL(sgetmask)(long res) {}
+
+PRE_SYSCALL(ssetmask)(long newmask) {}
+
+POST_SYSCALL(ssetmask)(long res, long newmask) {}
+
+PRE_SYSCALL(signal)(long sig, long handler) {}
+
+POST_SYSCALL(signal)(long res, long sig, long handler) {}
+
+PRE_SYSCALL(pause)() {}
+
+POST_SYSCALL(pause)(long res) {}
+
+PRE_SYSCALL(sync)() {}
+
+POST_SYSCALL(sync)(long res) {}
+
+PRE_SYSCALL(fsync)(long fd) {}
+
+POST_SYSCALL(fsync)(long res, long fd) {}
+
+PRE_SYSCALL(fdatasync)(long fd) {}
+
+POST_SYSCALL(fdatasync)(long res, long fd) {}
+
+PRE_SYSCALL(bdflush)(long func, long data) {}
+
+POST_SYSCALL(bdflush)(long res, long func, long data) {}
+
+PRE_SYSCALL(mount)(void *dev_name, void *dir_name, void *type, long flags,
+ void *data) {}
+
+POST_SYSCALL(mount)(long res, void *dev_name, void *dir_name, void *type,
+ long flags, void *data) {
+ if (res >= 0) {
+ if (dev_name)
+ POST_WRITE(dev_name,
+ __sanitizer::internal_strlen((const char *)dev_name) + 1);
+ if (dir_name)
+ POST_WRITE(dir_name,
+ __sanitizer::internal_strlen((const char *)dir_name) + 1);
+ if (type)
+ POST_WRITE(type, __sanitizer::internal_strlen((const char *)type) + 1);
+ }
+}
+
+PRE_SYSCALL(umount)(void *name, long flags) {}
+
+POST_SYSCALL(umount)(long res, void *name, long flags) {
+ if (res >= 0) {
+ if (name)
+ POST_WRITE(name, __sanitizer::internal_strlen((const char *)name) + 1);
+ }
+}
+
+PRE_SYSCALL(oldumount)(void *name) {}
+
+POST_SYSCALL(oldumount)(long res, void *name) {
+ if (res >= 0) {
+ if (name)
+ POST_WRITE(name, __sanitizer::internal_strlen((const char *)name) + 1);
+ }
+}
+
+PRE_SYSCALL(truncate)(const void *path, long length) {
+ if (path)
+ PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);
+}
+
+POST_SYSCALL(truncate)(long res, const void *path, long length) {}
+
+PRE_SYSCALL(ftruncate)(long fd, long length) {}
+
+POST_SYSCALL(ftruncate)(long res, long fd, long length) {}
+
+PRE_SYSCALL(stat)(const void *filename, void *statbuf) {
+ if (filename)
+ PRE_READ(filename,
+ __sanitizer::internal_strlen((const char *)filename) + 1);
+}
+
+POST_SYSCALL(stat)(long res, const void *filename, void *statbuf) {
+ if (res >= 0) {
+ if (statbuf) POST_WRITE(statbuf, struct___old_kernel_stat_sz);
+ }
+}
+
+#if !SANITIZER_ANDROID
+PRE_SYSCALL(statfs)(const void *path, void *buf) {
+ if (path)
+ PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);
+}
+
+POST_SYSCALL(statfs)(long res, const void *path, void *buf) {
+ if (res >= 0) {
+ if (buf) POST_WRITE(buf, struct_statfs_sz);
+ }
+}
+
+PRE_SYSCALL(statfs64)(const void *path, long sz, void *buf) {
+ if (path)
+ PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);
+}
+
+POST_SYSCALL(statfs64)(long res, const void *path, long sz, void *buf) {
+ if (res >= 0) {
+ if (buf) POST_WRITE(buf, struct_statfs64_sz);
+ }
+}
+
+PRE_SYSCALL(fstatfs)(long fd, void *buf) {}
+
+POST_SYSCALL(fstatfs)(long res, long fd, void *buf) {
+ if (res >= 0) {
+ if (buf) POST_WRITE(buf, struct_statfs_sz);
+ }
+}
+
+PRE_SYSCALL(fstatfs64)(long fd, long sz, void *buf) {}
+
+POST_SYSCALL(fstatfs64)(long res, long fd, long sz, void *buf) {
+ if (res >= 0) {
+ if (buf) POST_WRITE(buf, struct_statfs64_sz);
+ }
+}
+#endif // !SANITIZER_ANDROID
+
+PRE_SYSCALL(lstat)(const void *filename, void *statbuf) {
+ if (filename)
+ PRE_READ(filename,
+ __sanitizer::internal_strlen((const char *)filename) + 1);
+}
+
+POST_SYSCALL(lstat)(long res, const void *filename, void *statbuf) {
+ if (res >= 0) {
+ if (statbuf) POST_WRITE(statbuf, struct___old_kernel_stat_sz);
+ }
+}
+
+PRE_SYSCALL(fstat)(long fd, void *statbuf) {}
+
+POST_SYSCALL(fstat)(long res, long fd, void *statbuf) {
+ if (res >= 0) {
+ if (statbuf) POST_WRITE(statbuf, struct___old_kernel_stat_sz);
+ }
+}
+
+PRE_SYSCALL(newstat)(const void *filename, void *statbuf) {
+ if (filename)
+ PRE_READ(filename,
+ __sanitizer::internal_strlen((const char *)filename) + 1);
+}
+
+POST_SYSCALL(newstat)(long res, const void *filename, void *statbuf) {
+ if (res >= 0) {
+ if (statbuf) POST_WRITE(statbuf, struct_kernel_stat_sz);
+ }
+}
+
+PRE_SYSCALL(newlstat)(const void *filename, void *statbuf) {
+ if (filename)
+ PRE_READ(filename,
+ __sanitizer::internal_strlen((const char *)filename) + 1);
+}
+
+POST_SYSCALL(newlstat)(long res, const void *filename, void *statbuf) {
+ if (res >= 0) {
+ if (statbuf) POST_WRITE(statbuf, struct_kernel_stat_sz);
+ }
+}
+
+PRE_SYSCALL(newfstat)(long fd, void *statbuf) {}
+
+POST_SYSCALL(newfstat)(long res, long fd, void *statbuf) {
+ if (res >= 0) {
+ if (statbuf) POST_WRITE(statbuf, struct_kernel_stat_sz);
+ }
+}
+
+#if !SANITIZER_ANDROID
+PRE_SYSCALL(ustat)(long dev, void *ubuf) {}
+
+POST_SYSCALL(ustat)(long res, long dev, void *ubuf) {
+ if (res >= 0) {
+ if (ubuf) POST_WRITE(ubuf, struct_ustat_sz);
+ }
+}
+#endif // !SANITIZER_ANDROID
+
+PRE_SYSCALL(stat64)(const void *filename, void *statbuf) {
+ if (filename)
+ PRE_READ(filename,
+ __sanitizer::internal_strlen((const char *)filename) + 1);
+}
+
+POST_SYSCALL(stat64)(long res, const void *filename, void *statbuf) {
+ if (res >= 0) {
+ if (statbuf) POST_WRITE(statbuf, struct_kernel_stat64_sz);
+ }
+}
+
+PRE_SYSCALL(fstat64)(long fd, void *statbuf) {}
+
+POST_SYSCALL(fstat64)(long res, long fd, void *statbuf) {
+ if (res >= 0) {
+ if (statbuf) POST_WRITE(statbuf, struct_kernel_stat64_sz);
+ }
+}
+
+PRE_SYSCALL(lstat64)(const void *filename, void *statbuf) {
+ if (filename)
+ PRE_READ(filename,
+ __sanitizer::internal_strlen((const char *)filename) + 1);
+}
+
+POST_SYSCALL(lstat64)(long res, const void *filename, void *statbuf) {
+ if (res >= 0) {
+ if (statbuf) POST_WRITE(statbuf, struct_kernel_stat64_sz);
+ }
+}
+
+PRE_SYSCALL(setxattr)(const void *path, const void *name, const void *value,
+ long size, long flags) {
+ if (path)
+ PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);
+ if (name)
+ PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);
+ if (value) PRE_READ(value, size);
+}
+
+POST_SYSCALL(setxattr)(long res, const void *path, const void *name,
+ const void *value, long size, long flags) {}
+
+PRE_SYSCALL(lsetxattr)(const void *path, const void *name, const void *value,
+ long size, long flags) {
+ if (path)
+ PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);
+ if (name)
+ PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);
+ if (value) PRE_READ(value, size);
+}
+
+POST_SYSCALL(lsetxattr)(long res, const void *path, const void *name,
+ const void *value, long size, long flags) {}
+
+PRE_SYSCALL(fsetxattr)(long fd, const void *name, const void *value, long size,
+ long flags) {
+ if (name)
+ PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);
+ if (value) PRE_READ(value, size);
+}
+
+POST_SYSCALL(fsetxattr)(long res, long fd, const void *name, const void *value,
+ long size, long flags) {}
+
+PRE_SYSCALL(getxattr)(const void *path, const void *name, void *value,
+ long size) {
+ if (path)
+ PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);
+ if (name)
+ PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);
+}
+
+POST_SYSCALL(getxattr)(long res, const void *path, const void *name,
+ void *value, long size) {
+ if (size && res > 0) {
+ if (value) POST_WRITE(value, res);
+ }
+}
+
+PRE_SYSCALL(lgetxattr)(const void *path, const void *name, void *value,
+ long size) {
+ if (path)
+ PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);
+ if (name)
+ PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);
+}
+
+POST_SYSCALL(lgetxattr)(long res, const void *path, const void *name,
+ void *value, long size) {
+ if (size && res > 0) {
+ if (value) POST_WRITE(value, res);
+ }
+}
+
+PRE_SYSCALL(fgetxattr)(long fd, const void *name, void *value, long size) {
+ if (name)
+ PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);
+}
+
+POST_SYSCALL(fgetxattr)(long res, long fd, const void *name, void *value,
+ long size) {
+ if (size && res > 0) {
+ if (value) POST_WRITE(value, res);
+ }
+}
+
+PRE_SYSCALL(listxattr)(const void *path, void *list, long size) {
+ if (path)
+ PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);
+}
+
+POST_SYSCALL(listxattr)(long res, const void *path, void *list, long size) {
+ if (size && res > 0) {
+ if (list) POST_WRITE(list, res);
+ }
+}
+
+PRE_SYSCALL(llistxattr)(const void *path, void *list, long size) {
+ if (path)
+ PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);
+}
+
+POST_SYSCALL(llistxattr)(long res, const void *path, void *list, long size) {
+ if (size && res > 0) {
+ if (list) POST_WRITE(list, res);
+ }
+}
+
+PRE_SYSCALL(flistxattr)(long fd, void *list, long size) {}
+
+POST_SYSCALL(flistxattr)(long res, long fd, void *list, long size) {
+ if (size && res > 0) {
+ if (list) POST_WRITE(list, res);
+ }
+}
+
+PRE_SYSCALL(removexattr)(const void *path, const void *name) {
+ if (path)
+ PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);
+ if (name)
+ PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);
+}
+
+POST_SYSCALL(removexattr)(long res, const void *path, const void *name) {}
+
+PRE_SYSCALL(lremovexattr)(const void *path, const void *name) {
+ if (path)
+ PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);
+ if (name)
+ PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);
+}
+
+POST_SYSCALL(lremovexattr)(long res, const void *path, const void *name) {}
+
+PRE_SYSCALL(fremovexattr)(long fd, const void *name) {
+ if (name)
+ PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);
+}
+
+POST_SYSCALL(fremovexattr)(long res, long fd, const void *name) {}
+
+PRE_SYSCALL(brk)(long brk) {}
+
+POST_SYSCALL(brk)(long res, long brk) {}
+
+PRE_SYSCALL(mprotect)(long start, long len, long prot) {}
+
+POST_SYSCALL(mprotect)(long res, long start, long len, long prot) {}
+
+PRE_SYSCALL(mremap)(long addr, long old_len, long new_len, long flags,
+ long new_addr) {}
+
+POST_SYSCALL(mremap)(long res, long addr, long old_len, long new_len,
+ long flags, long new_addr) {}
+
+PRE_SYSCALL(remap_file_pages)(long start, long size, long prot, long pgoff,
+ long flags) {}
+
+POST_SYSCALL(remap_file_pages)(long res, long start, long size, long prot,
+ long pgoff, long flags) {}
+
+PRE_SYSCALL(msync)(long start, long len, long flags) {}
+
+POST_SYSCALL(msync)(long res, long start, long len, long flags) {}
+
+PRE_SYSCALL(munmap)(long addr, long len) {}
+
+POST_SYSCALL(munmap)(long res, long addr, long len) {}
+
+PRE_SYSCALL(mlock)(long start, long len) {}
+
+POST_SYSCALL(mlock)(long res, long start, long len) {}
+
+PRE_SYSCALL(munlock)(long start, long len) {}
+
+POST_SYSCALL(munlock)(long res, long start, long len) {}
+
+PRE_SYSCALL(mlockall)(long flags) {}
+
+POST_SYSCALL(mlockall)(long res, long flags) {}
+
+PRE_SYSCALL(munlockall)() {}
+
+POST_SYSCALL(munlockall)(long res) {}
+
+PRE_SYSCALL(madvise)(long start, long len, long behavior) {}
+
+POST_SYSCALL(madvise)(long res, long start, long len, long behavior) {}
+
+PRE_SYSCALL(mincore)(long start, long len, void *vec) {}
+
+POST_SYSCALL(mincore)(long res, long start, long len, void *vec) {
+ if (res >= 0) {
+ if (vec) {
+ POST_WRITE(vec, (len + GetPageSizeCached() - 1) / GetPageSizeCached());
+ }
+ }
+}
+
+PRE_SYSCALL(pivot_root)(const void *new_root, const void *put_old) {
+ if (new_root)
+ PRE_READ(new_root,
+ __sanitizer::internal_strlen((const char *)new_root) + 1);
+ if (put_old)
+ PRE_READ(put_old, __sanitizer::internal_strlen((const char *)put_old) + 1);
+}
+
+POST_SYSCALL(pivot_root)(long res, const void *new_root, const void *put_old) {}
+
+PRE_SYSCALL(chroot)(const void *filename) {
+ if (filename)
+ PRE_READ(filename,
+ __sanitizer::internal_strlen((const char *)filename) + 1);
+}
+
+POST_SYSCALL(chroot)(long res, const void *filename) {}
+
+PRE_SYSCALL(mknod)(const void *filename, long mode, long dev) {
+ if (filename)
+ PRE_READ(filename,
+ __sanitizer::internal_strlen((const char *)filename) + 1);
+}
+
+POST_SYSCALL(mknod)(long res, const void *filename, long mode, long dev) {}
+
+PRE_SYSCALL(link)(const void *oldname, const void *newname) {
+ if (oldname)
+ PRE_READ(oldname, __sanitizer::internal_strlen((const char *)oldname) + 1);
+ if (newname)
+ PRE_READ(newname, __sanitizer::internal_strlen((const char *)newname) + 1);
+}
+
+POST_SYSCALL(link)(long res, const void *oldname, const void *newname) {}
+
+PRE_SYSCALL(symlink)(const void *old, const void *new_) {
+ if (old) PRE_READ(old, __sanitizer::internal_strlen((const char *)old) + 1);
+ if (new_)
+ PRE_READ(new_, __sanitizer::internal_strlen((const char *)new_) + 1);
+}
+
+POST_SYSCALL(symlink)(long res, const void *old, const void *new_) {}
+
+PRE_SYSCALL(unlink)(const void *pathname) {
+ if (pathname)
+ PRE_READ(pathname,
+ __sanitizer::internal_strlen((const char *)pathname) + 1);
+}
+
+POST_SYSCALL(unlink)(long res, const void *pathname) {}
+
+PRE_SYSCALL(rename)(const void *oldname, const void *newname) {
+ if (oldname)
+ PRE_READ(oldname, __sanitizer::internal_strlen((const char *)oldname) + 1);
+ if (newname)
+ PRE_READ(newname, __sanitizer::internal_strlen((const char *)newname) + 1);
+}
+
+POST_SYSCALL(rename)(long res, const void *oldname, const void *newname) {}
+
+PRE_SYSCALL(chmod)(const void *filename, long mode) {
+ if (filename)
+ PRE_READ(filename,
+ __sanitizer::internal_strlen((const char *)filename) + 1);
+}
+
+POST_SYSCALL(chmod)(long res, const void *filename, long mode) {}
+
+PRE_SYSCALL(fchmod)(long fd, long mode) {}
+
+POST_SYSCALL(fchmod)(long res, long fd, long mode) {}
+
+PRE_SYSCALL(fcntl)(long fd, long cmd, long arg) {}
+
+POST_SYSCALL(fcntl)(long res, long fd, long cmd, long arg) {}
+
+PRE_SYSCALL(fcntl64)(long fd, long cmd, long arg) {}
+
+POST_SYSCALL(fcntl64)(long res, long fd, long cmd, long arg) {}
+
+PRE_SYSCALL(pipe)(void *fildes) {}
+
+POST_SYSCALL(pipe)(long res, void *fildes) {
+ if (res >= 0)
+ if (fildes) POST_WRITE(fildes, sizeof(int) * 2);
+}
+
+PRE_SYSCALL(pipe2)(void *fildes, long flags) {}
+
+POST_SYSCALL(pipe2)(long res, void *fildes, long flags) {
+ if (res >= 0)
+ if (fildes) POST_WRITE(fildes, sizeof(int) * 2);
+}
+
+PRE_SYSCALL(dup)(long fildes) {}
+
+POST_SYSCALL(dup)(long res, long fildes) {}
+
+PRE_SYSCALL(dup2)(long oldfd, long newfd) {}
+
+POST_SYSCALL(dup2)(long res, long oldfd, long newfd) {}
+
+PRE_SYSCALL(dup3)(long oldfd, long newfd, long flags) {}
+
+POST_SYSCALL(dup3)(long res, long oldfd, long newfd, long flags) {}
+
+PRE_SYSCALL(ioperm)(long from, long num, long on) {}
+
+POST_SYSCALL(ioperm)(long res, long from, long num, long on) {}
+
+PRE_SYSCALL(ioctl)(long fd, long cmd, long arg) {}
+
+POST_SYSCALL(ioctl)(long res, long fd, long cmd, long arg) {}
+
+PRE_SYSCALL(flock)(long fd, long cmd) {}
+
+POST_SYSCALL(flock)(long res, long fd, long cmd) {}
+
+PRE_SYSCALL(io_setup)(long nr_reqs, void **ctx) {
+ if (ctx) PRE_WRITE(ctx, sizeof(*ctx));
+}
+
+POST_SYSCALL(io_setup)(long res, long nr_reqs, void **ctx) {
+ if (res >= 0) {
+ if (ctx) POST_WRITE(ctx, sizeof(*ctx));
+    // (*ctx) is actually a pointer to a kernel-mapped page, and there are
+    // people out there who are crazy enough to peek into that page's
+    // 32-byte header.
+ if (*ctx) POST_WRITE(*ctx, 32);
+ }
+}
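+// (Note: the 32 bytes unpoisoned above appear to correspond to the eight
+// u32 fields at the head of the Linux kernel's aio_ring: id, nr, head,
+// tail, magic, compat_features, incompat_features, header_length.)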
+
+PRE_SYSCALL(io_destroy)(long ctx) {}
+
+POST_SYSCALL(io_destroy)(long res, long ctx) {}
+
+PRE_SYSCALL(io_getevents)(long ctx_id, long min_nr, long nr,
+ __sanitizer_io_event *ioevpp, void *timeout) {
+ if (timeout) PRE_READ(timeout, struct_timespec_sz);
+}
+
+POST_SYSCALL(io_getevents)(long res, long ctx_id, long min_nr, long nr,
+ __sanitizer_io_event *ioevpp, void *timeout) {
+ if (res >= 0) {
+ if (ioevpp) POST_WRITE(ioevpp, res * sizeof(*ioevpp));
+ if (timeout) POST_WRITE(timeout, struct_timespec_sz);
+ }
+ for (long i = 0; i < res; i++) {
+    // We synchronize io_submit -> io_getevents/io_cancel using the
+    // user-provided data context. Data is not necessarily a pointer; it can
+    // be an int, 0, or whatever, and acquire/release will handle it
+    // correctly. This scheme can lead to false negatives, e.g. when all
+    // operations synchronize on 0, but there does not seem to be a better
+    // solution (except wrapping all operations in their own context, which
+    // is unreliable). We cannot reliably extract fildes in io_getevents.
+ COMMON_SYSCALL_ACQUIRE((void*)ioevpp[i].data);
+ }
+}
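+// A minimal sketch of the pairing described above, assuming a TSan-style
+// backend; the iocb/event variable names are illustrative, not part of
+// this file:
+//
+//   // Submission side: release on the user-chosen token.
+//   COMMON_SYSCALL_RELEASE((void *)iocb->aio_data);
+//   // Completion side: acquire on the same token, so writes made before
+//   // io_submit happen-before reads made after io_getevents returns.
+//   COMMON_SYSCALL_ACQUIRE((void *)event.data);
+//
+// Two in-flight requests that share a token value (e.g. both 0) also
+// synchronize with each other; that is the false-negative case noted above.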
+
+PRE_SYSCALL(io_submit)(long ctx_id, long nr, __sanitizer_iocb **iocbpp) {
+ for (long i = 0; i < nr; ++i) {
+ uptr op = iocbpp[i]->aio_lio_opcode;
+ void *data = (void*)iocbpp[i]->aio_data;
+ void *buf = (void*)iocbpp[i]->aio_buf;
+ uptr len = (uptr)iocbpp[i]->aio_nbytes;
+ if (op == iocb_cmd_pwrite && buf && len) {
+ PRE_READ(buf, len);
+ } else if (op == iocb_cmd_pread && buf && len) {
+ POST_WRITE(buf, len);
+ } else if (op == iocb_cmd_pwritev) {
+ __sanitizer_iovec *iovec = (__sanitizer_iovec*)buf;
+ for (uptr v = 0; v < len; v++)
+ PRE_READ(iovec[v].iov_base, iovec[v].iov_len);
+ } else if (op == iocb_cmd_preadv) {
+ __sanitizer_iovec *iovec = (__sanitizer_iovec*)buf;
+ for (uptr v = 0; v < len; v++)
+ POST_WRITE(iovec[v].iov_base, iovec[v].iov_len);
+ }
+ // See comment in io_getevents.
+ COMMON_SYSCALL_RELEASE(data);
+ }
+}
+
+POST_SYSCALL(io_submit)(long res, long ctx_id, long nr,
+ __sanitizer_iocb **iocbpp) {}
+
+PRE_SYSCALL(io_cancel)(long ctx_id, __sanitizer_iocb *iocb,
+ __sanitizer_io_event *result) {
+}
+
+POST_SYSCALL(io_cancel)(long res, long ctx_id, __sanitizer_iocb *iocb,
+ __sanitizer_io_event *result) {
+ if (res == 0) {
+ if (result) {
+ // See comment in io_getevents.
+ COMMON_SYSCALL_ACQUIRE((void*)result->data);
+ POST_WRITE(result, sizeof(*result));
+ }
+ if (iocb)
+ POST_WRITE(iocb, sizeof(*iocb));
+ }
+}
+
+PRE_SYSCALL(sendfile)(long out_fd, long in_fd, void *offset, long count) {}
+
+POST_SYSCALL(sendfile)(long res, long out_fd, long in_fd,
+ __sanitizer___kernel_off_t *offset, long count) {
+ if (res >= 0) {
+ if (offset) POST_WRITE(offset, sizeof(*offset));
+ }
+}
+
+PRE_SYSCALL(sendfile64)(long out_fd, long in_fd, void *offset, long count) {}
+
+POST_SYSCALL(sendfile64)(long res, long out_fd, long in_fd,
+ __sanitizer___kernel_loff_t *offset, long count) {
+ if (res >= 0) {
+ if (offset) POST_WRITE(offset, sizeof(*offset));
+ }
+}
+
+PRE_SYSCALL(readlink)(const void *path, void *buf, long bufsiz) {
+ if (path)
+ PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);
+}
+
+POST_SYSCALL(readlink)(long res, const void *path, void *buf, long bufsiz) {
+ if (res >= 0) {
+ if (buf)
+ POST_WRITE(buf, __sanitizer::internal_strlen((const char *)buf) + 1);
+ }
+}
+
+PRE_SYSCALL(creat)(const void *pathname, long mode) {
+ if (pathname)
+ PRE_READ(pathname,
+ __sanitizer::internal_strlen((const char *)pathname) + 1);
+}
+
+POST_SYSCALL(creat)(long res, const void *pathname, long mode) {}
+
+PRE_SYSCALL(open)(const void *filename, long flags, long mode) {
+ if (filename)
+ PRE_READ(filename,
+ __sanitizer::internal_strlen((const char *)filename) + 1);
+}
+
+POST_SYSCALL(open)(long res, const void *filename, long flags, long mode) {}
+
+PRE_SYSCALL(close)(long fd) {
+ COMMON_SYSCALL_FD_CLOSE((int)fd);
+}
+
+POST_SYSCALL(close)(long res, long fd) {}
+
+PRE_SYSCALL(access)(const void *filename, long mode) {
+ if (filename)
+ PRE_READ(filename,
+ __sanitizer::internal_strlen((const char *)filename) + 1);
+}
+
+POST_SYSCALL(access)(long res, const void *filename, long mode) {}
+
+PRE_SYSCALL(vhangup)() {}
+
+POST_SYSCALL(vhangup)(long res) {}
+
+PRE_SYSCALL(chown)(const void *filename, long user, long group) {
+ if (filename)
+ PRE_READ(filename,
+ __sanitizer::internal_strlen((const char *)filename) + 1);
+}
+
+POST_SYSCALL(chown)(long res, const void *filename, long user, long group) {}
+
+PRE_SYSCALL(lchown)(const void *filename, long user, long group) {
+ if (filename)
+ PRE_READ(filename,
+ __sanitizer::internal_strlen((const char *)filename) + 1);
+}
+
+POST_SYSCALL(lchown)(long res, const void *filename, long user, long group) {}
+
+PRE_SYSCALL(fchown)(long fd, long user, long group) {}
+
+POST_SYSCALL(fchown)(long res, long fd, long user, long group) {}
+
+#if SANITIZER_USES_UID16_SYSCALLS
+PRE_SYSCALL(chown16)(const void *filename, long user, long group) {
+ if (filename)
+ PRE_READ(filename,
+ __sanitizer::internal_strlen((const char *)filename) + 1);
+}
+
+POST_SYSCALL(chown16)(long res, const void *filename, long user, long group) {}
+
+PRE_SYSCALL(lchown16)(const void *filename, long user, long group) {
+ if (filename)
+ PRE_READ(filename,
+ __sanitizer::internal_strlen((const char *)filename) + 1);
+}
+
+POST_SYSCALL(lchown16)(long res, const void *filename, long user, long group) {}
+
+PRE_SYSCALL(fchown16)(long fd, long user, long group) {}
+
+POST_SYSCALL(fchown16)(long res, long fd, long user, long group) {}
+
+PRE_SYSCALL(setregid16)(long rgid, long egid) {}
+
+POST_SYSCALL(setregid16)(long res, long rgid, long egid) {}
+
+PRE_SYSCALL(setgid16)(long gid) {}
+
+POST_SYSCALL(setgid16)(long res, long gid) {}
+
+PRE_SYSCALL(setreuid16)(long ruid, long euid) {}
+
+POST_SYSCALL(setreuid16)(long res, long ruid, long euid) {}
+
+PRE_SYSCALL(setuid16)(long uid) {}
+
+POST_SYSCALL(setuid16)(long res, long uid) {}
+
+PRE_SYSCALL(setresuid16)(long ruid, long euid, long suid) {}
+
+POST_SYSCALL(setresuid16)(long res, long ruid, long euid, long suid) {}
+
+PRE_SYSCALL(getresuid16)(void *ruid, void *euid, void *suid) {}
+
+POST_SYSCALL(getresuid16)(long res, __sanitizer___kernel_old_uid_t *ruid,
+ __sanitizer___kernel_old_uid_t *euid,
+ __sanitizer___kernel_old_uid_t *suid) {
+ if (res >= 0) {
+ if (ruid) POST_WRITE(ruid, sizeof(*ruid));
+ if (euid) POST_WRITE(euid, sizeof(*euid));
+ if (suid) POST_WRITE(suid, sizeof(*suid));
+ }
+}
+
+PRE_SYSCALL(setresgid16)(long rgid, long egid, long sgid) {}
+
+POST_SYSCALL(setresgid16)(long res, long rgid, long egid, long sgid) {}
+
+PRE_SYSCALL(getresgid16)(void *rgid, void *egid, void *sgid) {}
+
+POST_SYSCALL(getresgid16)(long res, __sanitizer___kernel_old_gid_t *rgid,
+ __sanitizer___kernel_old_gid_t *egid,
+ __sanitizer___kernel_old_gid_t *sgid) {
+ if (res >= 0) {
+ if (rgid) POST_WRITE(rgid, sizeof(*rgid));
+ if (egid) POST_WRITE(egid, sizeof(*egid));
+ if (sgid) POST_WRITE(sgid, sizeof(*sgid));
+ }
+}
+
+PRE_SYSCALL(setfsuid16)(long uid) {}
+
+POST_SYSCALL(setfsuid16)(long res, long uid) {}
+
+PRE_SYSCALL(setfsgid16)(long gid) {}
+
+POST_SYSCALL(setfsgid16)(long res, long gid) {}
+
+PRE_SYSCALL(getgroups16)(long gidsetsize,
+ __sanitizer___kernel_old_gid_t *grouplist) {}
+
+POST_SYSCALL(getgroups16)(long res, long gidsetsize,
+ __sanitizer___kernel_old_gid_t *grouplist) {
+ if (res >= 0) {
+ if (grouplist) POST_WRITE(grouplist, res * sizeof(*grouplist));
+ }
+}
+
+PRE_SYSCALL(setgroups16)(long gidsetsize,
+ __sanitizer___kernel_old_gid_t *grouplist) {
+ if (grouplist) POST_WRITE(grouplist, gidsetsize * sizeof(*grouplist));
+}
+
+POST_SYSCALL(setgroups16)(long res, long gidsetsize,
+ __sanitizer___kernel_old_gid_t *grouplist) {}
+
+PRE_SYSCALL(getuid16)() {}
+
+POST_SYSCALL(getuid16)(long res) {}
+
+PRE_SYSCALL(geteuid16)() {}
+
+POST_SYSCALL(geteuid16)(long res) {}
+
+PRE_SYSCALL(getgid16)() {}
+
+POST_SYSCALL(getgid16)(long res) {}
+
+PRE_SYSCALL(getegid16)() {}
+
+POST_SYSCALL(getegid16)(long res) {}
+#endif // SANITIZER_USES_UID16_SYSCALLS
+
+PRE_SYSCALL(utime)(void *filename, void *times) {}
+
+POST_SYSCALL(utime)(long res, void *filename, void *times) {
+ if (res >= 0) {
+ if (filename)
+ POST_WRITE(filename,
+ __sanitizer::internal_strlen((const char *)filename) + 1);
+ if (times) POST_WRITE(times, struct_utimbuf_sz);
+ }
+}
+
+PRE_SYSCALL(utimes)(void *filename, void *utimes) {}
+
+POST_SYSCALL(utimes)(long res, void *filename, void *utimes) {
+ if (res >= 0) {
+ if (filename)
+ POST_WRITE(filename,
+ __sanitizer::internal_strlen((const char *)filename) + 1);
+ if (utimes) POST_WRITE(utimes, timeval_sz);
+ }
+}
+
+PRE_SYSCALL(lseek)(long fd, long offset, long origin) {}
+
+POST_SYSCALL(lseek)(long res, long fd, long offset, long origin) {}
+
+PRE_SYSCALL(llseek)(long fd, long offset_high, long offset_low, void *result,
+ long origin) {}
+
+POST_SYSCALL(llseek)(long res, long fd, long offset_high, long offset_low,
+ void *result, long origin) {
+ if (res >= 0) {
+ if (result) POST_WRITE(result, sizeof(long long));
+ }
+}
+
+PRE_SYSCALL(readv)(long fd, const __sanitizer_iovec *vec, long vlen) {}
+
+POST_SYSCALL(readv)(long res, long fd, const __sanitizer_iovec *vec,
+ long vlen) {
+ if (res >= 0) {
+ if (vec) kernel_write_iovec(vec, vlen, res);
+ }
+}
+
+PRE_SYSCALL(write)(long fd, const void *buf, long count) {
+ if (buf) PRE_READ(buf, count);
+}
+
+POST_SYSCALL(write)(long res, long fd, const void *buf, long count) {}
+
+PRE_SYSCALL(writev)(long fd, const __sanitizer_iovec *vec, long vlen) {}
+
+POST_SYSCALL(writev)(long res, long fd, const __sanitizer_iovec *vec,
+ long vlen) {
+ if (res >= 0) {
+ if (vec) kernel_read_iovec(vec, vlen, res);
+ }
+}
+
+#ifdef _LP64
+PRE_SYSCALL(pread64)(long fd, void *buf, long count, long pos) {}
+
+POST_SYSCALL(pread64)(long res, long fd, void *buf, long count, long pos) {
+ if (res >= 0) {
+ if (buf) POST_WRITE(buf, res);
+ }
+}
+
+PRE_SYSCALL(pwrite64)(long fd, const void *buf, long count, long pos) {
+ if (buf) PRE_READ(buf, count);
+}
+
+POST_SYSCALL(pwrite64)(long res, long fd, const void *buf, long count,
+ long pos) {}
+#else
+PRE_SYSCALL(pread64)(long fd, void *buf, long count, long pos0, long pos1) {}
+
+POST_SYSCALL(pread64)(long res, long fd, void *buf, long count, long pos0,
+ long pos1) {
+ if (res >= 0) {
+ if (buf) POST_WRITE(buf, res);
+ }
+}
+
+PRE_SYSCALL(pwrite64)(long fd, const void *buf, long count, long pos0,
+ long pos1) {
+ if (buf) PRE_READ(buf, count);
+}
+
+POST_SYSCALL(pwrite64)(long res, long fd, const void *buf, long count,
+ long pos0, long pos1) {}
+#endif
+
+PRE_SYSCALL(preadv)(long fd, const __sanitizer_iovec *vec, long vlen,
+ long pos_l, long pos_h) {}
+
+POST_SYSCALL(preadv)(long res, long fd, const __sanitizer_iovec *vec, long vlen,
+ long pos_l, long pos_h) {
+ if (res >= 0) {
+ if (vec) kernel_write_iovec(vec, vlen, res);
+ }
+}
+
+PRE_SYSCALL(pwritev)(long fd, const __sanitizer_iovec *vec, long vlen,
+ long pos_l, long pos_h) {}
+
+POST_SYSCALL(pwritev)(long res, long fd, const __sanitizer_iovec *vec,
+ long vlen, long pos_l, long pos_h) {
+ if (res >= 0) {
+ if (vec) kernel_read_iovec(vec, vlen, res);
+ }
+}
+
+PRE_SYSCALL(getcwd)(void *buf, long size) {}
+
+POST_SYSCALL(getcwd)(long res, void *buf, long size) {
+ if (res >= 0) {
+ if (buf)
+ POST_WRITE(buf, __sanitizer::internal_strlen((const char *)buf) + 1);
+ }
+}
+
+PRE_SYSCALL(mkdir)(const void *pathname, long mode) {
+ if (pathname)
+ PRE_READ(pathname,
+ __sanitizer::internal_strlen((const char *)pathname) + 1);
+}
+
+POST_SYSCALL(mkdir)(long res, const void *pathname, long mode) {}
+
+PRE_SYSCALL(chdir)(const void *filename) {
+ if (filename)
+ PRE_READ(filename,
+ __sanitizer::internal_strlen((const char *)filename) + 1);
+}
+
+POST_SYSCALL(chdir)(long res, const void *filename) {}
+
+PRE_SYSCALL(fchdir)(long fd) {}
+
+POST_SYSCALL(fchdir)(long res, long fd) {}
+
+PRE_SYSCALL(rmdir)(const void *pathname) {
+ if (pathname)
+ PRE_READ(pathname,
+ __sanitizer::internal_strlen((const char *)pathname) + 1);
+}
+
+POST_SYSCALL(rmdir)(long res, const void *pathname) {}
+
+PRE_SYSCALL(lookup_dcookie)(u64 cookie64, void *buf, long len) {}
+
+POST_SYSCALL(lookup_dcookie)(long res, u64 cookie64, void *buf, long len) {
+ if (res >= 0) {
+ if (buf)
+ POST_WRITE(buf, __sanitizer::internal_strlen((const char *)buf) + 1);
+ }
+}
+
+PRE_SYSCALL(quotactl)(long cmd, const void *special, long id, void *addr) {
+ if (special)
+ PRE_READ(special, __sanitizer::internal_strlen((const char *)special) + 1);
+}
+
+POST_SYSCALL(quotactl)(long res, long cmd, const void *special, long id,
+ void *addr) {}
+
+PRE_SYSCALL(getdents)(long fd, void *dirent, long count) {}
+
+POST_SYSCALL(getdents)(long res, long fd, void *dirent, long count) {
+ if (res >= 0) {
+ if (dirent) POST_WRITE(dirent, res);
+ }
+}
+
+PRE_SYSCALL(getdents64)(long fd, void *dirent, long count) {}
+
+POST_SYSCALL(getdents64)(long res, long fd, void *dirent, long count) {
+ if (res >= 0) {
+ if (dirent) POST_WRITE(dirent, res);
+ }
+}
+
+PRE_SYSCALL(setsockopt)(long fd, long level, long optname, void *optval,
+ long optlen) {}
+
+POST_SYSCALL(setsockopt)(long res, long fd, long level, long optname,
+ void *optval, long optlen) {
+ if (res >= 0) {
+ if (optval)
+ POST_WRITE(optval,
+ __sanitizer::internal_strlen((const char *)optval) + 1);
+ }
+}
+
+PRE_SYSCALL(getsockopt)(long fd, long level, long optname, void *optval,
+ void *optlen) {}
+
+POST_SYSCALL(getsockopt)(long res, long fd, long level, long optname,
+ void *optval, void *optlen) {
+ if (res >= 0) {
+ if (optval)
+ POST_WRITE(optval,
+ __sanitizer::internal_strlen((const char *)optval) + 1);
+ if (optlen) POST_WRITE(optlen, sizeof(int));
+ }
+}
+
+PRE_SYSCALL(bind)(long arg0, sanitizer_kernel_sockaddr *arg1, long arg2) {}
+
+POST_SYSCALL(bind)(long res, long arg0, sanitizer_kernel_sockaddr *arg1,
+ long arg2) {
+ if (res >= 0) {
+ if (arg1) POST_WRITE(arg1, sizeof(*arg1));
+ }
+}
+
+PRE_SYSCALL(connect)(long arg0, sanitizer_kernel_sockaddr *arg1, long arg2) {}
+
+POST_SYSCALL(connect)(long res, long arg0, sanitizer_kernel_sockaddr *arg1,
+ long arg2) {
+ if (res >= 0) {
+ if (arg1) POST_WRITE(arg1, sizeof(*arg1));
+ }
+}
+
+PRE_SYSCALL(accept)(long arg0, sanitizer_kernel_sockaddr *arg1, void *arg2) {}
+
+POST_SYSCALL(accept)(long res, long arg0, sanitizer_kernel_sockaddr *arg1,
+ void *arg2) {
+ if (res >= 0) {
+ if (arg1) POST_WRITE(arg1, sizeof(*arg1));
+ if (arg2) POST_WRITE(arg2, sizeof(unsigned));
+ }
+}
+
+PRE_SYSCALL(accept4)(long arg0, sanitizer_kernel_sockaddr *arg1, void *arg2,
+ long arg3) {}
+
+POST_SYSCALL(accept4)(long res, long arg0, sanitizer_kernel_sockaddr *arg1,
+ void *arg2, long arg3) {
+ if (res >= 0) {
+ if (arg1) POST_WRITE(arg1, sizeof(*arg1));
+ if (arg2) POST_WRITE(arg2, sizeof(unsigned));
+ }
+}
+
+PRE_SYSCALL(getsockname)(long arg0, sanitizer_kernel_sockaddr *arg1,
+ void *arg2) {}
+
+POST_SYSCALL(getsockname)(long res, long arg0, sanitizer_kernel_sockaddr *arg1,
+ void *arg2) {
+ if (res >= 0) {
+ if (arg1) POST_WRITE(arg1, sizeof(*arg1));
+ if (arg2) POST_WRITE(arg2, sizeof(unsigned));
+ }
+}
+
+PRE_SYSCALL(getpeername)(long arg0, sanitizer_kernel_sockaddr *arg1,
+ void *arg2) {}
+
+POST_SYSCALL(getpeername)(long res, long arg0, sanitizer_kernel_sockaddr *arg1,
+ void *arg2) {
+ if (res >= 0) {
+ if (arg1) POST_WRITE(arg1, sizeof(*arg1));
+ if (arg2) POST_WRITE(arg2, sizeof(unsigned));
+ }
+}
+
+PRE_SYSCALL(send)(long arg0, void *arg1, long arg2, long arg3) {}
+
+POST_SYSCALL(send)(long res, long arg0, void *arg1, long arg2, long arg3) {
+  if (res > 0) {
+ if (arg1) POST_READ(arg1, res);
+ }
+}
+
+PRE_SYSCALL(sendto)(long arg0, void *arg1, long arg2, long arg3,
+ sanitizer_kernel_sockaddr *arg4, long arg5) {}
+
+POST_SYSCALL(sendto)(long res, long arg0, void *arg1, long arg2, long arg3,
+ sanitizer_kernel_sockaddr *arg4, long arg5) {
+ if (res >= 0) {
+ if (arg1) POST_READ(arg1, res);
+ if (arg4) POST_WRITE(arg4, sizeof(*arg4));
+ }
+}
+
+PRE_SYSCALL(sendmsg)(long fd, void *msg, long flags) {}
+
+POST_SYSCALL(sendmsg)(long res, long fd, void *msg, long flags) {
+ // FIXME: POST_READ
+}
+
+PRE_SYSCALL(sendmmsg)(long fd, void *msg, long vlen, long flags) {}
+
+POST_SYSCALL(sendmmsg)(long res, long fd, void *msg, long vlen, long flags) {
+ // FIXME: POST_READ
+}
+
+PRE_SYSCALL(recv)(long arg0, void *buf, long len, long flags) {}
+
+POST_SYSCALL(recv)(long res, long arg0, void *buf, long len, long flags) {
+ if (res >= 0) {
+ if (buf) POST_WRITE(buf, res);
+ }
+}
+
+PRE_SYSCALL(recvfrom)(long arg0, void *buf, long len, long flags,
+ sanitizer_kernel_sockaddr *arg4, void *arg5) {}
+
+POST_SYSCALL(recvfrom)(long res, long arg0, void *buf, long len, long flags,
+ sanitizer_kernel_sockaddr *arg4, void *arg5) {
+ if (res >= 0) {
+ if (buf) POST_WRITE(buf, res);
+ if (arg4) POST_WRITE(arg4, sizeof(*arg4));
+ if (arg5) POST_WRITE(arg5, sizeof(int));
+ }
+}
+
+PRE_SYSCALL(socket)(long arg0, long arg1, long arg2) {}
+
+POST_SYSCALL(socket)(long res, long arg0, long arg1, long arg2) {}
+
+PRE_SYSCALL(socketpair)(long arg0, long arg1, long arg2, int *sv) {}
+
+POST_SYSCALL(socketpair)(long res, long arg0, long arg1, long arg2, int *sv) {
+ if (res >= 0)
+ if (sv) POST_WRITE(sv, sizeof(int) * 2);
+}
+
+PRE_SYSCALL(socketcall)(long call, void *args) {}
+
+POST_SYSCALL(socketcall)(long res, long call, void *args) {
+ if (res >= 0) {
+ if (args) POST_WRITE(args, sizeof(long));
+ }
+}
+
+PRE_SYSCALL(listen)(long arg0, long arg1) {}
+
+POST_SYSCALL(listen)(long res, long arg0, long arg1) {}
+
+PRE_SYSCALL(poll)(void *ufds, long nfds, long timeout) {}
+
+POST_SYSCALL(poll)(long res, __sanitizer_pollfd *ufds, long nfds,
+ long timeout) {
+ if (res >= 0) {
+ if (ufds) POST_WRITE(ufds, nfds * sizeof(*ufds));
+ }
+}
+
+PRE_SYSCALL(select)(long n, __sanitizer___kernel_fd_set *inp,
+ __sanitizer___kernel_fd_set *outp,
+ __sanitizer___kernel_fd_set *exp, void *tvp) {}
+
+POST_SYSCALL(select)(long res, long n, __sanitizer___kernel_fd_set *inp,
+ __sanitizer___kernel_fd_set *outp,
+ __sanitizer___kernel_fd_set *exp, void *tvp) {
+ if (res >= 0) {
+ if (inp) POST_WRITE(inp, sizeof(*inp));
+ if (outp) POST_WRITE(outp, sizeof(*outp));
+ if (exp) POST_WRITE(exp, sizeof(*exp));
+ if (tvp) POST_WRITE(tvp, timeval_sz);
+ }
+}
+
+PRE_SYSCALL(old_select)(void *arg) {}
+
+POST_SYSCALL(old_select)(long res, void *arg) {}
+
+PRE_SYSCALL(epoll_create)(long size) {}
+
+POST_SYSCALL(epoll_create)(long res, long size) {}
+
+PRE_SYSCALL(epoll_create1)(long flags) {}
+
+POST_SYSCALL(epoll_create1)(long res, long flags) {}
+
+PRE_SYSCALL(epoll_ctl)(long epfd, long op, long fd, void *event) {}
+
+POST_SYSCALL(epoll_ctl)(long res, long epfd, long op, long fd, void *event) {
+ if (res >= 0) {
+ if (event) POST_WRITE(event, struct_epoll_event_sz);
+ }
+}
+
+PRE_SYSCALL(epoll_wait)(long epfd, void *events, long maxevents, long timeout) {
+}
+
+POST_SYSCALL(epoll_wait)(long res, long epfd, void *events, long maxevents,
+ long timeout) {
+ if (res >= 0) {
+    if (events) POST_WRITE(events, res * struct_epoll_event_sz);
+ }
+}
+
+PRE_SYSCALL(epoll_pwait)(long epfd, void *events, long maxevents, long timeout,
+ const kernel_sigset_t *sigmask, long sigsetsize) {
+ if (sigmask) PRE_READ(sigmask, sigsetsize);
+}
+
+POST_SYSCALL(epoll_pwait)(long res, long epfd, void *events, long maxevents,
+ long timeout, const void *sigmask, long sigsetsize) {
+ if (res >= 0) {
+    if (events) POST_WRITE(events, res * struct_epoll_event_sz);
+ }
+}
+
+PRE_SYSCALL(gethostname)(void *name, long len) {}
+
+POST_SYSCALL(gethostname)(long res, void *name, long len) {
+ if (res >= 0) {
+ if (name)
+ POST_WRITE(name, __sanitizer::internal_strlen((const char *)name) + 1);
+ }
+}
+
+PRE_SYSCALL(sethostname)(void *name, long len) {}
+
+POST_SYSCALL(sethostname)(long res, void *name, long len) {
+ if (res >= 0) {
+ if (name)
+ POST_WRITE(name, __sanitizer::internal_strlen((const char *)name) + 1);
+ }
+}
+
+PRE_SYSCALL(setdomainname)(void *name, long len) {}
+
+POST_SYSCALL(setdomainname)(long res, void *name, long len) {
+ if (res >= 0) {
+ if (name)
+ POST_WRITE(name, __sanitizer::internal_strlen((const char *)name) + 1);
+ }
+}
+
+PRE_SYSCALL(newuname)(void *name) {}
+
+POST_SYSCALL(newuname)(long res, void *name) {
+ if (res >= 0) {
+ if (name) POST_WRITE(name, struct_new_utsname_sz);
+ }
+}
+
+PRE_SYSCALL(uname)(void *arg0) {}
+
+POST_SYSCALL(uname)(long res, void *arg0) {
+ if (res >= 0) {
+ if (arg0) POST_WRITE(arg0, struct_old_utsname_sz);
+ }
+}
+
+PRE_SYSCALL(olduname)(void *arg0) {}
+
+POST_SYSCALL(olduname)(long res, void *arg0) {
+ if (res >= 0) {
+ if (arg0) POST_WRITE(arg0, struct_oldold_utsname_sz);
+ }
+}
+
+PRE_SYSCALL(getrlimit)(long resource, void *rlim) {}
+
+POST_SYSCALL(getrlimit)(long res, long resource, void *rlim) {
+ if (res >= 0) {
+ if (rlim) POST_WRITE(rlim, struct_rlimit_sz);
+ }
+}
+
+PRE_SYSCALL(old_getrlimit)(long resource, void *rlim) {}
+
+POST_SYSCALL(old_getrlimit)(long res, long resource, void *rlim) {
+ if (res >= 0) {
+ if (rlim) POST_WRITE(rlim, struct_rlimit_sz);
+ }
+}
+
+PRE_SYSCALL(setrlimit)(long resource, void *rlim) {}
+
+POST_SYSCALL(setrlimit)(long res, long resource, void *rlim) {
+ if (res >= 0) {
+ if (rlim) POST_WRITE(rlim, struct_rlimit_sz);
+ }
+}
+
+#if !SANITIZER_ANDROID
+PRE_SYSCALL(prlimit64)(long pid, long resource, const void *new_rlim,
+ void *old_rlim) {
+ if (new_rlim) PRE_READ(new_rlim, struct_rlimit64_sz);
+}
+
+POST_SYSCALL(prlimit64)(long res, long pid, long resource, const void *new_rlim,
+ void *old_rlim) {
+ if (res >= 0) {
+ if (old_rlim) POST_WRITE(old_rlim, struct_rlimit64_sz);
+ }
+}
+#endif
+
+PRE_SYSCALL(getrusage)(long who, void *ru) {}
+
+POST_SYSCALL(getrusage)(long res, long who, void *ru) {
+ if (res >= 0) {
+ if (ru) POST_WRITE(ru, struct_rusage_sz);
+ }
+}
+
+PRE_SYSCALL(umask)(long mask) {}
+
+POST_SYSCALL(umask)(long res, long mask) {}
+
+PRE_SYSCALL(msgget)(long key, long msgflg) {}
+
+POST_SYSCALL(msgget)(long res, long key, long msgflg) {}
+
+PRE_SYSCALL(msgsnd)(long msqid, void *msgp, long msgsz, long msgflg) {
+ if (msgp) PRE_READ(msgp, msgsz);
+}
+
+POST_SYSCALL(msgsnd)(long res, long msqid, void *msgp, long msgsz,
+ long msgflg) {}
+
+PRE_SYSCALL(msgrcv)(long msqid, void *msgp, long msgsz, long msgtyp,
+ long msgflg) {}
+
+POST_SYSCALL(msgrcv)(long res, long msqid, void *msgp, long msgsz, long msgtyp,
+ long msgflg) {
+ if (res >= 0) {
+ if (msgp) POST_WRITE(msgp, res);
+ }
+}
+
+#if !SANITIZER_ANDROID
+PRE_SYSCALL(msgctl)(long msqid, long cmd, void *buf) {}
+
+POST_SYSCALL(msgctl)(long res, long msqid, long cmd, void *buf) {
+ if (res >= 0) {
+ if (buf) POST_WRITE(buf, struct_msqid_ds_sz);
+ }
+}
+#endif
+
+PRE_SYSCALL(semget)(long key, long nsems, long semflg) {}
+
+POST_SYSCALL(semget)(long res, long key, long nsems, long semflg) {}
+
+PRE_SYSCALL(semop)(long semid, void *sops, long nsops) {}
+
+POST_SYSCALL(semop)(long res, long semid, void *sops, long nsops) {}
+
+PRE_SYSCALL(semctl)(long semid, long semnum, long cmd, void *arg) {}
+
+POST_SYSCALL(semctl)(long res, long semid, long semnum, long cmd, void *arg) {}
+
+PRE_SYSCALL(semtimedop)(long semid, void *sops, long nsops,
+ const void *timeout) {
+ if (timeout) PRE_READ(timeout, struct_timespec_sz);
+}
+
+POST_SYSCALL(semtimedop)(long res, long semid, void *sops, long nsops,
+ const void *timeout) {}
+
+PRE_SYSCALL(shmat)(long shmid, void *shmaddr, long shmflg) {}
+
+POST_SYSCALL(shmat)(long res, long shmid, void *shmaddr, long shmflg) {
+ if (res >= 0) {
+ if (shmaddr)
+ POST_WRITE(shmaddr,
+ __sanitizer::internal_strlen((const char *)shmaddr) + 1);
+ }
+}
+
+PRE_SYSCALL(shmget)(long key, long size, long flag) {}
+
+POST_SYSCALL(shmget)(long res, long key, long size, long flag) {}
+
+PRE_SYSCALL(shmdt)(void *shmaddr) {}
+
+POST_SYSCALL(shmdt)(long res, void *shmaddr) {
+ if (res >= 0) {
+ if (shmaddr)
+ POST_WRITE(shmaddr,
+ __sanitizer::internal_strlen((const char *)shmaddr) + 1);
+ }
+}
+
+PRE_SYSCALL(ipc)(long call, long first, long second, long third, void *ptr,
+ long fifth) {}
+
+POST_SYSCALL(ipc)(long res, long call, long first, long second, long third,
+ void *ptr, long fifth) {}
+
+#if !SANITIZER_ANDROID
+PRE_SYSCALL(shmctl)(long shmid, long cmd, void *buf) {}
+
+POST_SYSCALL(shmctl)(long res, long shmid, long cmd, void *buf) {
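+      // The kernel reports one byte of residency information per page, so
+      // round len up to a whole number of pages.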
+ if (res >= 0) {
+ if (buf) POST_WRITE(buf, sizeof(__sanitizer_shmid_ds));
+ }
+}
+
+PRE_SYSCALL(mq_open)(const void *name, long oflag, long mode, void *attr) {
+ if (name)
+ PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);
+}
+
+POST_SYSCALL(mq_open)(long res, const void *name, long oflag, long mode,
+ void *attr) {
+ if (res >= 0) {
+ if (attr) POST_WRITE(attr, struct_mq_attr_sz);
+ }
+}
+
+PRE_SYSCALL(mq_unlink)(const void *name) {
+ if (name)
+ PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);
+}
+
+POST_SYSCALL(mq_unlink)(long res, const void *name) {}
+
+PRE_SYSCALL(mq_timedsend)(long mqdes, const void *msg_ptr, long msg_len,
+ long msg_prio, const void *abs_timeout) {
+ if (msg_ptr) PRE_READ(msg_ptr, msg_len);
+ if (abs_timeout) PRE_READ(abs_timeout, struct_timespec_sz);
+}
+
+POST_SYSCALL(mq_timedsend)(long res, long mqdes, const void *msg_ptr,
+ long msg_len, long msg_prio,
+ const void *abs_timeout) {}
+
+PRE_SYSCALL(mq_timedreceive)(long mqdes, void *msg_ptr, long msg_len,
+ void *msg_prio, const void *abs_timeout) {
+ if (abs_timeout) PRE_READ(abs_timeout, struct_timespec_sz);
+}
+
+POST_SYSCALL(mq_timedreceive)(long res, long mqdes, void *msg_ptr, long msg_len,
+ int *msg_prio, const void *abs_timeout) {
+ if (res >= 0) {
+ if (msg_ptr) POST_WRITE(msg_ptr, res);
+ if (msg_prio) POST_WRITE(msg_prio, sizeof(*msg_prio));
+ }
+}
+
+PRE_SYSCALL(mq_notify)(long mqdes, const void *notification) {
+ if (notification) PRE_READ(notification, struct_sigevent_sz);
+}
+
+POST_SYSCALL(mq_notify)(long res, long mqdes, const void *notification) {}
+
+PRE_SYSCALL(mq_getsetattr)(long mqdes, const void *mqstat, void *omqstat) {
+ if (mqstat) PRE_READ(mqstat, struct_mq_attr_sz);
+}
+
+POST_SYSCALL(mq_getsetattr)(long res, long mqdes, const void *mqstat,
+ void *omqstat) {
+ if (res >= 0) {
+ if (omqstat) POST_WRITE(omqstat, struct_mq_attr_sz);
+ }
+}
+#endif // !SANITIZER_ANDROID
+
+PRE_SYSCALL(pciconfig_iobase)(long which, long bus, long devfn) {}
+
+POST_SYSCALL(pciconfig_iobase)(long res, long which, long bus, long devfn) {}
+
+PRE_SYSCALL(pciconfig_read)(long bus, long dfn, long off, long len, void *buf) {
+}
+
+POST_SYSCALL(pciconfig_read)(long res, long bus, long dfn, long off, long len,
+ void *buf) {}
+
+PRE_SYSCALL(pciconfig_write)(long bus, long dfn, long off, long len,
+ void *buf) {}
+
+POST_SYSCALL(pciconfig_write)(long res, long bus, long dfn, long off, long len,
+ void *buf) {}
+
+PRE_SYSCALL(swapon)(const void *specialfile, long swap_flags) {
+ if (specialfile)
+ PRE_READ(specialfile,
+ __sanitizer::internal_strlen((const char *)specialfile) + 1);
+}
+
+POST_SYSCALL(swapon)(long res, const void *specialfile, long swap_flags) {}
+
+PRE_SYSCALL(swapoff)(const void *specialfile) {
+ if (specialfile)
+ PRE_READ(specialfile,
+ __sanitizer::internal_strlen((const char *)specialfile) + 1);
+}
+
+POST_SYSCALL(swapoff)(long res, const void *specialfile) {}
+
+PRE_SYSCALL(sysctl)(__sanitizer___sysctl_args *args) {
+ if (args) {
+ if (args->name) PRE_READ(args->name, args->nlen * sizeof(*args->name));
+    if (args->newval) PRE_READ(args->newval, args->newlen);
+ }
+}
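+// (args->newval/newlen is the buffer the kernel reads the new value from;
+// args->oldval receives the previous value and *args->oldlenp its size,
+// which is what the POST hook below unpoisons.)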
+
+POST_SYSCALL(sysctl)(long res, __sanitizer___sysctl_args *args) {
+ if (res >= 0) {
+ if (args && args->oldval && args->oldlenp) {
+ POST_WRITE(args->oldlenp, sizeof(*args->oldlenp));
+ POST_WRITE(args->oldval, *args->oldlenp);
+ }
+ }
+}
+
+PRE_SYSCALL(sysinfo)(void *info) {}
+
+POST_SYSCALL(sysinfo)(long res, void *info) {
+ if (res >= 0) {
+ if (info) POST_WRITE(info, struct_sysinfo_sz);
+ }
+}
+
+PRE_SYSCALL(sysfs)(long option, long arg1, long arg2) {}
+
+POST_SYSCALL(sysfs)(long res, long option, long arg1, long arg2) {}
+
+PRE_SYSCALL(syslog)(long type, void *buf, long len) {}
+
+POST_SYSCALL(syslog)(long res, long type, void *buf, long len) {
+ if (res >= 0) {
+ if (buf)
+ POST_WRITE(buf, __sanitizer::internal_strlen((const char *)buf) + 1);
+ }
+}
+
+PRE_SYSCALL(uselib)(const void *library) {
+ if (library)
+ PRE_READ(library, __sanitizer::internal_strlen((const char *)library) + 1);
+}
+
+POST_SYSCALL(uselib)(long res, const void *library) {}
+
+PRE_SYSCALL(ni_syscall)() {}
+
+POST_SYSCALL(ni_syscall)(long res) {}
+
+PRE_SYSCALL(ptrace)(long request, long pid, long addr, long data) {
+#if !SANITIZER_ANDROID && \
+ (defined(__i386) || defined(__x86_64) || defined(__mips64) || \
+ defined(__powerpc64__) || defined(__aarch64__) || defined(__s390__))
+ if (data) {
+ if (request == ptrace_setregs) {
+ PRE_READ((void *)data, struct_user_regs_struct_sz);
+ } else if (request == ptrace_setfpregs) {
+ PRE_READ((void *)data, struct_user_fpregs_struct_sz);
+ } else if (request == ptrace_setfpxregs) {
+ PRE_READ((void *)data, struct_user_fpxregs_struct_sz);
+ } else if (request == ptrace_setsiginfo) {
+ PRE_READ((void *)data, siginfo_t_sz);
+ } else if (request == ptrace_setregset) {
+ __sanitizer_iovec *iov = (__sanitizer_iovec *)data;
+ PRE_READ(iov->iov_base, iov->iov_len);
+ }
+ }
+#endif
+}
+
+POST_SYSCALL(ptrace)(long res, long request, long pid, long addr, long data) {
+#if !SANITIZER_ANDROID && \
+ (defined(__i386) || defined(__x86_64) || defined(__mips64) || \
+ defined(__powerpc64__) || defined(__aarch64__) || defined(__s390__))
+ if (res >= 0 && data) {
+ // Note that this is different from the interceptor in
+ // sanitizer_common_interceptors.inc.
+ // PEEK* requests return resulting values through data pointer.
+ if (request == ptrace_getregs) {
+ POST_WRITE((void *)data, struct_user_regs_struct_sz);
+ } else if (request == ptrace_getfpregs) {
+ POST_WRITE((void *)data, struct_user_fpregs_struct_sz);
+ } else if (request == ptrace_getfpxregs) {
+ POST_WRITE((void *)data, struct_user_fpxregs_struct_sz);
+ } else if (request == ptrace_getsiginfo) {
+ POST_WRITE((void *)data, siginfo_t_sz);
+ } else if (request == ptrace_getregset) {
+ __sanitizer_iovec *iov = (__sanitizer_iovec *)data;
+ POST_WRITE(iov->iov_base, iov->iov_len);
+ } else if (request == ptrace_peekdata || request == ptrace_peektext ||
+ request == ptrace_peekuser) {
+ POST_WRITE((void *)data, sizeof(void *));
+ }
+ }
+#endif
+}
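+// (In the raw syscall ABI the PTRACE_PEEK* requests store the peeked word
+// at |data|, whereas the libc wrapper returns it by value; that is the
+// difference from the interceptor noted above.)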
+
+PRE_SYSCALL(add_key)(const void *_type, const void *_description,
+ const void *_payload, long plen, long destringid) {
+ if (_type)
+ PRE_READ(_type, __sanitizer::internal_strlen((const char *)_type) + 1);
+ if (_description)
+ PRE_READ(_description,
+ __sanitizer::internal_strlen((const char *)_description) + 1);
+}
+
+POST_SYSCALL(add_key)(long res, const void *_type, const void *_description,
+ const void *_payload, long plen, long destringid) {}
+
+PRE_SYSCALL(request_key)(const void *_type, const void *_description,
+ const void *_callout_info, long destringid) {
+ if (_type)
+ PRE_READ(_type, __sanitizer::internal_strlen((const char *)_type) + 1);
+ if (_description)
+ PRE_READ(_description,
+ __sanitizer::internal_strlen((const char *)_description) + 1);
+ if (_callout_info)
+ PRE_READ(_callout_info,
+ __sanitizer::internal_strlen((const char *)_callout_info) + 1);
+}
+
+POST_SYSCALL(request_key)(long res, const void *_type, const void *_description,
+ const void *_callout_info, long destringid) {}
+
+PRE_SYSCALL(keyctl)(long cmd, long arg2, long arg3, long arg4, long arg5) {}
+
+POST_SYSCALL(keyctl)(long res, long cmd, long arg2, long arg3, long arg4,
+ long arg5) {}
+
+PRE_SYSCALL(ioprio_set)(long which, long who, long ioprio) {}
+
+POST_SYSCALL(ioprio_set)(long res, long which, long who, long ioprio) {}
+
+PRE_SYSCALL(ioprio_get)(long which, long who) {}
+
+POST_SYSCALL(ioprio_get)(long res, long which, long who) {}
+
+PRE_SYSCALL(set_mempolicy)(long mode, void *nmask, long maxnode) {}
+
+POST_SYSCALL(set_mempolicy)(long res, long mode, void *nmask, long maxnode) {
+ if (res >= 0) {
+ if (nmask) POST_WRITE(nmask, sizeof(long));
+ }
+}
+
+PRE_SYSCALL(migrate_pages)(long pid, long maxnode, const void *from,
+ const void *to) {
+ if (from) PRE_READ(from, sizeof(long));
+ if (to) PRE_READ(to, sizeof(long));
+}
+
+POST_SYSCALL(migrate_pages)(long res, long pid, long maxnode, const void *from,
+ const void *to) {}
+
+PRE_SYSCALL(move_pages)(long pid, long nr_pages, const void **pages,
+ const int *nodes, int *status, long flags) {
+ if (pages) PRE_READ(pages, nr_pages * sizeof(*pages));
+ if (nodes) PRE_READ(nodes, nr_pages * sizeof(*nodes));
+}
+
+POST_SYSCALL(move_pages)(long res, long pid, long nr_pages, const void **pages,
+ const int *nodes, int *status, long flags) {
+ if (res >= 0) {
+ if (status) POST_WRITE(status, nr_pages * sizeof(*status));
+ }
+}
+
+PRE_SYSCALL(mbind)(long start, long len, long mode, void *nmask, long maxnode,
+ long flags) {}
+
+POST_SYSCALL(mbind)(long res, long start, long len, long mode, void *nmask,
+ long maxnode, long flags) {
+ if (res >= 0) {
+ if (nmask) POST_WRITE(nmask, sizeof(long));
+ }
+}
+
+PRE_SYSCALL(get_mempolicy)(void *policy, void *nmask, long maxnode, long addr,
+ long flags) {}
+
+POST_SYSCALL(get_mempolicy)(long res, void *policy, void *nmask, long maxnode,
+ long addr, long flags) {
+ if (res >= 0) {
+ if (policy) POST_WRITE(policy, sizeof(int));
+ if (nmask) POST_WRITE(nmask, sizeof(long));
+ }
+}
+
+PRE_SYSCALL(inotify_init)() {}
+
+POST_SYSCALL(inotify_init)(long res) {}
+
+PRE_SYSCALL(inotify_init1)(long flags) {}
+
+POST_SYSCALL(inotify_init1)(long res, long flags) {}
+
+PRE_SYSCALL(inotify_add_watch)(long fd, const void *path, long mask) {
+ if (path)
+ PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);
+}
+
+POST_SYSCALL(inotify_add_watch)(long res, long fd, const void *path,
+ long mask) {}
+
+PRE_SYSCALL(inotify_rm_watch)(long fd, long wd) {}
+
+POST_SYSCALL(inotify_rm_watch)(long res, long fd, long wd) {}
+
+PRE_SYSCALL(spu_run)(long fd, void *unpc, void *ustatus) {}
+
+POST_SYSCALL(spu_run)(long res, long fd, unsigned *unpc, unsigned *ustatus) {
+ if (res >= 0) {
+ if (unpc) POST_WRITE(unpc, sizeof(*unpc));
+ if (ustatus) POST_WRITE(ustatus, sizeof(*ustatus));
+ }
+}
+
+PRE_SYSCALL(spu_create)(const void *name, long flags, long mode, long fd) {
+ if (name)
+ PRE_READ(name, __sanitizer::internal_strlen((const char *)name) + 1);
+}
+
+POST_SYSCALL(spu_create)(long res, const void *name, long flags, long mode,
+ long fd) {}
+
+PRE_SYSCALL(mknodat)(long dfd, const void *filename, long mode, long dev) {
+ if (filename)
+ PRE_READ(filename,
+ __sanitizer::internal_strlen((const char *)filename) + 1);
+}
+
+POST_SYSCALL(mknodat)(long res, long dfd, const void *filename, long mode,
+ long dev) {}
+
+PRE_SYSCALL(mkdirat)(long dfd, const void *pathname, long mode) {
+ if (pathname)
+ PRE_READ(pathname,
+ __sanitizer::internal_strlen((const char *)pathname) + 1);
+}
+
+POST_SYSCALL(mkdirat)(long res, long dfd, const void *pathname, long mode) {}
+
+PRE_SYSCALL(unlinkat)(long dfd, const void *pathname, long flag) {
+ if (pathname)
+ PRE_READ(pathname,
+ __sanitizer::internal_strlen((const char *)pathname) + 1);
+}
+
+POST_SYSCALL(unlinkat)(long res, long dfd, const void *pathname, long flag) {}
+
+PRE_SYSCALL(symlinkat)(const void *oldname, long newdfd, const void *newname) {
+ if (oldname)
+ PRE_READ(oldname, __sanitizer::internal_strlen((const char *)oldname) + 1);
+ if (newname)
+ PRE_READ(newname, __sanitizer::internal_strlen((const char *)newname) + 1);
+}
+
+POST_SYSCALL(symlinkat)(long res, const void *oldname, long newdfd,
+ const void *newname) {}
+
+PRE_SYSCALL(linkat)(long olddfd, const void *oldname, long newdfd,
+ const void *newname, long flags) {
+ if (oldname)
+ PRE_READ(oldname, __sanitizer::internal_strlen((const char *)oldname) + 1);
+ if (newname)
+ PRE_READ(newname, __sanitizer::internal_strlen((const char *)newname) + 1);
+}
+
+POST_SYSCALL(linkat)(long res, long olddfd, const void *oldname, long newdfd,
+ const void *newname, long flags) {}
+
+PRE_SYSCALL(renameat)(long olddfd, const void *oldname, long newdfd,
+ const void *newname) {
+ if (oldname)
+ PRE_READ(oldname, __sanitizer::internal_strlen((const char *)oldname) + 1);
+ if (newname)
+ PRE_READ(newname, __sanitizer::internal_strlen((const char *)newname) + 1);
+}
+
+POST_SYSCALL(renameat)(long res, long olddfd, const void *oldname, long newdfd,
+ const void *newname) {}
+
+PRE_SYSCALL(futimesat)(long dfd, const void *filename, void *utimes) {
+ if (filename)
+ PRE_READ(filename,
+  if (grouplist) PRE_READ(grouplist, gidsetsize * sizeof(*grouplist));
+}
+
+POST_SYSCALL(futimesat)(long res, long dfd, const void *filename,
+ void *utimes) {
+ if (res >= 0) {
+ if (utimes) POST_WRITE(utimes, timeval_sz);
+ }
+}
+
+PRE_SYSCALL(faccessat)(long dfd, const void *filename, long mode) {
+ if (filename)
+ PRE_READ(filename,
+ __sanitizer::internal_strlen((const char *)filename) + 1);
+}
+
+POST_SYSCALL(faccessat)(long res, long dfd, const void *filename, long mode) {}
+
+PRE_SYSCALL(fchmodat)(long dfd, const void *filename, long mode) {
+ if (filename)
+ PRE_READ(filename,
+ __sanitizer::internal_strlen((const char *)filename) + 1);
+}
+
+POST_SYSCALL(fchmodat)(long res, long dfd, const void *filename, long mode) {}
+
+PRE_SYSCALL(fchownat)(long dfd, const void *filename, long user, long group,
+ long flag) {
+ if (filename)
+ PRE_READ(filename,
+ __sanitizer::internal_strlen((const char *)filename) + 1);
+}
+
+POST_SYSCALL(fchownat)(long res, long dfd, const void *filename, long user,
+ long group, long flag) {}
+
+PRE_SYSCALL(openat)(long dfd, const void *filename, long flags, long mode) {
+ if (filename)
+ PRE_READ(filename,
+ __sanitizer::internal_strlen((const char *)filename) + 1);
+}
+
+POST_SYSCALL(openat)(long res, long dfd, const void *filename, long flags,
+ long mode) {}
+
+PRE_SYSCALL(newfstatat)(long dfd, const void *filename, void *statbuf,
+ long flag) {
+ if (filename)
+ PRE_READ(filename,
+ __sanitizer::internal_strlen((const char *)filename) + 1);
+}
+
+POST_SYSCALL(newfstatat)(long res, long dfd, const void *filename,
+ void *statbuf, long flag) {
+ if (res >= 0) {
+ if (statbuf) POST_WRITE(statbuf, struct_kernel_stat_sz);
+ }
+}
+
+PRE_SYSCALL(fstatat64)(long dfd, const void *filename, void *statbuf,
+ long flag) {
+ if (filename)
+ PRE_READ(filename,
+ __sanitizer::internal_strlen((const char *)filename) + 1);
+}
+
+POST_SYSCALL(fstatat64)(long res, long dfd, const void *filename, void *statbuf,
+ long flag) {
+ if (res >= 0) {
+ if (statbuf) POST_WRITE(statbuf, struct_kernel_stat64_sz);
+ }
+}
+
+PRE_SYSCALL(readlinkat)(long dfd, const void *path, void *buf, long bufsiz) {
+ if (path)
+ PRE_READ(path, __sanitizer::internal_strlen((const char *)path) + 1);
+}
+
+POST_SYSCALL(readlinkat)(long res, long dfd, const void *path, void *buf,
+ long bufsiz) {
+ if (res >= 0) {
+ if (buf)
+ POST_WRITE(buf, __sanitizer::internal_strlen((const char *)buf) + 1);
+ }
+}
+
+PRE_SYSCALL(utimensat)(long dfd, const void *filename, void *utimes,
+ long flags) {
+ if (filename)
+ PRE_READ(filename,
+ __sanitizer::internal_strlen((const char *)filename) + 1);
+}
+
+POST_SYSCALL(utimensat)(long res, long dfd, const void *filename, void *utimes,
+ long flags) {
+ if (res >= 0) {
+ if (utimes) POST_WRITE(utimes, struct_timespec_sz);
+ }
+}
+
+PRE_SYSCALL(unshare)(long unshare_flags) {}
+
+POST_SYSCALL(unshare)(long res, long unshare_flags) {}
+
+PRE_SYSCALL(splice)(long fd_in, void *off_in, long fd_out, void *off_out,
+ long len, long flags) {}
+
+POST_SYSCALL(splice)(long res, long fd_in, void *off_in, long fd_out,
+ void *off_out, long len, long flags) {
+ if (res >= 0) {
+ if (off_in) POST_WRITE(off_in, sizeof(long long));
+ if (off_out) POST_WRITE(off_out, sizeof(long long));
+ }
+}
+
+PRE_SYSCALL(vmsplice)(long fd, const __sanitizer_iovec *iov, long nr_segs,
+ long flags) {}
+
+POST_SYSCALL(vmsplice)(long res, long fd, const __sanitizer_iovec *iov,
+ long nr_segs, long flags) {
+ if (res >= 0) {
+ if (iov) kernel_read_iovec(iov, nr_segs, res);
+ }
+}
+
+PRE_SYSCALL(tee)(long fdin, long fdout, long len, long flags) {}
+
+POST_SYSCALL(tee)(long res, long fdin, long fdout, long len, long flags) {}
+
+PRE_SYSCALL(get_robust_list)(long pid, void *head_ptr, void *len_ptr) {}
+
+POST_SYSCALL(get_robust_list)(long res, long pid, void *head_ptr,
+ void *len_ptr) {}
+
+PRE_SYSCALL(set_robust_list)(void *head, long len) {}
+
+POST_SYSCALL(set_robust_list)(long res, void *head, long len) {}
+
+PRE_SYSCALL(getcpu)(void *cpu, void *node, void *cache) {}
+
+POST_SYSCALL(getcpu)(long res, void *cpu, void *node, void *cache) {
+ if (res >= 0) {
+ if (cpu) POST_WRITE(cpu, sizeof(unsigned));
+ if (node) POST_WRITE(node, sizeof(unsigned));
+ // The third argument to this system call is nowadays unused.
+ }
+}
+
+PRE_SYSCALL(signalfd)(long ufd, void *user_mask, long sizemask) {}
+
+POST_SYSCALL(signalfd)(long res, long ufd, kernel_sigset_t *user_mask,
+ long sizemask) {
+ if (res >= 0) {
+ if (user_mask) POST_WRITE(user_mask, sizemask);
+ }
+}
+
+PRE_SYSCALL(signalfd4)(long ufd, void *user_mask, long sizemask, long flags) {}
+
+POST_SYSCALL(signalfd4)(long res, long ufd, kernel_sigset_t *user_mask,
+ long sizemask, long flags) {
+ if (res >= 0) {
+ if (user_mask) POST_WRITE(user_mask, sizemask);
+ }
+}
+
+PRE_SYSCALL(timerfd_create)(long clockid, long flags) {}
+
+POST_SYSCALL(timerfd_create)(long res, long clockid, long flags) {}
+
+PRE_SYSCALL(timerfd_settime)(long ufd, long flags, const void *utmr,
+ void *otmr) {
+ if (utmr) PRE_READ(utmr, struct_itimerspec_sz);
+}
+
+POST_SYSCALL(timerfd_settime)(long res, long ufd, long flags, const void *utmr,
+ void *otmr) {
+ if (res >= 0) {
+ if (otmr) POST_WRITE(otmr, struct_itimerspec_sz);
+ }
+}
+
+PRE_SYSCALL(timerfd_gettime)(long ufd, void *otmr) {}
+
+POST_SYSCALL(timerfd_gettime)(long res, long ufd, void *otmr) {
+ if (res >= 0) {
+ if (otmr) POST_WRITE(otmr, struct_itimerspec_sz);
+ }
+}
+
+PRE_SYSCALL(eventfd)(long count) {}
+
+POST_SYSCALL(eventfd)(long res, long count) {}
+
+PRE_SYSCALL(eventfd2)(long count, long flags) {}
+
+POST_SYSCALL(eventfd2)(long res, long count, long flags) {}
+
+PRE_SYSCALL(old_readdir)(long arg0, void *arg1, long arg2) {}
+
+POST_SYSCALL(old_readdir)(long res, long arg0, void *arg1, long arg2) {
+ // Missing definition of 'struct old_linux_dirent'.
+}
+
+PRE_SYSCALL(pselect6)(long arg0, __sanitizer___kernel_fd_set *arg1,
+ __sanitizer___kernel_fd_set *arg2,
+ __sanitizer___kernel_fd_set *arg3, void *arg4,
+ void *arg5) {}
+
+POST_SYSCALL(pselect6)(long res, long arg0, __sanitizer___kernel_fd_set *arg1,
+ __sanitizer___kernel_fd_set *arg2,
+ __sanitizer___kernel_fd_set *arg3, void *arg4,
+ void *arg5) {
+ if (res >= 0) {
+ if (arg1) POST_WRITE(arg1, sizeof(*arg1));
+ if (arg2) POST_WRITE(arg2, sizeof(*arg2));
+ if (arg3) POST_WRITE(arg3, sizeof(*arg3));
+ if (arg4) POST_WRITE(arg4, struct_timespec_sz);
+ }
+}
+
+PRE_SYSCALL(ppoll)(__sanitizer_pollfd *arg0, long arg1, void *arg2,
+ const kernel_sigset_t *arg3, long arg4) {
+ if (arg3) PRE_READ(arg3, arg4);
+}
+
+POST_SYSCALL(ppoll)(long res, __sanitizer_pollfd *arg0, long arg1, void *arg2,
+ const void *arg3, long arg4) {
+ if (res >= 0) {
+    if (arg0) POST_WRITE(arg0, arg1 * sizeof(*arg0));
+ if (arg2) POST_WRITE(arg2, struct_timespec_sz);
+ }
+}
+
+PRE_SYSCALL(syncfs)(long fd) {}
+
+POST_SYSCALL(syncfs)(long res, long fd) {}
+
+PRE_SYSCALL(perf_event_open)(__sanitizer_perf_event_attr *attr_uptr, long pid,
+ long cpu, long group_fd, long flags) {
+ if (attr_uptr) PRE_READ(attr_uptr, attr_uptr->size);
+}
+
+POST_SYSCALL(perf_event_open)(long res, __sanitizer_perf_event_attr *attr_uptr,
+ long pid, long cpu, long group_fd, long flags) {}
+
+PRE_SYSCALL(mmap_pgoff)(long addr, long len, long prot, long flags, long fd,
+ long pgoff) {}
+
+POST_SYSCALL(mmap_pgoff)(long res, long addr, long len, long prot, long flags,
+ long fd, long pgoff) {}
+
+PRE_SYSCALL(old_mmap)(void *arg) {}
+
+POST_SYSCALL(old_mmap)(long res, void *arg) {}
+
+PRE_SYSCALL(name_to_handle_at)(long dfd, const void *name, void *handle,
+ void *mnt_id, long flag) {}
+
+POST_SYSCALL(name_to_handle_at)(long res, long dfd, const void *name,
+ void *handle, void *mnt_id, long flag) {}
+
+PRE_SYSCALL(open_by_handle_at)(long mountdirfd, void *handle, long flags) {}
+
+POST_SYSCALL(open_by_handle_at)(long res, long mountdirfd, void *handle,
+ long flags) {}
+
+PRE_SYSCALL(setns)(long fd, long nstype) {}
+
+POST_SYSCALL(setns)(long res, long fd, long nstype) {}
+
+PRE_SYSCALL(process_vm_readv)(long pid, const __sanitizer_iovec *lvec,
+ long liovcnt, const void *rvec, long riovcnt,
+ long flags) {}
+
+POST_SYSCALL(process_vm_readv)(long res, long pid,
+ const __sanitizer_iovec *lvec, long liovcnt,
+ const void *rvec, long riovcnt, long flags) {
+ if (res >= 0) {
+ if (lvec) kernel_write_iovec(lvec, liovcnt, res);
+ }
+}
+
+PRE_SYSCALL(process_vm_writev)(long pid, const __sanitizer_iovec *lvec,
+ long liovcnt, const void *rvec, long riovcnt,
+ long flags) {}
+
+POST_SYSCALL(process_vm_writev)(long res, long pid,
+ const __sanitizer_iovec *lvec, long liovcnt,
+ const void *rvec, long riovcnt, long flags) {
+ if (res >= 0) {
+ if (lvec) kernel_read_iovec(lvec, liovcnt, res);
+ }
+}
+
+PRE_SYSCALL(fork)() {
+ COMMON_SYSCALL_PRE_FORK();
+}
+
+POST_SYSCALL(fork)(long res) {
+ COMMON_SYSCALL_POST_FORK(res);
+}
+
+PRE_SYSCALL(vfork)() {
+ COMMON_SYSCALL_PRE_FORK();
+}
+
+POST_SYSCALL(vfork)(long res) {
+ COMMON_SYSCALL_POST_FORK(res);
+}
+
+PRE_SYSCALL(sigaction)(long signum, const __sanitizer_kernel_sigaction_t *act,
+ __sanitizer_kernel_sigaction_t *oldact) {
+ if (act) {
+ PRE_READ(&act->sigaction, sizeof(act->sigaction));
+ PRE_READ(&act->sa_flags, sizeof(act->sa_flags));
+ PRE_READ(&act->sa_mask, sizeof(act->sa_mask));
+ }
+}
+
+POST_SYSCALL(sigaction)(long res, long signum,
+ const __sanitizer_kernel_sigaction_t *act,
+ __sanitizer_kernel_sigaction_t *oldact) {
+ if (res >= 0 && oldact) POST_WRITE(oldact, sizeof(*oldact));
+}
+
+PRE_SYSCALL(rt_sigaction)(long signum,
+ const __sanitizer_kernel_sigaction_t *act,
+ __sanitizer_kernel_sigaction_t *oldact, SIZE_T sz) {
+ if (act) {
+ PRE_READ(&act->sigaction, sizeof(act->sigaction));
+ PRE_READ(&act->sa_flags, sizeof(act->sa_flags));
+ PRE_READ(&act->sa_mask, sz);
+ }
+}
+
+POST_SYSCALL(rt_sigaction)(long res, long signum,
+ const __sanitizer_kernel_sigaction_t *act,
+ __sanitizer_kernel_sigaction_t *oldact, SIZE_T sz) {
+ if (res >= 0 && oldact) {
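+    // The kernel writes sz bytes of sa_mask; the fields preceding sa_mask
+    // are written in full.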
+ SIZE_T oldact_sz = ((char *)&oldact->sa_mask) - ((char *)oldact) + sz;
+ POST_WRITE(oldact, oldact_sz);
+ }
+}
+} // extern "C"
+
+#undef PRE_SYSCALL
+#undef PRE_READ
+#undef PRE_WRITE
+#undef POST_SYSCALL
+#undef POST_READ
+#undef POST_WRITE
+
+#endif // SANITIZER_LINUX
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_coverage_fuchsia.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_coverage_fuchsia.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_coverage_fuchsia.cc (revision 351984)
@@ -0,0 +1,240 @@
+//===-- sanitizer_coverage_fuchsia.cc -------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Sanitizer Coverage Controller for Trace PC Guard, Fuchsia-specific version.
+//
+// This Fuchsia-specific implementation uses the same basic scheme and the
+// same simple '.sancov' file format as the generic implementation. The
+// difference is that we just produce a single blob of output for the whole
+// program, not a separate one per DSO. We do not sort the PC table and do
+// not prune the zeros, so the resulting file is always as large as it
+// would be to report 100% coverage. Implicit tracing information about
+// the address ranges of DSOs allows offline tools to split the one big
+// blob into separate files that the 'sancov' tool can understand.
+//
+// Unlike the traditional implementation that uses an atexit hook to write
+// out data files at the end, the results on Fuchsia do not go into a file
+// per se. The 'coverage_dir' option is ignored. Instead, they are stored
+// directly into a shared memory object (a Zircon VMO). At exit, that VMO
+// is handed over to a system service that's responsible for getting the
+// data out to somewhere that it can be fed into the sancov tool (where and
+// how is not our problem).
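+//
+// The blob itself keeps the '.sancov' layout (sketch):
+//
+//   u64 array[1 + num_guards];
+//   array[0] = 0xC0BFFFFFFFFFFF64ULL;  // Magic64: 64-bit '.sancov' file
+//   array[i] = PC recorded for guard index i, or 0 if that edge never ran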
+
+#include "sanitizer_platform.h"
+#if SANITIZER_FUCHSIA
+#include "sanitizer_atomic.h"
+#include "sanitizer_common.h"
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_symbolizer_fuchsia.h"
+
+#include <zircon/process.h>
+#include <zircon/sanitizer.h>
+#include <zircon/syscalls.h>
+
+using namespace __sanitizer; // NOLINT
+
+namespace __sancov {
+namespace {
+
+// TODO(mcgrathr): Move the constant into a header shared with other impls.
+constexpr u64 Magic64 = 0xC0BFFFFFFFFFFF64ULL;
+static_assert(SANITIZER_WORDSIZE == 64, "Fuchsia is always LP64");
+
+constexpr const char kSancovSinkName[] = "sancov";
+
+// Collects trace-pc guard coverage.
+// This class relies on zero-initialization.
+class TracePcGuardController final {
+ public:
+ // For each PC location being tracked, there is a u32 reserved in global
+ // data called the "guard". At startup, we assign each guard slot a
+ // unique index into the big results array. Later during runtime, the
+ // first call to TracePcGuard (below) will store the corresponding PC at
+ // that index in the array. (Each later call with the same guard slot is
+ // presumed to be from the same PC.) Then it clears the guard slot back
+ // to zero, which tells the compiler not to bother calling in again. At
+ // the end of the run, we have a big array where each element is either
+ // zero or is a tracked PC location that was hit in the trace.
+
+ // This is called from global constructors. Each translation unit has a
+ // contiguous array of guard slots, and a constructor that calls here
+ // with the bounds of its array. Those constructors are allowed to call
+ // here more than once for the same array. Usually all of these
+ // constructors run in the initial thread, but it's possible that a
+ // dlopen call on a secondary thread will run constructors that get here.
+ void InitTracePcGuard(u32 *start, u32 *end) {
+ if (end > start && *start == 0 && common_flags()->coverage) {
+ // Complete the setup before filling in any guards with indices.
+ // This avoids the possibility of code called from Setup reentering
+ // TracePcGuard.
+ u32 idx = Setup(end - start);
+ for (u32 *p = start; p < end; ++p) {
+ *p = idx++;
+ }
+ }
+ }
+
+ void TracePcGuard(u32 *guard, uptr pc) {
+ atomic_uint32_t *guard_ptr = reinterpret_cast<atomic_uint32_t *>(guard);
+ u32 idx = atomic_exchange(guard_ptr, 0, memory_order_relaxed);
+ if (idx > 0) array_[idx] = pc;
+ }
+
+ void Dump() {
+ BlockingMutexLock locked(&setup_lock_);
+ if (array_) {
+ CHECK_NE(vmo_, ZX_HANDLE_INVALID);
+
+ // Publish the VMO to the system, where it can be collected and
+ // analyzed after this process exits. This always consumes the VMO
+ // handle. Any failure is just logged and not indicated to us.
+ __sanitizer_publish_data(kSancovSinkName, vmo_);
+ vmo_ = ZX_HANDLE_INVALID;
+
+ // This will route to __sanitizer_log_write, which will ensure that
+ // information about shared libraries is written out. This message
+ // uses the `dumpfile` symbolizer markup element to highlight the
+ // dump. See the explanation for this in:
+ // https://fuchsia.googlesource.com/zircon/+/master/docs/symbolizer_markup.md
+ Printf("SanitizerCoverage: " FORMAT_DUMPFILE " with up to %u PCs\n",
+ kSancovSinkName, vmo_name_, next_index_ - 1);
+ }
+ }
+
+ private:
+ // We map in the largest possible view into the VMO: one word
+ // for every possible 32-bit index value. This avoids the need
+ // to change the mapping when increasing the size of the VMO.
+ // We can always spare the 32G of address space.
+ static constexpr size_t MappingSize = sizeof(uptr) << 32;
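+  // (sizeof(uptr) << 32 is 8 bytes times 2^32 on LP64: one u64 slot for
+  // every possible u32 guard index, i.e. the 32G mentioned above.)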
+
+ BlockingMutex setup_lock_ = BlockingMutex(LINKER_INITIALIZED);
+ uptr *array_ = nullptr;
+ u32 next_index_ = 0;
+ zx_handle_t vmo_ = {};
+ char vmo_name_[ZX_MAX_NAME_LEN] = {};
+
+ size_t DataSize() const { return next_index_ * sizeof(uintptr_t); }
+
+ u32 Setup(u32 num_guards) {
+ BlockingMutexLock locked(&setup_lock_);
+ DCHECK(common_flags()->coverage);
+
+ if (next_index_ == 0) {
+ CHECK_EQ(vmo_, ZX_HANDLE_INVALID);
+ CHECK_EQ(array_, nullptr);
+
+ // The first sample goes at [1] to reserve [0] for the magic number.
+ next_index_ = 1 + num_guards;
+
+ zx_status_t status = _zx_vmo_create(DataSize(), ZX_VMO_RESIZABLE, &vmo_);
+ CHECK_EQ(status, ZX_OK);
+
+ // Give the VMO a name including our process KOID so it's easy to spot.
+ internal_snprintf(vmo_name_, sizeof(vmo_name_), "%s.%zu", kSancovSinkName,
+ internal_getpid());
+ _zx_object_set_property(vmo_, ZX_PROP_NAME, vmo_name_,
+ internal_strlen(vmo_name_));
+
+ // Map the largest possible view we might need into the VMO. Later
+ // we might need to increase the VMO's size before we can use larger
+ // indices, but we'll never move the mapping address so we don't have
+ // any multi-thread synchronization issues with that.
+ uintptr_t mapping;
+ status =
+ _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
+ 0, vmo_, 0, MappingSize, &mapping);
+ CHECK_EQ(status, ZX_OK);
+
+ // Hereafter other threads are free to start storing into
+ // elements [1, next_index_) of the big array.
+ array_ = reinterpret_cast<uptr *>(mapping);
+
+ // Store the magic number.
+ // Hereafter, the VMO serves as the contents of the '.sancov' file.
+ array_[0] = Magic64;
+
+ return 1;
+ } else {
+ // The VMO is already mapped in, but it's not big enough to use the
+ // new indices. So increase the size to cover the new maximum index.
+
+ CHECK_NE(vmo_, ZX_HANDLE_INVALID);
+ CHECK_NE(array_, nullptr);
+
+ uint32_t first_index = next_index_;
+ next_index_ += num_guards;
+
+ zx_status_t status = _zx_vmo_set_size(vmo_, DataSize());
+ CHECK_EQ(status, ZX_OK);
+
+ return first_index;
+ }
+ }
+};
+
+static TracePcGuardController pc_guard_controller;
+
+} // namespace
+} // namespace __sancov
+
+namespace __sanitizer {
+void InitializeCoverage(bool enabled, const char *dir) {
+ CHECK_EQ(enabled, common_flags()->coverage);
+ CHECK_EQ(dir, common_flags()->coverage_dir);
+
+ static bool coverage_enabled = false;
+ if (!coverage_enabled) {
+ coverage_enabled = enabled;
+ Atexit(__sanitizer_cov_dump);
+ AddDieCallback(__sanitizer_cov_dump);
+ }
+}
+} // namespace __sanitizer
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_coverage( // NOLINT
+ const uptr *pcs, uptr len) {
+ UNIMPLEMENTED();
+}
+
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_guard, u32 *guard) {
+ if (!*guard) return;
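+ // PC - 1 points within the call instruction itself, so symbolization
+ // attributes the hit to the call site rather than the return address.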
+ __sancov::pc_guard_controller.TracePcGuard(guard, GET_CALLER_PC() - 1);
+}
+
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_guard_init,
+ u32 *start, u32 *end) {
+ if (start == end || *start) return;
+ __sancov::pc_guard_controller.InitTracePcGuard(start, end);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_trace_pc_guard_coverage() {
+ __sancov::pc_guard_controller.Dump();
+}
+SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump() {
+ __sanitizer_dump_trace_pc_guard_coverage();
+}
+// Default empty implementations (weak). Users should redefine them.
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp1, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp2, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp4, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp8, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_const_cmp1, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_const_cmp2, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_const_cmp4, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_const_cmp8, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_switch, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_div4, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_div8, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_gep, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_indir, void) {}
+} // extern "C"
+
+#endif // !SANITIZER_FUCHSIA
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_coverage_fuchsia.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_coverage_interface.inc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_coverage_interface.inc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_coverage_interface.inc (revision 351984)
@@ -0,0 +1,32 @@
+//===-- sanitizer_coverage_interface.inc ----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// Sanitizer Coverage interface list.
+//===----------------------------------------------------------------------===//
+INTERFACE_FUNCTION(__sanitizer_cov_dump)
+INTERFACE_FUNCTION(__sanitizer_cov_reset)
+INTERFACE_FUNCTION(__sanitizer_dump_coverage)
+INTERFACE_FUNCTION(__sanitizer_dump_trace_pc_guard_coverage)
+INTERFACE_WEAK_FUNCTION(__sancov_default_options)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_cmp)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_cmp1)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_cmp2)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_cmp4)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_cmp8)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_const_cmp1)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_const_cmp2)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_const_cmp4)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_const_cmp8)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_div4)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_div8)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_gep)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_pc_guard)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_pc_guard_init)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_pc_indir)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_trace_switch)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_8bit_counters_init)
+INTERFACE_WEAK_FUNCTION(__sanitizer_cov_pcs_init)
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_coverage_interface.inc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_coverage_libcdep_new.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_coverage_libcdep_new.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_coverage_libcdep_new.cc (revision 351984)
@@ -0,0 +1,218 @@
+//===-- sanitizer_coverage_libcdep_new.cc ---------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// Sanitizer Coverage Controller for Trace PC Guard.
+
+#include "sanitizer_platform.h"
+
+#if !SANITIZER_FUCHSIA
+#include "sancov_flags.h"
+#include "sanitizer_allocator_internal.h"
+#include "sanitizer_atomic.h"
+#include "sanitizer_common.h"
+#include "sanitizer_file.h"
+
+using namespace __sanitizer;
+
+using AddressRange = LoadedModule::AddressRange;
+
+namespace __sancov {
+namespace {
+
+static const u64 Magic64 = 0xC0BFFFFFFFFFFF64ULL;
+static const u64 Magic32 = 0xC0BFFFFFFFFFFF32ULL;
+static const u64 Magic = SANITIZER_WORDSIZE == 64 ? Magic64 : Magic32;
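+// A .sancov dump is the 8-byte magic followed by one word per PC; the low
+// byte of the magic (0x64 vs 0x32) records the word size of the producer.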
+
+static fd_t OpenFile(const char* path) {
+ error_t err;
+ fd_t fd = OpenFile(path, WrOnly, &err);
+ if (fd == kInvalidFd)
+ Report("SanitizerCoverage: failed to open %s for writing (reason: %d)\n",
+ path, err);
+ return fd;
+}
+
+static void GetCoverageFilename(char* path, const char* name,
+ const char* extension) {
+ CHECK(name);
+ internal_snprintf(path, kMaxPathLength, "%s/%s.%zd.%s",
+ common_flags()->coverage_dir, name, internal_getpid(),
+ extension);
+}
+
+static void WriteModuleCoverage(char* file_path, const char* module_name,
+ const uptr* pcs, uptr len) {
+ GetCoverageFilename(file_path, StripModuleName(module_name), "sancov");
+ fd_t fd = OpenFile(file_path);
+ WriteToFile(fd, &Magic, sizeof(Magic));
+ WriteToFile(fd, pcs, len * sizeof(*pcs));
+ CloseFile(fd);
+ Printf("SanitizerCoverage: %s: %zd PCs written\n", file_path, len);
+}
+
+static void SanitizerDumpCoverage(const uptr* unsorted_pcs, uptr len) {
+ if (!len) return;
+
+ char* file_path = static_cast<char*>(InternalAlloc(kMaxPathLength));
+ char* module_name = static_cast<char*>(InternalAlloc(kMaxPathLength));
+ uptr* pcs = static_cast<uptr*>(InternalAlloc(len * sizeof(uptr)));
+
+ internal_memcpy(pcs, unsorted_pcs, len * sizeof(uptr));
+ Sort(pcs, len);
+
+ bool module_found = false;
+ uptr last_base = 0;
+ uptr module_start_idx = 0;
+
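+ // The PCs are sorted, so entries from the same module are contiguous.
+ // Rewrite each PC to its module offset in place, and flush a module's
+ // slice to its own .sancov file whenever the owning module changes.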
+ for (uptr i = 0; i < len; ++i) {
+ const uptr pc = pcs[i];
+ if (!pc) continue;
+
+ if (!__sanitizer_get_module_and_offset_for_pc(pc, nullptr, 0, &pcs[i])) {
+ Printf("ERROR: unknown pc 0x%x (may happen if dlclose is used)\n", pc);
+ continue;
+ }
+ uptr module_base = pc - pcs[i];
+
+ if (module_base != last_base || !module_found) {
+ if (module_found) {
+ WriteModuleCoverage(file_path, module_name, &pcs[module_start_idx],
+ i - module_start_idx);
+ }
+
+ last_base = module_base;
+ module_start_idx = i;
+ module_found = true;
+ __sanitizer_get_module_and_offset_for_pc(pc, module_name, kMaxPathLength,
+ &pcs[i]);
+ }
+ }
+
+ if (module_found) {
+ WriteModuleCoverage(file_path, module_name, &pcs[module_start_idx],
+ len - module_start_idx);
+ }
+
+ InternalFree(file_path);
+ InternalFree(module_name);
+ InternalFree(pcs);
+}
+
+// Collects trace-pc guard coverage.
+// This class relies on zero-initialization.
+class TracePcGuardController {
+ public:
+ void Initialize() {
+ CHECK(!initialized);
+
+ initialized = true;
+ InitializeSancovFlags();
+
+ pc_vector.Initialize(0);
+ }
+
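+ // Assigns each guard in [start, end) the next 1-based index into
+ // pc_vector; a guard value of 0 disables tracing for that edge.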
+ void InitTracePcGuard(u32* start, u32* end) {
+ if (!initialized) Initialize();
+ CHECK(!*start);
+ CHECK_NE(start, end);
+
+ u32 i = pc_vector.size();
+ for (u32* p = start; p < end; p++) *p = ++i;
+ pc_vector.resize(i);
+ }
+
+ void TracePcGuard(u32* guard, uptr pc) {
+ u32 idx = *guard;
+ if (!idx) return;
+ // Guard indices start from 1, so pc_vector is indexed with idx - 1.
+ atomic_uintptr_t* pc_ptr =
+ reinterpret_cast<atomic_uintptr_t*>(&pc_vector[idx - 1]);
+ if (atomic_load(pc_ptr, memory_order_relaxed) == 0)
+ atomic_store(pc_ptr, pc, memory_order_relaxed);
+ }
+
+ void Reset() {
+ internal_memset(&pc_vector[0], 0, sizeof(pc_vector[0]) * pc_vector.size());
+ }
+
+ void Dump() {
+ if (!initialized || !common_flags()->coverage) return;
+ __sanitizer_dump_coverage(pc_vector.data(), pc_vector.size());
+ }
+
+ private:
+ bool initialized;
+ InternalMmapVectorNoCtor<uptr> pc_vector;
+};
+
+static TracePcGuardController pc_guard_controller;
+
+} // namespace
+} // namespace __sancov
+
+namespace __sanitizer {
+void InitializeCoverage(bool enabled, const char *dir) {
+ static bool coverage_enabled = false;
+ if (coverage_enabled)
+ return; // May happen if two sanitizers enable coverage in the same process.
+ coverage_enabled = enabled;
+ Atexit(__sanitizer_cov_dump);
+ AddDieCallback(__sanitizer_cov_dump);
+}
+} // namespace __sanitizer
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_coverage( // NOLINT
+ const uptr* pcs, uptr len) {
+ return __sancov::SanitizerDumpCoverage(pcs, len);
+}
+
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_guard, u32* guard) {
+ if (!*guard) return;
+ __sancov::pc_guard_controller.TracePcGuard(guard, GET_CALLER_PC() - 1);
+}
+
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_guard_init,
+ u32* start, u32* end) {
+ if (start == end || *start) return;
+ __sancov::pc_guard_controller.InitTracePcGuard(start, end);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_trace_pc_guard_coverage() {
+ __sancov::pc_guard_controller.Dump();
+}
+SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump() {
+ __sanitizer_dump_trace_pc_guard_coverage();
+}
+SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_reset() {
+ __sancov::pc_guard_controller.Reset();
+}
+// Default empty implementations (weak). Users should redefine them.
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp1, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp2, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp4, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_cmp8, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_const_cmp1, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_const_cmp2, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_const_cmp4, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_const_cmp8, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_switch, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_div4, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_div8, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_gep, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_indir, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_8bit_counters_init, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_pcs_init, void) {}
+} // extern "C"
+// Weak definition for code instrumented with -fsanitize-coverage=stack-depth
+// and later linked with code containing a strong definition.
+// E.g., -fsanitize=fuzzer-no-link
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+SANITIZER_TLS_INITIAL_EXEC_ATTRIBUTE uptr __sancov_lowest_stack;
+
+#endif // !SANITIZER_FUCHSIA
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_coverage_libcdep_new.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_coverage_win_dll_thunk.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_coverage_win_dll_thunk.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_coverage_win_dll_thunk.cc (revision 351984)
@@ -0,0 +1,20 @@
+//===-- sanitizer_coverage_win_dll_thunk.cc -------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a family of thunks that should be statically linked into
+// the DLLs that have instrumentation in order to delegate the calls to the
+// shared runtime that lives in the main binary.
+// See https://github.com/google/sanitizers/issues/209 for the details.
+//===----------------------------------------------------------------------===//
+#ifdef SANITIZER_DLL_THUNK
+#include "sanitizer_win_dll_thunk.h"
+// Sanitizer Coverage interface functions.
+#define INTERFACE_FUNCTION(Name) INTERCEPT_SANITIZER_FUNCTION(Name)
+#define INTERFACE_WEAK_FUNCTION(Name) INTERCEPT_SANITIZER_WEAK_FUNCTION(Name)
+#include "sanitizer_coverage_interface.inc"
+#endif // SANITIZER_DLL_THUNK
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_coverage_win_dll_thunk.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_coverage_win_dynamic_runtime_thunk.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_coverage_win_dynamic_runtime_thunk.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_coverage_win_dynamic_runtime_thunk.cc (revision 351984)
@@ -0,0 +1,26 @@
+//===-- sanitizer_coverage_win_dynamic_runtime_thunk.cc -------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines things that need to be present in the application modules
+// to interact with Sanitizer Coverage, when it is included in a dll.
+//
+//===----------------------------------------------------------------------===//
+#ifdef SANITIZER_DYNAMIC_RUNTIME_THUNK
+#define SANITIZER_IMPORT_INTERFACE 1
+#include "sanitizer_win_defs.h"
+// Define weak alias for all weak functions imported from sanitizer coverage.
+#define INTERFACE_FUNCTION(Name)
+#define INTERFACE_WEAK_FUNCTION(Name) WIN_WEAK_IMPORT_DEF(Name)
+#include "sanitizer_coverage_interface.inc"
+#endif // SANITIZER_DYNAMIC_RUNTIME_THUNK
+
+namespace __sanitizer {
+// Add one, otherwise unused, external symbol to this object file so that the
+// Visual C++ linker includes it and reads the .drective section.
+void ForceWholeArchiveIncludeForSanCov() {}
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_coverage_win_dynamic_runtime_thunk.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_coverage_win_sections.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_coverage_win_sections.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_coverage_win_sections.cc (revision 351984)
@@ -0,0 +1,67 @@
+//===-- sanitizer_coverage_win_sections.cc --------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines delimiters for Sanitizer Coverage's section. It contains
+// Windows-specific tricks to coax the linker into giving us the start and
+// stop addresses of a section, as ELF linkers can do, to get the size of
+// certain arrays. According to
+// https://msdn.microsoft.com/en-us/library/7977wcck.aspx sections with the
+// same name before "$" are merged into one section, sorted alphabetically by
+// the string that comes after "$". We take advantage of this by placing the
+// data we want the size of into the middle ("M") of a section, using the
+// letter "M" after "$". We get the start of this data (i.e.
+// __start_section_name) by making the start variable come at the start of the
+// section (using the letter "A" after "$"). We do the same to get the end of
+// the data by using the letter "Z" after "$" to make the end variable come
+// after the data. Note that because of this technique the address of the
+// start variable is actually the address of data that comes before our middle
+// section. We also need to prevent the linker from adding any padding. Each
+// technique we use for this is explained in the comments below.
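+// For example, the 8-bit counter array itself is emitted into a section
+// sorting between "$CA" and "$CZ" (presumably ".SCOV$CM"), so after merging
+// it lies exactly between __start___sancov_cntrs and __stop___sancov_cntrs.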
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+#if SANITIZER_WINDOWS
+#include <stdint.h>
+
+extern "C" {
+// Use uint64_t so the linker won't need to add any padding if it tries to word
+// align the start of the 8-bit counters array. The array will always start 8
+// bytes after __start_sancov_cntrs.
+#pragma section(".SCOV$CA", read, write) // NOLINT
+__declspec(allocate(".SCOV$CA")) uint64_t __start___sancov_cntrs = 0;
+
+// Even though we ask not to align __stop___sancov_cntrs (using the "align"
+// declspec), MSVC's linker may still try to align the section containing it,
+// .SCOV$CZ. Each PCTable element is 8 bytes (unlike counters, which are 1
+// byte each), so no padding is added to align .SCOVP$Z; padding in .SCOV$CZ
+// would therefore create a mismatch between the number of PCs and counters.
+// However, if the .SCOV$CZ section is only 1 byte, the linker won't try to
+// align it on an 8-byte boundary, so use a uint8_t for __stop___sancov_cntrs.
+#pragma section(".SCOV$CZ", read, write) // NOLINT
+__declspec(allocate(".SCOV$CZ")) __declspec(align(1)) uint8_t
+ __stop___sancov_cntrs = 0;
+
+#pragma section(".SCOV$GA", read, write) // NOLINT
+__declspec(allocate(".SCOV$GA")) uint64_t __start___sancov_guards = 0;
+#pragma section(".SCOV$GZ", read, write) // NOLINT
+__declspec(allocate(".SCOV$GZ")) __declspec(align(1)) uint8_t
+ __stop___sancov_guards = 0;
+
+// The guard array and counter array should both be merged into the .data
+// section to reduce the number of PE sections. However, because PCTable is
+// constant it should be merged with the .rdata section.
+#pragma comment(linker, "/MERGE:.SCOV=.data")
+
+#pragma section(".SCOVP$A", read) // NOLINT
+__declspec(allocate(".SCOVP$A")) uint64_t __start___sancov_pcs = 0;
+#pragma section(".SCOVP$Z", read) // NOLINT
+__declspec(allocate(".SCOVP$Z")) __declspec(align(1)) uint8_t
+ __stop___sancov_pcs = 0;
+
+#pragma comment(linker, "/MERGE:.SCOVP=.rdata")
+}
+#endif // SANITIZER_WINDOWS
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_coverage_win_sections.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_coverage_win_weak_interception.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_coverage_win_weak_interception.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_coverage_win_weak_interception.cc (revision 351984)
@@ -0,0 +1,23 @@
+//===-- sanitizer_coverage_win_weak_interception.cc -----------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// This module should be included in Sanitizer Coverage when it is implemented
+// as a shared library on Windows (DLL), in order to delegate calls to weak
+// functions to the implementation in the main executable when a strong
+// definition is provided.
+//===----------------------------------------------------------------------===//
+#ifdef SANITIZER_DYNAMIC
+#include "sanitizer_win_weak_interception.h"
+#include "sanitizer_interface_internal.h"
+#include "sancov_flags.h"
+// Check if strong definitions for weak functions are present in the main
+// executable. If that is the case, override dll functions to point to strong
+// implementations.
+#define INTERFACE_FUNCTION(Name)
+#define INTERFACE_WEAK_FUNCTION(Name) INTERCEPT_SANITIZER_WEAK_FUNCTION(Name)
+#include "sanitizer_coverage_interface.inc"
+#endif // SANITIZER_DYNAMIC
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_coverage_win_weak_interception.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_dbghelp.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_dbghelp.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_dbghelp.h (revision 351984)
@@ -0,0 +1,41 @@
+//===-- sanitizer_dbghelp.h ------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Wrappers for lazy loaded dbghelp.dll. Provides function pointers and a
+// callback to initialize them.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_SYMBOLIZER_WIN_H
+#define SANITIZER_SYMBOLIZER_WIN_H
+
+#if !SANITIZER_WINDOWS
+#error "sanitizer_dbghelp.h is a Windows-only header"
+#endif
+
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#include <dbghelp.h>
+
+namespace __sanitizer {
+
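+// Each pointer mirrors the signature of the corresponding dbghelp.dll export
+// via decltype; the initialization callback mentioned above fills them in
+// once dbghelp.dll has been lazily loaded.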
+extern decltype(::StackWalk64) *StackWalk64;
+extern decltype(::SymCleanup) *SymCleanup;
+extern decltype(::SymFromAddr) *SymFromAddr;
+extern decltype(::SymFunctionTableAccess64) *SymFunctionTableAccess64;
+extern decltype(::SymGetLineFromAddr64) *SymGetLineFromAddr64;
+extern decltype(::SymGetModuleBase64) *SymGetModuleBase64;
+extern decltype(::SymGetSearchPathW) *SymGetSearchPathW;
+extern decltype(::SymInitialize) *SymInitialize;
+extern decltype(::SymSetOptions) *SymSetOptions;
+extern decltype(::SymSetSearchPathW) *SymSetSearchPathW;
+extern decltype(::UnDecorateSymbolName) *UnDecorateSymbolName;
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_SYMBOLIZER_WIN_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_dbghelp.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_deadlock_detector.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_deadlock_detector.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_deadlock_detector.h (revision 351984)
@@ -0,0 +1,410 @@
+//===-- sanitizer_deadlock_detector.h ---------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of Sanitizer runtime.
+// The deadlock detector maintains a directed graph of lock acquisitions.
+// When a lock event happens, the detector checks if the locks already held by
+// the current thread are reachable from the newly acquired lock.
+//
+// The detector can handle only a fixed amount of simultaneously live locks
+// (a lock is alive if it has been locked at least once and has not been
+// destroyed). When the maximal number of locks is reached the entire graph
+// is flushed and the new lock epoch is started. The node ids from the old
+// epochs cannot be used with any of the detector methods except for
+// nodeBelongsToCurrentEpoch().
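+//
+// Node ids encode both pieces: node == index + epoch; every epoch is a
+// multiple of the detector's size, so index = node % size() and
+// epoch = node / size() * size() are cheap to recover.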
+//
+// FIXME: this is work in progress, nothing really works yet.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_DEADLOCK_DETECTOR_H
+#define SANITIZER_DEADLOCK_DETECTOR_H
+
+#include "sanitizer_bvgraph.h"
+#include "sanitizer_common.h"
+
+namespace __sanitizer {
+
+// Thread-local state for DeadlockDetector.
+// It contains the locks currently held by the owning thread.
+template <class BV>
+class DeadlockDetectorTLS {
+ public:
+ // No CTOR.
+ void clear() {
+ bv_.clear();
+ epoch_ = 0;
+ n_recursive_locks = 0;
+ n_all_locks_ = 0;
+ }
+
+ bool empty() const { return bv_.empty(); }
+
+ void ensureCurrentEpoch(uptr current_epoch) {
+ if (epoch_ == current_epoch) return;
+ bv_.clear();
+ epoch_ = current_epoch;
+ n_recursive_locks = 0;
+ n_all_locks_ = 0;
+ }
+
+ uptr getEpoch() const { return epoch_; }
+
+ // Returns true if this is the first (non-recursive) acquisition of this lock.
+ bool addLock(uptr lock_id, uptr current_epoch, u32 stk) {
+ CHECK_EQ(epoch_, current_epoch);
+ if (!bv_.setBit(lock_id)) {
+ // The lock is already held by this thread, it must be recursive.
+ CHECK_LT(n_recursive_locks, ARRAY_SIZE(recursive_locks));
+ recursive_locks[n_recursive_locks++] = lock_id;
+ return false;
+ }
+ CHECK_LT(n_all_locks_, ARRAY_SIZE(all_locks_with_contexts_));
+ // lock_id < BV::kSize, can cast to a smaller int.
+ u32 lock_id_short = static_cast<u32>(lock_id);
+ LockWithContext l = {lock_id_short, stk};
+ all_locks_with_contexts_[n_all_locks_++] = l;
+ return true;
+ }
+
+ void removeLock(uptr lock_id) {
+ if (n_recursive_locks) {
+ for (sptr i = n_recursive_locks - 1; i >= 0; i--) {
+ if (recursive_locks[i] == lock_id) {
+ n_recursive_locks--;
+ Swap(recursive_locks[i], recursive_locks[n_recursive_locks]);
+ return;
+ }
+ }
+ }
+ if (!bv_.clearBit(lock_id))
+ return; // Probably the matching addLock() happened before a flush.
+ if (n_all_locks_) {
+ for (sptr i = n_all_locks_ - 1; i >= 0; i--) {
+ if (all_locks_with_contexts_[i].lock == static_cast<u32>(lock_id)) {
+ Swap(all_locks_with_contexts_[i],
+ all_locks_with_contexts_[n_all_locks_ - 1]);
+ n_all_locks_--;
+ break;
+ }
+ }
+ }
+ }
+
+ u32 findLockContext(uptr lock_id) {
+ for (uptr i = 0; i < n_all_locks_; i++)
+ if (all_locks_with_contexts_[i].lock == static_cast<u32>(lock_id))
+ return all_locks_with_contexts_[i].stk;
+ return 0;
+ }
+
+ const BV &getLocks(uptr current_epoch) const {
+ CHECK_EQ(epoch_, current_epoch);
+ return bv_;
+ }
+
+ uptr getNumLocks() const { return n_all_locks_; }
+ uptr getLock(uptr idx) const { return all_locks_with_contexts_[idx].lock; }
+
+ private:
+ BV bv_;
+ uptr epoch_;
+ uptr recursive_locks[64];
+ uptr n_recursive_locks;
+ struct LockWithContext {
+ u32 lock;
+ u32 stk;
+ };
+ LockWithContext all_locks_with_contexts_[64];
+ uptr n_all_locks_;
+};
+
+// DeadlockDetector.
+// For deadlock detection to work we need one global DeadlockDetector object
+// and one DeadlockDetectorTLS object per thread.
+// Most of the methods of this class are not thread-safe (i.e. should be
+// guarded by an external lock) unless explicitly told otherwise.
+template <class BV>
+class DeadlockDetector {
+ public:
+ typedef BV BitVector;
+
+ uptr size() const { return g_.size(); }
+
+ // No CTOR.
+ void clear() {
+ current_epoch_ = 0;
+ available_nodes_.clear();
+ recycled_nodes_.clear();
+ g_.clear();
+ n_edges_ = 0;
+ }
+
+ // Allocate new deadlock detector node.
+ // If we are out of available nodes first try to recycle some.
+ // If there is nothing to recycle, flush the graph and increment the epoch.
+ // Associate 'data' (opaque user's object) with the new node.
+ uptr newNode(uptr data) {
+ if (!available_nodes_.empty())
+ return getAvailableNode(data);
+ if (!recycled_nodes_.empty()) {
+ for (sptr i = n_edges_ - 1; i >= 0; i--) {
+ if (recycled_nodes_.getBit(edges_[i].from) ||
+ recycled_nodes_.getBit(edges_[i].to)) {
+ Swap(edges_[i], edges_[n_edges_ - 1]);
+ n_edges_--;
+ }
+ }
+ CHECK(available_nodes_.empty());
+ // removeEdgesFrom was called in removeNode.
+ g_.removeEdgesTo(recycled_nodes_);
+ available_nodes_.setUnion(recycled_nodes_);
+ recycled_nodes_.clear();
+ return getAvailableNode(data);
+ }
+ // We are out of vacant nodes. Flush and increment the current_epoch_.
+ current_epoch_ += size();
+ recycled_nodes_.clear();
+ available_nodes_.setAll();
+ g_.clear();
+ n_edges_ = 0;
+ return getAvailableNode(data);
+ }
+
+ // Get data associated with the node created by newNode().
+ uptr getData(uptr node) const { return data_[nodeToIndex(node)]; }
+
+ bool nodeBelongsToCurrentEpoch(uptr node) {
+ return node && (node / size() * size()) == current_epoch_;
+ }
+
+ void removeNode(uptr node) {
+ uptr idx = nodeToIndex(node);
+ CHECK(!available_nodes_.getBit(idx));
+ CHECK(recycled_nodes_.setBit(idx));
+ g_.removeEdgesFrom(idx);
+ }
+
+ void ensureCurrentEpoch(DeadlockDetectorTLS<BV> *dtls) {
+ dtls->ensureCurrentEpoch(current_epoch_);
+ }
+
+ // Returns true if there is a cycle in the graph after this lock event.
+ // Ideally should be called before the lock is acquired so that we can
+ // report a deadlock before a real deadlock happens.
+ bool onLockBefore(DeadlockDetectorTLS<BV> *dtls, uptr cur_node) {
+ ensureCurrentEpoch(dtls);
+ uptr cur_idx = nodeToIndex(cur_node);
+ return g_.isReachable(cur_idx, dtls->getLocks(current_epoch_));
+ }
+
+ u32 findLockContext(DeadlockDetectorTLS<BV> *dtls, uptr node) {
+ return dtls->findLockContext(nodeToIndex(node));
+ }
+
+ // Add cur_node to the set of locks held currently by dtls.
+ void onLockAfter(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, u32 stk = 0) {
+ ensureCurrentEpoch(dtls);
+ uptr cur_idx = nodeToIndex(cur_node);
+ dtls->addLock(cur_idx, current_epoch_, stk);
+ }
+
+ // Experimental *racy* fast path function.
+ // Returns true if all edges from the currently held locks to cur_node exist.
+ bool hasAllEdges(DeadlockDetectorTLS<BV> *dtls, uptr cur_node) {
+ uptr local_epoch = dtls->getEpoch();
+ // Read from current_epoch_ is racy.
+ if (cur_node && local_epoch == current_epoch_ &&
+ local_epoch == nodeToEpoch(cur_node)) {
+ uptr cur_idx = nodeToIndexUnchecked(cur_node);
+ for (uptr i = 0, n = dtls->getNumLocks(); i < n; i++) {
+ if (!g_.hasEdge(dtls->getLock(i), cur_idx))
+ return false;
+ }
+ return true;
+ }
+ return false;
+ }
+
+ // Adds edges from currently held locks to cur_node,
+ // returns the number of added edges, and puts the sources of added edges
+ // into added_edges[].
+ // Should be called before onLockAfter.
+ uptr addEdges(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, u32 stk,
+ int unique_tid) {
+ ensureCurrentEpoch(dtls);
+ uptr cur_idx = nodeToIndex(cur_node);
+ uptr added_edges[40];
+ uptr n_added_edges = g_.addEdges(dtls->getLocks(current_epoch_), cur_idx,
+ added_edges, ARRAY_SIZE(added_edges));
+ for (uptr i = 0; i < n_added_edges; i++) {
+ if (n_edges_ < ARRAY_SIZE(edges_)) {
+ Edge e = {(u16)added_edges[i], (u16)cur_idx,
+ dtls->findLockContext(added_edges[i]), stk,
+ unique_tid};
+ edges_[n_edges_++] = e;
+ }
+ }
+ return n_added_edges;
+ }
+
+ bool findEdge(uptr from_node, uptr to_node, u32 *stk_from, u32 *stk_to,
+ int *unique_tid) {
+ uptr from_idx = nodeToIndex(from_node);
+ uptr to_idx = nodeToIndex(to_node);
+ for (uptr i = 0; i < n_edges_; i++) {
+ if (edges_[i].from == from_idx && edges_[i].to == to_idx) {
+ *stk_from = edges_[i].stk_from;
+ *stk_to = edges_[i].stk_to;
+ *unique_tid = edges_[i].unique_tid;
+ return true;
+ }
+ }
+ return false;
+ }
+
+ // Test-only function. Handles the before/after lock events,
+ // returns true if there is a cycle.
+ bool onLock(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, u32 stk = 0) {
+ ensureCurrentEpoch(dtls);
+ bool is_reachable = !isHeld(dtls, cur_node) && onLockBefore(dtls, cur_node);
+ addEdges(dtls, cur_node, stk, 0);
+ onLockAfter(dtls, cur_node, stk);
+ return is_reachable;
+ }
+
+ // Handles the try_lock event, returns false.
+ // When a try_lock event happens (i.e. a try_lock call succeeds) we need
+ // to add this lock to the currently held locks, but we should not try to
+ // change the lock graph or to detect a cycle. We may want to investigate
+ // whether a more aggressive strategy is possible for try_lock.
+ bool onTryLock(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, u32 stk = 0) {
+ ensureCurrentEpoch(dtls);
+ uptr cur_idx = nodeToIndex(cur_node);
+ dtls->addLock(cur_idx, current_epoch_, stk);
+ return false;
+ }
+
+ // Returns true iff dtls is empty (no locks are currently held) and we can
+ // add the node to the currently held locks w/o changing the global state.
+ // This operation is thread-safe as it only touches the dtls.
+ bool onFirstLock(DeadlockDetectorTLS<BV> *dtls, uptr node, u32 stk = 0) {
+ if (!dtls->empty()) return false;
+ if (dtls->getEpoch() && dtls->getEpoch() == nodeToEpoch(node)) {
+ dtls->addLock(nodeToIndexUnchecked(node), nodeToEpoch(node), stk);
+ return true;
+ }
+ return false;
+ }
+
+ // Finds a path between the lock 'cur_node' (currently not held in dtls)
+ // and some currently held lock, returns the length of the path
+ // or 0 on failure.
+ uptr findPathToLock(DeadlockDetectorTLS<BV> *dtls, uptr cur_node, uptr *path,
+ uptr path_size) {
+ tmp_bv_.copyFrom(dtls->getLocks(current_epoch_));
+ uptr idx = nodeToIndex(cur_node);
+ CHECK(!tmp_bv_.getBit(idx));
+ uptr res = g_.findShortestPath(idx, tmp_bv_, path, path_size);
+ for (uptr i = 0; i < res; i++)
+ path[i] = indexToNode(path[i]);
+ if (res)
+ CHECK_EQ(path[0], cur_node);
+ return res;
+ }
+
+ // Handle the unlock event.
+ // This operation is thread-safe as it only touches the dtls.
+ void onUnlock(DeadlockDetectorTLS<BV> *dtls, uptr node) {
+ if (dtls->getEpoch() == nodeToEpoch(node))
+ dtls->removeLock(nodeToIndexUnchecked(node));
+ }
+
+ // Tries to handle the lock event w/o writing to global state.
+ // Returns true on success.
+ // This operation is thread-safe as it only touches the dtls
+ // (modulo racy nature of hasAllEdges).
+ bool onLockFast(DeadlockDetectorTLS<BV> *dtls, uptr node, u32 stk = 0) {
+ if (hasAllEdges(dtls, node)) {
+ dtls->addLock(nodeToIndexUnchecked(node), nodeToEpoch(node), stk);
+ return true;
+ }
+ return false;
+ }
+
+ bool isHeld(DeadlockDetectorTLS<BV> *dtls, uptr node) const {
+ return dtls->getLocks(current_epoch_).getBit(nodeToIndex(node));
+ }
+
+ uptr testOnlyGetEpoch() const { return current_epoch_; }
+ bool testOnlyHasEdge(uptr l1, uptr l2) {
+ return g_.hasEdge(nodeToIndex(l1), nodeToIndex(l2));
+ }
+ // idx1 and idx2 are raw indices to g_, not lock IDs.
+ bool testOnlyHasEdgeRaw(uptr idx1, uptr idx2) {
+ return g_.hasEdge(idx1, idx2);
+ }
+
+ void Print() {
+ for (uptr from = 0; from < size(); from++)
+ for (uptr to = 0; to < size(); to++)
+ if (g_.hasEdge(from, to))
+ Printf(" %zx => %zx\n", from, to);
+ }
+
+ private:
+ void check_idx(uptr idx) const { CHECK_LT(idx, size()); }
+
+ void check_node(uptr node) const {
+ CHECK_GE(node, size());
+ CHECK_EQ(current_epoch_, nodeToEpoch(node));
+ }
+
+ uptr indexToNode(uptr idx) const {
+ check_idx(idx);
+ return idx + current_epoch_;
+ }
+
+ uptr nodeToIndexUnchecked(uptr node) const { return node % size(); }
+
+ uptr nodeToIndex(uptr node) const {
+ check_node(node);
+ return nodeToIndexUnchecked(node);
+ }
+
+ uptr nodeToEpoch(uptr node) const { return node / size() * size(); }
+
+ uptr getAvailableNode(uptr data) {
+ uptr idx = available_nodes_.getAndClearFirstOne();
+ data_[idx] = data;
+ return indexToNode(idx);
+ }
+
+ struct Edge {
+ u16 from;
+ u16 to;
+ u32 stk_from;
+ u32 stk_to;
+ int unique_tid;
+ };
+
+ uptr current_epoch_;
+ BV available_nodes_;
+ BV recycled_nodes_;
+ BV tmp_bv_;
+ BVGraph<BV> g_;
+ uptr data_[BV::kSize];
+ Edge edges_[BV::kSize * 32];
+ uptr n_edges_;
+};
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_DEADLOCK_DETECTOR_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_deadlock_detector.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_deadlock_detector1.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_deadlock_detector1.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_deadlock_detector1.cc (revision 351984)
@@ -0,0 +1,194 @@
+//===-- sanitizer_deadlock_detector1.cc -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Deadlock detector implementation based on NxN adjacency bit matrix.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_deadlock_detector_interface.h"
+#include "sanitizer_deadlock_detector.h"
+#include "sanitizer_allocator_internal.h"
+#include "sanitizer_placement_new.h"
+#include "sanitizer_mutex.h"
+
+#if SANITIZER_DEADLOCK_DETECTOR_VERSION == 1
+
+namespace __sanitizer {
+
+typedef TwoLevelBitVector<> DDBV; // DeadlockDetector's bit vector.
+
+struct DDPhysicalThread {
+};
+
+struct DDLogicalThread {
+ u64 ctx;
+ DeadlockDetectorTLS<DDBV> dd;
+ DDReport rep;
+ bool report_pending;
+};
+
+struct DD : public DDetector {
+ SpinMutex mtx;
+ DeadlockDetector<DDBV> dd;
+ DDFlags flags;
+
+ explicit DD(const DDFlags *flags);
+
+ DDPhysicalThread *CreatePhysicalThread() override;
+ void DestroyPhysicalThread(DDPhysicalThread *pt) override;
+
+ DDLogicalThread *CreateLogicalThread(u64 ctx) override;
+ void DestroyLogicalThread(DDLogicalThread *lt) override;
+
+ void MutexInit(DDCallback *cb, DDMutex *m) override;
+ void MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock) override;
+ void MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock,
+ bool trylock) override;
+ void MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) override;
+ void MutexDestroy(DDCallback *cb, DDMutex *m) override;
+
+ DDReport *GetReport(DDCallback *cb) override;
+
+ void MutexEnsureID(DDLogicalThread *lt, DDMutex *m);
+ void ReportDeadlock(DDCallback *cb, DDMutex *m);
+};
+
+DDetector *DDetector::Create(const DDFlags *flags) {
+ (void)flags;
+ void *mem = MmapOrDie(sizeof(DD), "deadlock detector");
+ return new(mem) DD(flags);
+}
+
+DD::DD(const DDFlags *flags)
+ : flags(*flags) {
+ dd.clear();
+}
+
+DDPhysicalThread* DD::CreatePhysicalThread() {
+ return nullptr;
+}
+
+void DD::DestroyPhysicalThread(DDPhysicalThread *pt) {
+}
+
+DDLogicalThread* DD::CreateLogicalThread(u64 ctx) {
+ DDLogicalThread *lt = (DDLogicalThread*)InternalAlloc(sizeof(*lt));
+ lt->ctx = ctx;
+ lt->dd.clear();
+ lt->report_pending = false;
+ return lt;
+}
+
+void DD::DestroyLogicalThread(DDLogicalThread *lt) {
+ lt->~DDLogicalThread();
+ InternalFree(lt);
+}
+
+void DD::MutexInit(DDCallback *cb, DDMutex *m) {
+ m->id = 0;
+ m->stk = cb->Unwind();
+}
+
+void DD::MutexEnsureID(DDLogicalThread *lt, DDMutex *m) {
+ if (!dd.nodeBelongsToCurrentEpoch(m->id))
+ m->id = dd.newNode(reinterpret_cast<uptr>(m));
+ dd.ensureCurrentEpoch(&lt->dd);
+}
+
+void DD::MutexBeforeLock(DDCallback *cb,
+ DDMutex *m, bool wlock) {
+ DDLogicalThread *lt = cb->lt;
+ if (lt->dd.empty()) return; // This will be the first lock held by lt.
+ if (dd.hasAllEdges(&lt->dd, m->id)) return; // We already have all edges.
+ SpinMutexLock lk(&mtx);
+ MutexEnsureID(lt, m);
+ if (dd.isHeld(&lt->dd, m->id))
+ return; // FIXME: allow this only for recursive locks.
+ if (dd.onLockBefore(&lt->dd, m->id)) {
+ // Actually add this edge now so that we have all the stack traces.
+ dd.addEdges(&lt->dd, m->id, cb->Unwind(), cb->UniqueTid());
+ ReportDeadlock(cb, m);
+ }
+}
+
+void DD::ReportDeadlock(DDCallback *cb, DDMutex *m) {
+ DDLogicalThread *lt = cb->lt;
+ uptr path[20];
+ uptr len = dd.findPathToLock(&lt->dd, m->id, path, ARRAY_SIZE(path));
+ if (len == 0U) {
+ // A cycle of 20+ locks? Well, that's a bit odd...
+ Printf("WARNING: too long mutex cycle found\n");
+ return;
+ }
+ CHECK_EQ(m->id, path[0]);
+ lt->report_pending = true;
+ len = Min<uptr>(len, DDReport::kMaxLoopSize);
+ DDReport *rep = &lt->rep;
+ rep->n = len;
+ for (uptr i = 0; i < len; i++) {
+ uptr from = path[i];
+ uptr to = path[(i + 1) % len];
+ DDMutex *m0 = (DDMutex*)dd.getData(from);
+ DDMutex *m1 = (DDMutex*)dd.getData(to);
+
+ u32 stk_from = -1U, stk_to = -1U;
+ int unique_tid = 0;
+ dd.findEdge(from, to, &stk_from, &stk_to, &unique_tid);
+ // Printf("Edge: %zd=>%zd: %u/%u T%d\n", from, to, stk_from, stk_to,
+ // unique_tid);
+ rep->loop[i].thr_ctx = unique_tid;
+ rep->loop[i].mtx_ctx0 = m0->ctx;
+ rep->loop[i].mtx_ctx1 = m1->ctx;
+ rep->loop[i].stk[0] = stk_to;
+ rep->loop[i].stk[1] = stk_from;
+ }
+}
+
+void DD::MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock, bool trylock) {
+ DDLogicalThread *lt = cb->lt;
+ u32 stk = 0;
+ if (flags.second_deadlock_stack)
+ stk = cb->Unwind();
+ // Printf("T%p MutexLock: %zx stk %u\n", lt, m->id, stk);
+ if (dd.onFirstLock(&lt->dd, m->id, stk))
+ return;
+ if (dd.onLockFast(&lt->dd, m->id, stk))
+ return;
+
+ SpinMutexLock lk(&mtx);
+ MutexEnsureID(lt, m);
+ if (wlock) // Only a recursive rlock may be held.
+ CHECK(!dd.isHeld(&lt->dd, m->id));
+ if (!trylock)
+ dd.addEdges(&lt->dd, m->id, stk ? stk : cb->Unwind(), cb->UniqueTid());
+ dd.onLockAfter(&lt->dd, m->id, stk);
+}
+
+void DD::MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) {
+ // Printf("T%p MutexUnLock: %zx\n", cb->lt, m->id);
+ dd.onUnlock(&cb->lt->dd, m->id);
+}
+
+void DD::MutexDestroy(DDCallback *cb,
+ DDMutex *m) {
+ if (!m->id) return;
+ SpinMutexLock lk(&mtx);
+ if (dd.nodeBelongsToCurrentEpoch(m->id))
+ dd.removeNode(m->id);
+ m->id = 0;
+}
+
+DDReport *DD::GetReport(DDCallback *cb) {
+ if (!cb->lt->report_pending)
+ return nullptr;
+ cb->lt->report_pending = false;
+ return &cb->lt->rep;
+}
+
+} // namespace __sanitizer
+#endif // #if SANITIZER_DEADLOCK_DETECTOR_VERSION == 1
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_deadlock_detector1.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_deadlock_detector2.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_deadlock_detector2.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_deadlock_detector2.cc (revision 351984)
@@ -0,0 +1,423 @@
+//===-- sanitizer_deadlock_detector2.cc -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Deadlock detector implementation based on adjacency lists.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_deadlock_detector_interface.h"
+#include "sanitizer_common.h"
+#include "sanitizer_allocator_internal.h"
+#include "sanitizer_placement_new.h"
+#include "sanitizer_mutex.h"
+
+#if SANITIZER_DEADLOCK_DETECTOR_VERSION == 2
+
+namespace __sanitizer {
+
+const int kMaxNesting = 64;
+const u32 kNoId = -1;
+const u32 kEndId = -2;
+const int kMaxLink = 8;
+const int kL1Size = 1024;
+const int kL2Size = 1024;
+const int kMaxMutex = kL1Size * kL2Size;
+
+struct Id {
+ u32 id;
+ u32 seq;
+
+ explicit Id(u32 id = 0, u32 seq = 0)
+ : id(id)
+ , seq(seq) {
+ }
+};
+
+struct Link {
+ u32 id;
+ u32 seq;
+ u32 tid;
+ u32 stk0;
+ u32 stk1;
+
+ explicit Link(u32 id = 0, u32 seq = 0, u32 tid = 0, u32 s0 = 0, u32 s1 = 0)
+ : id(id)
+ , seq(seq)
+ , tid(tid)
+ , stk0(s0)
+ , stk1(s1) {
+ }
+};
+
+struct DDPhysicalThread {
+ DDReport rep;
+ bool report_pending;
+ bool visited[kMaxMutex];
+ Link pending[kMaxMutex];
+ Link path[kMaxMutex];
+};
+
+struct ThreadMutex {
+ u32 id;
+ u32 stk;
+};
+
+struct DDLogicalThread {
+ u64 ctx;
+ ThreadMutex locked[kMaxNesting];
+ int nlocked;
+};
+
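+// Per-mutex adjacency record. seq is bumped when the mutex is destroyed so
+// that stale links (whose stored seq no longer matches) are skipped during
+// cycle traversal.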
+struct Mutex {
+ StaticSpinMutex mtx;
+ u32 seq;
+ int nlink;
+ Link link[kMaxLink];
+};
+
+struct DD : public DDetector {
+ explicit DD(const DDFlags *flags);
+
+ DDPhysicalThread* CreatePhysicalThread();
+ void DestroyPhysicalThread(DDPhysicalThread *pt);
+
+ DDLogicalThread* CreateLogicalThread(u64 ctx);
+ void DestroyLogicalThread(DDLogicalThread *lt);
+
+ void MutexInit(DDCallback *cb, DDMutex *m);
+ void MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock);
+ void MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock,
+ bool trylock);
+ void MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock);
+ void MutexDestroy(DDCallback *cb, DDMutex *m);
+
+ DDReport *GetReport(DDCallback *cb);
+
+ void CycleCheck(DDPhysicalThread *pt, DDLogicalThread *lt, DDMutex *mtx);
+ void Report(DDPhysicalThread *pt, DDLogicalThread *lt, int npath);
+ u32 allocateId(DDCallback *cb);
+ Mutex *getMutex(u32 id);
+ u32 getMutexId(Mutex *m);
+
+ DDFlags flags;
+
+ Mutex* mutex[kL1Size];
+
+ SpinMutex mtx;
+ InternalMmapVector<u32> free_id;
+ int id_gen = 0;
+};
+
+DDetector *DDetector::Create(const DDFlags *flags) {
+ (void)flags;
+ void *mem = MmapOrDie(sizeof(DD), "deadlock detector");
+ return new(mem) DD(flags);
+}
+
+DD::DD(const DDFlags *flags) : flags(*flags) { free_id.reserve(1024); }
+
+DDPhysicalThread* DD::CreatePhysicalThread() {
+ DDPhysicalThread *pt = (DDPhysicalThread*)MmapOrDie(sizeof(DDPhysicalThread),
+ "deadlock detector (physical thread)");
+ return pt;
+}
+
+void DD::DestroyPhysicalThread(DDPhysicalThread *pt) {
+ pt->~DDPhysicalThread();
+ UnmapOrDie(pt, sizeof(DDPhysicalThread));
+}
+
+DDLogicalThread* DD::CreateLogicalThread(u64 ctx) {
+ DDLogicalThread *lt = (DDLogicalThread*)InternalAlloc(
+ sizeof(DDLogicalThread));
+ lt->ctx = ctx;
+ lt->nlocked = 0;
+ return lt;
+}
+
+void DD::DestroyLogicalThread(DDLogicalThread *lt) {
+ lt->~DDLogicalThread();
+ InternalFree(lt);
+}
+
+void DD::MutexInit(DDCallback *cb, DDMutex *m) {
+ VPrintf(2, "#%llu: DD::MutexInit(%p)\n", cb->lt->ctx, m);
+ m->id = kNoId;
+ m->recursion = 0;
+ atomic_store(&m->owner, 0, memory_order_relaxed);
+}
+
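+// Mutex descriptors live in a two-level table: up to kL1Size lazily-mapped
+// chunks of kL2Size entries each, so an id maps to chunk id / kL2Size,
+// slot id % kL2Size.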
+Mutex *DD::getMutex(u32 id) {
+ return &mutex[id / kL2Size][id % kL2Size];
+}
+
+u32 DD::getMutexId(Mutex *m) {
+ for (int i = 0; i < kL1Size; i++) {
+ Mutex *tab = mutex[i];
+ if (tab == 0)
+ break;
+ if (m >= tab && m < tab + kL2Size)
+ return i * kL2Size + (m - tab);
+ }
+ return -1;
+}
+
+u32 DD::allocateId(DDCallback *cb) {
+ u32 id = -1;
+ SpinMutexLock l(&mtx);
+ if (free_id.size() > 0) {
+ id = free_id.back();
+ free_id.pop_back();
+ } else {
+ CHECK_LT(id_gen, kMaxMutex);
+ if ((id_gen % kL2Size) == 0) {
+ mutex[id_gen / kL2Size] = (Mutex*)MmapOrDie(kL2Size * sizeof(Mutex),
+ "deadlock detector (mutex table)");
+ }
+ id = id_gen++;
+ }
+ CHECK_LE(id, kMaxMutex);
+ VPrintf(3, "#%llu: DD::allocateId assign id %d\n", cb->lt->ctx, id);
+ return id;
+}
+
+void DD::MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock) {
+ VPrintf(2, "#%llu: DD::MutexBeforeLock(%p, wlock=%d) nlocked=%d\n",
+ cb->lt->ctx, m, wlock, cb->lt->nlocked);
+ DDPhysicalThread *pt = cb->pt;
+ DDLogicalThread *lt = cb->lt;
+
+ uptr owner = atomic_load(&m->owner, memory_order_relaxed);
+ if (owner == (uptr)cb->lt) {
+ VPrintf(3, "#%llu: DD::MutexBeforeLock recursive\n",
+ cb->lt->ctx);
+ return;
+ }
+
+ CHECK_LE(lt->nlocked, kMaxNesting);
+
+ // FIXME(dvyukov): don't allocate id if lt->nlocked == 0?
+ if (m->id == kNoId)
+ m->id = allocateId(cb);
+
+ ThreadMutex *tm = &lt->locked[lt->nlocked++];
+ tm->id = m->id;
+ if (flags.second_deadlock_stack)
+ tm->stk = cb->Unwind();
+ if (lt->nlocked == 1) {
+ VPrintf(3, "#%llu: DD::MutexBeforeLock first mutex\n",
+ cb->lt->ctx);
+ return;
+ }
+
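+ // Add or refresh a link from every mutex already held to m. A new cycle
+ // is possible only if some link was actually added and m already has
+ // outgoing links; otherwise the cycle check below is skipped.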
+ bool added = false;
+ Mutex *mtx = getMutex(m->id);
+ for (int i = 0; i < lt->nlocked - 1; i++) {
+ u32 id1 = lt->locked[i].id;
+ u32 stk1 = lt->locked[i].stk;
+ Mutex *mtx1 = getMutex(id1);
+ SpinMutexLock l(&mtx1->mtx);
+ if (mtx1->nlink == kMaxLink) {
+ // FIXME(dvyukov): check stale links
+ continue;
+ }
+ int li = 0;
+ for (; li < mtx1->nlink; li++) {
+ Link *link = &mtx1->link[li];
+ if (link->id == m->id) {
+ if (link->seq != mtx->seq) {
+ link->seq = mtx->seq;
+ link->tid = lt->ctx;
+ link->stk0 = stk1;
+ link->stk1 = cb->Unwind();
+ added = true;
+ VPrintf(3, "#%llu: DD::MutexBeforeLock added %d->%d link\n",
+ cb->lt->ctx, getMutexId(mtx1), m->id);
+ }
+ break;
+ }
+ }
+ if (li == mtx1->nlink) {
+ // FIXME(dvyukov): check stale links
+ Link *link = &mtx1->link[mtx1->nlink++];
+ link->id = m->id;
+ link->seq = mtx->seq;
+ link->tid = lt->ctx;
+ link->stk0 = stk1;
+ link->stk1 = cb->Unwind();
+ added = true;
+ VPrintf(3, "#%llu: DD::MutexBeforeLock added %d->%d link\n",
+ cb->lt->ctx, getMutexId(mtx1), m->id);
+ }
+ }
+
+ if (!added || mtx->nlink == 0) {
+ VPrintf(3, "#%llu: DD::MutexBeforeLock don't check\n",
+ cb->lt->ctx);
+ return;
+ }
+
+ CycleCheck(pt, lt, m);
+}
+
+void DD::MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock,
+ bool trylock) {
+ VPrintf(2, "#%llu: DD::MutexAfterLock(%p, wlock=%d, try=%d) nlocked=%d\n",
+ cb->lt->ctx, m, wlock, trylock, cb->lt->nlocked);
+ DDLogicalThread *lt = cb->lt;
+
+ uptr owner = atomic_load(&m->owner, memory_order_relaxed);
+ if (owner == (uptr)cb->lt) {
+ VPrintf(3, "#%llu: DD::MutexAfterLock recursive\n", cb->lt->ctx);
+ CHECK(wlock);
+ m->recursion++;
+ return;
+ }
+ CHECK_EQ(owner, 0);
+ if (wlock) {
+ VPrintf(3, "#%llu: DD::MutexAfterLock set owner\n", cb->lt->ctx);
+ CHECK_EQ(m->recursion, 0);
+ m->recursion = 1;
+ atomic_store(&m->owner, (uptr)cb->lt, memory_order_relaxed);
+ }
+
+ if (!trylock)
+ return;
+
+ CHECK_LE(lt->nlocked, kMaxNesting);
+ if (m->id == kNoId)
+ m->id = allocateId(cb);
+ ThreadMutex *tm = &lt->locked[lt->nlocked++];
+ tm->id = m->id;
+ if (flags.second_deadlock_stack)
+ tm->stk = cb->Unwind();
+}
+
+void DD::MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) {
+ VPrintf(2, "#%llu: DD::MutexBeforeUnlock(%p, wlock=%d) nlocked=%d\n",
+ cb->lt->ctx, m, wlock, cb->lt->nlocked);
+ DDLogicalThread *lt = cb->lt;
+
+ uptr owner = atomic_load(&m->owner, memory_order_relaxed);
+ if (owner == (uptr)cb->lt) {
+ VPrintf(3, "#%llu: DD::MutexBeforeUnlock recursive\n", cb->lt->ctx);
+ if (--m->recursion > 0)
+ return;
+ VPrintf(3, "#%llu: DD::MutexBeforeUnlock reset owner\n", cb->lt->ctx);
+ atomic_store(&m->owner, 0, memory_order_relaxed);
+ }
+ CHECK_NE(m->id, kNoId);
+ int last = lt->nlocked - 1;
+ for (int i = last; i >= 0; i--) {
+ if (cb->lt->locked[i].id == m->id) {
+ lt->locked[i] = lt->locked[last];
+ lt->nlocked--;
+ break;
+ }
+ }
+}
+
+void DD::MutexDestroy(DDCallback *cb, DDMutex *m) {
+ VPrintf(2, "#%llu: DD::MutexDestroy(%p)\n",
+ cb->lt->ctx, m);
+ DDLogicalThread *lt = cb->lt;
+
+ if (m->id == kNoId)
+ return;
+
+ // Remove the mutex from lt->locked if there.
+ int last = lt->nlocked - 1;
+ for (int i = last; i >= 0; i--) {
+ if (lt->locked[i].id == m->id) {
+ lt->locked[i] = lt->locked[last];
+ lt->nlocked--;
+ break;
+ }
+ }
+
+ // Clear and invalidate the mutex descriptor.
+ {
+ Mutex *mtx = getMutex(m->id);
+ SpinMutexLock l(&mtx->mtx);
+ mtx->seq++;
+ mtx->nlink = 0;
+ }
+
+ // Return id to cache.
+ {
+ SpinMutexLock l(&mtx);
+ free_id.push_back(m->id);
+ }
+}
+
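+// Iterative depth-first search over the link graph starting from m's
+// successors. pt->pending is the explicit DFS stack; kEndId entries mark
+// where to pop pt->path, and reaching m->id again means the new edges
+// closed a cycle.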
+void DD::CycleCheck(DDPhysicalThread *pt, DDLogicalThread *lt,
+ DDMutex *m) {
+ internal_memset(pt->visited, 0, sizeof(pt->visited));
+ int npath = 0;
+ int npending = 0;
+ {
+ Mutex *mtx = getMutex(m->id);
+ SpinMutexLock l(&mtx->mtx);
+ for (int li = 0; li < mtx->nlink; li++)
+ pt->pending[npending++] = mtx->link[li];
+ }
+ while (npending > 0) {
+ Link link = pt->pending[--npending];
+ if (link.id == kEndId) {
+ npath--;
+ continue;
+ }
+ if (pt->visited[link.id])
+ continue;
+ Mutex *mtx1 = getMutex(link.id);
+ SpinMutexLock l(&mtx1->mtx);
+ if (mtx1->seq != link.seq)
+ continue;
+ pt->visited[link.id] = true;
+ if (mtx1->nlink == 0)
+ continue;
+ pt->path[npath++] = link;
+ pt->pending[npending++] = Link(kEndId);
+ if (link.id == m->id)
+ return Report(pt, lt, npath); // Bingo!
+ for (int li = 0; li < mtx1->nlink; li++) {
+ Link *link1 = &mtx1->link[li];
+ // Mutex *mtx2 = getMutex(link->id);
+ // FIXME(dvyukov): fast seq check
+ // FIXME(dvyukov): fast nlink != 0 check
+ // FIXME(dvyukov): fast pending check?
+ // FIXME(dvyukov): npending can be larger than kMaxMutex
+ pt->pending[npending++] = *link1;
+ }
+ }
+}
+
+void DD::Report(DDPhysicalThread *pt, DDLogicalThread *lt, int npath) {
+ DDReport *rep = &pt->rep;
+ rep->n = npath;
+ for (int i = 0; i < npath; i++) {
+ Link *link = &pt->path[i];
+ Link *link0 = &pt->path[i ? i - 1 : npath - 1];
+ rep->loop[i].thr_ctx = link->tid;
+ rep->loop[i].mtx_ctx0 = link0->id;
+ rep->loop[i].mtx_ctx1 = link->id;
+ rep->loop[i].stk[0] = flags.second_deadlock_stack ? link->stk0 : 0;
+ rep->loop[i].stk[1] = link->stk1;
+ }
+ pt->report_pending = true;
+}
+
+DDReport *DD::GetReport(DDCallback *cb) {
+ if (!cb->pt->report_pending)
+ return 0;
+ cb->pt->report_pending = false;
+ return &cb->pt->rep;
+}
+
+} // namespace __sanitizer
+#endif // #if SANITIZER_DEADLOCK_DETECTOR_VERSION == 2
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_deadlock_detector2.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
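The CycleCheck above is an iterative depth-first search over the mutex link graph: child edges go onto an explicit pending stack, and a kEndId sentinel is pushed whenever a node is expanded so the current path can be popped on backtrack; reaching the starting mutex again means a lock-order cycle. A minimal standalone sketch of the same sentinel-marker DFS, with plain int node ids and -1 as a stand-in sentinel (the graph and names are illustrative, not the sanitizer's):

#include <cstdio>
#include <vector>

// Sentinel-marker iterative DFS, as in DD::CycleCheck; kEnd plays kEndId.
static bool HasCycleFrom(int start, const std::vector<std::vector<int>> &adj) {
  const int kEnd = -1;
  std::vector<bool> visited(adj.size(), false);
  std::vector<int> path;     // current DFS path (like pt->path)
  std::vector<int> pending;  // work stack (like pt->pending)
  for (int next : adj[start]) pending.push_back(next);
  while (!pending.empty()) {
    int node = pending.back();
    pending.pop_back();
    if (node == kEnd) { path.pop_back(); continue; }  // backtrack marker
    if (visited[node]) continue;
    visited[node] = true;
    path.push_back(node);
    pending.push_back(kEnd);         // pop the path once children are done
    if (node == start) return true;  // walked back to the start: cycle
    for (int next : adj[node]) pending.push_back(next);
  }
  return false;
}

int main() {
  // Lock-order edges 0 -> 1 -> 2 -> 0 form a cycle.
  std::vector<std::vector<int>> adj = {{1}, {2}, {0}};
  std::printf("cycle: %d\n", HasCycleFrom(0, adj) ? 1 : 0);
}

The explicit stacks mirror the real routine's choice to keep the walk iterative and bounded by preallocated per-thread arrays rather than recursion.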
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_deadlock_detector_interface.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_deadlock_detector_interface.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_deadlock_detector_interface.h (revision 351984)
@@ -0,0 +1,92 @@
+//===-- sanitizer_deadlock_detector_interface.h -----------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of Sanitizer runtime.
+// Abstract deadlock detector interface.
+// FIXME: this is work in progress, nothing really works yet.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_DEADLOCK_DETECTOR_INTERFACE_H
+#define SANITIZER_DEADLOCK_DETECTOR_INTERFACE_H
+
+#ifndef SANITIZER_DEADLOCK_DETECTOR_VERSION
+# define SANITIZER_DEADLOCK_DETECTOR_VERSION 1
+#endif
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_atomic.h"
+
+namespace __sanitizer {
+
+// dd - deadlock detector.
+// lt - logical (user) thread.
+// pt - physical (OS) thread.
+
+struct DDPhysicalThread;
+struct DDLogicalThread;
+
+struct DDMutex {
+#if SANITIZER_DEADLOCK_DETECTOR_VERSION == 1
+ uptr id;
+ u32 stk; // creation stack
+#elif SANITIZER_DEADLOCK_DETECTOR_VERSION == 2
+ u32 id;
+ u32 recursion;
+ atomic_uintptr_t owner;
+#else
+# error "BAD SANITIZER_DEADLOCK_DETECTOR_VERSION"
+#endif
+ u64 ctx;
+};
+
+struct DDFlags {
+ bool second_deadlock_stack;
+};
+
+struct DDReport {
+ enum { kMaxLoopSize = 20 };
+ int n; // number of entries in loop
+ struct {
+ u64 thr_ctx; // user thread context
+ u64 mtx_ctx0; // user mutex context, start of the edge
+ u64 mtx_ctx1; // user mutex context, end of the edge
+ u32 stk[2]; // stack ids for the edge
+ } loop[kMaxLoopSize];
+};
+
+struct DDCallback {
+ DDPhysicalThread *pt;
+ DDLogicalThread *lt;
+
+ virtual u32 Unwind() { return 0; }
+ virtual int UniqueTid() { return 0; }
+};
+
+struct DDetector {
+ static DDetector *Create(const DDFlags *flags);
+
+ virtual DDPhysicalThread* CreatePhysicalThread() { return nullptr; }
+ virtual void DestroyPhysicalThread(DDPhysicalThread *pt) {}
+
+ virtual DDLogicalThread* CreateLogicalThread(u64 ctx) { return nullptr; }
+ virtual void DestroyLogicalThread(DDLogicalThread *lt) {}
+
+ virtual void MutexInit(DDCallback *cb, DDMutex *m) {}
+ virtual void MutexBeforeLock(DDCallback *cb, DDMutex *m, bool wlock) {}
+ virtual void MutexAfterLock(DDCallback *cb, DDMutex *m, bool wlock,
+ bool trylock) {}
+ virtual void MutexBeforeUnlock(DDCallback *cb, DDMutex *m, bool wlock) {}
+ virtual void MutexDestroy(DDCallback *cb, DDMutex *m) {}
+
+ virtual DDReport *GetReport(DDCallback *cb) { return nullptr; }
+};
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_DEADLOCK_DETECTOR_INTERFACE_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_deadlock_detector_interface.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
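As the hooks suggest, a client runtime (ThreadSanitizer is the in-tree user) derives from DDCallback and brackets each real mutex operation with these calls; in the version-2 detector shown earlier, MutexBeforeLock runs the cycle check for blocking acquisitions, while MutexAfterLock records the lock itself only in the trylock case. A compilable sketch of that call shape, using simplified stand-in types rather than the sanitizer's own:

#include <cstdint>
#include <cstdio>

// Stand-in copies of the interface, trimmed for illustration.
struct DemoMutex { uint64_t ctx; };
struct DemoCallback {
  virtual uint32_t Unwind() { return 0; }  // would return a stack id
  virtual ~DemoCallback() {}
};
struct DemoDetector {
  virtual void MutexBeforeLock(DemoCallback *, DemoMutex *, bool) {}
  virtual void MutexAfterLock(DemoCallback *, DemoMutex *, bool, bool) {}
  virtual void MutexBeforeUnlock(DemoCallback *, DemoMutex *, bool) {}
  virtual ~DemoDetector() {}
};

// The client wraps every real mutex operation in the before/after hooks.
static void LockedRegion(DemoDetector *dd, DemoCallback *cb, DemoMutex *m) {
  dd->MutexBeforeLock(cb, m, /*wlock=*/true);  // cycle check happens here
  // ... acquire the real mutex ...
  dd->MutexAfterLock(cb, m, /*wlock=*/true, /*trylock=*/false);
  // ... critical section ...
  dd->MutexBeforeUnlock(cb, m, /*wlock=*/true);
  // ... release the real mutex ...
}

int main() {
  DemoDetector dd;
  DemoCallback cb;
  DemoMutex m{1};
  LockedRegion(&dd, &cb, &m);
  std::printf("hooks invoked\n");
}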
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_errno.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_errno.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_errno.cc (revision 351984)
@@ -0,0 +1,34 @@
+//===-- sanitizer_errno.cc --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between the sanitizers' run-time libraries.
+//
+// Defines errno to avoid including errno.h and its dependencies into other
+// files (e.g. interceptors are not supposed to include any system headers).
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_errno_codes.h"
+#include "sanitizer_internal_defs.h"
+
+#include <errno.h>
+
+namespace __sanitizer {
+
+COMPILER_CHECK(errno_ENOMEM == ENOMEM);
+COMPILER_CHECK(errno_EBUSY == EBUSY);
+COMPILER_CHECK(errno_EINVAL == EINVAL);
+
+// EOWNERDEAD is not present in some older platforms.
+#if defined(EOWNERDEAD)
+extern const int errno_EOWNERDEAD = EOWNERDEAD;
+#else
+extern const int errno_EOWNERDEAD = -1;
+#endif
+
+} // namespace __sanitizer
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_errno.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
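The pattern is worth spelling out: the constants live hardcoded in a header that pulls in no system headers, and this one translation unit includes the real <errno.h> solely to assert that the copies still match. A standalone sketch of the same compile-time check using static_assert, of which COMPILER_CHECK is the sanitizer's wrapper; the 12/22 values below match Linux and the BSDs, and flagging a platform where they differ is precisely what the check exists for:

#include <errno.h>

// Header side of the pattern: hardcoded values, no system includes needed.
#define my_errno_ENOMEM 12
#define my_errno_EINVAL 22

// One .cc file includes the real <errno.h> and verifies the copies.
static_assert(my_errno_ENOMEM == ENOMEM, "hardcoded ENOMEM value drifted");
static_assert(my_errno_EINVAL == EINVAL, "hardcoded EINVAL value drifted");

int main() { return 0; }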
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_errno.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_errno.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_errno.h (revision 351984)
@@ -0,0 +1,39 @@
+//===-- sanitizer_errno.h ---------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between the sanitizers' run-time libraries.
+//
+// Defines errno to avoid including errno.h and its dependencies into sensitive
+// files (e.g. interceptors are not supposed to include any system headers).
+// It's ok to use errno.h directly when your file already depends on other
+// system includes, though.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_ERRNO_H
+#define SANITIZER_ERRNO_H
+
+#include "sanitizer_errno_codes.h"
+#include "sanitizer_platform.h"
+
+#if SANITIZER_FREEBSD || SANITIZER_MAC
+# define __errno_location __error
+#elif SANITIZER_ANDROID || SANITIZER_NETBSD || SANITIZER_OPENBSD || \
+ SANITIZER_RTEMS
+# define __errno_location __errno
+#elif SANITIZER_SOLARIS
+# define __errno_location ___errno
+#elif SANITIZER_WINDOWS
+# define __errno_location _errno
+#endif
+
+extern "C" int *__errno_location();
+
+#define errno (*__errno_location())
+
+#endif // SANITIZER_ERRNO_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_errno.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
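All of these platform branches exist because errno is invariably a macro dereferencing a per-thread cell returned by some libc function; glibc already names that function __errno_location, which is why plain Linux needs no #define here. A self-contained mimic of the scheme using a local thread-local cell (demo_errno and demo_errno_location are illustrative names, not libc's):

#include <cstdio>

// Per-thread cell plus an accessor function, as libc does internally.
static thread_local int demo_errno_cell = 0;
extern "C" int *demo_errno_location() { return &demo_errno_cell; }

// The macro makes the cell read/write through ordinary assignment syntax.
#define demo_errno (*demo_errno_location())

int main() {
  demo_errno = 13;                  // assignable, like the real errno
  std::printf("%d\n", demo_errno);  // readable through the same macro
}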
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_errno_codes.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_errno_codes.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_errno_codes.h (revision 351984)
@@ -0,0 +1,33 @@
+//===-- sanitizer_errno_codes.h ---------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between the sanitizers' run-time libraries.
+//
+// Defines errno codes to avoid including errno.h and its dependencies into
+// sensitive files (e.g. interceptors are not supposed to include any system
+// headers).
+// It's ok to use errno.h directly when your file already depends on other
+// system includes, though.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_ERRNO_CODES_H
+#define SANITIZER_ERRNO_CODES_H
+
+namespace __sanitizer {
+
+#define errno_ENOMEM 12
+#define errno_EBUSY 16
+#define errno_EINVAL 22
+
+// These might not be present, or their values may differ, on different
+// platforms.
+extern const int errno_EOWNERDEAD;
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_ERRNO_CODES_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_errno_codes.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_file.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_file.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_file.cc (revision 351984)
@@ -0,0 +1,215 @@
+//===-- sanitizer_file.cc ------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===---------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries. It defines filesystem-related interfaces. This
+// is separate from sanitizer_common.cc so that it's simpler to disable
+// all the filesystem support code for a port that doesn't use it.
+//
+//===---------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+
+#if !SANITIZER_FUCHSIA
+
+#include "sanitizer_common.h"
+#include "sanitizer_file.h"
+
+namespace __sanitizer {
+
+void CatastrophicErrorWrite(const char *buffer, uptr length) {
+ WriteToFile(kStderrFd, buffer, length);
+}
+
+StaticSpinMutex report_file_mu;
+ReportFile report_file = {&report_file_mu, kStderrFd, "", "", 0};
+
+void RawWrite(const char *buffer) {
+ report_file.Write(buffer, internal_strlen(buffer));
+}
+
+void ReportFile::ReopenIfNecessary() {
+ mu->CheckLocked();
+ if (fd == kStdoutFd || fd == kStderrFd) return;
+
+ uptr pid = internal_getpid();
+ // If in tracer, use the parent's file.
+ if (pid == stoptheworld_tracer_pid)
+ pid = stoptheworld_tracer_ppid;
+ if (fd != kInvalidFd) {
+ // If the report file is already opened by the current process,
+ // do nothing. Otherwise the report file was opened by the parent
+ // process, close it now.
+ if (fd_pid == pid)
+ return;
+ else
+ CloseFile(fd);
+ }
+
+ const char *exe_name = GetProcessName();
+ if (common_flags()->log_exe_name && exe_name) {
+ internal_snprintf(full_path, kMaxPathLength, "%s.%s.%zu", path_prefix,
+ exe_name, pid);
+ } else {
+ internal_snprintf(full_path, kMaxPathLength, "%s.%zu", path_prefix, pid);
+ }
+ fd = OpenFile(full_path, WrOnly);
+ if (fd == kInvalidFd) {
+ const char *ErrorMsgPrefix = "ERROR: Can't open file: ";
+ WriteToFile(kStderrFd, ErrorMsgPrefix, internal_strlen(ErrorMsgPrefix));
+ WriteToFile(kStderrFd, full_path, internal_strlen(full_path));
+ Die();
+ }
+ fd_pid = pid;
+}
+
+void ReportFile::SetReportPath(const char *path) {
+ if (!path)
+ return;
+ uptr len = internal_strlen(path);
+ if (len > sizeof(path_prefix) - 100) {
+ Report("ERROR: Path is too long: %c%c%c%c%c%c%c%c...\n",
+ path[0], path[1], path[2], path[3],
+ path[4], path[5], path[6], path[7]);
+ Die();
+ }
+
+ SpinMutexLock l(mu);
+ if (fd != kStdoutFd && fd != kStderrFd && fd != kInvalidFd)
+ CloseFile(fd);
+ fd = kInvalidFd;
+ if (internal_strcmp(path, "stdout") == 0) {
+ fd = kStdoutFd;
+ } else if (internal_strcmp(path, "stderr") == 0) {
+ fd = kStderrFd;
+ } else {
+ internal_snprintf(path_prefix, kMaxPathLength, "%s", path);
+ }
+}
+
+bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
+ uptr *read_len, uptr max_len, error_t *errno_p) {
+ *buff = nullptr;
+ *buff_size = 0;
+ *read_len = 0;
+ if (!max_len)
+ return true;
+ uptr PageSize = GetPageSizeCached();
+ uptr kMinFileLen = Min(PageSize, max_len);
+
+ // The files we usually open are not seekable, so try different buffer sizes.
+ for (uptr size = kMinFileLen;; size = Min(size * 2, max_len)) {
+ UnmapOrDie(*buff, *buff_size);
+ *buff = (char*)MmapOrDie(size, __func__);
+ *buff_size = size;
+ fd_t fd = OpenFile(file_name, RdOnly, errno_p);
+ if (fd == kInvalidFd) {
+ UnmapOrDie(*buff, *buff_size);
+ return false;
+ }
+ *read_len = 0;
+ // Read up to one page at a time.
+ bool reached_eof = false;
+ while (*read_len < size) {
+ uptr just_read;
+ if (!ReadFromFile(fd, *buff + *read_len, size - *read_len, &just_read,
+ errno_p)) {
+ UnmapOrDie(*buff, *buff_size);
+ CloseFile(fd);
+ return false;
+ }
+ *read_len += just_read;
+ if (just_read == 0 || *read_len == max_len) {
+ reached_eof = true;
+ break;
+ }
+ }
+ CloseFile(fd);
+ if (reached_eof) // We've read the whole file.
+ break;
+ }
+ return true;
+}
+
+bool ReadFileToVector(const char *file_name,
+ InternalMmapVectorNoCtor<char> *buff, uptr max_len,
+ error_t *errno_p) {
+ buff->clear();
+ if (!max_len)
+ return true;
+ uptr PageSize = GetPageSizeCached();
+ fd_t fd = OpenFile(file_name, RdOnly, errno_p);
+ if (fd == kInvalidFd)
+ return false;
+ uptr read_len = 0;
+ while (read_len < max_len) {
+ if (read_len >= buff->size())
+ buff->resize(Min(Max(PageSize, read_len * 2), max_len));
+ CHECK_LT(read_len, buff->size());
+ CHECK_LE(buff->size(), max_len);
+ uptr just_read;
+ if (!ReadFromFile(fd, buff->data() + read_len, buff->size() - read_len,
+ &just_read, errno_p)) {
+ CloseFile(fd);
+ return false;
+ }
+ read_len += just_read;
+ if (!just_read)
+ break;
+ }
+ CloseFile(fd);
+ buff->resize(read_len);
+ return true;
+}
+
+static const char kPathSeparator = SANITIZER_WINDOWS ? ';' : ':';
+
+char *FindPathToBinary(const char *name) {
+ if (FileExists(name)) {
+ return internal_strdup(name);
+ }
+
+ const char *path = GetEnv("PATH");
+ if (!path)
+ return nullptr;
+ uptr name_len = internal_strlen(name);
+ InternalMmapVector<char> buffer(kMaxPathLength);
+ const char *beg = path;
+ while (true) {
+ const char *end = internal_strchrnul(beg, kPathSeparator);
+ uptr prefix_len = end - beg;
+ if (prefix_len + name_len + 2 <= kMaxPathLength) {
+ internal_memcpy(buffer.data(), beg, prefix_len);
+ buffer[prefix_len] = '/';
+ internal_memcpy(&buffer[prefix_len + 1], name, name_len);
+ buffer[prefix_len + 1 + name_len] = '\0';
+ if (FileExists(buffer.data()))
+ return internal_strdup(buffer.data());
+ }
+ if (*end == '\0') break;
+ beg = end + 1;
+ }
+ return nullptr;
+}
+
+} // namespace __sanitizer
+
+using namespace __sanitizer; // NOLINT
+
+extern "C" {
+void __sanitizer_set_report_path(const char *path) {
+ report_file.SetReportPath(path);
+}
+
+void __sanitizer_set_report_fd(void *fd) {
+ report_file.fd = (fd_t)reinterpret_cast<uptr>(fd);
+ report_file.fd_pid = internal_getpid();
+}
+} // extern "C"
+
+#endif // !SANITIZER_FUCHSIA
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_file.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
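ReadFileToBuffer above is shaped by procfs-style files that report no size and cannot be seeked: it reads into an mmap'd buffer and, if the buffer fills before EOF, discards the attempt and retries with double the size. The same idea in a portable sketch using POSIX read and realloc (ReadWholeFile is a hypothetical helper; the path in main assumes Linux):

#include <fcntl.h>
#include <unistd.h>
#include <cstdio>
#include <cstdlib>

// Read a possibly non-seekable file by growing the buffer until EOF
// arrives before the buffer is full.
static char *ReadWholeFile(const char *path, size_t *out_len) {
  size_t cap = 4096;
  char *buf = nullptr;
  for (;;) {
    buf = (char *)realloc(buf, cap);
    int fd = open(path, O_RDONLY);  // reopen: we can't rewind these files
    if (fd < 0) { free(buf); return nullptr; }
    size_t len = 0;
    ssize_t n = 1;
    while (len < cap && (n = read(fd, buf + len, cap - len)) > 0) len += n;
    close(fd);
    if (n < 0) { free(buf); return nullptr; }       // read error
    if (len < cap) { *out_len = len; return buf; }  // EOF before full
    cap *= 2;  // buffer filled first: retry from scratch with more room
  }
}

int main() {
  size_t len = 0;
  char *data = ReadWholeFile("/proc/self/cmdline", &len);
  if (data) { std::printf("read %zu bytes\n", len); free(data); }
}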
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_file.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_file.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_file.h (revision 351984)
@@ -0,0 +1,106 @@
+//===-- sanitizer_file.h ---------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===---------------------------------------------------------------------===//
+//
+// This file is shared between run-time libraries of sanitizers.
+// It declares filesystem-related interfaces. This is separate from
+// sanitizer_common.h so that it's simpler to disable all the filesystem
+// support code for a port that doesn't use it.
+//
+//===---------------------------------------------------------------------===//
+#ifndef SANITIZER_FILE_H
+#define SANITIZER_FILE_H
+
+#include "sanitizer_interface_internal.h"
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_mutex.h"
+
+namespace __sanitizer {
+
+struct ReportFile {
+ void Write(const char *buffer, uptr length);
+ bool SupportsColors();
+ void SetReportPath(const char *path);
+
+ // Don't use fields directly. They are only declared public to allow
+ // aggregate initialization.
+
+ // Protects fields below.
+ StaticSpinMutex *mu;
+ // Opened file descriptor. Defaults to stderr. It may be equal to
+  // kInvalidFd, in which case a new file will be opened when necessary.
+ fd_t fd;
+ // Path prefix of report file, set via __sanitizer_set_report_path.
+ char path_prefix[kMaxPathLength];
+ // Full path to report, obtained as <path_prefix>.PID
+ char full_path[kMaxPathLength];
+ // PID of the process that opened fd. If a fork() occurs,
+  // the PID of the child will be different from fd_pid.
+ uptr fd_pid;
+
+ private:
+ void ReopenIfNecessary();
+};
+extern ReportFile report_file;
+
+enum FileAccessMode {
+ RdOnly,
+ WrOnly,
+ RdWr
+};
+
+// Returns kInvalidFd on error.
+fd_t OpenFile(const char *filename, FileAccessMode mode,
+ error_t *errno_p = nullptr);
+void CloseFile(fd_t);
+
+// Return true on success, false on error.
+bool ReadFromFile(fd_t fd, void *buff, uptr buff_size,
+ uptr *bytes_read = nullptr, error_t *error_p = nullptr);
+bool WriteToFile(fd_t fd, const void *buff, uptr buff_size,
+ uptr *bytes_written = nullptr, error_t *error_p = nullptr);
+
+// Scoped file handle closer.
+struct FileCloser {
+ explicit FileCloser(fd_t fd) : fd(fd) {}
+ ~FileCloser() { CloseFile(fd); }
+ fd_t fd;
+};
+
+bool SupportsColoredOutput(fd_t fd);
+
+// OS
+const char *GetPwd();
+bool FileExists(const char *filename);
+char *FindPathToBinary(const char *name);
+bool IsPathSeparator(const char c);
+bool IsAbsolutePath(const char *path);
+// Starts a subprocess and returns its pid.
+// If the *_fd parameters are not kInvalidFd, their corresponding input/output
+// streams will be redirected to the files. The files will always be closed
+// in the parent process, even in case of an error.
+// The child process will close all fds after STDERR_FILENO
+// before passing control to the program.
+pid_t StartSubprocess(const char *filename, const char *const argv[],
+ fd_t stdin_fd = kInvalidFd, fd_t stdout_fd = kInvalidFd,
+ fd_t stderr_fd = kInvalidFd);
+// Checks if the specified process is still running.
+bool IsProcessRunning(pid_t pid);
+// Waits for the process to finish and returns its exit code.
+// Returns -1 in case of an error.
+int WaitForProcess(pid_t pid);
+
+// Maps the given file to virtual memory and returns a pointer to it
+// (or NULL if mapping fails). Stores the size of the mmaped region
+// in '*buff_size'.
+void *MapFileToMemory(const char *file_name, uptr *buff_size);
+void *MapWritableFileToMemory(void *addr, uptr size, fd_t fd, OFF_T offset);
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_FILE_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_file.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
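FileCloser above is a minimal RAII guard: however the owning scope exits, the descriptor gets closed. A runnable sketch of the same shape against plain POSIX descriptors (FdCloser and the path are stand-ins):

#include <fcntl.h>
#include <unistd.h>
#include <cstdio>

struct FdCloser {  // mirrors FileCloser, with a validity check added
  explicit FdCloser(int fd) : fd(fd) {}
  ~FdCloser() { if (fd >= 0) close(fd); }
  int fd;
};

int main() {
  int fd = open("/etc/hosts", O_RDONLY);  // any readable file will do
  FdCloser closer(fd);                    // closed on every return path
  if (fd < 0) return 1;                   // early exit leaks nothing
  char buf[64];
  ssize_t n = read(fd, buf, sizeof(buf));
  std::printf("read %zd bytes\n", n);
  return 0;
}  // ~FdCloser runs here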
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_flag_parser.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_flag_parser.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_flag_parser.cc (revision 351984)
@@ -0,0 +1,183 @@
+//===-- sanitizer_flag_parser.cc ------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_flag_parser.h"
+
+#include "sanitizer_common.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_flags.h"
+#include "sanitizer_flag_parser.h"
+
+namespace __sanitizer {
+
+LowLevelAllocator FlagParser::Alloc;
+
+class UnknownFlags {
+ static const int kMaxUnknownFlags = 20;
+ const char *unknown_flags_[kMaxUnknownFlags];
+ int n_unknown_flags_;
+
+ public:
+ void Add(const char *name) {
+ CHECK_LT(n_unknown_flags_, kMaxUnknownFlags);
+ unknown_flags_[n_unknown_flags_++] = name;
+ }
+
+ void Report() {
+ if (!n_unknown_flags_) return;
+ Printf("WARNING: found %d unrecognized flag(s):\n", n_unknown_flags_);
+ for (int i = 0; i < n_unknown_flags_; ++i)
+ Printf(" %s\n", unknown_flags_[i]);
+ n_unknown_flags_ = 0;
+ }
+};
+
+UnknownFlags unknown_flags;
+
+void ReportUnrecognizedFlags() {
+ unknown_flags.Report();
+}
+
+char *FlagParser::ll_strndup(const char *s, uptr n) {
+ uptr len = internal_strnlen(s, n);
+ char *s2 = (char*)Alloc.Allocate(len + 1);
+ internal_memcpy(s2, s, len);
+ s2[len] = 0;
+ return s2;
+}
+
+void FlagParser::PrintFlagDescriptions() {
+ Printf("Available flags for %s:\n", SanitizerToolName);
+ for (int i = 0; i < n_flags_; ++i)
+ Printf("\t%s\n\t\t- %s\n", flags_[i].name, flags_[i].desc);
+}
+
+void FlagParser::fatal_error(const char *err) {
+ Printf("%s: ERROR: %s\n", SanitizerToolName, err);
+ Die();
+}
+
+bool FlagParser::is_space(char c) {
+ return c == ' ' || c == ',' || c == ':' || c == '\n' || c == '\t' ||
+ c == '\r';
+}
+
+void FlagParser::skip_whitespace() {
+ while (is_space(buf_[pos_])) ++pos_;
+}
+
+void FlagParser::parse_flag(const char *env_option_name) {
+ uptr name_start = pos_;
+ while (buf_[pos_] != 0 && buf_[pos_] != '=' && !is_space(buf_[pos_])) ++pos_;
+ if (buf_[pos_] != '=') {
+ if (env_option_name) {
+ Printf("%s: ERROR: expected '=' in %s\n", SanitizerToolName,
+ env_option_name);
+ Die();
+ } else
+ fatal_error("expected '='");
+ }
+ char *name = ll_strndup(buf_ + name_start, pos_ - name_start);
+
+ uptr value_start = ++pos_;
+ char *value;
+ if (buf_[pos_] == '\'' || buf_[pos_] == '"') {
+ char quote = buf_[pos_++];
+ while (buf_[pos_] != 0 && buf_[pos_] != quote) ++pos_;
+ if (buf_[pos_] == 0) fatal_error("unterminated string");
+ value = ll_strndup(buf_ + value_start + 1, pos_ - value_start - 1);
+ ++pos_; // consume the closing quote
+ } else {
+ while (buf_[pos_] != 0 && !is_space(buf_[pos_])) ++pos_;
+ if (buf_[pos_] != 0 && !is_space(buf_[pos_]))
+ fatal_error("expected separator or eol");
+ value = ll_strndup(buf_ + value_start, pos_ - value_start);
+ }
+
+ bool res = run_handler(name, value);
+ if (!res) fatal_error("Flag parsing failed.");
+}
+
+void FlagParser::parse_flags(const char *env_option_name) {
+ while (true) {
+ skip_whitespace();
+ if (buf_[pos_] == 0) break;
+ parse_flag(env_option_name);
+ }
+
+ // Do a sanity check for certain flags.
+ if (common_flags_dont_use.malloc_context_size < 1)
+ common_flags_dont_use.malloc_context_size = 1;
+}
+
+void FlagParser::ParseStringFromEnv(const char *env_name) {
+ const char *env = GetEnv(env_name);
+ VPrintf(1, "%s: %s\n", env_name, env ? env : "<empty>");
+ ParseString(env, env_name);
+}
+
+void FlagParser::ParseString(const char *s, const char *env_option_name) {
+ if (!s) return;
+ // Backup current parser state to allow nested ParseString() calls.
+ const char *old_buf_ = buf_;
+ uptr old_pos_ = pos_;
+ buf_ = s;
+ pos_ = 0;
+
+ parse_flags(env_option_name);
+
+ buf_ = old_buf_;
+ pos_ = old_pos_;
+}
+
+bool FlagParser::ParseFile(const char *path, bool ignore_missing) {
+ static const uptr kMaxIncludeSize = 1 << 15;
+ char *data;
+ uptr data_mapped_size;
+ error_t err;
+ uptr len;
+ if (!ReadFileToBuffer(path, &data, &data_mapped_size, &len,
+ Max(kMaxIncludeSize, GetPageSizeCached()), &err)) {
+ if (ignore_missing)
+ return true;
+ Printf("Failed to read options from '%s': error %d\n", path, err);
+ return false;
+ }
+ ParseString(data, path);
+ UnmapOrDie(data, data_mapped_size);
+ return true;
+}
+
+bool FlagParser::run_handler(const char *name, const char *value) {
+ for (int i = 0; i < n_flags_; ++i) {
+ if (internal_strcmp(name, flags_[i].name) == 0)
+ return flags_[i].handler->Parse(value);
+ }
+ // Unrecognized flag. This is not a fatal error, we may print a warning later.
+ unknown_flags.Add(name);
+ return true;
+}
+
+void FlagParser::RegisterHandler(const char *name, FlagHandlerBase *handler,
+ const char *desc) {
+ CHECK_LT(n_flags_, kMaxFlags);
+ flags_[n_flags_].name = name;
+ flags_[n_flags_].desc = desc;
+ flags_[n_flags_].handler = handler;
+ ++n_flags_;
+}
+
+FlagParser::FlagParser() : n_flags_(0), buf_(nullptr), pos_(0) {
+ flags_ = (Flag *)Alloc.Allocate(sizeof(Flag) * kMaxFlags);
+}
+
+} // namespace __sanitizer
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_flag_parser.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
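The grammar parse_flag and parse_flags accept is deliberately small: name=value pairs separated by spaces, commas, colons, tabs, or newlines, with single- or double-quoted values so paths may contain separators. A standalone mimic of just that tokenization (ParseOpts is illustrative; the real parser dispatches each pair to a registered handler instead of returning it):

#include <cstdio>
#include <cstring>
#include <string>
#include <utility>
#include <vector>

static bool IsSep(char c) {
  return c == ' ' || c == ',' || c == ':' || c == '\n' || c == '\t' ||
         c == '\r';
}

// Split an option string into (name, value) pairs, honoring quotes.
static std::vector<std::pair<std::string, std::string>>
ParseOpts(const char *s) {
  std::vector<std::pair<std::string, std::string>> out;
  size_t pos = 0, len = std::strlen(s);
  while (pos < len) {
    while (pos < len && IsSep(s[pos])) ++pos;  // skip separators
    if (pos >= len) break;
    size_t eq = pos;
    while (eq < len && s[eq] != '=') ++eq;     // name runs up to '='
    std::string name(s + pos, eq - pos);
    pos = eq + 1;
    std::string value;
    if (pos < len && (s[pos] == '\'' || s[pos] == '"')) {
      char q = s[pos++];
      size_t end = pos;
      while (end < len && s[end] != q) ++end;  // quoted value
      value.assign(s + pos, end - pos);
      pos = end + 1;
    } else {
      size_t end = pos;
      while (end < len && !IsSep(s[end])) ++end;  // bare value
      value.assign(s + pos, end - pos);
      pos = end;
    }
    out.emplace_back(name, value);
  }
  return out;
}

int main() {
  // The same shapes ASAN_OPTIONS and friends accept.
  for (const auto &kv : ParseOpts("verbosity=1:log_path='/tmp/my log'"))
    std::printf("%s -> %s\n", kv.first.c_str(), kv.second.c_str());
}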
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_flag_parser.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_flag_parser.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_flag_parser.h (revision 351984)
@@ -0,0 +1,155 @@
+//===-- sanitizer_flag_parser.h ---------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_FLAG_REGISTRY_H
+#define SANITIZER_FLAG_REGISTRY_H
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_common.h"
+
+namespace __sanitizer {
+
+class FlagHandlerBase {
+ public:
+ virtual bool Parse(const char *value) { return false; }
+
+ protected:
+  ~FlagHandlerBase() {}
+};
+
+template <typename T>
+class FlagHandler : public FlagHandlerBase {
+ T *t_;
+
+ public:
+ explicit FlagHandler(T *t) : t_(t) {}
+ bool Parse(const char *value) final;
+};
+
+inline bool ParseBool(const char *value, bool *b) {
+ if (internal_strcmp(value, "0") == 0 ||
+ internal_strcmp(value, "no") == 0 ||
+ internal_strcmp(value, "false") == 0) {
+ *b = false;
+ return true;
+ }
+ if (internal_strcmp(value, "1") == 0 ||
+ internal_strcmp(value, "yes") == 0 ||
+ internal_strcmp(value, "true") == 0) {
+ *b = true;
+ return true;
+ }
+ return false;
+}
+
+template <>
+inline bool FlagHandler<bool>::Parse(const char *value) {
+ if (ParseBool(value, t_)) return true;
+ Printf("ERROR: Invalid value for bool option: '%s'\n", value);
+ return false;
+}
+
+template <>
+inline bool FlagHandler<HandleSignalMode>::Parse(const char *value) {
+ bool b;
+ if (ParseBool(value, &b)) {
+ *t_ = b ? kHandleSignalYes : kHandleSignalNo;
+ return true;
+ }
+ if (internal_strcmp(value, "2") == 0 ||
+ internal_strcmp(value, "exclusive") == 0) {
+ *t_ = kHandleSignalExclusive;
+ return true;
+ }
+ Printf("ERROR: Invalid value for signal handler option: '%s'\n", value);
+ return false;
+}
+
+template <>
+inline bool FlagHandler<const char *>::Parse(const char *value) {
+ *t_ = value;
+ return true;
+}
+
+template <>
+inline bool FlagHandler<int>::Parse(const char *value) {
+ const char *value_end;
+ *t_ = internal_simple_strtoll(value, &value_end, 10);
+ bool ok = *value_end == 0;
+ if (!ok) Printf("ERROR: Invalid value for int option: '%s'\n", value);
+ return ok;
+}
+
+template <>
+inline bool FlagHandler<uptr>::Parse(const char *value) {
+ const char *value_end;
+ *t_ = internal_simple_strtoll(value, &value_end, 10);
+ bool ok = *value_end == 0;
+ if (!ok) Printf("ERROR: Invalid value for uptr option: '%s'\n", value);
+ return ok;
+}
+
+template <>
+inline bool FlagHandler<s64>::Parse(const char *value) {
+ const char *value_end;
+ *t_ = internal_simple_strtoll(value, &value_end, 10);
+ bool ok = *value_end == 0;
+ if (!ok) Printf("ERROR: Invalid value for s64 option: '%s'\n", value);
+ return ok;
+}
+
+class FlagParser {
+ static const int kMaxFlags = 200;
+ struct Flag {
+ const char *name;
+ const char *desc;
+ FlagHandlerBase *handler;
+ } *flags_;
+ int n_flags_;
+
+ const char *buf_;
+ uptr pos_;
+
+ public:
+ FlagParser();
+ void RegisterHandler(const char *name, FlagHandlerBase *handler,
+ const char *desc);
+ void ParseString(const char *s, const char *env_name = 0);
+ void ParseStringFromEnv(const char *env_name);
+ bool ParseFile(const char *path, bool ignore_missing);
+ void PrintFlagDescriptions();
+
+ static LowLevelAllocator Alloc;
+
+ private:
+ void fatal_error(const char *err);
+ bool is_space(char c);
+ void skip_whitespace();
+ void parse_flags(const char *env_option_name);
+ void parse_flag(const char *env_option_name);
+ bool run_handler(const char *name, const char *value);
+ char *ll_strndup(const char *s, uptr n);
+};
+
+template <typename T>
+static void RegisterFlag(FlagParser *parser, const char *name, const char *desc,
+ T *var) {
+ FlagHandler<T> *fh = new (FlagParser::Alloc) FlagHandler<T>(var); // NOLINT
+ parser->RegisterHandler(name, fh, desc);
+}
+
+void ReportUnrecognizedFlags();
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_FLAG_REGISTRY_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_flag_parser.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
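The registry above is a compact type-erasure exercise: RegisterFlag allocates a FlagHandler&lt;T&gt; that remembers a pointer to the flag variable, and the parser stores only FlagHandlerBase pointers, so each flag type supplies its own Parse. A standalone mimic of that dispatch (Handler and the two example flags are illustrative):

#include <cstdio>
#include <cstdlib>
#include <cstring>

struct HandlerBase {
  virtual bool Parse(const char *v) = 0;
  virtual ~HandlerBase() {}
};

template <typename T> struct Handler;  // one specialization per flag type

template <> struct Handler<bool> : HandlerBase {
  bool *t;
  explicit Handler(bool *t) : t(t) {}
  bool Parse(const char *v) override {
    if (!std::strcmp(v, "1") || !std::strcmp(v, "true")) {
      *t = true;
      return true;
    }
    if (!std::strcmp(v, "0") || !std::strcmp(v, "false")) {
      *t = false;
      return true;
    }
    return false;  // unparsable value
  }
};

template <> struct Handler<int> : HandlerBase {
  int *t;
  explicit Handler(int *t) : t(t) {}
  bool Parse(const char *v) override { *t = std::atoi(v); return true; }
};

int main() {
  bool symbolize = false;
  int verbosity = 0;
  Handler<bool> h1(&symbolize);
  Handler<int> h2(&verbosity);
  HandlerBase *table[] = {&h1, &h2};  // all the parser ever sees
  table[0]->Parse("true");
  table[1]->Parse("2");
  std::printf("symbolize=%d verbosity=%d\n", symbolize, verbosity);
}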
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_flags.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_flags.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_flags.cc (revision 351984)
@@ -0,0 +1,121 @@
+//===-- sanitizer_flags.cc ------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_flags.h"
+
+#include "sanitizer_common.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_list.h"
+#include "sanitizer_flag_parser.h"
+
+namespace __sanitizer {
+
+CommonFlags common_flags_dont_use;
+
+void CommonFlags::SetDefaults() {
+#define COMMON_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
+#include "sanitizer_flags.inc"
+#undef COMMON_FLAG
+}
+
+void CommonFlags::CopyFrom(const CommonFlags &other) {
+ internal_memcpy(this, &other, sizeof(*this));
+}
+
+// Copy the string from "s" to "out", making the following substitutions:
+// %b = binary basename
+// %p = pid
+void SubstituteForFlagValue(const char *s, char *out, uptr out_size) {
+ char *out_end = out + out_size;
+ while (*s && out < out_end - 1) {
+ if (s[0] != '%') {
+ *out++ = *s++;
+ continue;
+ }
+ switch (s[1]) {
+ case 'b': {
+ const char *base = GetProcessName();
+ CHECK(base);
+ while (*base && out < out_end - 1)
+ *out++ = *base++;
+ s += 2; // skip "%b"
+ break;
+ }
+ case 'p': {
+ int pid = internal_getpid();
+ char buf[32];
+ char *buf_pos = buf + 32;
+ do {
+ *--buf_pos = (pid % 10) + '0';
+ pid /= 10;
+ } while (pid);
+ while (buf_pos < buf + 32 && out < out_end - 1)
+ *out++ = *buf_pos++;
+ s += 2; // skip "%p"
+ break;
+ }
+ default:
+ *out++ = *s++;
+ break;
+ }
+ }
+ CHECK(out < out_end - 1);
+ *out = '\0';
+}
+
+class FlagHandlerInclude : public FlagHandlerBase {
+ FlagParser *parser_;
+ bool ignore_missing_;
+
+ public:
+ explicit FlagHandlerInclude(FlagParser *parser, bool ignore_missing)
+ : parser_(parser), ignore_missing_(ignore_missing) {}
+ bool Parse(const char *value) final {
+ if (internal_strchr(value, '%')) {
+ char *buf = (char *)MmapOrDie(kMaxPathLength, "FlagHandlerInclude");
+ SubstituteForFlagValue(value, buf, kMaxPathLength);
+ bool res = parser_->ParseFile(buf, ignore_missing_);
+ UnmapOrDie(buf, kMaxPathLength);
+ return res;
+ }
+ return parser_->ParseFile(value, ignore_missing_);
+ }
+};
+
+void RegisterIncludeFlags(FlagParser *parser, CommonFlags *cf) {
+ FlagHandlerInclude *fh_include = new (FlagParser::Alloc) // NOLINT
+ FlagHandlerInclude(parser, /*ignore_missing*/ false);
+ parser->RegisterHandler("include", fh_include,
+ "read more options from the given file");
+ FlagHandlerInclude *fh_include_if_exists = new (FlagParser::Alloc) // NOLINT
+ FlagHandlerInclude(parser, /*ignore_missing*/ true);
+ parser->RegisterHandler(
+ "include_if_exists", fh_include_if_exists,
+ "read more options from the given file (if it exists)");
+}
+
+void RegisterCommonFlags(FlagParser *parser, CommonFlags *cf) {
+#define COMMON_FLAG(Type, Name, DefaultValue, Description) \
+ RegisterFlag(parser, #Name, Description, &cf->Name);
+#include "sanitizer_flags.inc"
+#undef COMMON_FLAG
+
+ RegisterIncludeFlags(parser, cf);
+}
+
+void InitializeCommonFlags(CommonFlags *cf) {
+  // We need to record coverage to generate a coverage report.
+ cf->coverage |= cf->html_cov_report;
+ SetVerbosity(cf->verbosity);
+}
+
+} // namespace __sanitizer
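SubstituteForFlagValue is what lets options such as log_path carry %b and %p placeholders. A self-contained sketch of the %p half (Substitute is a hypothetical stand-in; the real routine also expands %b to the binary basename and writes into a caller-provided fixed-size buffer):

#include <unistd.h>
#include <cstdio>
#include <string>

// Expand %p to the current pid; every other character passes through.
static std::string Substitute(const char *s) {
  std::string out;
  for (const char *p = s; *p; ++p) {
    if (p[0] == '%' && p[1] == 'p') {
      out += std::to_string((long)getpid());
      ++p;  // consume the 'p' as well
    } else {
      out += *p;
    }
  }
  return out;
}

int main() {
  // e.g. log_path=/tmp/asan.%p becomes /tmp/asan.<pid>
  std::printf("%s\n", Substitute("/tmp/asan.%p").c_str());
}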
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_flags.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_flags.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_flags.h (revision 351984)
@@ -0,0 +1,67 @@
+//===-- sanitizer_flags.h ---------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_FLAGS_H
+#define SANITIZER_FLAGS_H
+
+#include "sanitizer_internal_defs.h"
+
+namespace __sanitizer {
+
+enum HandleSignalMode {
+ kHandleSignalNo,
+ kHandleSignalYes,
+ kHandleSignalExclusive,
+};
+
+struct CommonFlags {
+#define COMMON_FLAG(Type, Name, DefaultValue, Description) Type Name;
+#include "sanitizer_flags.inc"
+#undef COMMON_FLAG
+
+ void SetDefaults();
+ void CopyFrom(const CommonFlags &other);
+};
+
+// Functions to get/set global CommonFlags shared by all sanitizer runtimes:
+extern CommonFlags common_flags_dont_use;
+inline const CommonFlags *common_flags() {
+ return &common_flags_dont_use;
+}
+
+inline void SetCommonFlagsDefaults() {
+ common_flags_dont_use.SetDefaults();
+}
+
+// This function can only be used to setup tool-specific overrides for
+// CommonFlags defaults. Generally, it should only be used right after
+// SetCommonFlagsDefaults(), but before ParseCommonFlagsFromString(), and
+// only during the flags initialization (i.e. before they are used for
+// the first time).
+inline void OverrideCommonFlags(const CommonFlags &cf) {
+ common_flags_dont_use.CopyFrom(cf);
+}
+
+void SubstituteForFlagValue(const char *s, char *out, uptr out_size);
+
+class FlagParser;
+void RegisterCommonFlags(FlagParser *parser,
+ CommonFlags *cf = &common_flags_dont_use);
+void RegisterIncludeFlags(FlagParser *parser, CommonFlags *cf);
+
+// Should be called after parsing all flags. Sets up common flag values
+// and performs initializations common to all sanitizers (e.g. setting
+// verbosity).
+void InitializeCommonFlags(CommonFlags *cf = &common_flags_dont_use);
+} // namespace __sanitizer
+
+#endif // SANITIZER_FLAGS_H
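The intended initialization order falls out of these declarations: SetCommonFlagsDefaults, then tool-specific OverrideCommonFlags, then parsing, after which the flags are only ever read through the const common_flags() accessor. A small standalone mimic of that write-once/read-many convention (Flags and its fields are illustrative):

#include <cstdio>
#include <cstring>

struct Flags {
  int verbosity;
  bool coverage;
  void SetDefaults() { verbosity = 0; coverage = false; }
  void CopyFrom(const Flags &o) { std::memcpy(this, &o, sizeof(*this)); }
};

// One mutable instance, written only during init, read via a const view.
static Flags flags_dont_use;
static const Flags *flags() { return &flags_dont_use; }

int main() {
  flags_dont_use.SetDefaults();        // 1. defaults
  Flags overrides = *flags();
  overrides.verbosity = 2;             // 2. tool-specific overrides
  flags_dont_use.CopyFrom(overrides);  //    applied before any use
  // 3. env parsing would run here; 4. afterwards, read-only access:
  std::printf("verbosity=%d\n", flags()->verbosity);
}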
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_flags.inc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_flags.inc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_flags.inc (revision 351984)
@@ -0,0 +1,247 @@
+//===-- sanitizer_flags.h ---------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file describes common flags available in all sanitizers.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef COMMON_FLAG
+#error "Define COMMON_FLAG prior to including this file!"
+#endif
+
+// COMMON_FLAG(Type, Name, DefaultValue, Description)
+// Supported types: bool, const char *, int, uptr.
+// Default value must be a compile-time constant.
+// Description must be a string literal.
+
+COMMON_FLAG(
+ bool, symbolize, true,
+ "If set, use the online symbolizer from common sanitizer runtime to turn "
+ "virtual addresses to file/line locations.")
+COMMON_FLAG(
+ const char *, external_symbolizer_path, nullptr,
+ "Path to external symbolizer. If empty, the tool will search $PATH for "
+ "the symbolizer.")
+COMMON_FLAG(
+ bool, allow_addr2line, false,
+ "If set, allows online symbolizer to run addr2line binary to symbolize "
+ "stack traces (addr2line will only be used if llvm-symbolizer binary is "
+ "unavailable.")
+COMMON_FLAG(const char *, strip_path_prefix, "",
+ "Strips this prefix from file paths in error reports.")
+COMMON_FLAG(bool, fast_unwind_on_check, false,
+ "If available, use the fast frame-pointer-based unwinder on "
+ "internal CHECK failures.")
+COMMON_FLAG(bool, fast_unwind_on_fatal, false,
+ "If available, use the fast frame-pointer-based unwinder on fatal "
+ "errors.")
+COMMON_FLAG(bool, fast_unwind_on_malloc, true,
+ "If available, use the fast frame-pointer-based unwinder on "
+ "malloc/free.")
+COMMON_FLAG(bool, handle_ioctl, false, "Intercept and handle ioctl requests.")
+COMMON_FLAG(int, malloc_context_size, 1,
+ "Max number of stack frames kept for each allocation/deallocation.")
+COMMON_FLAG(
+ const char *, log_path, "stderr",
+ "Write logs to \"log_path.pid\". The special values are \"stdout\" and "
+ "\"stderr\". The default is \"stderr\".")
+COMMON_FLAG(
+ bool, log_exe_name, false,
+ "Mention name of executable when reporting error and "
+ "append executable name to logs (as in \"log_path.exe_name.pid\").")
+COMMON_FLAG(
+ bool, log_to_syslog, (bool)SANITIZER_ANDROID || (bool)SANITIZER_MAC,
+ "Write all sanitizer output to syslog in addition to other means of "
+ "logging.")
+COMMON_FLAG(
+ int, verbosity, 0,
+ "Verbosity level (0 - silent, 1 - a bit of output, 2+ - more output).")
+COMMON_FLAG(bool, strip_env, 1,
+ "Whether to remove the sanitizer from DYLD_INSERT_LIBRARIES to "
+ "avoid passing it to children. Default is true.")
+COMMON_FLAG(bool, detect_leaks, !SANITIZER_MAC, "Enable memory leak detection.")
+COMMON_FLAG(
+ bool, leak_check_at_exit, true,
+ "Invoke leak checking in an atexit handler. Has no effect if "
+ "detect_leaks=false, or if __lsan_do_leak_check() is called before the "
+ "handler has a chance to run.")
+COMMON_FLAG(bool, allocator_may_return_null, false,
+ "If false, the allocator will crash instead of returning 0 on "
+ "out-of-memory.")
+COMMON_FLAG(bool, print_summary, true,
+ "If false, disable printing error summaries in addition to error "
+ "reports.")
+COMMON_FLAG(int, print_module_map, 0,
+ "OS X only (0 - don't print, 1 - print only once before process "
+ "exits, 2 - print after each report).")
+COMMON_FLAG(bool, check_printf, true, "Check printf arguments.")
+#define COMMON_FLAG_HANDLE_SIGNAL_HELP(signal) \
+ "Controls custom tool's " #signal " handler (0 - do not registers the " \
+ "handler, 1 - register the handler and allow user to set own, " \
+ "2 - registers the handler and block user from changing it). "
+COMMON_FLAG(HandleSignalMode, handle_segv, kHandleSignalYes,
+ COMMON_FLAG_HANDLE_SIGNAL_HELP(SIGSEGV))
+COMMON_FLAG(HandleSignalMode, handle_sigbus, kHandleSignalYes,
+ COMMON_FLAG_HANDLE_SIGNAL_HELP(SIGBUS))
+COMMON_FLAG(HandleSignalMode, handle_abort, kHandleSignalNo,
+ COMMON_FLAG_HANDLE_SIGNAL_HELP(SIGABRT))
+COMMON_FLAG(HandleSignalMode, handle_sigill, kHandleSignalNo,
+ COMMON_FLAG_HANDLE_SIGNAL_HELP(SIGILL))
+COMMON_FLAG(HandleSignalMode, handle_sigtrap, kHandleSignalNo,
+ COMMON_FLAG_HANDLE_SIGNAL_HELP(SIGTRAP))
+COMMON_FLAG(HandleSignalMode, handle_sigfpe, kHandleSignalYes,
+ COMMON_FLAG_HANDLE_SIGNAL_HELP(SIGFPE))
+#undef COMMON_FLAG_HANDLE_SIGNAL_HELP
+COMMON_FLAG(bool, allow_user_segv_handler, true,
+ "Deprecated. True has no effect, use handle_sigbus=1. If false, "
+ "handle_*=1 will be upgraded to handle_*=2.")
+COMMON_FLAG(bool, use_sigaltstack, true,
+ "If set, uses alternate stack for signal handling.")
+COMMON_FLAG(bool, detect_deadlocks, true,
+ "If set, deadlock detection is enabled.")
+COMMON_FLAG(
+ uptr, clear_shadow_mmap_threshold, 64 * 1024,
+ "Large shadow regions are zero-filled using mmap(NORESERVE) instead of "
+ "memset(). This is the threshold size in bytes.")
+COMMON_FLAG(const char *, color, "auto",
+ "Colorize reports: (always|never|auto).")
+COMMON_FLAG(
+ bool, legacy_pthread_cond, false,
+ "Enables support for dynamic libraries linked with libpthread 2.2.5.")
+COMMON_FLAG(bool, intercept_tls_get_addr, false, "Intercept __tls_get_addr.")
+COMMON_FLAG(bool, help, false, "Print the flag descriptions.")
+COMMON_FLAG(uptr, mmap_limit_mb, 0,
+ "Limit the amount of mmap-ed memory (excluding shadow) in Mb; "
+ "not a user-facing flag, used mosly for testing the tools")
+COMMON_FLAG(uptr, hard_rss_limit_mb, 0,
+ "Hard RSS limit in Mb."
+ " If non-zero, a background thread is spawned at startup"
+ " which periodically reads RSS and aborts the process if the"
+ " limit is reached")
+COMMON_FLAG(uptr, soft_rss_limit_mb, 0,
+ "Soft RSS limit in Mb."
+ " If non-zero, a background thread is spawned at startup"
+ " which periodically reads RSS. If the limit is reached"
+ " all subsequent malloc/new calls will fail or return NULL"
+ " (depending on the value of allocator_may_return_null)"
+ " until the RSS goes below the soft limit."
+ " This limit does not affect memory allocations other than"
+ " malloc/new.")
+COMMON_FLAG(bool, heap_profile, false, "Experimental heap profiler, asan-only")
+COMMON_FLAG(s32, allocator_release_to_os_interval_ms,
+ ((bool)SANITIZER_FUCHSIA || (bool)SANITIZER_WINDOWS) ? -1 : 5000,
+ "Only affects a 64-bit allocator. If set, tries to release unused "
+ "memory to the OS, but not more often than this interval (in "
+ "milliseconds). Negative values mean do not attempt to release "
+ "memory to the OS.\n")
+COMMON_FLAG(bool, can_use_proc_maps_statm, true,
+ "If false, do not attempt to read /proc/maps/statm."
+ " Mostly useful for testing sanitizers.")
+COMMON_FLAG(
+ bool, coverage, false,
+ "If set, coverage information will be dumped at program shutdown (if the "
+ "coverage instrumentation was enabled at compile time).")
+COMMON_FLAG(const char *, coverage_dir, ".",
+ "Target directory for coverage dumps. Defaults to the current "
+ "directory.")
+COMMON_FLAG(bool, full_address_space, false,
+ "Sanitize complete address space; "
+ "by default kernel area on 32-bit platforms will not be sanitized")
+COMMON_FLAG(bool, print_suppressions, true,
+ "Print matched suppressions at exit.")
+COMMON_FLAG(
+ bool, disable_coredump, (SANITIZER_WORDSIZE == 64) && !SANITIZER_GO,
+ "Disable core dumping. By default, disable_coredump=1 on 64-bit to avoid"
+ " dumping a 16T+ core file. Ignored on OSes that don't dump core by"
+ " default and for sanitizers that don't reserve lots of virtual memory.")
+COMMON_FLAG(bool, use_madv_dontdump, true,
+ "If set, instructs kernel to not store the (huge) shadow "
+ "in core file.")
+COMMON_FLAG(bool, symbolize_inline_frames, true,
+ "Print inlined frames in stacktraces. Defaults to true.")
+COMMON_FLAG(bool, symbolize_vs_style, false,
+ "Print file locations in Visual Studio style (e.g: "
+ " file(10,42): ...")
+COMMON_FLAG(int, dedup_token_length, 0,
+ "If positive, after printing a stack trace also print a short "
+ "string token based on this number of frames that will simplify "
+ "deduplication of the reports. "
+ "Example: 'DEDUP_TOKEN: foo-bar-main'. Default is 0.")
+COMMON_FLAG(const char *, stack_trace_format, "DEFAULT",
+ "Format string used to render stack frames. "
+ "See sanitizer_stacktrace_printer.h for the format description. "
+ "Use DEFAULT to get default format.")
+COMMON_FLAG(bool, no_huge_pages_for_shadow, true,
+ "If true, the shadow is not allowed to use huge pages. ")
+COMMON_FLAG(bool, strict_string_checks, false,
+ "If set check that string arguments are properly null-terminated")
+COMMON_FLAG(bool, intercept_strstr, true,
+ "If set, uses custom wrappers for strstr and strcasestr functions "
+ "to find more errors.")
+COMMON_FLAG(bool, intercept_strspn, true,
+ "If set, uses custom wrappers for strspn and strcspn function "
+ "to find more errors.")
+COMMON_FLAG(bool, intercept_strtok, true,
+ "If set, uses a custom wrapper for the strtok function "
+ "to find more errors.")
+COMMON_FLAG(bool, intercept_strpbrk, true,
+ "If set, uses custom wrappers for strpbrk function "
+ "to find more errors.")
+COMMON_FLAG(bool, intercept_strlen, true,
+ "If set, uses custom wrappers for strlen and strnlen functions "
+ "to find more errors.")
+COMMON_FLAG(bool, intercept_strndup, true,
+ "If set, uses custom wrappers for strndup functions "
+ "to find more errors.")
+COMMON_FLAG(bool, intercept_strchr, true,
+ "If set, uses custom wrappers for strchr, strchrnul, and strrchr "
+ "functions to find more errors.")
+COMMON_FLAG(bool, intercept_memcmp, true,
+ "If set, uses custom wrappers for memcmp function "
+ "to find more errors.")
+COMMON_FLAG(bool, strict_memcmp, true,
+ "If true, assume that memcmp(p1, p2, n) always reads n bytes before "
+ "comparing p1 and p2.")
+COMMON_FLAG(bool, intercept_memmem, true,
+ "If set, uses a wrapper for memmem() to find more errors.")
+COMMON_FLAG(bool, intercept_intrin, true,
+ "If set, uses custom wrappers for memset/memcpy/memmove "
+ "intrinsics to find more errors.")
+COMMON_FLAG(bool, intercept_stat, true,
+ "If set, uses custom wrappers for *stat functions "
+ "to find more errors.")
+COMMON_FLAG(bool, intercept_send, true,
+ "If set, uses custom wrappers for send* functions "
+ "to find more errors.")
+COMMON_FLAG(bool, decorate_proc_maps, (bool)SANITIZER_ANDROID,
+ "If set, decorate sanitizer mappings in /proc/self/maps with "
+ "user-readable names")
+COMMON_FLAG(int, exitcode, 1, "Override the program exit status if the tool "
+ "found an error")
+COMMON_FLAG(
+ bool, abort_on_error, (bool)SANITIZER_ANDROID || (bool)SANITIZER_MAC,
+ "If set, the tool calls abort() instead of _exit() after printing the "
+ "error report.")
+COMMON_FLAG(bool, suppress_equal_pcs, true,
+ "Deduplicate multiple reports for single source location in "
+ "halt_on_error=false mode (asan only).")
+COMMON_FLAG(bool, print_cmdline, false, "Print command line on crash "
+ "(asan only).")
+COMMON_FLAG(bool, html_cov_report, false, "Generate html coverage report.")
+COMMON_FLAG(const char *, sancov_path, "sancov", "Sancov tool location.")
+COMMON_FLAG(bool, dump_instruction_bytes, false,
+ "If true, dump 16 bytes starting at the instruction that caused SEGV")
+COMMON_FLAG(bool, dump_registers, true,
+ "If true, dump values of CPU registers when SEGV happens. Only "
+ "available on OS X for now.")
+COMMON_FLAG(bool, detect_write_exec, false,
+ "If true, triggers warning when writable-executable pages requests "
+ "are being made")
+COMMON_FLAG(bool, test_only_emulate_no_memorymap, false,
+ "TEST ONLY fail to read memory mappings to emulate sanitized "
+ "\"init\"")
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_flags.inc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
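This .inc file is one half of an X-macro: each includer defines COMMON_FLAG to whatever expansion it needs (struct fields in sanitizer_flags.h, handler registrations in sanitizer_flags.cc) and then re-includes the same list. A self-contained demo of the technique, using a list macro in one file instead of re-inclusion (DEMO_FLAGS and its two flags are made up):

#include <cstdio>

// The flag list is written once...
#define DEMO_FLAGS(X)      \
  X(bool, symbolize, true) \
  X(int, verbosity, 0)

struct DemoFlags {
  // ...and expanded once into fields...
#define AS_FIELD(Type, Name, Default) Type Name;
  DEMO_FLAGS(AS_FIELD)
#undef AS_FIELD
  // ...and again into default assignments.
  void SetDefaults() {
#define AS_INIT(Type, Name, Default) Name = Default;
    DEMO_FLAGS(AS_INIT)
#undef AS_INIT
  }
};

int main() {
  DemoFlags f;
  f.SetDefaults();
#define AS_PRINT(Type, Name, Default) \
  std::printf(#Name " = %d\n", (int)f.Name);
  DEMO_FLAGS(AS_PRINT)
#undef AS_PRINT
}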
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_freebsd.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_freebsd.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_freebsd.h (revision 351984)
@@ -0,0 +1,136 @@
+//===-- sanitizer_freebsd.h -------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of Sanitizer runtime. It contains FreeBSD-specific
+// definitions.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_FREEBSD_H
+#define SANITIZER_FREEBSD_H
+
+#include "sanitizer_internal_defs.h"
+
+// x86-64 FreeBSD 9.2 and older define 'ucontext_t' incorrectly in
+// 32-bit mode.
+#if SANITIZER_FREEBSD && (SANITIZER_WORDSIZE == 32)
+# include <osreldate.h>
+# if __FreeBSD_version <= 902001 // v9.2
+# include <link.h>
+# include <sys/param.h>
+# include <ucontext.h>
+
+namespace __sanitizer {
+
+typedef unsigned long long __xuint64_t;
+
+typedef __int32_t __xregister_t;
+
+typedef struct __xmcontext {
+ __xregister_t mc_onstack;
+ __xregister_t mc_gs;
+ __xregister_t mc_fs;
+ __xregister_t mc_es;
+ __xregister_t mc_ds;
+ __xregister_t mc_edi;
+ __xregister_t mc_esi;
+ __xregister_t mc_ebp;
+ __xregister_t mc_isp;
+ __xregister_t mc_ebx;
+ __xregister_t mc_edx;
+ __xregister_t mc_ecx;
+ __xregister_t mc_eax;
+ __xregister_t mc_trapno;
+ __xregister_t mc_err;
+ __xregister_t mc_eip;
+ __xregister_t mc_cs;
+ __xregister_t mc_eflags;
+ __xregister_t mc_esp;
+ __xregister_t mc_ss;
+
+ int mc_len;
+ int mc_fpformat;
+ int mc_ownedfp;
+ __xregister_t mc_flags;
+
+ int mc_fpstate[128] __aligned(16);
+ __xregister_t mc_fsbase;
+ __xregister_t mc_gsbase;
+ __xregister_t mc_xfpustate;
+ __xregister_t mc_xfpustate_len;
+
+ int mc_spare2[4];
+} xmcontext_t;
+
+typedef struct __xucontext {
+ sigset_t uc_sigmask;
+ xmcontext_t uc_mcontext;
+
+ struct __ucontext *uc_link;
+ stack_t uc_stack;
+ int uc_flags;
+ int __spare__[4];
+} xucontext_t;
+
+struct xkinfo_vmentry {
+ int kve_structsize;
+ int kve_type;
+ __xuint64_t kve_start;
+ __xuint64_t kve_end;
+ __xuint64_t kve_offset;
+ __xuint64_t kve_vn_fileid;
+ __uint32_t kve_vn_fsid;
+ int kve_flags;
+ int kve_resident;
+ int kve_private_resident;
+ int kve_protection;
+ int kve_ref_count;
+ int kve_shadow_count;
+ int kve_vn_type;
+ __xuint64_t kve_vn_size;
+ __uint32_t kve_vn_rdev;
+ __uint16_t kve_vn_mode;
+ __uint16_t kve_status;
+ int _kve_ispare[12];
+ char kve_path[PATH_MAX];
+};
+
+typedef struct {
+ __uint32_t p_type;
+ __uint32_t p_offset;
+ __uint32_t p_vaddr;
+ __uint32_t p_paddr;
+ __uint32_t p_filesz;
+ __uint32_t p_memsz;
+ __uint32_t p_flags;
+ __uint32_t p_align;
+} XElf32_Phdr;
+
+struct xdl_phdr_info {
+ Elf_Addr dlpi_addr;
+ const char *dlpi_name;
+ const XElf32_Phdr *dlpi_phdr;
+ Elf_Half dlpi_phnum;
+ unsigned long long int dlpi_adds;
+ unsigned long long int dlpi_subs;
+ size_t dlpi_tls_modid;
+ void *dlpi_tls_data;
+};
+
+typedef int (*__xdl_iterate_hdr_callback)(struct xdl_phdr_info*, size_t, void*);
+typedef int xdl_iterate_phdr_t(__xdl_iterate_hdr_callback, void*);
+
+#define xdl_iterate_phdr(callback, param) \
+ (((xdl_iterate_phdr_t*) dl_iterate_phdr)((callback), (param)))
+
+} // namespace __sanitizer
+
+# endif // __FreeBSD_version <= 902001
+#endif // SANITIZER_FREEBSD && (SANITIZER_WORDSIZE == 32)
+
+#endif // SANITIZER_FREEBSD_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_freebsd.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
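The xdl_iterate_phdr macro above simply casts the libc's dl_iterate_phdr so that the callback receives the pre-9.2 dl_phdr_info layout. For contrast, the ordinary uncast call looks like this (Linux or modern FreeBSD; PrintModule is an illustrative callback):

#include <link.h>
#include <cstdio>

// Called once per loaded module with its load address and program headers.
static int PrintModule(struct dl_phdr_info *info, size_t, void *) {
  std::printf("%s loaded at %p (%d segments)\n",
              info->dlpi_name[0] ? info->dlpi_name : "<main>",
              (void *)info->dlpi_addr, (int)info->dlpi_phnum);
  return 0;  // returning non-zero stops the iteration
}

int main() { return dl_iterate_phdr(PrintModule, nullptr); }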
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_fuchsia.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_fuchsia.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_fuchsia.cc (revision 351984)
@@ -0,0 +1,527 @@
+//===-- sanitizer_fuchsia.cc ----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and other sanitizer
+// run-time libraries and implements Fuchsia-specific functions from
+// sanitizer_common.h.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_fuchsia.h"
+#if SANITIZER_FUCHSIA
+
+#include "sanitizer_common.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_mutex.h"
+
+#include <limits.h>
+#include <pthread.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <zircon/errors.h>
+#include <zircon/process.h>
+#include <zircon/syscalls.h>
+
+namespace __sanitizer {
+
+void NORETURN internal__exit(int exitcode) { _zx_process_exit(exitcode); }
+
+uptr internal_sched_yield() {
+ zx_status_t status = _zx_nanosleep(0);
+ CHECK_EQ(status, ZX_OK);
+ return 0; // Why doesn't this return void?
+}
+
+static void internal_nanosleep(zx_time_t ns) {
+ zx_status_t status = _zx_nanosleep(_zx_deadline_after(ns));
+ CHECK_EQ(status, ZX_OK);
+}
+
+unsigned int internal_sleep(unsigned int seconds) {
+ internal_nanosleep(ZX_SEC(seconds));
+ return 0;
+}
+
+u64 NanoTime() {
+ zx_time_t time;
+ zx_status_t status = _zx_clock_get(ZX_CLOCK_UTC, &time);
+ CHECK_EQ(status, ZX_OK);
+ return time;
+}
+
+u64 MonotonicNanoTime() { return _zx_clock_get_monotonic(); }
+
+uptr internal_getpid() {
+ zx_info_handle_basic_t info;
+ zx_status_t status =
+ _zx_object_get_info(_zx_process_self(), ZX_INFO_HANDLE_BASIC, &info,
+ sizeof(info), NULL, NULL);
+ CHECK_EQ(status, ZX_OK);
+ uptr pid = static_cast<uptr>(info.koid);
+ CHECK_EQ(pid, info.koid);
+ return pid;
+}
+
+uptr GetThreadSelf() { return reinterpret_cast<uptr>(thrd_current()); }
+
+tid_t GetTid() { return GetThreadSelf(); }
+
+void Abort() { abort(); }
+
+int Atexit(void (*function)(void)) { return atexit(function); }
+
+void SleepForSeconds(int seconds) { internal_sleep(seconds); }
+
+void SleepForMillis(int millis) { internal_nanosleep(ZX_MSEC(millis)); }
+
+void GetThreadStackTopAndBottom(bool, uptr *stack_top, uptr *stack_bottom) {
+ pthread_attr_t attr;
+ CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
+ void *base;
+ size_t size;
+ CHECK_EQ(pthread_attr_getstack(&attr, &base, &size), 0);
+ CHECK_EQ(pthread_attr_destroy(&attr), 0);
+
+ *stack_bottom = reinterpret_cast<uptr>(base);
+ *stack_top = *stack_bottom + size;
+}
+
+void InitializePlatformEarly() {}
+void MaybeReexec() {}
+void CheckASLR() {}
+void CheckMPROTECT() {}
+void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {}
+void DisableCoreDumperIfNecessary() {}
+void InstallDeadlySignalHandlers(SignalHandlerType handler) {}
+void SetAlternateSignalStack() {}
+void UnsetAlternateSignalStack() {}
+void InitTlsSize() {}
+
+void PrintModuleMap() {}
+
+bool SignalContext::IsStackOverflow() const { return false; }
+void SignalContext::DumpAllRegisters(void *context) { UNIMPLEMENTED(); }
+const char *SignalContext::Describe() const { UNIMPLEMENTED(); }
+
+enum MutexState : int { MtxUnlocked = 0, MtxLocked = 1, MtxSleeping = 2 };
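+// The mutex is one 32-bit futex word cycling through the states above:
+// Lock() first tries a fast MtxUnlocked->MtxLocked exchange; a contended
+// waiter stores MtxSleeping and blocks on the futex, and Unlock() issues a
+// futex wake only if the previous state was MtxSleeping, so the uncontended
+// path makes no syscalls.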
+
+BlockingMutex::BlockingMutex() {
+ // NOTE! It's important that this use internal_memset, because plain
+ // memset might be intercepted (e.g., actually be __asan_memset).
+ // Defining this so that the compiler initializes each field, e.g.:
+ // BlockingMutex::BlockingMutex() : BlockingMutex(LINKER_INITIALIZED) {}
+ // might result in the compiler generating a call to memset, which would
+ // have the same problem.
+ internal_memset(this, 0, sizeof(*this));
+}
+
+void BlockingMutex::Lock() {
+ CHECK_EQ(owner_, 0);
+ atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
+ if (atomic_exchange(m, MtxLocked, memory_order_acquire) == MtxUnlocked)
+ return;
+ while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked) {
+ zx_status_t status =
+ _zx_futex_wait(reinterpret_cast<zx_futex_t *>(m), MtxSleeping,
+ ZX_HANDLE_INVALID, ZX_TIME_INFINITE);
+ if (status != ZX_ERR_BAD_STATE) // Normal race.
+ CHECK_EQ(status, ZX_OK);
+ }
+}
+
+void BlockingMutex::Unlock() {
+ atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
+ u32 v = atomic_exchange(m, MtxUnlocked, memory_order_release);
+ CHECK_NE(v, MtxUnlocked);
+ if (v == MtxSleeping) {
+ zx_status_t status = _zx_futex_wake(reinterpret_cast<zx_futex_t *>(m), 1);
+ CHECK_EQ(status, ZX_OK);
+ }
+}
+
+void BlockingMutex::CheckLocked() {
+ atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
+ CHECK_NE(MtxUnlocked, atomic_load(m, memory_order_relaxed));
+}
+
+uptr GetPageSize() { return PAGE_SIZE; }
+
+uptr GetMmapGranularity() { return PAGE_SIZE; }
+
+sanitizer_shadow_bounds_t ShadowBounds;
+
+uptr GetMaxUserVirtualAddress() {
+ ShadowBounds = __sanitizer_shadow_bounds();
+ return ShadowBounds.memory_limit - 1;
+}
+
+uptr GetMaxVirtualAddress() { return GetMaxUserVirtualAddress(); }
+
+static void *DoAnonymousMmapOrDie(uptr size, const char *mem_type,
+ bool raw_report, bool die_for_nomem) {
+ size = RoundUpTo(size, PAGE_SIZE);
+
+ zx_handle_t vmo;
+ zx_status_t status = _zx_vmo_create(size, 0, &vmo);
+ if (status != ZX_OK) {
+ if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
+ ReportMmapFailureAndDie(size, mem_type, "zx_vmo_create", status,
+ raw_report);
+ return nullptr;
+ }
+ _zx_object_set_property(vmo, ZX_PROP_NAME, mem_type,
+ internal_strlen(mem_type));
+
+ // TODO(mcgrathr): Maybe allocate a VMAR for all sanitizer heap and use that?
+ uintptr_t addr;
+ status =
+ _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, 0,
+ vmo, 0, size, &addr);
+ _zx_handle_close(vmo);
+
+ if (status != ZX_OK) {
+ if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
+ ReportMmapFailureAndDie(size, mem_type, "zx_vmar_map", status,
+ raw_report);
+ return nullptr;
+ }
+
+ IncreaseTotalMmap(size);
+
+ return reinterpret_cast<void *>(addr);
+}
+
+void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
+ return DoAnonymousMmapOrDie(size, mem_type, raw_report, true);
+}
+
+void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
+ return MmapOrDie(size, mem_type);
+}
+
+void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
+ return DoAnonymousMmapOrDie(size, mem_type, false, false);
+}
+
+uptr ReservedAddressRange::Init(uptr init_size, const char *name,
+ uptr fixed_addr) {
+ init_size = RoundUpTo(init_size, PAGE_SIZE);
+ DCHECK_EQ(os_handle_, ZX_HANDLE_INVALID);
+ uintptr_t base;
+ zx_handle_t vmar;
+ zx_status_t status =
+ _zx_vmar_allocate(
+ _zx_vmar_root_self(),
+ ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE | ZX_VM_CAN_MAP_SPECIFIC,
+ 0, init_size, &vmar, &base);
+ if (status != ZX_OK)
+ ReportMmapFailureAndDie(init_size, name, "zx_vmar_allocate", status);
+ base_ = reinterpret_cast<void *>(base);
+ size_ = init_size;
+ name_ = name;
+ os_handle_ = vmar;
+
+ return reinterpret_cast<uptr>(base_);
+}
+
+static uptr DoMmapFixedOrDie(zx_handle_t vmar, uptr fixed_addr, uptr map_size,
+ void *base, const char *name, bool die_for_nomem) {
+ uptr offset = fixed_addr - reinterpret_cast<uptr>(base);
+ map_size = RoundUpTo(map_size, PAGE_SIZE);
+ zx_handle_t vmo;
+ zx_status_t status = _zx_vmo_create(map_size, 0, &vmo);
+ if (status != ZX_OK) {
+ if (status != ZX_ERR_NO_MEMORY || die_for_nomem)
+ ReportMmapFailureAndDie(map_size, name, "zx_vmo_create", status);
+ return 0;
+ }
+ _zx_object_set_property(vmo, ZX_PROP_NAME, name, internal_strlen(name));
+ DCHECK_GE(base + size_, map_size + offset);
+ uintptr_t addr;
+
+ status =
+ _zx_vmar_map(vmar, ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC,
+ offset, vmo, 0, map_size, &addr);
+ _zx_handle_close(vmo);
+ if (status != ZX_OK) {
+ if (status != ZX_ERR_NO_MEMORY || die_for_nomem) {
+ ReportMmapFailureAndDie(map_size, name, "zx_vmar_map", status);
+ }
+ return 0;
+ }
+ IncreaseTotalMmap(map_size);
+ return addr;
+}
+
+uptr ReservedAddressRange::Map(uptr fixed_addr, uptr map_size,
+ const char *name) {
+ return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_,
+ name_, false);
+}
+
+uptr ReservedAddressRange::MapOrDie(uptr fixed_addr, uptr map_size,
+ const char *name) {
+ return DoMmapFixedOrDie(os_handle_, fixed_addr, map_size, base_,
+ name_, true);
+}
+
+void UnmapOrDieVmar(void *addr, uptr size, zx_handle_t target_vmar) {
+ if (!addr || !size) return;
+ size = RoundUpTo(size, PAGE_SIZE);
+
+ zx_status_t status =
+ _zx_vmar_unmap(target_vmar, reinterpret_cast<uintptr_t>(addr), size);
+ if (status != ZX_OK) {
+ Report("ERROR: %s failed to deallocate 0x%zx (%zd) bytes at address %p\n",
+ SanitizerToolName, size, size, addr);
+ CHECK("unable to unmap" && 0);
+ }
+
+ DecreaseTotalMmap(size);
+}
+
+void ReservedAddressRange::Unmap(uptr addr, uptr size) {
+ CHECK_LE(size, size_);
+ const zx_handle_t vmar = static_cast<zx_handle_t>(os_handle_);
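+  // Only three shapes of unmapping are supported: the whole range (destroy
+  // the vmar), a prefix starting at base_, or a suffix ending exactly at
+  // base_ + size_ (checked below).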
+ if (addr == reinterpret_cast<uptr>(base_)) {
+ if (size == size_) {
+ // Destroying the vmar effectively unmaps the whole mapping.
+ _zx_vmar_destroy(vmar);
+ _zx_handle_close(vmar);
+ os_handle_ = static_cast<uptr>(ZX_HANDLE_INVALID);
+ DecreaseTotalMmap(size);
+ return;
+ }
+ } else {
+ CHECK_EQ(addr + size, reinterpret_cast<uptr>(base_) + size_);
+ }
+ // Partial unmapping does not affect the fact that the initial range is still
+ // reserved, and the resulting unmapped memory can't be reused.
+ UnmapOrDieVmar(reinterpret_cast<void *>(addr), size, vmar);
+}
+
+// This should never be called.
+void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {
+ UNIMPLEMENTED();
+}
+
+void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
+ const char *mem_type) {
+ CHECK_GE(size, PAGE_SIZE);
+ CHECK(IsPowerOfTwo(size));
+ CHECK(IsPowerOfTwo(alignment));
+
+ zx_handle_t vmo;
+ zx_status_t status = _zx_vmo_create(size, 0, &vmo);
+ if (status != ZX_OK) {
+ if (status != ZX_ERR_NO_MEMORY)
+ ReportMmapFailureAndDie(size, mem_type, "zx_vmo_create", status, false);
+ return nullptr;
+ }
+ _zx_object_set_property(vmo, ZX_PROP_NAME, mem_type,
+ internal_strlen(mem_type));
+
+ // TODO(mcgrathr): Maybe allocate a VMAR for all sanitizer heap and use that?
+
+ // Map a larger size to get a chunk of address space big enough that
+ // it surely contains an aligned region of the requested size. Then
+ // overwrite the aligned middle portion with a mapping from the
+ // beginning of the VMO, and unmap the excess before and after.
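+  // E.g. (hypothetical numbers) size = alignment = 0x10000: a 0x20000-byte
+  // mapping at 0x12345000 gives addr = 0x12350000, and the excess ranges
+  // [0x12345000,0x12350000) and [0x12360000,0x12365000) are unmapped below.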
+ size_t map_size = size + alignment;
+ uintptr_t addr;
+ status =
+ _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, 0,
+ vmo, 0, map_size, &addr);
+ if (status == ZX_OK) {
+ uintptr_t map_addr = addr;
+ uintptr_t map_end = map_addr + map_size;
+ addr = RoundUpTo(map_addr, alignment);
+ uintptr_t end = addr + size;
+ if (addr != map_addr) {
+ zx_info_vmar_t info;
+ status = _zx_object_get_info(_zx_vmar_root_self(), ZX_INFO_VMAR, &info,
+ sizeof(info), NULL, NULL);
+ if (status == ZX_OK) {
+ uintptr_t new_addr;
+ status = _zx_vmar_map(
+ _zx_vmar_root_self(),
+ ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC_OVERWRITE,
+ addr - info.base, vmo, 0, size, &new_addr);
+ if (status == ZX_OK) CHECK_EQ(new_addr, addr);
+ }
+ }
+ if (status == ZX_OK && addr != map_addr)
+ status = _zx_vmar_unmap(_zx_vmar_root_self(), map_addr, addr - map_addr);
+ if (status == ZX_OK && end != map_end)
+ status = _zx_vmar_unmap(_zx_vmar_root_self(), end, map_end - end);
+ }
+ _zx_handle_close(vmo);
+
+ if (status != ZX_OK) {
+ if (status != ZX_ERR_NO_MEMORY)
+ ReportMmapFailureAndDie(size, mem_type, "zx_vmar_map", status, false);
+ return nullptr;
+ }
+
+ IncreaseTotalMmap(size);
+
+ return reinterpret_cast<void *>(addr);
+}
+
+void UnmapOrDie(void *addr, uptr size) {
+ UnmapOrDieVmar(addr, size, _zx_vmar_root_self());
+}
+
+// This is used on the shadow mapping, which cannot be changed.
+// Zircon doesn't have anything like MADV_DONTNEED.
+void ReleaseMemoryPagesToOS(uptr beg, uptr end) {}
+
+void DumpProcessMap() {
+ // TODO(mcgrathr): write it
+ return;
+}
+
+bool IsAccessibleMemoryRange(uptr beg, uptr size) {
+ // TODO(mcgrathr): Figure out a better way.
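+  // Probe readability by asking the kernel to copy the range into a scratch
+  // VMO: _zx_vmo_write() fails cleanly on an unmapped or unreadable source
+  // range instead of faulting in-process.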
+ zx_handle_t vmo;
+ zx_status_t status = _zx_vmo_create(size, 0, &vmo);
+ if (status == ZX_OK) {
+ status = _zx_vmo_write(vmo, reinterpret_cast<const void *>(beg), 0, size);
+ _zx_handle_close(vmo);
+ }
+ return status == ZX_OK;
+}
+
+// FIXME implement on this platform.
+void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) {}
+
+bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
+ uptr *read_len, uptr max_len, error_t *errno_p) {
+ zx_handle_t vmo;
+ zx_status_t status = __sanitizer_get_configuration(file_name, &vmo);
+ if (status == ZX_OK) {
+ uint64_t vmo_size;
+ status = _zx_vmo_get_size(vmo, &vmo_size);
+ if (status == ZX_OK) {
+ if (vmo_size < max_len) max_len = vmo_size;
+ size_t map_size = RoundUpTo(max_len, PAGE_SIZE);
+ uintptr_t addr;
+ status = _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ, 0, vmo, 0,
+ map_size, &addr);
+ if (status == ZX_OK) {
+ *buff = reinterpret_cast<char *>(addr);
+ *buff_size = map_size;
+ *read_len = max_len;
+ }
+ }
+ _zx_handle_close(vmo);
+ }
+ if (status != ZX_OK && errno_p) *errno_p = status;
+ return status == ZX_OK;
+}
+
+void RawWrite(const char *buffer) {
+ constexpr size_t size = 128;
+ static _Thread_local char line[size];
+ static _Thread_local size_t lastLineEnd = 0;
+ static _Thread_local size_t cur = 0;
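+  // line[0..cur) buffers output that has not been flushed yet; lastLineEnd
+  // is the offset just past the last '\n' buffered, so complete lines are
+  // flushed together and a partial tail survives until the next call.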
+
+ while (*buffer) {
+ if (cur >= size) {
+ if (lastLineEnd == 0)
+ lastLineEnd = size;
+ __sanitizer_log_write(line, lastLineEnd);
+ internal_memmove(line, line + lastLineEnd, cur - lastLineEnd);
+ cur = cur - lastLineEnd;
+ lastLineEnd = 0;
+ }
+ if (*buffer == '\n')
+ lastLineEnd = cur + 1;
+ line[cur++] = *buffer++;
+ }
+ // Flush all complete lines before returning.
+ if (lastLineEnd != 0) {
+ __sanitizer_log_write(line, lastLineEnd);
+ internal_memmove(line, line + lastLineEnd, cur - lastLineEnd);
+ cur = cur - lastLineEnd;
+ lastLineEnd = 0;
+ }
+}
+
+void CatastrophicErrorWrite(const char *buffer, uptr length) {
+ __sanitizer_log_write(buffer, length);
+}
+
+char **StoredArgv;
+char **StoredEnviron;
+
+char **GetArgv() { return StoredArgv; }
+char **GetEnviron() { return StoredEnviron; }
+
+const char *GetEnv(const char *name) {
+ if (StoredEnviron) {
+ uptr NameLen = internal_strlen(name);
+ for (char **Env = StoredEnviron; *Env != 0; Env++) {
+ if (internal_strncmp(*Env, name, NameLen) == 0 && (*Env)[NameLen] == '=')
+ return (*Env) + NameLen + 1;
+ }
+ }
+ return nullptr;
+}
+
+uptr ReadBinaryName(/*out*/ char *buf, uptr buf_len) {
+ const char *argv0 = "<UNKNOWN>";
+ if (StoredArgv && StoredArgv[0]) {
+ argv0 = StoredArgv[0];
+ }
+ internal_strncpy(buf, argv0, buf_len);
+ return internal_strlen(buf);
+}
+
+uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len) {
+ return ReadBinaryName(buf, buf_len);
+}
+
+uptr MainThreadStackBase, MainThreadStackSize;
+
+bool GetRandom(void *buffer, uptr length, bool blocking) {
+ CHECK_LE(length, ZX_CPRNG_DRAW_MAX_LEN);
+ _zx_cprng_draw(buffer, length);
+ return true;
+}
+
+u32 GetNumberOfCPUs() {
+ return zx_system_get_num_cpus();
+}
+
+uptr GetRSS() { UNIMPLEMENTED(); }
+
+} // namespace __sanitizer
+
+using namespace __sanitizer; // NOLINT
+
+extern "C" {
+void __sanitizer_startup_hook(int argc, char **argv, char **envp,
+ void *stack_base, size_t stack_size) {
+ __sanitizer::StoredArgv = argv;
+ __sanitizer::StoredEnviron = envp;
+ __sanitizer::MainThreadStackBase = reinterpret_cast<uintptr_t>(stack_base);
+ __sanitizer::MainThreadStackSize = stack_size;
+}
+
+void __sanitizer_set_report_path(const char *path) {
+ // Handle the initialization code in each sanitizer, but no other calls.
+ // This setting is never consulted on Fuchsia.
+ DCHECK_EQ(path, common_flags()->log_path);
+}
+
+void __sanitizer_set_report_fd(void *fd) {
+ UNREACHABLE("not available on Fuchsia");
+}
+} // extern "C"
+
+#endif // SANITIZER_FUCHSIA
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_fuchsia.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_fuchsia.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_fuchsia.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_fuchsia.h (revision 351984)
@@ -0,0 +1,30 @@
+//===-- sanitizer_fuchsia.h ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===---------------------------------------------------------------------===//
+//
+// Fuchsia-specific sanitizer support.
+//
+//===---------------------------------------------------------------------===//
+#ifndef SANITIZER_FUCHSIA_H
+#define SANITIZER_FUCHSIA_H
+
+#include "sanitizer_platform.h"
+#if SANITIZER_FUCHSIA
+
+#include "sanitizer_common.h"
+
+#include <zircon/sanitizer.h>
+
+namespace __sanitizer {
+
+extern uptr MainThreadStackBase, MainThreadStackSize;
+extern sanitizer_shadow_bounds_t ShadowBounds;
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_FUCHSIA
+#endif // SANITIZER_FUCHSIA_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_fuchsia.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_getauxval.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_getauxval.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_getauxval.h (revision 351984)
@@ -0,0 +1,47 @@
+//===-- sanitizer_getauxval.h -----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Common getauxval() guards and definitions.
+// getauxval() is not defined until glibc version 2.16, or until API level 21
+// for Android.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_GETAUXVAL_H
+#define SANITIZER_GETAUXVAL_H
+
+#include "sanitizer_platform.h"
+
+#if SANITIZER_LINUX || SANITIZER_FUCHSIA
+
+# include <features.h>
+
+# ifndef __GLIBC_PREREQ
+# define __GLIBC_PREREQ(x, y) 0
+# endif
+
+# if __GLIBC_PREREQ(2, 16) || (SANITIZER_ANDROID && __ANDROID_API__ >= 21) || \
+ SANITIZER_FUCHSIA
+# define SANITIZER_USE_GETAUXVAL 1
+# else
+# define SANITIZER_USE_GETAUXVAL 0
+# endif
+
+# if SANITIZER_USE_GETAUXVAL
+# include <sys/auxv.h>
+# else
+// The weak getauxval definition allows checking for the function at run time.
+// This is useful for Android, when compiled at a lower API level yet running
+// on a more recent platform that offers the function.
+extern "C" SANITIZER_WEAK_ATTRIBUTE
+unsigned long getauxval(unsigned long type); // NOLINT
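+// With only the weak declaration, callers can probe for the function at run
+// time, e.g. (hypothetical): uptr hwcap = &getauxval ? getauxval(AT_HWCAP) : 0;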
+# endif
+
+#endif // SANITIZER_LINUX || SANITIZER_FUCHSIA
+
+#endif // SANITIZER_GETAUXVAL_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_getauxval.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_hash.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_hash.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_hash.h (revision 351984)
@@ -0,0 +1,43 @@
+//===-- sanitizer_hash.h ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a simple hash function.
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_HASH_H
+#define SANITIZER_HASH_H
+
+#include "sanitizer_internal_defs.h"
+
+namespace __sanitizer {
+class MurMur2HashBuilder {
+ static const u32 m = 0x5bd1e995;
+ static const u32 seed = 0x9747b28c;
+ static const u32 r = 24;
+ u32 h;
+
+ public:
+ explicit MurMur2HashBuilder(u32 init = 0) { h = seed ^ init; }
+ void add(u32 k) {
+ k *= m;
+ k ^= k >> r;
+ k *= m;
+ h *= m;
+ h ^= k;
+ }
+ u32 get() {
+ u32 x = h;
+ x ^= x >> 13;
+ x *= m;
+ x ^= x >> 15;
+ return x;
+ }
+};
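+// A typical use (hypothetical caller), hashing a sequence of 32-bit words:
+//   MurMur2HashBuilder b(len);
+//   for (u32 i = 0; i < len; i++) b.add(data[i]);
+//   u32 hash = b.get();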
+} // namespace __sanitizer
+
+#endif // SANITIZER_HASH_H
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_interface_internal.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_interface_internal.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_interface_internal.h (revision 351984)
@@ -0,0 +1,116 @@
+//===-- sanitizer_interface_internal.h --------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between run-time libraries of sanitizers.
+//
+// This header declares the sanitizer runtime interface functions.
+// The runtime library has to define these functions so the instrumented
+// program can call them.
+//
+// See also include/sanitizer/common_interface_defs.h
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_INTERFACE_INTERNAL_H
+#define SANITIZER_INTERFACE_INTERNAL_H
+
+#include "sanitizer_internal_defs.h"
+
+extern "C" {
+ // Tell the tools to write their reports to "path.<pid>" instead of stderr.
+ // The special values are "stdout" and "stderr".
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __sanitizer_set_report_path(const char *path);
+ // Tell the tools to write their reports to the provided file descriptor
+ // (cast to void *).
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __sanitizer_set_report_fd(void *fd);
+
+ typedef struct {
+ int coverage_sandboxed;
+ __sanitizer::sptr coverage_fd;
+ unsigned int coverage_max_block_size;
+ } __sanitizer_sandbox_arguments;
+
+ // Notify the tools that the sandbox is going to be turned on.
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void
+ __sanitizer_sandbox_on_notify(__sanitizer_sandbox_arguments *args);
+
+ // This function is called by the tool when it has just finished reporting
+ // an error. 'error_summary' is a one-line string that summarizes
+ // the error message. This function can be overridden by the client.
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_report_error_summary(const char *error_summary);
+
+ SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov_dump();
+ SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_coverage(
+ const __sanitizer::uptr *pcs, const __sanitizer::uptr len);
+ SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_dump_trace_pc_guard_coverage();
+
+ SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_cov(__sanitizer::u32 *guard);
+
+ // Returns 1 on the first call, then returns 0 thereafter. Called by the tool
+ // to ensure only one report is printed when multiple errors occur
+ // simultaneously.
+ SANITIZER_INTERFACE_ATTRIBUTE int __sanitizer_acquire_crash_state();
+
+ SANITIZER_INTERFACE_ATTRIBUTE
+ void __sanitizer_annotate_contiguous_container(const void *beg,
+ const void *end,
+ const void *old_mid,
+ const void *new_mid);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ int __sanitizer_verify_contiguous_container(const void *beg, const void *mid,
+ const void *end);
+ SANITIZER_INTERFACE_ATTRIBUTE
+ const void *__sanitizer_contiguous_container_find_bad_address(
+ const void *beg, const void *mid, const void *end);
+
+ SANITIZER_INTERFACE_ATTRIBUTE
+ int __sanitizer_get_module_and_offset_for_pc(
+ __sanitizer::uptr pc, char *module_path,
+ __sanitizer::uptr module_path_len, __sanitizer::uptr *pc_offset);
+
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_trace_cmp();
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_trace_cmp1();
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_trace_cmp2();
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_trace_cmp4();
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_trace_cmp8();
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_trace_const_cmp1();
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_trace_const_cmp2();
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_trace_const_cmp4();
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_trace_const_cmp8();
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_trace_switch();
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_trace_div4();
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_trace_div8();
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_trace_gep();
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_trace_pc_indir();
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_trace_pc_guard(__sanitizer::u32*);
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_trace_pc_guard_init(__sanitizer::u32*,
+ __sanitizer::u32*);
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_8bit_counters_init();
+ SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+ void __sanitizer_cov_pcs_init();
+} // extern "C"
+
+#endif // SANITIZER_INTERFACE_INTERNAL_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_interface_internal.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_internal_defs.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_internal_defs.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_internal_defs.h (revision 351984)
@@ -0,0 +1,436 @@
+//===-- sanitizer_internal_defs.h -------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer.
+// It contains macros used in run-time library code.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_DEFS_H
+#define SANITIZER_DEFS_H
+
+#include "sanitizer_platform.h"
+
+#ifndef SANITIZER_DEBUG
+# define SANITIZER_DEBUG 0
+#endif
+
+#define SANITIZER_STRINGIFY_(S) #S
+#define SANITIZER_STRINGIFY(S) SANITIZER_STRINGIFY_(S)
+
+// Only use SANITIZER_*ATTRIBUTE* before the function return type!
+#if SANITIZER_WINDOWS
+#if SANITIZER_IMPORT_INTERFACE
+# define SANITIZER_INTERFACE_ATTRIBUTE __declspec(dllimport)
+#else
+# define SANITIZER_INTERFACE_ATTRIBUTE __declspec(dllexport)
+#endif
+# define SANITIZER_WEAK_ATTRIBUTE
+#elif SANITIZER_GO
+# define SANITIZER_INTERFACE_ATTRIBUTE
+# define SANITIZER_WEAK_ATTRIBUTE
+#else
+# define SANITIZER_INTERFACE_ATTRIBUTE __attribute__((visibility("default")))
+# define SANITIZER_WEAK_ATTRIBUTE __attribute__((weak))
+#endif
+
+// TLS is handled differently on different platforms
+#if SANITIZER_LINUX || SANITIZER_NETBSD || \
+ SANITIZER_FREEBSD || SANITIZER_OPENBSD
+# define SANITIZER_TLS_INITIAL_EXEC_ATTRIBUTE \
+ __attribute__((tls_model("initial-exec"))) thread_local
+#else
+# define SANITIZER_TLS_INITIAL_EXEC_ATTRIBUTE
+#endif
+
+//--------------------------- WEAK FUNCTIONS ---------------------------------//
+// When working with weak functions, to simplify the code and make it more
+// portable, define a default implementation using this macro when possible:
+//
+// SANITIZER_INTERFACE_WEAK_DEF(<return_type>, <name>, <parameter list>)
+//
+// For example:
+// SANITIZER_INTERFACE_WEAK_DEF(bool, compare, int a, int b) { return a > b; }
+//
+#if SANITIZER_WINDOWS
+#include "sanitizer_win_defs.h"
+# define SANITIZER_INTERFACE_WEAK_DEF(ReturnType, Name, ...) \
+ WIN_WEAK_EXPORT_DEF(ReturnType, Name, __VA_ARGS__)
+#else
+# define SANITIZER_INTERFACE_WEAK_DEF(ReturnType, Name, ...) \
+ extern "C" SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE \
+ ReturnType Name(__VA_ARGS__)
+#endif
+
+// SANITIZER_SUPPORTS_WEAK_HOOKS means that we support real weak functions that
+// will evaluate to a null pointer when not defined.
+#ifndef SANITIZER_SUPPORTS_WEAK_HOOKS
+#if (SANITIZER_LINUX || SANITIZER_SOLARIS) && !SANITIZER_GO
+# define SANITIZER_SUPPORTS_WEAK_HOOKS 1
+// Before Xcode 4.5, the Darwin linker doesn't reliably support undefined
+// weak symbols. Mac OS X 10.9/Darwin 13 is the first release that is
+// supported only by Xcode >= 4.5.
+#elif SANITIZER_MAC && \
+ __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ >= 1090 && !SANITIZER_GO
+# define SANITIZER_SUPPORTS_WEAK_HOOKS 1
+#else
+# define SANITIZER_SUPPORTS_WEAK_HOOKS 0
+#endif
+#endif // SANITIZER_SUPPORTS_WEAK_HOOKS
+// For weak hooks that are called very often, where we want to avoid the
+// overhead of executing a default implementation when it is not necessary,
+// we can use the flag SANITIZER_SUPPORTS_WEAK_HOOKS to define the default
+// implementation only for platforms that don't support weak symbols. For
+// example:
+//
+// #if !SANITIZER_SUPPORT_WEAK_HOOKS
+// SANITIZER_INTERFACE_WEAK_DEF(bool, compare_hook, int a, int b) {
+// return a > b;
+// }
+// #endif
+//
+// And then use it as: if (compare_hook) compare_hook(a, b);
+//----------------------------------------------------------------------------//
+
+
+// We can use .preinit_array section on Linux to call sanitizer initialization
+// functions very early in the process startup (unless PIC macro is defined).
+//
+// On FreeBSD, .preinit_array functions are called with rtld_bind_lock writer
+// lock held. It will lead to a deadlock if unresolved PLT functions (which
+// hold the rtld_bind_lock reader lock) are called inside .preinit_array
+// functions.
+//
+// FIXME: do we have anything like this on Mac?
+#ifndef SANITIZER_CAN_USE_PREINIT_ARRAY
+#if ((SANITIZER_LINUX && !SANITIZER_ANDROID) || SANITIZER_OPENBSD || \
+ SANITIZER_FUCHSIA) && !defined(PIC)
+#define SANITIZER_CAN_USE_PREINIT_ARRAY 1
+// Before Solaris 11.4, .preinit_array is fully supported only with GNU ld.
+// FIXME: Check for those conditions.
+#elif SANITIZER_SOLARIS && !defined(PIC)
+# define SANITIZER_CAN_USE_PREINIT_ARRAY 1
+#else
+# define SANITIZER_CAN_USE_PREINIT_ARRAY 0
+#endif
+#endif // SANITIZER_CAN_USE_PREINIT_ARRAY
+
+// GCC does not understand __has_feature
+#if !defined(__has_feature)
+# define __has_feature(x) 0
+#endif
+
+// Older GCCs do not understand __has_attribute.
+#if !defined(__has_attribute)
+# define __has_attribute(x) 0
+#endif
+
+// For portability reasons we do not include stddef.h, stdint.h or any other
+// system header, but we do need some basic types that are not defined
+// in a portable way by the language itself.
+namespace __sanitizer {
+
+#if defined(_WIN64)
+// 64-bit Windows uses LLP64 data model.
+typedef unsigned long long uptr; // NOLINT
+typedef signed long long sptr; // NOLINT
+#else
+typedef unsigned long uptr; // NOLINT
+typedef signed long sptr; // NOLINT
+#endif // defined(_WIN64)
+#if defined(__x86_64__)
+// Since x32 uses the ILP32 data model in 64-bit hardware mode, we must use a
+// 64-bit pointer to unwind the stack frame.
+typedef unsigned long long uhwptr; // NOLINT
+#else
+typedef uptr uhwptr; // NOLINT
+#endif
+typedef unsigned char u8;
+typedef unsigned short u16; // NOLINT
+typedef unsigned int u32;
+typedef unsigned long long u64; // NOLINT
+typedef signed char s8;
+typedef signed short s16; // NOLINT
+typedef signed int s32;
+typedef signed long long s64; // NOLINT
+#if SANITIZER_WINDOWS
+// On Windows, files are HANDLE, which is a synonym of void*.
+// Use void* to avoid including <windows.h> everywhere.
+typedef void* fd_t;
+typedef unsigned error_t;
+#else
+typedef int fd_t;
+typedef int error_t;
+#endif
+#if SANITIZER_SOLARIS && !defined(_LP64)
+typedef long pid_t;
+#else
+typedef int pid_t;
+#endif
+
+#if SANITIZER_FREEBSD || SANITIZER_NETBSD || \
+ SANITIZER_OPENBSD || SANITIZER_MAC || \
+ (SANITIZER_SOLARIS && (defined(_LP64) || _FILE_OFFSET_BITS == 64)) || \
+ (SANITIZER_LINUX && defined(__x86_64__))
+typedef u64 OFF_T;
+#else
+typedef uptr OFF_T;
+#endif
+typedef u64 OFF64_T;
+
+#if (SANITIZER_WORDSIZE == 64) || SANITIZER_MAC
+typedef uptr operator_new_size_type;
+#else
+# if SANITIZER_OPENBSD || defined(__s390__) && !defined(__s390x__)
+// Special case: 31-bit s390 has unsigned long as size_t.
+typedef unsigned long operator_new_size_type;
+# else
+typedef u32 operator_new_size_type;
+# endif
+#endif
+
+typedef u64 tid_t;
+
+// ----------- ATTENTION -------------
+// This header should NOT include any other headers to avoid portability issues.
+
+// Common defs.
+#ifndef INLINE
+#define INLINE inline
+#endif
+#define INTERFACE_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
+#define SANITIZER_WEAK_DEFAULT_IMPL \
+ extern "C" SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE NOINLINE
+#define SANITIZER_WEAK_CXX_DEFAULT_IMPL \
+ extern "C++" SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE NOINLINE
+
+// Platform-specific defs.
+#if defined(_MSC_VER)
+# define ALWAYS_INLINE __forceinline
+// FIXME(timurrrr): do we need this on Windows?
+# define ALIAS(x)
+# define ALIGNED(x) __declspec(align(x))
+# define FORMAT(f, a)
+# define NOINLINE __declspec(noinline)
+# define NORETURN __declspec(noreturn)
+# define THREADLOCAL __declspec(thread)
+# define LIKELY(x) (x)
+# define UNLIKELY(x) (x)
+# define PREFETCH(x) /* _mm_prefetch(x, _MM_HINT_NTA) */ (void)0
+# define WARN_UNUSED_RESULT
+#else // _MSC_VER
+# define ALWAYS_INLINE inline __attribute__((always_inline))
+# define ALIAS(x) __attribute__((alias(x)))
+// Please only use the ALIGNED macro before the type.
+// Using ALIGNED after the variable declaration is not portable!
+# define ALIGNED(x) __attribute__((aligned(x)))
+# define FORMAT(f, a) __attribute__((format(printf, f, a)))
+# define NOINLINE __attribute__((noinline))
+# define NORETURN __attribute__((noreturn))
+# define THREADLOCAL __thread
+# define LIKELY(x) __builtin_expect(!!(x), 1)
+# define UNLIKELY(x) __builtin_expect(!!(x), 0)
+# if defined(__i386__) || defined(__x86_64__)
+// __builtin_prefetch(x) generates prefetchnt0 on x86
+# define PREFETCH(x) __asm__("prefetchnta (%0)" : : "r" (x))
+# else
+# define PREFETCH(x) __builtin_prefetch(x)
+# endif
+# define WARN_UNUSED_RESULT __attribute__((warn_unused_result))
+#endif // _MSC_VER
+
+#if !defined(_MSC_VER) || defined(__clang__)
+# define UNUSED __attribute__((unused))
+# define USED __attribute__((used))
+#else
+# define UNUSED
+# define USED
+#endif
+
+#if !defined(_MSC_VER) || defined(__clang__) || MSC_PREREQ(1900)
+# define NOEXCEPT noexcept
+#else
+# define NOEXCEPT throw()
+#endif
+
+// Unaligned versions of basic types.
+typedef ALIGNED(1) u16 uu16;
+typedef ALIGNED(1) u32 uu32;
+typedef ALIGNED(1) u64 uu64;
+typedef ALIGNED(1) s16 us16;
+typedef ALIGNED(1) s32 us32;
+typedef ALIGNED(1) s64 us64;
+
+#if SANITIZER_WINDOWS
+} // namespace __sanitizer
+typedef unsigned long DWORD; // NOLINT
+namespace __sanitizer {
+typedef DWORD thread_return_t;
+# define THREAD_CALLING_CONV __stdcall
+#else // _WIN32
+typedef void* thread_return_t;
+# define THREAD_CALLING_CONV
+#endif // _WIN32
+typedef thread_return_t (THREAD_CALLING_CONV *thread_callback_t)(void* arg);
+
+// NOTE: Functions below must be defined in each run-time.
+void NORETURN Die();
+
+void NORETURN CheckFailed(const char *file, int line, const char *cond,
+ u64 v1, u64 v2);
+
+// Check macro
+#define RAW_CHECK_MSG(expr, msg) do { \
+ if (UNLIKELY(!(expr))) { \
+ RawWrite(msg); \
+ Die(); \
+ } \
+} while (0)
+
+#define RAW_CHECK(expr) RAW_CHECK_MSG(expr, #expr)
+
+#define CHECK_IMPL(c1, op, c2) \
+ do { \
+ __sanitizer::u64 v1 = (__sanitizer::u64)(c1); \
+ __sanitizer::u64 v2 = (__sanitizer::u64)(c2); \
+ if (UNLIKELY(!(v1 op v2))) \
+ __sanitizer::CheckFailed(__FILE__, __LINE__, \
+ "(" #c1 ") " #op " (" #c2 ")", v1, v2); \
+ } while (false) \
+/**/
+
+#define CHECK(a) CHECK_IMPL((a), !=, 0)
+#define CHECK_EQ(a, b) CHECK_IMPL((a), ==, (b))
+#define CHECK_NE(a, b) CHECK_IMPL((a), !=, (b))
+#define CHECK_LT(a, b) CHECK_IMPL((a), <, (b))
+#define CHECK_LE(a, b) CHECK_IMPL((a), <=, (b))
+#define CHECK_GT(a, b) CHECK_IMPL((a), >, (b))
+#define CHECK_GE(a, b) CHECK_IMPL((a), >=, (b))
+
+#if SANITIZER_DEBUG
+#define DCHECK(a) CHECK(a)
+#define DCHECK_EQ(a, b) CHECK_EQ(a, b)
+#define DCHECK_NE(a, b) CHECK_NE(a, b)
+#define DCHECK_LT(a, b) CHECK_LT(a, b)
+#define DCHECK_LE(a, b) CHECK_LE(a, b)
+#define DCHECK_GT(a, b) CHECK_GT(a, b)
+#define DCHECK_GE(a, b) CHECK_GE(a, b)
+#else
+#define DCHECK(a)
+#define DCHECK_EQ(a, b)
+#define DCHECK_NE(a, b)
+#define DCHECK_LT(a, b)
+#define DCHECK_LE(a, b)
+#define DCHECK_GT(a, b)
+#define DCHECK_GE(a, b)
+#endif
+
+#define UNREACHABLE(msg) do { \
+ CHECK(0 && msg); \
+ Die(); \
+} while (0)
+
+#define UNIMPLEMENTED() UNREACHABLE("unimplemented")
+
+#define COMPILER_CHECK(pred) IMPL_COMPILER_ASSERT(pred, __LINE__)
+
+#define ARRAY_SIZE(a) (sizeof(a)/sizeof((a)[0]))
+
+#define IMPL_PASTE(a, b) a##b
+#define IMPL_COMPILER_ASSERT(pred, line) \
+ typedef char IMPL_PASTE(assertion_failed_##_, line)[2*(int)(pred)-1]
+
+// Limits for integral types. We have to redefine it in case we don't
+// have stdint.h (like in Visual Studio 9).
+#undef __INT64_C
+#undef __UINT64_C
+#if SANITIZER_WORDSIZE == 64
+# define __INT64_C(c) c ## L
+# define __UINT64_C(c) c ## UL
+#else
+# define __INT64_C(c) c ## LL
+# define __UINT64_C(c) c ## ULL
+#endif // SANITIZER_WORDSIZE == 64
+#undef INT32_MIN
+#define INT32_MIN (-2147483647-1)
+#undef INT32_MAX
+#define INT32_MAX (2147483647)
+#undef UINT32_MAX
+#define UINT32_MAX (4294967295U)
+#undef INT64_MIN
+#define INT64_MIN (-__INT64_C(9223372036854775807)-1)
+#undef INT64_MAX
+#define INT64_MAX (__INT64_C(9223372036854775807))
+#undef UINT64_MAX
+#define UINT64_MAX (__UINT64_C(18446744073709551615))
+#undef UINTPTR_MAX
+#if SANITIZER_WORDSIZE == 64
+# define UINTPTR_MAX (18446744073709551615UL)
+#else
+# define UINTPTR_MAX (4294967295U)
+#endif // SANITIZER_WORDSIZE == 64
+
+enum LinkerInitialized { LINKER_INITIALIZED = 0 };
+
+#if !defined(_MSC_VER) || defined(__clang__)
+#if SANITIZER_S390_31
+#define GET_CALLER_PC() \
+ (__sanitizer::uptr) __builtin_extract_return_addr(__builtin_return_address(0))
+#else
+#define GET_CALLER_PC() (__sanitizer::uptr) __builtin_return_address(0)
+#endif
+#define GET_CURRENT_FRAME() (__sanitizer::uptr) __builtin_frame_address(0)
+inline void Trap() {
+ __builtin_trap();
+}
+#else
+extern "C" void* _ReturnAddress(void);
+extern "C" void* _AddressOfReturnAddress(void);
+# pragma intrinsic(_ReturnAddress)
+# pragma intrinsic(_AddressOfReturnAddress)
+#define GET_CALLER_PC() (__sanitizer::uptr) _ReturnAddress()
+// CaptureStackBackTrace doesn't need to know BP on Windows.
+#define GET_CURRENT_FRAME() \
+ (((__sanitizer::uptr)_AddressOfReturnAddress()) + sizeof(__sanitizer::uptr))
+
+extern "C" void __ud2(void);
+# pragma intrinsic(__ud2)
+inline void Trap() {
+ __ud2();
+}
+#endif
+
+#define HANDLE_EINTR(res, f) \
+ { \
+ int rverrno; \
+ do { \
+ res = (f); \
+ } while (internal_iserror(res, &rverrno) && rverrno == EINTR); \
+ }
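+// E.g. (hypothetical call site):
+//   uptr res;
+//   HANDLE_EINTR(res, internal_write(fd, buf, len));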
+
+// Forces the compiler to generate a frame pointer in the function.
+#define ENABLE_FRAME_POINTER \
+ do { \
+ volatile __sanitizer::uptr enable_fp; \
+ enable_fp = GET_CURRENT_FRAME(); \
+ (void)enable_fp; \
+ } while (0)
+
+} // namespace __sanitizer
+
+namespace __asan { using namespace __sanitizer; } // NOLINT
+namespace __dsan { using namespace __sanitizer; } // NOLINT
+namespace __dfsan { using namespace __sanitizer; } // NOLINT
+namespace __lsan { using namespace __sanitizer; } // NOLINT
+namespace __msan { using namespace __sanitizer; } // NOLINT
+namespace __hwasan { using namespace __sanitizer; } // NOLINT
+namespace __tsan { using namespace __sanitizer; } // NOLINT
+namespace __scudo { using namespace __sanitizer; } // NOLINT
+namespace __ubsan { using namespace __sanitizer; } // NOLINT
+namespace __xray { using namespace __sanitizer; } // NOLINT
+namespace __interception { using namespace __sanitizer; } // NOLINT
+
+#endif // SANITIZER_DEFS_H
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_lfstack.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_lfstack.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_lfstack.h (revision 351984)
@@ -0,0 +1,72 @@
+//===-- sanitizer_lfstack.h -------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Lock-free stack.
+// Uses 32/17 bits as an ABA-counter on 32/64-bit platforms.
+// The memory passed to Push() must never be munmap'ed.
+// The type T must contain a "T *next" field.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_LFSTACK_H
+#define SANITIZER_LFSTACK_H
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_common.h"
+#include "sanitizer_atomic.h"
+
+namespace __sanitizer {
+
+template<typename T>
+struct LFStack {
+ void Clear() {
+ atomic_store(&head_, 0, memory_order_relaxed);
+ }
+
+ bool Empty() const {
+ return (atomic_load(&head_, memory_order_relaxed) & kPtrMask) == 0;
+ }
+
+ void Push(T *p) {
+ u64 cmp = atomic_load(&head_, memory_order_relaxed);
+ for (;;) {
+ u64 cnt = (cmp & kCounterMask) + kCounterInc;
+ u64 xch = (u64)(uptr)p | cnt;
+ p->next = (T*)(uptr)(cmp & kPtrMask);
+ if (atomic_compare_exchange_weak(&head_, &cmp, xch,
+ memory_order_release))
+ break;
+ }
+ }
+
+ T *Pop() {
+ u64 cmp = atomic_load(&head_, memory_order_acquire);
+ for (;;) {
+ T *cur = (T*)(uptr)(cmp & kPtrMask);
+ if (!cur)
+ return nullptr;
+ T *nxt = cur->next;
+ u64 cnt = (cmp & kCounterMask);
+ u64 xch = (u64)(uptr)nxt | cnt;
+ if (atomic_compare_exchange_weak(&head_, &cmp, xch,
+ memory_order_acquire))
+ return cur;
+ }
+ }
+
+ // private:
+ static const int kCounterBits = FIRST_32_SECOND_64(32, 17);
+ static const u64 kPtrMask = ((u64)-1) >> kCounterBits;
+ static const u64 kCounterMask = ~kPtrMask;
+ static const u64 kCounterInc = kPtrMask + 1;
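+  // head_ packs the node pointer into the low (64 - kCounterBits) bits and
+  // the ABA counter into the high kCounterBits bits; Push() bumps the counter
+  // so a Pop() racing with a pop/push of the same node fails its CAS.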
+
+ atomic_uint64_t head_;
+};
+} // namespace __sanitizer
+
+#endif // SANITIZER_LFSTACK_H
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_libc.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_libc.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_libc.cc (revision 351984)
@@ -0,0 +1,279 @@
+//===-- sanitizer_libc.cc -------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries. See sanitizer_libc.h for details.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_allocator_internal.h"
+#include "sanitizer_common.h"
+#include "sanitizer_libc.h"
+
+namespace __sanitizer {
+
+s64 internal_atoll(const char *nptr) {
+ return internal_simple_strtoll(nptr, nullptr, 10);
+}
+
+void *internal_memchr(const void *s, int c, uptr n) {
+ const char *t = (const char *)s;
+ for (uptr i = 0; i < n; ++i, ++t)
+ if (*t == c)
+ return reinterpret_cast<void *>(const_cast<char *>(t));
+ return nullptr;
+}
+
+void *internal_memrchr(const void *s, int c, uptr n) {
+ const char *t = (const char *)s;
+ void *res = nullptr;
+ for (uptr i = 0; i < n; ++i, ++t) {
+ if (*t == c) res = reinterpret_cast<void *>(const_cast<char *>(t));
+ }
+ return res;
+}
+
+int internal_memcmp(const void* s1, const void* s2, uptr n) {
+ const char *t1 = (const char *)s1;
+ const char *t2 = (const char *)s2;
+ for (uptr i = 0; i < n; ++i, ++t1, ++t2)
+ if (*t1 != *t2)
+ return *t1 < *t2 ? -1 : 1;
+ return 0;
+}
+
+void *internal_memcpy(void *dest, const void *src, uptr n) {
+ char *d = (char*)dest;
+ const char *s = (const char *)src;
+ for (uptr i = 0; i < n; ++i)
+ d[i] = s[i];
+ return dest;
+}
+
+void *internal_memmove(void *dest, const void *src, uptr n) {
+ char *d = (char*)dest;
+ const char *s = (const char *)src;
+ sptr i, signed_n = (sptr)n;
+ CHECK_GE(signed_n, 0);
+ if (d < s) {
+ for (i = 0; i < signed_n; ++i)
+ d[i] = s[i];
+ } else {
+ if (d > s && signed_n > 0)
+ for (i = signed_n - 1; i >= 0 ; --i) {
+ d[i] = s[i];
+ }
+ }
+ return dest;
+}
+
+void *internal_memset(void* s, int c, uptr n) {
+ // Optimize for the most performance-critical case:
+ if ((reinterpret_cast<uptr>(s) % 16) == 0 && (n % 16) == 0) {
+ u64 *p = reinterpret_cast<u64*>(s);
+ u64 *e = p + n / 8;
+ u64 v = c;
+ v |= v << 8;
+ v |= v << 16;
+ v |= v << 32;
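+    // v now holds the low byte of c replicated across all eight bytes.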
+ for (; p < e; p += 2)
+ p[0] = p[1] = v;
+ return s;
+ }
+ // The next line prevents Clang from making a call to memset() instead of the
+ // loop below.
+ // FIXME: building the runtime with -ffreestanding is a better idea. However
+ // there currently are linktime problems due to PR12396.
+ char volatile *t = (char*)s;
+ for (uptr i = 0; i < n; ++i, ++t) {
+ *t = c;
+ }
+ return s;
+}
+
+uptr internal_strcspn(const char *s, const char *reject) {
+ uptr i;
+ for (i = 0; s[i]; i++) {
+ if (internal_strchr(reject, s[i]))
+ return i;
+ }
+ return i;
+}
+
+char* internal_strdup(const char *s) {
+ uptr len = internal_strlen(s);
+ char *s2 = (char*)InternalAlloc(len + 1);
+ internal_memcpy(s2, s, len);
+ s2[len] = 0;
+ return s2;
+}
+
+int internal_strcmp(const char *s1, const char *s2) {
+ while (true) {
+ unsigned c1 = *s1;
+ unsigned c2 = *s2;
+ if (c1 != c2) return (c1 < c2) ? -1 : 1;
+ if (c1 == 0) break;
+ s1++;
+ s2++;
+ }
+ return 0;
+}
+
+int internal_strncmp(const char *s1, const char *s2, uptr n) {
+ for (uptr i = 0; i < n; i++) {
+ unsigned c1 = *s1;
+ unsigned c2 = *s2;
+ if (c1 != c2) return (c1 < c2) ? -1 : 1;
+ if (c1 == 0) break;
+ s1++;
+ s2++;
+ }
+ return 0;
+}
+
+char* internal_strchr(const char *s, int c) {
+ while (true) {
+ if (*s == (char)c)
+ return const_cast<char *>(s);
+ if (*s == 0)
+ return nullptr;
+ s++;
+ }
+}
+
+char *internal_strchrnul(const char *s, int c) {
+ char *res = internal_strchr(s, c);
+ if (!res)
+ res = const_cast<char *>(s) + internal_strlen(s);
+ return res;
+}
+
+char *internal_strrchr(const char *s, int c) {
+ const char *res = nullptr;
+ for (uptr i = 0; s[i]; i++) {
+ if (s[i] == c) res = s + i;
+ }
+ return const_cast<char *>(res);
+}
+
+uptr internal_strlen(const char *s) {
+ uptr i = 0;
+ while (s[i]) i++;
+ return i;
+}
+
+uptr internal_strlcat(char *dst, const char *src, uptr maxlen) {
+ const uptr srclen = internal_strlen(src);
+ const uptr dstlen = internal_strnlen(dst, maxlen);
+ if (dstlen == maxlen) return maxlen + srclen;
+ if (srclen < maxlen - dstlen) {
+ internal_memmove(dst + dstlen, src, srclen + 1);
+ } else {
+ internal_memmove(dst + dstlen, src, maxlen - dstlen - 1);
+ dst[maxlen - 1] = '\0';
+ }
+ return dstlen + srclen;
+}
+
+char *internal_strncat(char *dst, const char *src, uptr n) {
+ uptr len = internal_strlen(dst);
+ uptr i;
+ for (i = 0; i < n && src[i]; i++)
+ dst[len + i] = src[i];
+ dst[len + i] = 0;
+ return dst;
+}
+
+uptr internal_strlcpy(char *dst, const char *src, uptr maxlen) {
+ const uptr srclen = internal_strlen(src);
+ if (srclen < maxlen) {
+ internal_memmove(dst, src, srclen + 1);
+ } else if (maxlen != 0) {
+ internal_memmove(dst, src, maxlen - 1);
+ dst[maxlen - 1] = '\0';
+ }
+ return srclen;
+}
+
+char *internal_strncpy(char *dst, const char *src, uptr n) {
+ uptr i;
+ for (i = 0; i < n && src[i]; i++)
+ dst[i] = src[i];
+ internal_memset(dst + i, '\0', n - i);
+ return dst;
+}
+
+uptr internal_strnlen(const char *s, uptr maxlen) {
+ uptr i = 0;
+ while (i < maxlen && s[i]) i++;
+ return i;
+}
+
+char *internal_strstr(const char *haystack, const char *needle) {
+ // This is O(N^2), but we are not using it in hot places.
+ uptr len1 = internal_strlen(haystack);
+ uptr len2 = internal_strlen(needle);
+ if (len1 < len2) return nullptr;
+ for (uptr pos = 0; pos <= len1 - len2; pos++) {
+ if (internal_memcmp(haystack + pos, needle, len2) == 0)
+ return const_cast<char *>(haystack) + pos;
+ }
+ return nullptr;
+}
+
+s64 internal_simple_strtoll(const char *nptr, const char **endptr, int base) {
+ CHECK_EQ(base, 10);
+ while (IsSpace(*nptr)) nptr++;
+ int sgn = 1;
+ u64 res = 0;
+ bool have_digits = false;
+ char *old_nptr = const_cast<char *>(nptr);
+ if (*nptr == '+') {
+ sgn = 1;
+ nptr++;
+ } else if (*nptr == '-') {
+ sgn = -1;
+ nptr++;
+ }
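+  // Accumulate with saturation at UINT64_MAX rather than wrapping; the final
+  // clamp below maps the saturated value into [INT64_MIN, INT64_MAX].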
+ while (IsDigit(*nptr)) {
+ res = (res <= UINT64_MAX / 10) ? res * 10 : UINT64_MAX;
+ int digit = ((*nptr) - '0');
+ res = (res <= UINT64_MAX - digit) ? res + digit : UINT64_MAX;
+ have_digits = true;
+ nptr++;
+ }
+ if (endptr) {
+ *endptr = (have_digits) ? const_cast<char *>(nptr) : old_nptr;
+ }
+ if (sgn > 0) {
+ return (s64)(Min((u64)INT64_MAX, res));
+ } else {
+ return (res > INT64_MAX) ? INT64_MIN : ((s64)res * -1);
+ }
+}
+
+bool mem_is_zero(const char *beg, uptr size) {
+ CHECK_LE(size, 1ULL << FIRST_32_SECOND_64(30, 40)); // Sanity check.
+ const char *end = beg + size;
+ uptr *aligned_beg = (uptr *)RoundUpTo((uptr)beg, sizeof(uptr));
+ uptr *aligned_end = (uptr *)RoundDownTo((uptr)end, sizeof(uptr));
+ uptr all = 0;
+ // Prologue.
+ for (const char *mem = beg; mem < (char*)aligned_beg && mem < end; mem++)
+ all |= *mem;
+ // Aligned loop.
+ for (; aligned_beg < aligned_end; aligned_beg++)
+ all |= *aligned_beg;
+ // Epilogue.
+ if ((char*)aligned_end >= beg)
+ for (const char *mem = (char*)aligned_end; mem < end; mem++)
+ all |= *mem;
+ return all == 0;
+}
+
+} // namespace __sanitizer
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_libc.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_libc.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_libc.h (revision 351984)
@@ -0,0 +1,83 @@
+//===-- sanitizer_libc.h ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries.
+// These tools cannot use some of the libc functions directly because those
+// functions are intercepted. Instead, we implement a tiny subset of libc here.
+// FIXME: Some of the functions declared in this file are in fact POSIX, not
+// libc.
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_LIBC_H
+#define SANITIZER_LIBC_H
+
+// ----------- ATTENTION -------------
+// This header should NOT include any other headers from sanitizer runtime.
+#include "sanitizer_internal_defs.h"
+
+namespace __sanitizer {
+
+// internal_X() is a custom implementation of X() for use in RTL.
+
+// String functions
+s64 internal_atoll(const char *nptr);
+void *internal_memchr(const void *s, int c, uptr n);
+void *internal_memrchr(const void *s, int c, uptr n);
+int internal_memcmp(const void* s1, const void* s2, uptr n);
+void *internal_memcpy(void *dest, const void *src, uptr n);
+void *internal_memmove(void *dest, const void *src, uptr n);
+// Should not be used in performance-critical places.
+void *internal_memset(void *s, int c, uptr n);
+char* internal_strchr(const char *s, int c);
+char *internal_strchrnul(const char *s, int c);
+int internal_strcmp(const char *s1, const char *s2);
+uptr internal_strcspn(const char *s, const char *reject);
+char *internal_strdup(const char *s);
+uptr internal_strlen(const char *s);
+uptr internal_strlcat(char *dst, const char *src, uptr maxlen);
+char *internal_strncat(char *dst, const char *src, uptr n);
+int internal_strncmp(const char *s1, const char *s2, uptr n);
+uptr internal_strlcpy(char *dst, const char *src, uptr maxlen);
+char *internal_strncpy(char *dst, const char *src, uptr n);
+uptr internal_strnlen(const char *s, uptr maxlen);
+char *internal_strrchr(const char *s, int c);
+char *internal_strstr(const char *haystack, const char *needle);
+// Works only for base=10 and doesn't set errno.
+s64 internal_simple_strtoll(const char *nptr, const char **endptr, int base);
+int internal_snprintf(char *buffer, uptr length, const char *format, ...);
+
+// Return true if all bytes in [mem, mem+size) are zero.
+// Optimized for the case when the result is true.
+bool mem_is_zero(const char *mem, uptr size);
+
+// I/O
+// Define these as macros so we can use them in linker-initialized global
+// structs without dynamic initialization.
+#define kInvalidFd ((fd_t)-1)
+#define kStdinFd ((fd_t)0)
+#define kStdoutFd ((fd_t)1)
+#define kStderrFd ((fd_t)2)
+
+uptr internal_ftruncate(fd_t fd, uptr size);
+
+// OS
+void NORETURN internal__exit(int exitcode);
+unsigned int internal_sleep(unsigned int seconds);
+
+uptr internal_getpid();
+uptr internal_getppid();
+
+// Threading
+uptr internal_sched_yield();
+
+// Error handling
+bool internal_iserror(uptr retval, int *rverrno = nullptr);
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_LIBC_H
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_libignore.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_libignore.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_libignore.cc (revision 351984)
@@ -0,0 +1,129 @@
+//===-- sanitizer_libignore.cc --------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+
+#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_MAC || \
+ SANITIZER_NETBSD || SANITIZER_OPENBSD
+
+#include "sanitizer_libignore.h"
+#include "sanitizer_flags.h"
+#include "sanitizer_posix.h"
+#include "sanitizer_procmaps.h"
+
+namespace __sanitizer {
+
+LibIgnore::LibIgnore(LinkerInitialized) {
+}
+
+void LibIgnore::AddIgnoredLibrary(const char *name_templ) {
+ BlockingMutexLock lock(&mutex_);
+ if (count_ >= kMaxLibs) {
+ Report("%s: too many ignored libraries (max: %d)\n", SanitizerToolName,
+ kMaxLibs);
+ Die();
+ }
+ Lib *lib = &libs_[count_++];
+ lib->templ = internal_strdup(name_templ);
+ lib->name = nullptr;
+ lib->real_name = nullptr;
+ lib->loaded = false;
+}
+
+void LibIgnore::OnLibraryLoaded(const char *name) {
+ BlockingMutexLock lock(&mutex_);
+ // Try to match suppressions with symlink target.
+ InternalScopedString buf(kMaxPathLength);
+ if (name && internal_readlink(name, buf.data(), buf.size() - 1) > 0 &&
+ buf[0]) {
+ for (uptr i = 0; i < count_; i++) {
+ Lib *lib = &libs_[i];
+ if (!lib->loaded && (!lib->real_name) &&
+ TemplateMatch(lib->templ, name))
+ lib->real_name = internal_strdup(buf.data());
+ }
+ }
+
+ // Scan suppressions list and find newly loaded and unloaded libraries.
+ ListOfModules modules;
+ modules.init();
+ for (uptr i = 0; i < count_; i++) {
+ Lib *lib = &libs_[i];
+ bool loaded = false;
+ for (const auto &mod : modules) {
+ for (const auto &range : mod.ranges()) {
+ if (!range.executable)
+ continue;
+ if (!TemplateMatch(lib->templ, mod.full_name()) &&
+ !(lib->real_name &&
+ internal_strcmp(lib->real_name, mod.full_name()) == 0))
+ continue;
+ if (loaded) {
+ Report("%s: called_from_lib suppression '%s' is matched against"
+ " 2 libraries: '%s' and '%s'\n",
+ SanitizerToolName, lib->templ, lib->name, mod.full_name());
+ Die();
+ }
+ loaded = true;
+ if (lib->loaded)
+ continue;
+ VReport(1,
+ "Matched called_from_lib suppression '%s' against library"
+ " '%s'\n",
+ lib->templ, mod.full_name());
+ lib->loaded = true;
+ lib->name = internal_strdup(mod.full_name());
+ const uptr idx =
+ atomic_load(&ignored_ranges_count_, memory_order_relaxed);
+ CHECK_LT(idx, ARRAY_SIZE(ignored_code_ranges_));
+ ignored_code_ranges_[idx].begin = range.beg;
+ ignored_code_ranges_[idx].end = range.end;
+ atomic_store(&ignored_ranges_count_, idx + 1, memory_order_release);
+ break;
+ }
+ }
+ if (lib->loaded && !loaded) {
+ Report("%s: library '%s' that was matched against called_from_lib"
+ " suppression '%s' is unloaded\n",
+ SanitizerToolName, lib->name, lib->templ);
+ Die();
+ }
+ }
+
+ // Track instrumented ranges.
+ if (track_instrumented_libs_) {
+ for (const auto &mod : modules) {
+ if (!mod.instrumented())
+ continue;
+ for (const auto &range : mod.ranges()) {
+ if (!range.executable)
+ continue;
+ if (IsPcInstrumented(range.beg) && IsPcInstrumented(range.end - 1))
+ continue;
+ VReport(1, "Adding instrumented range %p-%p from library '%s'\n",
+ range.beg, range.end, mod.full_name());
+ const uptr idx =
+ atomic_load(&instrumented_ranges_count_, memory_order_relaxed);
+ CHECK_LT(idx, ARRAY_SIZE(instrumented_code_ranges_));
+ instrumented_code_ranges_[idx].begin = range.beg;
+ instrumented_code_ranges_[idx].end = range.end;
+ atomic_store(&instrumented_ranges_count_, idx + 1,
+ memory_order_release);
+ }
+ }
+ }
+}
+
+void LibIgnore::OnLibraryUnloaded() {
+ OnLibraryLoaded(nullptr);
+}
+
+} // namespace __sanitizer
+
+#endif  // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_MAC ||
+        // SANITIZER_NETBSD || SANITIZER_OPENBSD
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_libignore.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_libignore.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_libignore.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_libignore.h (revision 351984)
@@ -0,0 +1,115 @@
+//===-- sanitizer_libignore.h -----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// LibIgnore allows ignoring all interceptors called from a particular set
+// of dynamic libraries. LibIgnore can be initialized with several templates
+// of names of libraries to be ignored. It finds code ranges for the libraries
+// and checks whether the provided PC value belongs to those code ranges.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_LIBIGNORE_H
+#define SANITIZER_LIBIGNORE_H
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_common.h"
+#include "sanitizer_atomic.h"
+#include "sanitizer_mutex.h"
+
+namespace __sanitizer {
+
+class LibIgnore {
+ public:
+ explicit LibIgnore(LinkerInitialized);
+
+ // Must be called during initialization.
+ void AddIgnoredLibrary(const char *name_templ);
+ void IgnoreNoninstrumentedModules(bool enable) {
+ track_instrumented_libs_ = enable;
+ }
+
+ // Must be called after a new dynamic library is loaded.
+ void OnLibraryLoaded(const char *name);
+
+ // Must be called after a dynamic library is unloaded.
+ void OnLibraryUnloaded();
+
+  // Checks whether the provided PC belongs to one of the ignored libraries,
+  // or whether the PC should be ignored because it belongs to a
+  // non-instrumented module (when ignore_noninstrumented_modules=1). Also
+  // sets *pc_in_ignored_lib to true if the PC is in an ignored library,
+  // false otherwise.
+ bool IsIgnored(uptr pc, bool *pc_in_ignored_lib) const;
+
+ // Checks whether the provided PC belongs to an instrumented module.
+ bool IsPcInstrumented(uptr pc) const;
+
+ private:
+ struct Lib {
+ char *templ;
+ char *name;
+ char *real_name; // target of symlink
+ bool loaded;
+ };
+
+ struct LibCodeRange {
+ uptr begin;
+ uptr end;
+ };
+
+ inline bool IsInRange(uptr pc, const LibCodeRange &range) const {
+ return (pc >= range.begin && pc < range.end);
+ }
+
+ static const uptr kMaxIgnoredRanges = 128;
+ static const uptr kMaxInstrumentedRanges = 1024;
+ static const uptr kMaxLibs = 1024;
+
+ // Hot part:
+ atomic_uintptr_t ignored_ranges_count_;
+ LibCodeRange ignored_code_ranges_[kMaxIgnoredRanges];
+
+ atomic_uintptr_t instrumented_ranges_count_;
+ LibCodeRange instrumented_code_ranges_[kMaxInstrumentedRanges];
+
+ // Cold part:
+ BlockingMutex mutex_;
+ uptr count_;
+ Lib libs_[kMaxLibs];
+ bool track_instrumented_libs_;
+
+ // Disallow copying of LibIgnore objects.
+ LibIgnore(const LibIgnore&); // not implemented
+ void operator = (const LibIgnore&); // not implemented
+};
+
+inline bool LibIgnore::IsIgnored(uptr pc, bool *pc_in_ignored_lib) const {
+ const uptr n = atomic_load(&ignored_ranges_count_, memory_order_acquire);
+ for (uptr i = 0; i < n; i++) {
+ if (IsInRange(pc, ignored_code_ranges_[i])) {
+ *pc_in_ignored_lib = true;
+ return true;
+ }
+ }
+ *pc_in_ignored_lib = false;
+ if (track_instrumented_libs_ && !IsPcInstrumented(pc))
+ return true;
+ return false;
+}
+
+inline bool LibIgnore::IsPcInstrumented(uptr pc) const {
+ const uptr n = atomic_load(&instrumented_ranges_count_, memory_order_acquire);
+ for (uptr i = 0; i < n; i++) {
+ if (IsInRange(pc, instrumented_code_ranges_[i]))
+ return true;
+ }
+ return false;
+}
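+
+// Illustrative call site (editor sketch; the libignore() accessor and the
+// GET_CALLER_PC()/REAL() interceptor helpers are assumptions here, named
+// after the usual tool-side conventions):
+//
+//   bool pc_in_ignored_lib;
+//   if (libignore()->IsIgnored(GET_CALLER_PC(), &pc_in_ignored_lib)) {
+//     // Skip the tool's bookkeeping and call straight through.
+//     return REAL(memcpy)(dst, src, n);
+//   }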
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_LIBIGNORE_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_libignore.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_linux.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_linux.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_linux.h (revision 351984)
@@ -0,0 +1,160 @@
+//===-- sanitizer_linux.h ---------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Linux-specific syscall wrappers and classes.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_LINUX_H
+#define SANITIZER_LINUX_H
+
+#include "sanitizer_platform.h"
+#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
+ SANITIZER_OPENBSD || SANITIZER_SOLARIS
+#include "sanitizer_common.h"
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_platform_limits_freebsd.h"
+#include "sanitizer_platform_limits_netbsd.h"
+#include "sanitizer_platform_limits_openbsd.h"
+#include "sanitizer_platform_limits_posix.h"
+#include "sanitizer_platform_limits_solaris.h"
+#include "sanitizer_posix.h"
+
+struct link_map; // Opaque type returned by dlopen().
+
+namespace __sanitizer {
+// Dirent structure for getdents(). Note that this structure is different from
+// the one in <dirent.h>, which is used by readdir().
+struct linux_dirent;
+
+struct ProcSelfMapsBuff {
+ char *data;
+ uptr mmaped_size;
+ uptr len;
+};
+
+struct MemoryMappingLayoutData {
+ ProcSelfMapsBuff proc_self_maps;
+ const char *current;
+};
+
+void ReadProcMaps(ProcSelfMapsBuff *proc_maps);
+
+// Syscall wrappers.
+uptr internal_getdents(fd_t fd, struct linux_dirent *dirp, unsigned int count);
+uptr internal_sigaltstack(const void* ss, void* oss);
+uptr internal_sigprocmask(int how, __sanitizer_sigset_t *set,
+ __sanitizer_sigset_t *oldset);
+uptr internal_clock_gettime(__sanitizer_clockid_t clk_id, void *tp);
+
+// Linux-only syscalls.
+#if SANITIZER_LINUX
+uptr internal_prctl(int option, uptr arg2, uptr arg3, uptr arg4, uptr arg5);
+// Used only by sanitizer_stoptheworld. Signal handlers that are actually used
+// (like the process-wide error reporting SEGV handler) must use
+// internal_sigaction instead.
+int internal_sigaction_norestorer(int signum, const void *act, void *oldact);
+void internal_sigdelset(__sanitizer_sigset_t *set, int signum);
+#if defined(__x86_64__) || defined(__mips__) || defined(__aarch64__) \
+ || defined(__powerpc64__) || defined(__s390__) || defined(__i386__) \
+ || defined(__arm__)
+uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
+ int *parent_tidptr, void *newtls, int *child_tidptr);
+#endif
+#elif SANITIZER_FREEBSD
+void internal_sigdelset(__sanitizer_sigset_t *set, int signum);
+#elif SANITIZER_NETBSD
+void internal_sigdelset(__sanitizer_sigset_t *set, int signum);
+uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg);
+#endif // SANITIZER_LINUX
+
+// This class reads thread IDs from /proc/<pid>/task using only syscalls.
+class ThreadLister {
+ public:
+ explicit ThreadLister(pid_t pid);
+ ~ThreadLister();
+ enum Result {
+ Error,
+ Incomplete,
+ Ok,
+ };
+ Result ListThreads(InternalMmapVector<tid_t> *threads);
+
+ private:
+ bool IsAlive(int tid);
+
+ pid_t pid_;
+ int descriptor_ = -1;
+ InternalMmapVector<char> buffer_;
+};
+
+// Exposed for testing.
+uptr ThreadDescriptorSize();
+uptr ThreadSelf();
+uptr ThreadSelfOffset();
+
+// Matches a library's file name against a base name (stripping path and version
+// information).
+bool LibraryNameIs(const char *full_name, const char *base_name);
+
+// Call cb for each region mapped by map.
+void ForEachMappedRegion(link_map *map, void (*cb)(const void *, uptr));
+
+// Releases memory pages entirely within the [beg, end] address range.
+// The pages no longer count toward RSS; reads are guaranteed to return 0.
+// Requires (but does not verify!) that pages are MAP_PRIVATE.
+INLINE void ReleaseMemoryPagesToOSAndZeroFill(uptr beg, uptr end) {
+ // man madvise on Linux promises zero-fill for anonymous private pages.
+ // Testing shows the same behaviour for private (but not anonymous) mappings
+ // of shm_open() files, as long as the underlying file is untouched.
+ CHECK(SANITIZER_LINUX);
+ ReleaseMemoryPagesToOS(beg, end);
+}
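+
+// Editor's note: a standalone approximation under the stated Linux
+// assumption (illustrative only; ReleaseMemoryPagesToOS() is the real entry
+// point, and GetPageSizeCached()/RoundUpTo()/RoundDownTo() come from
+// sanitizer_common):
+//
+//   #include <sys/mman.h>
+//   static void ReleaseRangeSketch(uptr beg, uptr end) {
+//     uptr page = GetPageSizeCached();
+//     uptr a = RoundUpTo(beg, page);
+//     uptr b = RoundDownTo(end, page);
+//     if (a < b)  // madvise() promises zero-fill for anonymous private pages
+//       madvise((void *)a, b - a, MADV_DONTNEED);
+//   }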
+
+#if SANITIZER_ANDROID
+
+#if defined(__aarch64__)
+# define __get_tls() \
+ ({ void** __v; __asm__("mrs %0, tpidr_el0" : "=r"(__v)); __v; })
+#elif defined(__arm__)
+# define __get_tls() \
+ ({ void** __v; __asm__("mrc p15, 0, %0, c13, c0, 3" : "=r"(__v)); __v; })
+#elif defined(__mips__)
+// On mips32r1, this goes via a kernel illegal instruction trap that's
+// optimized for v1.
+# define __get_tls() \
+ ({ register void** __v asm("v1"); \
+ __asm__(".set push\n" \
+ ".set mips32r2\n" \
+ "rdhwr %0,$29\n" \
+ ".set pop\n" : "=r"(__v)); \
+ __v; })
+#elif defined(__i386__)
+# define __get_tls() \
+ ({ void** __v; __asm__("movl %%gs:0, %0" : "=r"(__v)); __v; })
+#elif defined(__x86_64__)
+# define __get_tls() \
+ ({ void** __v; __asm__("mov %%fs:0, %0" : "=r"(__v)); __v; })
+#else
+#error "Unsupported architecture."
+#endif
+
+// The Android Bionic team has allocated a TLS slot for sanitizers starting
+// with Q, given that Android currently doesn't support ELF TLS. It is used to
+// store sanitizer thread-specific data.
+static const int TLS_SLOT_SANITIZER = 6;
+
+ALWAYS_INLINE uptr *get_android_tls_ptr() {
+ return reinterpret_cast<uptr *>(&__get_tls()[TLS_SLOT_SANITIZER]);
+}
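+
+// Illustrative usage (editor sketch; MyThreadState is a hypothetical type):
+//
+//   // On thread start, stash the tool's per-thread state in the slot:
+//   *get_android_tls_ptr() = reinterpret_cast<uptr>(my_state);
+//   // On every fast-path access, read it back:
+//   MyThreadState *ts =
+//       reinterpret_cast<MyThreadState *>(*get_android_tls_ptr());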
+
+#endif // SANITIZER_ANDROID
+
+} // namespace __sanitizer
+
+#endif  // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD ||
+        // SANITIZER_OPENBSD || SANITIZER_SOLARIS
+#endif // SANITIZER_LINUX_H
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_linux_libcdep.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_linux_libcdep.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_linux_libcdep.cc (revision 351984)
@@ -0,0 +1,850 @@
+//===-- sanitizer_linux_libcdep.cc ----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries and implements linux-specific functions from
+// sanitizer_libc.h.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+
+#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
+ SANITIZER_OPENBSD || SANITIZER_SOLARIS
+
+#include "sanitizer_allocator_internal.h"
+#include "sanitizer_atomic.h"
+#include "sanitizer_common.h"
+#include "sanitizer_file.h"
+#include "sanitizer_flags.h"
+#include "sanitizer_freebsd.h"
+#include "sanitizer_getauxval.h"
+#include "sanitizer_linux.h"
+#include "sanitizer_placement_new.h"
+#include "sanitizer_procmaps.h"
+
+#include <dlfcn.h> // for dlsym()
+#include <link.h>
+#include <pthread.h>
+#include <signal.h>
+#include <sys/resource.h>
+#include <syslog.h>
+
+#if SANITIZER_FREEBSD
+#include <pthread_np.h>
+#include <osreldate.h>
+#include <sys/sysctl.h>
+#define pthread_getattr_np pthread_attr_get_np
+#endif
+
+#if SANITIZER_OPENBSD
+#include <pthread_np.h>
+#include <sys/sysctl.h>
+#endif
+
+#if SANITIZER_NETBSD
+#include <sys/sysctl.h>
+#include <sys/tls.h>
+#endif
+
+#if SANITIZER_SOLARIS
+#include <stdlib.h>
+#include <thread.h>
+#endif
+
+#if SANITIZER_ANDROID
+#include <android/api-level.h>
+#if !defined(CPU_COUNT) && !defined(__aarch64__)
+#include <dirent.h>
+#include <fcntl.h>
+struct __sanitizer::linux_dirent {
+ long d_ino;
+ off_t d_off;
+ unsigned short d_reclen;
+ char d_name[];
+};
+#endif
+#endif
+
+#if !SANITIZER_ANDROID
+#include <elf.h>
+#include <unistd.h>
+#endif
+
+namespace __sanitizer {
+
+SANITIZER_WEAK_ATTRIBUTE int
+real_sigaction(int signum, const void *act, void *oldact);
+
+int internal_sigaction(int signum, const void *act, void *oldact) {
+#if !SANITIZER_GO
+ if (&real_sigaction)
+ return real_sigaction(signum, act, oldact);
+#endif
+ return sigaction(signum, (const struct sigaction *)act,
+ (struct sigaction *)oldact);
+}
+
+void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
+ uptr *stack_bottom) {
+ CHECK(stack_top);
+ CHECK(stack_bottom);
+ if (at_initialization) {
+ // This is the main thread. Libpthread may not be initialized yet.
+ struct rlimit rl;
+ CHECK_EQ(getrlimit(RLIMIT_STACK, &rl), 0);
+
+ // Find the mapping that contains a stack variable.
+ MemoryMappingLayout proc_maps(/*cache_enabled*/true);
+ if (proc_maps.Error()) {
+ *stack_top = *stack_bottom = 0;
+ return;
+ }
+ MemoryMappedSegment segment;
+ uptr prev_end = 0;
+ while (proc_maps.Next(&segment)) {
+ if ((uptr)&rl < segment.end) break;
+ prev_end = segment.end;
+ }
+ CHECK((uptr)&rl >= segment.start && (uptr)&rl < segment.end);
+
+ // Get stacksize from rlimit, but clip it so that it does not overlap
+ // with other mappings.
+ uptr stacksize = rl.rlim_cur;
+ if (stacksize > segment.end - prev_end) stacksize = segment.end - prev_end;
+ // When running with unlimited stack size, we still want to set some limit.
+ // The unlimited stack size is caused by 'ulimit -s unlimited'.
+ // Also, for some reason, GNU make spawns subprocesses with unlimited stack.
+ if (stacksize > kMaxThreadStackSize)
+ stacksize = kMaxThreadStackSize;
+ *stack_top = segment.end;
+ *stack_bottom = segment.end - stacksize;
+ return;
+ }
+ uptr stacksize = 0;
+ void *stackaddr = nullptr;
+#if SANITIZER_SOLARIS
+ stack_t ss;
+ CHECK_EQ(thr_stksegment(&ss), 0);
+ stacksize = ss.ss_size;
+ stackaddr = (char *)ss.ss_sp - stacksize;
+#elif SANITIZER_OPENBSD
+ stack_t sattr;
+ CHECK_EQ(pthread_stackseg_np(pthread_self(), &sattr), 0);
+ stackaddr = sattr.ss_sp;
+ stacksize = sattr.ss_size;
+#else // !SANITIZER_SOLARIS
+ pthread_attr_t attr;
+ pthread_attr_init(&attr);
+ CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
+ my_pthread_attr_getstack(&attr, &stackaddr, &stacksize);
+ pthread_attr_destroy(&attr);
+#endif // SANITIZER_SOLARIS
+
+ *stack_top = (uptr)stackaddr + stacksize;
+ *stack_bottom = (uptr)stackaddr;
+}
+
+#if !SANITIZER_GO
+bool SetEnv(const char *name, const char *value) {
+ void *f = dlsym(RTLD_NEXT, "setenv");
+ if (!f)
+ return false;
+ typedef int(*setenv_ft)(const char *name, const char *value, int overwrite);
+ setenv_ft setenv_f;
+ CHECK_EQ(sizeof(setenv_f), sizeof(f));
+ internal_memcpy(&setenv_f, &f, sizeof(f));
+ return setenv_f(name, value, 1) == 0;
+}
+#endif
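+
+// Editor's note: the internal_memcpy() round-trip above is the portable way
+// to turn dlsym()'s void* into a function pointer without relying on
+// undefined casts. A generic helper might look like this (sketch only):
+//
+//   template <typename Fn> static Fn DlsymAs(const char *name) {
+//     void *p = dlsym(RTLD_NEXT, name);
+//     Fn fn = nullptr;
+//     if (p) internal_memcpy(&fn, &p, sizeof(p));
+//     return fn;
+//   }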
+
+__attribute__((unused)) static bool GetLibcVersion(int *major, int *minor,
+ int *patch) {
+#ifdef _CS_GNU_LIBC_VERSION
+ char buf[64];
+ uptr len = confstr(_CS_GNU_LIBC_VERSION, buf, sizeof(buf));
+ if (len >= sizeof(buf))
+ return false;
+ buf[len] = 0;
+ static const char kGLibC[] = "glibc ";
+ if (internal_strncmp(buf, kGLibC, sizeof(kGLibC) - 1) != 0)
+ return false;
+ const char *p = buf + sizeof(kGLibC) - 1;
+ *major = internal_simple_strtoll(p, &p, 10);
+ *minor = (*p == '.') ? internal_simple_strtoll(p + 1, &p, 10) : 0;
+ *patch = (*p == '.') ? internal_simple_strtoll(p + 1, &p, 10) : 0;
+ return true;
+#else
+ return false;
+#endif
+}
+
+#if !SANITIZER_FREEBSD && !SANITIZER_ANDROID && !SANITIZER_GO && \
+ !SANITIZER_NETBSD && !SANITIZER_OPENBSD && !SANITIZER_SOLARIS
+static uptr g_tls_size;
+
+#ifdef __i386__
+# ifndef __GLIBC_PREREQ
+# define CHECK_GET_TLS_STATIC_INFO_VERSION 1
+# else
+# define CHECK_GET_TLS_STATIC_INFO_VERSION (!__GLIBC_PREREQ(2, 27))
+# endif
+#else
+# define CHECK_GET_TLS_STATIC_INFO_VERSION 0
+#endif
+
+#if CHECK_GET_TLS_STATIC_INFO_VERSION
+# define DL_INTERNAL_FUNCTION __attribute__((regparm(3), stdcall))
+#else
+# define DL_INTERNAL_FUNCTION
+#endif
+
+namespace {
+struct GetTlsStaticInfoCall {
+ typedef void (*get_tls_func)(size_t*, size_t*);
+};
+struct GetTlsStaticInfoRegparmCall {
+ typedef void (*get_tls_func)(size_t*, size_t*) DL_INTERNAL_FUNCTION;
+};
+
+template <typename T>
+void CallGetTls(void* ptr, size_t* size, size_t* align) {
+ typename T::get_tls_func get_tls;
+ CHECK_EQ(sizeof(get_tls), sizeof(ptr));
+ internal_memcpy(&get_tls, &ptr, sizeof(ptr));
+ CHECK_NE(get_tls, 0);
+ get_tls(size, align);
+}
+
+bool CmpLibcVersion(int major, int minor, int patch) {
+ int ma;
+ int mi;
+ int pa;
+ if (!GetLibcVersion(&ma, &mi, &pa))
+ return false;
+ if (ma > major)
+ return true;
+ if (ma < major)
+ return false;
+ if (mi > minor)
+ return true;
+ if (mi < minor)
+ return false;
+ return pa >= patch;
+}
+
+} // namespace
+
+void InitTlsSize() {
+  // All currently supported platforms have a 16-byte stack alignment.
+ const size_t kStackAlign = 16;
+ void *get_tls_static_info_ptr = dlsym(RTLD_NEXT, "_dl_get_tls_static_info");
+ size_t tls_size = 0;
+ size_t tls_align = 0;
+  // On i?86, _dl_get_tls_static_info used to be internal_function, i.e.
+  // __attribute__((regparm(3), stdcall)), before glibc 2.27; it is a normal
+  // function in 2.27 and later.
+ if (CHECK_GET_TLS_STATIC_INFO_VERSION && !CmpLibcVersion(2, 27, 0))
+ CallGetTls<GetTlsStaticInfoRegparmCall>(get_tls_static_info_ptr,
+ &tls_size, &tls_align);
+ else
+ CallGetTls<GetTlsStaticInfoCall>(get_tls_static_info_ptr,
+ &tls_size, &tls_align);
+ if (tls_align < kStackAlign)
+ tls_align = kStackAlign;
+ g_tls_size = RoundUpTo(tls_size, tls_align);
+}
+#else
+void InitTlsSize() { }
+#endif  // !SANITIZER_FREEBSD && !SANITIZER_ANDROID && !SANITIZER_GO &&
+        // !SANITIZER_NETBSD && !SANITIZER_OPENBSD && !SANITIZER_SOLARIS
+
+#if (defined(__x86_64__) || defined(__i386__) || defined(__mips__) || \
+ defined(__aarch64__) || defined(__powerpc64__) || defined(__s390__) || \
+ defined(__arm__)) && \
+ SANITIZER_LINUX && !SANITIZER_ANDROID
+// sizeof(struct pthread) from glibc.
+static atomic_uintptr_t thread_descriptor_size;
+
+uptr ThreadDescriptorSize() {
+ uptr val = atomic_load_relaxed(&thread_descriptor_size);
+ if (val)
+ return val;
+#if defined(__x86_64__) || defined(__i386__) || defined(__arm__)
+ int major;
+ int minor;
+ int patch;
+ if (GetLibcVersion(&major, &minor, &patch) && major == 2) {
+ /* sizeof(struct pthread) values from various glibc versions. */
+ if (SANITIZER_X32)
+ val = 1728; // Assume only one particular version for x32.
+ // For ARM sizeof(struct pthread) changed in Glibc 2.23.
+ else if (SANITIZER_ARM)
+ val = minor <= 22 ? 1120 : 1216;
+ else if (minor <= 3)
+ val = FIRST_32_SECOND_64(1104, 1696);
+ else if (minor == 4)
+ val = FIRST_32_SECOND_64(1120, 1728);
+ else if (minor == 5)
+ val = FIRST_32_SECOND_64(1136, 1728);
+ else if (minor <= 9)
+ val = FIRST_32_SECOND_64(1136, 1712);
+ else if (minor == 10)
+ val = FIRST_32_SECOND_64(1168, 1776);
+ else if (minor == 11 || (minor == 12 && patch == 1))
+ val = FIRST_32_SECOND_64(1168, 2288);
+ else if (minor <= 14)
+ val = FIRST_32_SECOND_64(1168, 2304);
+ else
+ val = FIRST_32_SECOND_64(1216, 2304);
+ }
+#elif defined(__mips__)
+ // TODO(sagarthakur): add more values as per different glibc versions.
+ val = FIRST_32_SECOND_64(1152, 1776);
+#elif defined(__aarch64__)
+  // sizeof(struct pthread) is the same for glibc 2.17 through 2.22.
+ val = 1776;
+#elif defined(__powerpc64__)
+ val = 1776; // from glibc.ppc64le 2.20-8.fc21
+#elif defined(__s390__)
+ val = FIRST_32_SECOND_64(1152, 1776); // valid for glibc 2.22
+#endif
+ if (val)
+ atomic_store_relaxed(&thread_descriptor_size, val);
+ return val;
+}
+
+// The offset at which pointer to self is located in the thread descriptor.
+const uptr kThreadSelfOffset = FIRST_32_SECOND_64(8, 16);
+
+uptr ThreadSelfOffset() {
+ return kThreadSelfOffset;
+}
+
+#if defined(__mips__) || defined(__powerpc64__)
+// TlsPreTcbSize includes the size of struct pthread_descr and the size of
+// the tcb head structure. It lies before the static TLS blocks.
+static uptr TlsPreTcbSize() {
+# if defined(__mips__)
+ const uptr kTcbHead = 16; // sizeof (tcbhead_t)
+# elif defined(__powerpc64__)
+ const uptr kTcbHead = 88; // sizeof (tcbhead_t)
+# endif
+ const uptr kTlsAlign = 16;
+ const uptr kTlsPreTcbSize =
+ RoundUpTo(ThreadDescriptorSize() + kTcbHead, kTlsAlign);
+ return kTlsPreTcbSize;
+}
+#endif
+
+uptr ThreadSelf() {
+ uptr descr_addr;
+# if defined(__i386__)
+ asm("mov %%gs:%c1,%0" : "=r"(descr_addr) : "i"(kThreadSelfOffset));
+# elif defined(__x86_64__)
+ asm("mov %%fs:%c1,%0" : "=r"(descr_addr) : "i"(kThreadSelfOffset));
+# elif defined(__mips__)
+ // MIPS uses TLS variant I. The thread pointer (in hardware register $29)
+ // points to the end of the TCB + 0x7000. The pthread_descr structure is
+ // immediately in front of the TCB. TlsPreTcbSize() includes the size of the
+ // TCB and the size of pthread_descr.
+ const uptr kTlsTcbOffset = 0x7000;
+ uptr thread_pointer;
+ asm volatile(".set push;\
+ .set mips64r2;\
+ rdhwr %0,$29;\
+ .set pop" : "=r" (thread_pointer));
+ descr_addr = thread_pointer - kTlsTcbOffset - TlsPreTcbSize();
+# elif defined(__aarch64__) || defined(__arm__)
+ descr_addr = reinterpret_cast<uptr>(__builtin_thread_pointer()) -
+ ThreadDescriptorSize();
+# elif defined(__s390__)
+ descr_addr = reinterpret_cast<uptr>(__builtin_thread_pointer());
+# elif defined(__powerpc64__)
+ // PPC64LE uses TLS variant I. The thread pointer (in GPR 13)
+ // points to the end of the TCB + 0x7000. The pthread_descr structure is
+ // immediately in front of the TCB. TlsPreTcbSize() includes the size of the
+ // TCB and the size of pthread_descr.
+ const uptr kTlsTcbOffset = 0x7000;
+ uptr thread_pointer;
+ asm("addi %0,13,%1" : "=r"(thread_pointer) : "I"(-kTlsTcbOffset));
+ descr_addr = thread_pointer - TlsPreTcbSize();
+# else
+# error "unsupported CPU arch"
+# endif
+ return descr_addr;
+}
+#endif  // (x86_64 || i386 || mips || aarch64 || powerpc64 || s390 || arm) &&
+        // SANITIZER_LINUX && !SANITIZER_ANDROID
+
+#if SANITIZER_FREEBSD
+static void **ThreadSelfSegbase() {
+ void **segbase = 0;
+# if defined(__i386__)
+ // sysarch(I386_GET_GSBASE, segbase);
+ __asm __volatile("mov %%gs:0, %0" : "=r" (segbase));
+# elif defined(__x86_64__)
+ // sysarch(AMD64_GET_FSBASE, segbase);
+ __asm __volatile("movq %%fs:0, %0" : "=r" (segbase));
+# else
+# error "unsupported CPU arch"
+# endif
+ return segbase;
+}
+
+uptr ThreadSelf() {
+ return (uptr)ThreadSelfSegbase()[2];
+}
+#endif // SANITIZER_FREEBSD
+
+#if SANITIZER_NETBSD
+static struct tls_tcb *ThreadSelfTlsTcb() {
+  struct tls_tcb *tcb;
+# ifdef __HAVE___LWP_GETTCB_FAST
+ tcb = (struct tls_tcb *)__lwp_gettcb_fast();
+# elif defined(__HAVE___LWP_GETPRIVATE_FAST)
+ tcb = (struct tls_tcb *)__lwp_getprivate_fast();
+# endif
+ return tcb;
+}
+
+uptr ThreadSelf() {
+ return (uptr)ThreadSelfTlsTcb()->tcb_pthread;
+}
+
+int GetSizeFromHdr(struct dl_phdr_info *info, size_t size, void *data) {
+ const Elf_Phdr *hdr = info->dlpi_phdr;
+ const Elf_Phdr *last_hdr = hdr + info->dlpi_phnum;
+
+ for (; hdr != last_hdr; ++hdr) {
+ if (hdr->p_type == PT_TLS && info->dlpi_tls_modid == 1) {
+ *(uptr*)data = hdr->p_memsz;
+ break;
+ }
+ }
+ return 0;
+}
+#endif // SANITIZER_NETBSD
+
+#if !SANITIZER_GO
+static void GetTls(uptr *addr, uptr *size) {
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+# if defined(__x86_64__) || defined(__i386__) || defined(__s390__)
+ *addr = ThreadSelf();
+ *size = GetTlsSize();
+ *addr -= *size;
+ *addr += ThreadDescriptorSize();
+# elif defined(__mips__) || defined(__aarch64__) || defined(__powerpc64__) \
+ || defined(__arm__)
+ *addr = ThreadSelf();
+ *size = GetTlsSize();
+# else
+ *addr = 0;
+ *size = 0;
+# endif
+#elif SANITIZER_FREEBSD
+ void** segbase = ThreadSelfSegbase();
+ *addr = 0;
+ *size = 0;
+ if (segbase != 0) {
+ // tcbalign = 16
+ // tls_size = round(tls_static_space, tcbalign);
+ // dtv = segbase[1];
+ // dtv[2] = segbase - tls_static_space;
+ void **dtv = (void**) segbase[1];
+ *addr = (uptr) dtv[2];
+ *size = (*addr == 0) ? 0 : ((uptr) segbase[0] - (uptr) dtv[2]);
+ }
+#elif SANITIZER_NETBSD
+ struct tls_tcb * const tcb = ThreadSelfTlsTcb();
+ *addr = 0;
+ *size = 0;
+ if (tcb != 0) {
+ // Find size (p_memsz) of dlpi_tls_modid 1 (TLS block of the main program).
+ // ld.elf_so hardcodes the index 1.
+ dl_iterate_phdr(GetSizeFromHdr, size);
+
+ if (*size != 0) {
+ // The block has been found and tcb_dtv[1] contains the base address
+ *addr = (uptr)tcb->tcb_dtv[1];
+ }
+ }
+#elif SANITIZER_OPENBSD
+ *addr = 0;
+ *size = 0;
+#elif SANITIZER_ANDROID
+ *addr = 0;
+ *size = 0;
+#elif SANITIZER_SOLARIS
+ // FIXME
+ *addr = 0;
+ *size = 0;
+#else
+# error "Unknown OS"
+#endif
+}
+#endif
+
+#if !SANITIZER_GO
+uptr GetTlsSize() {
+#if SANITIZER_FREEBSD || SANITIZER_ANDROID || SANITIZER_NETBSD || \
+ SANITIZER_OPENBSD || SANITIZER_SOLARIS
+ uptr addr, size;
+ GetTls(&addr, &size);
+ return size;
+#elif defined(__mips__) || defined(__powerpc64__)
+ return RoundUpTo(g_tls_size + TlsPreTcbSize(), 16);
+#else
+ return g_tls_size;
+#endif
+}
+#endif
+
+void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
+ uptr *tls_addr, uptr *tls_size) {
+#if SANITIZER_GO
+ // Stub implementation for Go.
+ *stk_addr = *stk_size = *tls_addr = *tls_size = 0;
+#else
+ GetTls(tls_addr, tls_size);
+
+ uptr stack_top, stack_bottom;
+ GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom);
+ *stk_addr = stack_bottom;
+ *stk_size = stack_top - stack_bottom;
+
+ if (!main) {
+ // If stack and tls intersect, make them non-intersecting.
+ if (*tls_addr > *stk_addr && *tls_addr < *stk_addr + *stk_size) {
+ CHECK_GT(*tls_addr + *tls_size, *stk_addr);
+ CHECK_LE(*tls_addr + *tls_size, *stk_addr + *stk_size);
+ *stk_size -= *tls_size;
+ *tls_addr = *stk_addr + *stk_size;
+ }
+ }
+#endif
+}
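+
+// Worked example (editor note): with stack = [0x7000, 0x8000) and
+// tls = [0x7e00, 0x8000), the adjustment above shrinks the stack to
+// [0x7000, 0x7e00) and re-bases tls at 0x7e00, so the ranges are disjoint.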
+
+#if !SANITIZER_FREEBSD && !SANITIZER_OPENBSD
+typedef ElfW(Phdr) Elf_Phdr;
+#elif SANITIZER_WORDSIZE == 32 && __FreeBSD_version <= 902001 // v9.2
+#define Elf_Phdr XElf32_Phdr
+#define dl_phdr_info xdl_phdr_info
+#define dl_iterate_phdr(c, b) xdl_iterate_phdr((c), (b))
+#endif // !SANITIZER_FREEBSD && !SANITIZER_OPENBSD
+
+struct DlIteratePhdrData {
+ InternalMmapVectorNoCtor<LoadedModule> *modules;
+ bool first;
+};
+
+static int dl_iterate_phdr_cb(dl_phdr_info *info, size_t size, void *arg) {
+ DlIteratePhdrData *data = (DlIteratePhdrData*)arg;
+ InternalScopedString module_name(kMaxPathLength);
+ if (data->first) {
+ data->first = false;
+ // First module is the binary itself.
+ ReadBinaryNameCached(module_name.data(), module_name.size());
+ } else if (info->dlpi_name) {
+ module_name.append("%s", info->dlpi_name);
+ }
+ if (module_name[0] == '\0')
+ return 0;
+ LoadedModule cur_module;
+ cur_module.set(module_name.data(), info->dlpi_addr);
+ for (int i = 0; i < (int)info->dlpi_phnum; i++) {
+ const Elf_Phdr *phdr = &info->dlpi_phdr[i];
+ if (phdr->p_type == PT_LOAD) {
+ uptr cur_beg = info->dlpi_addr + phdr->p_vaddr;
+ uptr cur_end = cur_beg + phdr->p_memsz;
+ bool executable = phdr->p_flags & PF_X;
+ bool writable = phdr->p_flags & PF_W;
+ cur_module.addAddressRange(cur_beg, cur_end, executable,
+ writable);
+ }
+ }
+ data->modules->push_back(cur_module);
+ return 0;
+}
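+
+// Editor's note: for reference, a minimal standalone dl_iterate_phdr()
+// client mirroring the callback above (illustrative sketch):
+//
+//   #include <link.h>
+//   #include <stdio.h>
+//   static int PrintModuleCb(struct dl_phdr_info *info, size_t, void *) {
+//     printf("%s @ %p\n", info->dlpi_name, (void *)info->dlpi_addr);
+//     return 0;  // returning non-zero would stop the iteration
+//   }
+//   // Usage: dl_iterate_phdr(PrintModuleCb, nullptr);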
+
+#if SANITIZER_ANDROID && __ANDROID_API__ < 21
+extern "C" __attribute__((weak)) int dl_iterate_phdr(
+ int (*)(struct dl_phdr_info *, size_t, void *), void *);
+#endif
+
+static bool requiresProcmaps() {
+#if SANITIZER_ANDROID && __ANDROID_API__ <= 22
+ // Fall back to /proc/maps if dl_iterate_phdr is unavailable or broken.
+ // The runtime check allows the same library to work with
+ // both K and L (and future) Android releases.
+ return AndroidGetApiLevel() <= ANDROID_LOLLIPOP_MR1;
+#else
+ return false;
+#endif
+}
+
+static void procmapsInit(InternalMmapVectorNoCtor<LoadedModule> *modules) {
+ MemoryMappingLayout memory_mapping(/*cache_enabled*/true);
+ memory_mapping.DumpListOfModules(modules);
+}
+
+void ListOfModules::init() {
+ clearOrInit();
+ if (requiresProcmaps()) {
+ procmapsInit(&modules_);
+ } else {
+ DlIteratePhdrData data = {&modules_, true};
+ dl_iterate_phdr(dl_iterate_phdr_cb, &data);
+ }
+}
+
+// When a custom loader is used, dl_iterate_phdr may not contain the full
+// list of modules. Allow callers to fall back to using procmaps.
+void ListOfModules::fallbackInit() {
+ if (!requiresProcmaps()) {
+ clearOrInit();
+ procmapsInit(&modules_);
+ } else {
+ clear();
+ }
+}
+
+// getrusage does not give us the current RSS, only the max RSS.
+// Still, this is better than nothing if /proc/self/statm is not available
+// for some reason, e.g. due to a sandbox.
+static uptr GetRSSFromGetrusage() {
+ struct rusage usage;
+ if (getrusage(RUSAGE_SELF, &usage)) // Failed, probably due to a sandbox.
+ return 0;
+  return usage.ru_maxrss << 10;  // ru_maxrss is in KiB; convert to bytes.
+}
+
+uptr GetRSS() {
+ if (!common_flags()->can_use_proc_maps_statm)
+ return GetRSSFromGetrusage();
+ fd_t fd = OpenFile("/proc/self/statm", RdOnly);
+ if (fd == kInvalidFd)
+ return GetRSSFromGetrusage();
+ char buf[64];
+ uptr len = internal_read(fd, buf, sizeof(buf) - 1);
+ internal_close(fd);
+ if ((sptr)len <= 0)
+ return 0;
+ buf[len] = 0;
+ // The format of the file is:
+ // 1084 89 69 11 0 79 0
+ // We need the second number which is RSS in pages.
+ char *pos = buf;
+ // Skip the first number.
+ while (*pos >= '0' && *pos <= '9')
+ pos++;
+ // Skip whitespaces.
+ while (!(*pos >= '0' && *pos <= '9') && *pos != 0)
+ pos++;
+ // Read the number.
+ uptr rss = 0;
+ while (*pos >= '0' && *pos <= '9')
+ rss = rss * 10 + *pos++ - '0';
+ return rss * GetPageSizeCached();
+}
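+
+// Worked example (editor note): given "1084 89 69 11 0 79 0" and a 4 KiB
+// page size, the parser above yields rss = 89 pages, so GetRSS() returns
+// 89 * 4096 = 364544 bytes.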
+
+// sysconf(_SC_NPROCESSORS_{CONF,ONLN}) cannot be used on most platforms as
+// they allocate memory.
+u32 GetNumberOfCPUs() {
+#if SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_OPENBSD
+ u32 ncpu;
+ int req[2];
+ uptr len = sizeof(ncpu);
+ req[0] = CTL_HW;
+ req[1] = HW_NCPU;
+ CHECK_EQ(internal_sysctl(req, 2, &ncpu, &len, NULL, 0), 0);
+ return ncpu;
+#elif SANITIZER_ANDROID && !defined(CPU_COUNT) && !defined(__aarch64__)
+ // Fall back to /sys/devices/system/cpu on Android when cpu_set_t doesn't
+ // exist in sched.h. That is the case for toolchains generated with older
+ // NDKs.
+  // This code doesn't work on AArch64 because internal_getdents makes use of
+  // the 64-bit getdents syscall, but cpu_set_t seems to always exist on AArch64.
+ uptr fd = internal_open("/sys/devices/system/cpu", O_RDONLY | O_DIRECTORY);
+ if (internal_iserror(fd))
+ return 0;
+ InternalMmapVector<u8> buffer(4096);
+ uptr bytes_read = buffer.size();
+ uptr n_cpus = 0;
+ u8 *d_type;
+ struct linux_dirent *entry = (struct linux_dirent *)&buffer[bytes_read];
+ while (true) {
+ if ((u8 *)entry >= &buffer[bytes_read]) {
+ bytes_read = internal_getdents(fd, (struct linux_dirent *)buffer.data(),
+ buffer.size());
+ if (internal_iserror(bytes_read) || !bytes_read)
+ break;
+ entry = (struct linux_dirent *)buffer.data();
+ }
+ d_type = (u8 *)entry + entry->d_reclen - 1;
+ if (d_type >= &buffer[bytes_read] ||
+ (u8 *)&entry->d_name[3] >= &buffer[bytes_read])
+ break;
+ if (entry->d_ino != 0 && *d_type == DT_DIR) {
+ if (entry->d_name[0] == 'c' && entry->d_name[1] == 'p' &&
+ entry->d_name[2] == 'u' &&
+ entry->d_name[3] >= '0' && entry->d_name[3] <= '9')
+ n_cpus++;
+ }
+ entry = (struct linux_dirent *)(((u8 *)entry) + entry->d_reclen);
+ }
+ internal_close(fd);
+ return n_cpus;
+#elif SANITIZER_SOLARIS
+ return sysconf(_SC_NPROCESSORS_ONLN);
+#else
+ cpu_set_t CPUs;
+ CHECK_EQ(sched_getaffinity(0, sizeof(cpu_set_t), &CPUs), 0);
+ return CPU_COUNT(&CPUs);
+#endif
+}
+
+#if SANITIZER_LINUX
+
+# if SANITIZER_ANDROID
+static atomic_uint8_t android_log_initialized;
+
+void AndroidLogInit() {
+ openlog(GetProcessName(), 0, LOG_USER);
+ atomic_store(&android_log_initialized, 1, memory_order_release);
+}
+
+static bool ShouldLogAfterPrintf() {
+ return atomic_load(&android_log_initialized, memory_order_acquire);
+}
+
+extern "C" SANITIZER_WEAK_ATTRIBUTE
+int async_safe_write_log(int pri, const char* tag, const char* msg);
+extern "C" SANITIZER_WEAK_ATTRIBUTE
+int __android_log_write(int prio, const char* tag, const char* msg);
+
+// ANDROID_LOG_INFO is 4, but can't be resolved at runtime.
+#define SANITIZER_ANDROID_LOG_INFO 4
+
+// async_safe_write_log is a new public version of __libc_write_log that is
+// used behind syslog. It is preferable to syslog as it will not do any dynamic
+// memory allocation or formatting.
+// If the function is not available, syslog is preferred for L+ (it was broken
+// pre-L) as __android_log_write triggers racy behavior with the strncpy
+// interceptor. Fall back to __android_log_write pre-L.
+void WriteOneLineToSyslog(const char *s) {
+ if (&async_safe_write_log) {
+ async_safe_write_log(SANITIZER_ANDROID_LOG_INFO, GetProcessName(), s);
+ } else if (AndroidGetApiLevel() > ANDROID_KITKAT) {
+ syslog(LOG_INFO, "%s", s);
+ } else {
+ CHECK(&__android_log_write);
+ __android_log_write(SANITIZER_ANDROID_LOG_INFO, nullptr, s);
+ }
+}
+
+extern "C" SANITIZER_WEAK_ATTRIBUTE
+void android_set_abort_message(const char *);
+
+void SetAbortMessage(const char *str) {
+ if (&android_set_abort_message)
+ android_set_abort_message(str);
+}
+# else
+void AndroidLogInit() {}
+
+static bool ShouldLogAfterPrintf() { return true; }
+
+void WriteOneLineToSyslog(const char *s) { syslog(LOG_INFO, "%s", s); }
+
+void SetAbortMessage(const char *str) {}
+# endif // SANITIZER_ANDROID
+
+void LogMessageOnPrintf(const char *str) {
+ if (common_flags()->log_to_syslog && ShouldLogAfterPrintf())
+ WriteToSyslog(str);
+}
+
+#endif // SANITIZER_LINUX
+
+#if SANITIZER_LINUX && !SANITIZER_GO
+// glibc crashes when using clock_gettime from a preinit_array function as the
+// vDSO function pointers haven't been initialized yet. __progname is
+// initialized after the vDSO function pointers, so if it exists, is not null
+// and is not empty, we can use clock_gettime.
+extern "C" SANITIZER_WEAK_ATTRIBUTE char *__progname;
+INLINE bool CanUseVDSO() {
+ // Bionic is safe, it checks for the vDSO function pointers to be initialized.
+ if (SANITIZER_ANDROID)
+ return true;
+ if (&__progname && __progname && *__progname)
+ return true;
+ return false;
+}
+
+// MonotonicNanoTime is a timing function that can leverage the vDSO by calling
+// clock_gettime. real_clock_gettime only exists if clock_gettime is
+// intercepted, so define it weakly and use it if available.
+extern "C" SANITIZER_WEAK_ATTRIBUTE
+int real_clock_gettime(u32 clk_id, void *tp);
+u64 MonotonicNanoTime() {
+ timespec ts;
+ if (CanUseVDSO()) {
+ if (&real_clock_gettime)
+ real_clock_gettime(CLOCK_MONOTONIC, &ts);
+ else
+ clock_gettime(CLOCK_MONOTONIC, &ts);
+ } else {
+ internal_clock_gettime(CLOCK_MONOTONIC, &ts);
+ }
+ return (u64)ts.tv_sec * (1000ULL * 1000 * 1000) + ts.tv_nsec;
+}
+#else
+// Non-Linux & Go always use the syscall.
+u64 MonotonicNanoTime() {
+ timespec ts;
+ internal_clock_gettime(CLOCK_MONOTONIC, &ts);
+ return (u64)ts.tv_sec * (1000ULL * 1000 * 1000) + ts.tv_nsec;
+}
+#endif // SANITIZER_LINUX && !SANITIZER_GO
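+
+// Illustrative usage of MonotonicNanoTime() (editor sketch):
+//
+//   u64 t0 = MonotonicNanoTime();
+//   DoExpensiveWork();  // hypothetical workload
+//   u64 elapsed_ns = MonotonicNanoTime() - t0;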
+
+#if !SANITIZER_OPENBSD
+void ReExec() {
+ const char *pathname = "/proc/self/exe";
+
+#if SANITIZER_NETBSD
+ static const int name[] = {
+ CTL_KERN,
+ KERN_PROC_ARGS,
+ -1,
+ KERN_PROC_PATHNAME,
+ };
+ char path[400];
+ uptr len;
+
+ len = sizeof(path);
+ if (internal_sysctl(name, ARRAY_SIZE(name), path, &len, NULL, 0) != -1)
+ pathname = path;
+#elif SANITIZER_SOLARIS
+ pathname = getexecname();
+ CHECK_NE(pathname, NULL);
+#elif SANITIZER_USE_GETAUXVAL
+ // Calling execve with /proc/self/exe sets that as $EXEC_ORIGIN. Binaries that
+ // rely on that will fail to load shared libraries. Query AT_EXECFN instead.
+ pathname = reinterpret_cast<const char *>(getauxval(AT_EXECFN));
+#endif
+
+ uptr rv = internal_execve(pathname, GetArgv(), GetEnviron());
+ int rverrno;
+ CHECK_EQ(internal_iserror(rv, &rverrno), true);
+ Printf("execve failed, errno %d\n", rverrno);
+ Die();
+}
+#endif // !SANITIZER_OPENBSD
+
+} // namespace __sanitizer
+
+#endif  // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD ||
+        // SANITIZER_OPENBSD || SANITIZER_SOLARIS
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_linux_s390.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_linux_s390.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_linux_s390.cc (revision 351984)
@@ -0,0 +1,221 @@
+//===-- sanitizer_linux_s390.cc -------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries and implements s390-linux-specific functions from
+// sanitizer_libc.h.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+
+#if SANITIZER_LINUX && SANITIZER_S390
+
+#include "sanitizer_libc.h"
+#include "sanitizer_linux.h"
+
+#include <errno.h>
+#include <sys/syscall.h>
+#include <sys/utsname.h>
+#include <unistd.h>
+
+namespace __sanitizer {
+
+// --------------- sanitizer_libc.h
+uptr internal_mmap(void *addr, uptr length, int prot, int flags, int fd,
+ OFF_T offset) {
+ struct s390_mmap_params {
+ unsigned long addr;
+ unsigned long length;
+ unsigned long prot;
+ unsigned long flags;
+ unsigned long fd;
+ unsigned long offset;
+ } params = {
+ (unsigned long)addr,
+ (unsigned long)length,
+ (unsigned long)prot,
+ (unsigned long)flags,
+ (unsigned long)fd,
+# ifdef __s390x__
+ (unsigned long)offset,
+# else
+ (unsigned long)(offset / 4096),
+# endif
+ };
+# ifdef __s390x__
+ return syscall(__NR_mmap, &params);
+# else
+ return syscall(__NR_mmap2, &params);
+# endif
+}
+
+uptr internal_clone(int (*fn)(void *), void *child_stack, int flags, void *arg,
+ int *parent_tidptr, void *newtls, int *child_tidptr) {
+ if (!fn || !child_stack)
+ return -EINVAL;
+ CHECK_EQ(0, (uptr)child_stack % 16);
+ // Minimum frame size.
+#ifdef __s390x__
+ child_stack = (char *)child_stack - 160;
+#else
+ child_stack = (char *)child_stack - 96;
+#endif
+ // Terminate unwind chain.
+ ((unsigned long *)child_stack)[0] = 0;
+ // And pass parameters.
+ ((unsigned long *)child_stack)[1] = (uptr)fn;
+ ((unsigned long *)child_stack)[2] = (uptr)arg;
+ register long res __asm__("r2");
+ register void *__cstack __asm__("r2") = child_stack;
+ register int __flags __asm__("r3") = flags;
+ register int * __ptidptr __asm__("r4") = parent_tidptr;
+ register int * __ctidptr __asm__("r5") = child_tidptr;
+ register void * __newtls __asm__("r6") = newtls;
+
+ __asm__ __volatile__(
+ /* Clone. */
+ "svc %1\n"
+
+ /* if (%r2 != 0)
+ * return;
+ */
+#ifdef __s390x__
+ "cghi %%r2, 0\n"
+#else
+ "chi %%r2, 0\n"
+#endif
+ "jne 1f\n"
+
+ /* Call "fn(arg)". */
+#ifdef __s390x__
+ "lmg %%r1, %%r2, 8(%%r15)\n"
+#else
+ "lm %%r1, %%r2, 4(%%r15)\n"
+#endif
+ "basr %%r14, %%r1\n"
+
+ /* Call _exit(%r2). */
+ "svc %2\n"
+
+ /* Return to parent. */
+ "1:\n"
+ : "=r" (res)
+ : "i"(__NR_clone), "i"(__NR_exit),
+ "r"(__cstack),
+ "r"(__flags),
+ "r"(__ptidptr),
+ "r"(__ctidptr),
+ "r"(__newtls)
+ : "memory", "cc");
+ return res;
+}
+
+#if SANITIZER_S390_64
+static bool FixedCVE_2016_2143() {
+ // Try to determine if the running kernel has a fix for CVE-2016-2143,
+ // return false if in doubt (better safe than sorry). Distros may want to
+ // adjust this for their own kernels.
+ struct utsname buf;
+ unsigned int major, minor, patch = 0;
+ // This should never fail, but just in case...
+ if (uname(&buf))
+ return false;
+ const char *ptr = buf.release;
+ major = internal_simple_strtoll(ptr, &ptr, 10);
+ // At least first 2 should be matched.
+ if (ptr[0] != '.')
+ return false;
+ minor = internal_simple_strtoll(ptr+1, &ptr, 10);
+ // Third is optional.
+ if (ptr[0] == '.')
+ patch = internal_simple_strtoll(ptr+1, &ptr, 10);
+ if (major < 3) {
+ if (major == 2 && minor == 6 && patch == 32 && ptr[0] == '-' &&
+ internal_strstr(ptr, ".el6")) {
+ // Check RHEL6
+ int r1 = internal_simple_strtoll(ptr+1, &ptr, 10);
+ if (r1 >= 657) // 2.6.32-657.el6 or later
+ return true;
+ if (r1 == 642 && ptr[0] == '.') {
+ int r2 = internal_simple_strtoll(ptr+1, &ptr, 10);
+ if (r2 >= 9) // 2.6.32-642.9.1.el6 or later
+ return true;
+ }
+ }
+ // <3.0 is bad.
+ return false;
+ } else if (major == 3) {
+ // 3.2.79+ is OK.
+ if (minor == 2 && patch >= 79)
+ return true;
+ // 3.12.58+ is OK.
+ if (minor == 12 && patch >= 58)
+ return true;
+ if (minor == 10 && patch == 0 && ptr[0] == '-' &&
+ internal_strstr(ptr, ".el7")) {
+ // Check RHEL7
+ int r1 = internal_simple_strtoll(ptr+1, &ptr, 10);
+ if (r1 >= 426) // 3.10.0-426.el7 or later
+ return true;
+ if (r1 == 327 && ptr[0] == '.') {
+ int r2 = internal_simple_strtoll(ptr+1, &ptr, 10);
+ if (r2 >= 27) // 3.10.0-327.27.1.el7 or later
+ return true;
+ }
+ }
+ // Otherwise, bad.
+ return false;
+ } else if (major == 4) {
+ // 4.1.21+ is OK.
+ if (minor == 1 && patch >= 21)
+ return true;
+ // 4.4.6+ is OK.
+ if (minor == 4 && patch >= 6)
+ return true;
+ if (minor == 4 && patch == 0 && ptr[0] == '-' &&
+ internal_strstr(buf.version, "Ubuntu")) {
+ // Check Ubuntu 16.04
+ int r1 = internal_simple_strtoll(ptr+1, &ptr, 10);
+ if (r1 >= 13) // 4.4.0-13 or later
+ return true;
+ }
+ // Otherwise, OK if 4.5+.
+ return minor >= 5;
+ } else {
+ // Linux 5 and up are fine.
+ return true;
+ }
+}
+
+void AvoidCVE_2016_2143() {
+ // Older kernels are affected by CVE-2016-2143 - they will crash hard
+  // if someone uses 4-level page tables (i.e. virtual addresses >= 4TB)
+ // and fork() in the same process. Unfortunately, sanitizers tend to
+ // require such addresses. Since this is very likely to crash the whole
+ // machine (sanitizers themselves use fork() for llvm-symbolizer, for one),
+ // abort the process at initialization instead.
+ if (FixedCVE_2016_2143())
+ return;
+ if (GetEnv("SANITIZER_IGNORE_CVE_2016_2143"))
+ return;
+ Report(
+ "ERROR: Your kernel seems to be vulnerable to CVE-2016-2143. Using ASan,\n"
+ "MSan, TSan, DFSan or LSan with such kernel can and will crash your\n"
+ "machine, or worse.\n"
+ "\n"
+ "If you are certain your kernel is not vulnerable (you have compiled it\n"
+ "yourself, or are using an unrecognized distribution kernel), you can\n"
+ "override this safety check by exporting SANITIZER_IGNORE_CVE_2016_2143\n"
+ "with any value.\n");
+ Die();
+}
+#endif
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_LINUX && SANITIZER_S390
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_linux_s390.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_list.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_list.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_list.h (revision 351984)
@@ -0,0 +1,166 @@
+//===-- sanitizer_list.h ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains implementation of a list class to be used by
+// ThreadSanitizer, etc run-times.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_LIST_H
+#define SANITIZER_LIST_H
+
+#include "sanitizer_internal_defs.h"
+
+namespace __sanitizer {
+
+// Intrusive singly-linked list with size(), push_back(), push_front(),
+// pop_front(), append_front() and append_back().
+// This class should be a POD (so that it can be put into TLS)
+// and an object with all zero fields should represent a valid empty list.
+// This class does not have a CTOR, so clear() should be called on all
+// non-zero-initialized objects before use.
+template<class Item>
+struct IntrusiveList {
+ friend class Iterator;
+
+ void clear() {
+ first_ = last_ = nullptr;
+ size_ = 0;
+ }
+
+ bool empty() const { return size_ == 0; }
+ uptr size() const { return size_; }
+
+ void push_back(Item *x) {
+ if (empty()) {
+ x->next = nullptr;
+ first_ = last_ = x;
+ size_ = 1;
+ } else {
+ x->next = nullptr;
+ last_->next = x;
+ last_ = x;
+ size_++;
+ }
+ }
+
+ void push_front(Item *x) {
+ if (empty()) {
+ x->next = nullptr;
+ first_ = last_ = x;
+ size_ = 1;
+ } else {
+ x->next = first_;
+ first_ = x;
+ size_++;
+ }
+ }
+
+ void pop_front() {
+ CHECK(!empty());
+ first_ = first_->next;
+ if (!first_)
+ last_ = nullptr;
+ size_--;
+ }
+
+ void extract(Item *prev, Item *x) {
+ CHECK(!empty());
+ CHECK_NE(prev, nullptr);
+ CHECK_NE(x, nullptr);
+ CHECK_EQ(prev->next, x);
+ prev->next = x->next;
+ if (last_ == x)
+ last_ = prev;
+ size_--;
+ }
+
+ Item *front() { return first_; }
+ const Item *front() const { return first_; }
+ Item *back() { return last_; }
+ const Item *back() const { return last_; }
+
+ void append_front(IntrusiveList<Item> *l) {
+ CHECK_NE(this, l);
+ if (l->empty())
+ return;
+ if (empty()) {
+ *this = *l;
+ } else if (!l->empty()) {
+ l->last_->next = first_;
+ first_ = l->first_;
+ size_ += l->size();
+ }
+ l->clear();
+ }
+
+ void append_back(IntrusiveList<Item> *l) {
+ CHECK_NE(this, l);
+ if (l->empty())
+ return;
+ if (empty()) {
+ *this = *l;
+ } else {
+ last_->next = l->first_;
+ last_ = l->last_;
+ size_ += l->size();
+ }
+ l->clear();
+ }
+
+ void CheckConsistency() {
+ if (size_ == 0) {
+ CHECK_EQ(first_, 0);
+ CHECK_EQ(last_, 0);
+ } else {
+ uptr count = 0;
+ for (Item *i = first_; ; i = i->next) {
+ count++;
+ if (i == last_) break;
+ }
+ CHECK_EQ(size(), count);
+ CHECK_EQ(last_->next, 0);
+ }
+ }
+
+ template<class ItemTy>
+ class IteratorBase {
+ public:
+ explicit IteratorBase(ItemTy *current) : current_(current) {}
+ IteratorBase &operator++() {
+ current_ = current_->next;
+ return *this;
+ }
+ bool operator!=(IteratorBase other) const {
+ return current_ != other.current_;
+ }
+ ItemTy &operator*() {
+ return *current_;
+ }
+ private:
+ ItemTy *current_;
+ };
+
+ typedef IteratorBase<Item> Iterator;
+ typedef IteratorBase<const Item> ConstIterator;
+
+ Iterator begin() { return Iterator(first_); }
+ Iterator end() { return Iterator(0); }
+
+ ConstIterator begin() const { return ConstIterator(first_); }
+ ConstIterator end() const { return ConstIterator(0); }
+
+// private, don't use directly.
+ uptr size_;
+ Item *first_;
+ Item *last_;
+};
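+
+// Illustrative usage (editor sketch): any POD with a `next` pointer works as
+// the list item.
+//
+//   struct Node { Node *next; int payload; };
+//   IntrusiveList<Node> list;
+//   list.clear();                   // required: there is no constructor
+//   Node a = {}, b = {};
+//   list.push_back(&a);
+//   list.push_back(&b);
+//   for (Node &n : list)            // begin()/end() enable range-for
+//     (void)n.payload;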
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_LIST_H
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_local_address_space_view.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_local_address_space_view.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_local_address_space_view.h (revision 351984)
@@ -0,0 +1,76 @@
+//===-- sanitizer_local_address_space_view.h --------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// `LocalAddressSpaceView` provides the local (i.e. target and current address
+// space are the same) implementation of the `AddressSpaceView` interface,
+// which provides a simple interface to load memory from another process
+// (i.e. out-of-process).
+//
+// The `AddressSpaceView` interface requires that the type can be used as a
+// template parameter to objects that wish to be able to operate in an
+// out-of-process manner. In normal usage, objects are in-process and are thus
+// instantiated with the `LocalAddressSpaceView` type. This type is used to
+// load any pointers in instance methods. This implementation is effectively
+// a no-op. When an object is to be used in an out-of-process manner, it is
+// instantiated with the `RemoteAddressSpaceView` type.
+//
+// By making `AddressSpaceView` a template parameter of an object, it can
+// change its implementation at compile time which has no run time overhead.
+// This also allows unifying in-process and out-of-process code which avoids
+// code duplication.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_LOCAL_ADDRES_SPACE_VIEW_H
+#define SANITIZER_LOCAL_ADDRES_SPACE_VIEW_H
+
+namespace __sanitizer {
+struct LocalAddressSpaceView {
+  // Load `sizeof(T) * num_elements` bytes of memory from the target
+ // process (always local for this implementation) starting at address
+ // `target_address`. The local copy of this memory is returned as a pointer.
+ // The caller should not write to this memory. The behaviour when doing so is
+ // undefined. Callers should use `LoadWritable()` to get access to memory
+ // that is writable.
+ //
+ // The lifetime of loaded memory is implementation defined.
+ template <typename T>
+ static const T *Load(const T *target_address, uptr num_elements = 1) {
+ // The target address space is the local address space so
+ // nothing needs to be copied. Just return the pointer.
+ return target_address;
+ }
+
+  // Load `sizeof(T) * num_elements` bytes of memory from the target
+ // process (always local for this implementation) starting at address
+ // `target_address`. The local copy of this memory is returned as a pointer.
+ // The memory returned may be written to.
+ //
+ // Writes made to the returned memory will be visible in the memory returned
+ // by subsequent `Load()` or `LoadWritable()` calls provided the
+ // `target_address` parameter is the same. It is not guaranteed that the
+ // memory returned by previous calls to `Load()` will contain any performed
+ // writes. If two or more overlapping regions of memory are loaded via
+ // separate calls to `LoadWritable()`, it is implementation defined whether
+ // writes made to the region returned by one call are visible in the regions
+ // returned by other calls.
+ //
+ // Given the above it is recommended to load the largest possible object
+ // that requires modification (e.g. a class) rather than individual fields
+ // from a class to avoid issues with overlapping writable regions.
+ //
+ // The lifetime of loaded memory is implementation defined.
+ template <typename T>
+ static T *LoadWritable(T *target_address, uptr num_elements = 1) {
+ // The target address space is the local address space so
+ // nothing needs to be copied. Just return the pointer.
+ return target_address;
+ }
+};
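+
+// Illustrative usage (editor sketch; CounterReader is a hypothetical type):
+//
+//   template <typename AddressSpaceView = LocalAddressSpaceView>
+//   struct CounterReader {
+//     const int *target;  // address in the *target* process
+//     int Value() const { return *AddressSpaceView::Load(target); }
+//   };
+//
+// With LocalAddressSpaceView the Load() is a no-op pointer pass-through; a
+// RemoteAddressSpaceView instantiation would copy the bytes out-of-process.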
+} // namespace __sanitizer
+
+#endif
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_mac.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_mac.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_mac.cc (revision 351984)
@@ -0,0 +1,1135 @@
+//===-- sanitizer_mac.cc --------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between various sanitizers' runtime libraries and
+// implements OSX-specific functions.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+#if SANITIZER_MAC
+#include "sanitizer_mac.h"
+
+// Use 64-bit inodes in file operations. ASan does not support OS X 10.5, so
+// the clients will most certainly use 64-bit ones as well.
+#ifndef _DARWIN_USE_64_BIT_INODE
+#define _DARWIN_USE_64_BIT_INODE 1
+#endif
+#include <stdio.h>
+
+#include "sanitizer_common.h"
+#include "sanitizer_file.h"
+#include "sanitizer_flags.h"
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_placement_new.h"
+#include "sanitizer_platform_limits_posix.h"
+#include "sanitizer_procmaps.h"
+
+#if !SANITIZER_IOS
+#include <crt_externs.h> // for _NSGetEnviron
+#else
+extern char **environ;
+#endif
+
+#if defined(__has_include) && __has_include(<os/trace.h>)
+#define SANITIZER_OS_TRACE 1
+#include <os/trace.h>
+#else
+#define SANITIZER_OS_TRACE 0
+#endif
+
+#if !SANITIZER_IOS
+#include <crt_externs.h> // for _NSGetArgv and _NSGetEnviron
+#else
+extern "C" {
+ extern char ***_NSGetArgv(void);
+}
+#endif
+
+#include <asl.h>
+#include <dlfcn.h> // for dladdr()
+#include <errno.h>
+#include <fcntl.h>
+#include <libkern/OSAtomic.h>
+#include <mach-o/dyld.h>
+#include <mach/mach.h>
+#include <mach/mach_time.h>
+#include <mach/vm_statistics.h>
+#include <malloc/malloc.h>
+#include <pthread.h>
+#include <sched.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <sys/mman.h>
+#include <sys/resource.h>
+#include <sys/stat.h>
+#include <sys/sysctl.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include <util.h>
+
+// From <crt_externs.h>, but we don't have that file on iOS.
+extern "C" {
+ extern char ***_NSGetArgv(void);
+ extern char ***_NSGetEnviron(void);
+}
+
+// From <mach/mach_vm.h>, but we don't have that file on iOS.
+extern "C" {
+ extern kern_return_t mach_vm_region_recurse(
+ vm_map_t target_task,
+ mach_vm_address_t *address,
+ mach_vm_size_t *size,
+ natural_t *nesting_depth,
+ vm_region_recurse_info_t info,
+ mach_msg_type_number_t *infoCnt);
+}
+
+namespace __sanitizer {
+
+#include "sanitizer_syscall_generic.inc"
+
+// Direct syscalls that don't call libmalloc hooks; not available on 10.6.
+extern "C" void *__mmap(void *addr, size_t len, int prot, int flags, int fildes,
+ off_t off) SANITIZER_WEAK_ATTRIBUTE;
+extern "C" int __munmap(void *, size_t) SANITIZER_WEAK_ATTRIBUTE;
+
+// ---------------------- sanitizer_libc.h
+
+// From <mach/vm_statistics.h>, but not on older OSs.
+#ifndef VM_MEMORY_SANITIZER
+#define VM_MEMORY_SANITIZER 99
+#endif
+
+// XNU on Darwin provides a mmap flag that optimizes allocation/deallocation of
+// giant memory regions (i.e. shadow memory regions).
+#define kXnuFastMmapFd 0x4
+static size_t kXnuFastMmapThreshold = 2ULL << 30; // 2 GB (2ULL avoids signed int overflow in the shift)
+static bool use_xnu_fast_mmap = false;
+
+uptr internal_mmap(void *addr, size_t length, int prot, int flags,
+ int fd, u64 offset) {
+ if (fd == -1) {
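+ // Darwin convention: for anonymous mappings, the fd argument can carry a
+ // VM tag (VM_MAKE_TAG) that labels the region in tools such as vmmap.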
+ fd = VM_MAKE_TAG(VM_MEMORY_SANITIZER);
+ if (length >= kXnuFastMmapThreshold) {
+ if (use_xnu_fast_mmap) fd |= kXnuFastMmapFd;
+ }
+ }
+ if (&__mmap) return (uptr)__mmap(addr, length, prot, flags, fd, offset);
+ return (uptr)mmap(addr, length, prot, flags, fd, offset);
+}
+
+uptr internal_munmap(void *addr, uptr length) {
+ if (&__munmap) return __munmap(addr, length);
+ return munmap(addr, length);
+}
+
+int internal_mprotect(void *addr, uptr length, int prot) {
+ return mprotect(addr, length, prot);
+}
+
+uptr internal_close(fd_t fd) {
+ return close(fd);
+}
+
+uptr internal_open(const char *filename, int flags) {
+ return open(filename, flags);
+}
+
+uptr internal_open(const char *filename, int flags, u32 mode) {
+ return open(filename, flags, mode);
+}
+
+uptr internal_read(fd_t fd, void *buf, uptr count) {
+ return read(fd, buf, count);
+}
+
+uptr internal_write(fd_t fd, const void *buf, uptr count) {
+ return write(fd, buf, count);
+}
+
+uptr internal_stat(const char *path, void *buf) {
+ return stat(path, (struct stat *)buf);
+}
+
+uptr internal_lstat(const char *path, void *buf) {
+ return lstat(path, (struct stat *)buf);
+}
+
+uptr internal_fstat(fd_t fd, void *buf) {
+ return fstat(fd, (struct stat *)buf);
+}
+
+uptr internal_filesize(fd_t fd) {
+ struct stat st;
+ if (internal_fstat(fd, &st))
+ return -1;
+ return (uptr)st.st_size;
+}
+
+uptr internal_dup(int oldfd) {
+ return dup(oldfd);
+}
+
+uptr internal_dup2(int oldfd, int newfd) {
+ return dup2(oldfd, newfd);
+}
+
+uptr internal_readlink(const char *path, char *buf, uptr bufsize) {
+ return readlink(path, buf, bufsize);
+}
+
+uptr internal_unlink(const char *path) {
+ return unlink(path);
+}
+
+uptr internal_sched_yield() {
+ return sched_yield();
+}
+
+void internal__exit(int exitcode) {
+ _exit(exitcode);
+}
+
+unsigned int internal_sleep(unsigned int seconds) {
+ return sleep(seconds);
+}
+
+uptr internal_getpid() {
+ return getpid();
+}
+
+int internal_sigaction(int signum, const void *act, void *oldact) {
+ return sigaction(signum,
+ (const struct sigaction *)act, (struct sigaction *)oldact);
+}
+
+void internal_sigfillset(__sanitizer_sigset_t *set) { sigfillset(set); }
+
+uptr internal_sigprocmask(int how, __sanitizer_sigset_t *set,
+ __sanitizer_sigset_t *oldset) {
+ // Don't use sigprocmask here, because it affects all threads.
+ return pthread_sigmask(how, set, oldset);
+}
+
+// __fork() doesn't call pthread_atfork() handlers; it is not available on
+// 10.6.
+extern "C" pid_t __fork(void) SANITIZER_WEAK_ATTRIBUTE;
+
+int internal_fork() {
+ if (&__fork)
+ return __fork();
+ return fork();
+}
+
+int internal_sysctl(const int *name, unsigned int namelen, void *oldp,
+ uptr *oldlenp, const void *newp, uptr newlen) {
+ return sysctl(const_cast<int *>(name), namelen, oldp, (size_t *)oldlenp,
+ const_cast<void *>(newp), (size_t)newlen);
+}
+
+int internal_sysctlbyname(const char *sname, void *oldp, uptr *oldlenp,
+ const void *newp, uptr newlen) {
+ return sysctlbyname(sname, oldp, (size_t *)oldlenp, const_cast<void *>(newp),
+ (size_t)newlen);
+}
+
+int internal_forkpty(int *aparent) {
+ int parent, worker;
+ if (openpty(&parent, &worker, nullptr, nullptr, nullptr) == -1) return -1;
+ int pid = internal_fork();
+ if (pid == -1) {
+ close(parent);
+ close(worker);
+ return -1;
+ }
+ if (pid == 0) {
+ close(parent);
+ if (login_tty(worker) != 0) {
+ // We already forked; there's not much we can do. Let's quit.
+ Report("login_tty failed (errno %d)\n", errno);
+ internal__exit(1);
+ }
+ } else {
+ *aparent = parent;
+ close(worker);
+ }
+ return pid;
+}
+
+uptr internal_rename(const char *oldpath, const char *newpath) {
+ return rename(oldpath, newpath);
+}
+
+uptr internal_ftruncate(fd_t fd, uptr size) {
+ return ftruncate(fd, size);
+}
+
+uptr internal_execve(const char *filename, char *const argv[],
+ char *const envp[]) {
+ return execve(filename, argv, envp);
+}
+
+uptr internal_waitpid(int pid, int *status, int options) {
+ return waitpid(pid, status, options);
+}
+
+// ----------------- sanitizer_common.h
+bool FileExists(const char *filename) {
+ if (ShouldMockFailureToOpen(filename))
+ return false;
+ struct stat st;
+ if (stat(filename, &st))
+ return false;
+ // Sanity check: filename is a regular file.
+ return S_ISREG(st.st_mode);
+}
+
+tid_t GetTid() {
+ tid_t tid;
+ pthread_threadid_np(nullptr, &tid);
+ return tid;
+}
+
+void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
+ uptr *stack_bottom) {
+ CHECK(stack_top);
+ CHECK(stack_bottom);
+ uptr stacksize = pthread_get_stacksize_np(pthread_self());
+ // pthread_get_stacksize_np() returns an incorrect stack size for the main
+ // thread on Mavericks. See
+ // https://github.com/google/sanitizers/issues/261
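+ // The bogus value is exactly 512 KB (1 << 19), so treat it as the
+ // fingerprint of the bug and fall back to RLIMIT_STACK, capped at
+ // kMaxThreadStackSize.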
+ if ((GetMacosVersion() >= MACOS_VERSION_MAVERICKS) && at_initialization &&
+ stacksize == (1 << 19)) {
+ struct rlimit rl;
+ CHECK_EQ(getrlimit(RLIMIT_STACK, &rl), 0);
+ // Most often rl.rlim_cur will be the desired 8M.
+ if (rl.rlim_cur < kMaxThreadStackSize) {
+ stacksize = rl.rlim_cur;
+ } else {
+ stacksize = kMaxThreadStackSize;
+ }
+ }
+ void *stackaddr = pthread_get_stackaddr_np(pthread_self());
+ *stack_top = (uptr)stackaddr;
+ *stack_bottom = *stack_top - stacksize;
+}
+
+char **GetEnviron() {
+#if !SANITIZER_IOS
+ char ***env_ptr = _NSGetEnviron();
+ if (!env_ptr) {
+ Report("_NSGetEnviron() returned NULL. Please make sure __asan_init() is "
+ "called after libSystem_initializer().\n");
+ CHECK(env_ptr);
+ }
+ char **environ = *env_ptr;
+#endif
+ CHECK(environ);
+ return environ;
+}
+
+const char *GetEnv(const char *name) {
+ char **env = GetEnviron();
+ uptr name_len = internal_strlen(name);
+ while (*env != 0) {
+ uptr len = internal_strlen(*env);
+ if (len > name_len) {
+ const char *p = *env;
+ if (!internal_memcmp(p, name, name_len) &&
+ p[name_len] == '=') { // Match.
+ return *env + name_len + 1; // String starting after =.
+ }
+ }
+ env++;
+ }
+ return 0;
+}
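+
+// For example, with a (hypothetical) environment entry "HOME=/Users/me",
+// GetEnv("HOME") above matches the "HOME=" prefix and returns the suffix
+// "/Users/me".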
+
+uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) {
+ CHECK_LE(kMaxPathLength, buf_len);
+
+ // On OS X the executable path is saved to the stack by dyld. Reading it
+ // from there is much faster than calling dladdr, especially for large
+ // binaries with symbols.
+ InternalScopedString exe_path(kMaxPathLength);
+ uint32_t size = exe_path.size();
+ if (_NSGetExecutablePath(exe_path.data(), &size) == 0 &&
+ realpath(exe_path.data(), buf) != 0) {
+ return internal_strlen(buf);
+ }
+ return 0;
+}
+
+uptr ReadLongProcessName(/*out*/char *buf, uptr buf_len) {
+ return ReadBinaryName(buf, buf_len);
+}
+
+void ReExec() {
+ UNIMPLEMENTED();
+}
+
+void CheckASLR() {
+ // Do nothing
+}
+
+void CheckMPROTECT() {
+ // Do nothing
+}
+
+uptr GetPageSize() {
+ return sysconf(_SC_PAGESIZE);
+}
+
+extern "C" unsigned malloc_num_zones;
+extern "C" malloc_zone_t **malloc_zones;
+malloc_zone_t sanitizer_zone;
+
+// We need to make sure that sanitizer_zone is registered as malloc_zones[0]. If
+// libmalloc tries to set up a different zone as malloc_zones[0], it will call
+// mprotect(malloc_zones, ..., PROT_READ). This interceptor will catch that and
+// make sure we are still the first (default) zone.
+void MprotectMallocZones(void *addr, int prot) {
+ if (addr == malloc_zones && prot == PROT_READ) {
+ if (malloc_num_zones > 1 && malloc_zones[0] != &sanitizer_zone) {
+ for (unsigned i = 1; i < malloc_num_zones; i++) {
+ if (malloc_zones[i] == &sanitizer_zone) {
+ // Swap malloc_zones[0] and malloc_zones[i].
+ malloc_zones[i] = malloc_zones[0];
+ malloc_zones[0] = &sanitizer_zone;
+ break;
+ }
+ }
+ }
+ }
+}
+
+BlockingMutex::BlockingMutex() {
+ internal_memset(this, 0, sizeof(*this));
+}
+
+void BlockingMutex::Lock() {
+ CHECK(sizeof(OSSpinLock) <= sizeof(opaque_storage_));
+ CHECK_EQ(OS_SPINLOCK_INIT, 0);
+ CHECK_EQ(owner_, 0);
+ OSSpinLockLock((OSSpinLock*)&opaque_storage_);
+}
+
+void BlockingMutex::Unlock() {
+ OSSpinLockUnlock((OSSpinLock*)&opaque_storage_);
+}
+
+void BlockingMutex::CheckLocked() {
+ CHECK_NE(*(OSSpinLock*)&opaque_storage_, 0);
+}
+
+u64 NanoTime() {
+ timeval tv;
+ internal_memset(&tv, 0, sizeof(tv));
+ gettimeofday(&tv, 0);
+ return (u64)tv.tv_sec * 1000*1000*1000 + tv.tv_usec * 1000;
+}
+
+// This needs to be called once during (single-threaded) initialization so
+// that the lazy setup of timebase_info does not race.
+u64 MonotonicNanoTime() {
+ static mach_timebase_info_data_t timebase_info;
+ if (timebase_info.denom == 0) mach_timebase_info(&timebase_info);
+ return (mach_absolute_time() * timebase_info.numer) / timebase_info.denom;
+}
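+
+// For example, when mach_timebase_info reports numer == denom == 1 (the
+// common case on Intel Macs), ticks are already nanoseconds; a hypothetical
+// 125/3 timebase would instead scale each tick to 125/3 ns.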
+
+uptr GetTlsSize() {
+ return 0;
+}
+
+void InitTlsSize() {
+}
+
+uptr TlsBaseAddr() {
+ uptr segbase = 0;
+#if defined(__x86_64__)
+ asm("movq %%gs:0,%0" : "=r"(segbase));
+#elif defined(__i386__)
+ asm("movl %%gs:0,%0" : "=r"(segbase));
+#endif
+ return segbase;
+}
+
+// The size of the TLS on Darwin does not appear to be well documented;
+// however, the VM memory map suggests that it is 1024 uptrs, i.e. 0x2000
+// bytes on x86_64 and 0x1000 bytes on i386.
+uptr TlsSize() {
+#if defined(__x86_64__) || defined(__i386__)
+ return 1024 * sizeof(uptr);
+#else
+ return 0;
+#endif
+}
+
+void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
+ uptr *tls_addr, uptr *tls_size) {
+#if !SANITIZER_GO
+ uptr stack_top, stack_bottom;
+ GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom);
+ *stk_addr = stack_bottom;
+ *stk_size = stack_top - stack_bottom;
+ *tls_addr = TlsBaseAddr();
+ *tls_size = TlsSize();
+#else
+ *stk_addr = 0;
+ *stk_size = 0;
+ *tls_addr = 0;
+ *tls_size = 0;
+#endif
+}
+
+void ListOfModules::init() {
+ clearOrInit();
+ MemoryMappingLayout memory_mapping(false);
+ memory_mapping.DumpListOfModules(&modules_);
+}
+
+void ListOfModules::fallbackInit() { clear(); }
+
+static HandleSignalMode GetHandleSignalModeImpl(int signum) {
+ switch (signum) {
+ case SIGABRT:
+ return common_flags()->handle_abort;
+ case SIGILL:
+ return common_flags()->handle_sigill;
+ case SIGTRAP:
+ return common_flags()->handle_sigtrap;
+ case SIGFPE:
+ return common_flags()->handle_sigfpe;
+ case SIGSEGV:
+ return common_flags()->handle_segv;
+ case SIGBUS:
+ return common_flags()->handle_sigbus;
+ }
+ return kHandleSignalNo;
+}
+
+HandleSignalMode GetHandleSignalMode(int signum) {
+ // Handling fatal signals on watchOS and tvOS devices is disallowed.
+ if ((SANITIZER_WATCHOS || SANITIZER_TVOS) && !(SANITIZER_IOSSIM))
+ return kHandleSignalNo;
+ HandleSignalMode result = GetHandleSignalModeImpl(signum);
+ if (result == kHandleSignalYes && !common_flags()->allow_user_segv_handler)
+ return kHandleSignalExclusive;
+ return result;
+}
+
+MacosVersion cached_macos_version = MACOS_VERSION_UNINITIALIZED;
+
+MacosVersion GetMacosVersionInternal() {
+ int mib[2] = { CTL_KERN, KERN_OSRELEASE };
+ char version[100];
+ uptr len = 0, maxlen = sizeof(version) / sizeof(version[0]);
+ for (uptr i = 0; i < maxlen; i++) version[i] = '\0';
+ // Get the version length.
+ CHECK_NE(internal_sysctl(mib, 2, 0, &len, 0, 0), -1);
+ CHECK_LT(len, maxlen);
+ CHECK_NE(internal_sysctl(mib, 2, version, &len, 0, 0), -1);
+
+ // Expect <major>.<minor>(.<patch>)
+ CHECK_GE(len, 3);
+ const char *p = version;
+ int major = internal_simple_strtoll(p, &p, /*base=*/10);
+ if (*p != '.') return MACOS_VERSION_UNKNOWN;
+ p += 1;
+ int minor = internal_simple_strtoll(p, &p, /*base=*/10);
+ if (*p != '.') return MACOS_VERSION_UNKNOWN;
+
+ switch (major) {
+ case 9: return MACOS_VERSION_LEOPARD;
+ case 10: return MACOS_VERSION_SNOW_LEOPARD;
+ case 11: return MACOS_VERSION_LION;
+ case 12: return MACOS_VERSION_MOUNTAIN_LION;
+ case 13: return MACOS_VERSION_MAVERICKS;
+ case 14: return MACOS_VERSION_YOSEMITE;
+ case 15: return MACOS_VERSION_EL_CAPITAN;
+ case 16: return MACOS_VERSION_SIERRA;
+ case 17:
+ // Not a typo: Darwin kernel version 17.5 maps to High Sierra 10.13.4.
+ if (minor >= 5)
+ return MACOS_VERSION_HIGH_SIERRA_DOT_RELEASE_4;
+ return MACOS_VERSION_HIGH_SIERRA;
+ case 18: return MACOS_VERSION_MOJAVE;
+ case 19: return MACOS_VERSION_CATALINA;
+ default:
+ if (major < 9) return MACOS_VERSION_UNKNOWN;
+ return MACOS_VERSION_UNKNOWN_NEWER;
+ }
+}
+
+MacosVersion GetMacosVersion() {
+ atomic_uint32_t *cache =
+ reinterpret_cast<atomic_uint32_t*>(&cached_macos_version);
+ MacosVersion result =
+ static_cast<MacosVersion>(atomic_load(cache, memory_order_acquire));
+ if (result == MACOS_VERSION_UNINITIALIZED) {
+ result = GetMacosVersionInternal();
+ atomic_store(cache, result, memory_order_release);
+ }
+ return result;
+}
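+
+// Note that the caching above is racy but idempotent: two threads may both
+// compute the version on first use, but they store the same value, and the
+// release/acquire pair keeps the published value well-ordered.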
+
+bool PlatformHasDifferentMemcpyAndMemmove() {
+ // On OS X 10.7 memcpy() and memmove() are both resolved
+ // into memmove$VARIANT$sse42.
+ // See also https://github.com/google/sanitizers/issues/34.
+ // TODO(glider): need to check dynamically that memcpy() and memmove() are
+ // actually the same function.
+ return GetMacosVersion() == MACOS_VERSION_SNOW_LEOPARD;
+}
+
+uptr GetRSS() {
+ struct task_basic_info info;
+ unsigned count = TASK_BASIC_INFO_COUNT;
+ kern_return_t result =
+ task_info(mach_task_self(), TASK_BASIC_INFO, (task_info_t)&info, &count);
+ if (UNLIKELY(result != KERN_SUCCESS)) {
+ Report("Cannot get task info. Error: %d\n", result);
+ Die();
+ }
+ return info.resident_size;
+}
+
+void *internal_start_thread(void(*func)(void *arg), void *arg) {
+ // Start the thread with signals blocked, otherwise it can steal user signals.
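+ // A new thread inherits the creating thread's signal mask, so blocking
+ // everything around pthread_create() and restoring the old mask afterwards
+ // leaves only the new thread with all signals blocked.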
+ __sanitizer_sigset_t set, old;
+ internal_sigfillset(&set);
+ internal_sigprocmask(SIG_SETMASK, &set, &old);
+ pthread_t th;
+ pthread_create(&th, 0, (void*(*)(void *arg))func, arg);
+ internal_sigprocmask(SIG_SETMASK, &old, 0);
+ return th;
+}
+
+void internal_join_thread(void *th) { pthread_join((pthread_t)th, 0); }
+
+#if !SANITIZER_GO
+static BlockingMutex syslog_lock(LINKER_INITIALIZED);
+#endif
+
+void WriteOneLineToSyslog(const char *s) {
+#if !SANITIZER_GO
+ syslog_lock.CheckLocked();
+ asl_log(nullptr, nullptr, ASL_LEVEL_ERR, "%s", s);
+#endif
+}
+
+void LogMessageOnPrintf(const char *str) {
+ // Log all printf output to CrashLog.
+ if (common_flags()->abort_on_error)
+ CRAppendCrashLogMessage(str);
+}
+
+void LogFullErrorReport(const char *buffer) {
+#if !SANITIZER_GO
+ // Log with os_trace. This will make it into the crash log.
+#if SANITIZER_OS_TRACE
+ if (GetMacosVersion() >= MACOS_VERSION_YOSEMITE) {
+ // os_trace requires the message (format parameter) to be a string literal.
+ if (internal_strncmp(SanitizerToolName, "AddressSanitizer",
+ sizeof("AddressSanitizer") - 1) == 0)
+ os_trace("Address Sanitizer reported a failure.");
+ else if (internal_strncmp(SanitizerToolName, "UndefinedBehaviorSanitizer",
+ sizeof("UndefinedBehaviorSanitizer") - 1) == 0)
+ os_trace("Undefined Behavior Sanitizer reported a failure.");
+ else if (internal_strncmp(SanitizerToolName, "ThreadSanitizer",
+ sizeof("ThreadSanitizer") - 1) == 0)
+ os_trace("Thread Sanitizer reported a failure.");
+ else
+ os_trace("Sanitizer tool reported a failure.");
+
+ if (common_flags()->log_to_syslog)
+ os_trace("Consult syslog for more information.");
+ }
+#endif
+
+ // Log to syslog.
+ // The logging on OS X may call pthread_create so we need the threading
+ // environment to be fully initialized. Also, this should never be called when
+ // holding the thread registry lock since that may result in a deadlock. If
+ // the reporting thread holds the thread registry mutex, and asl_log waits
+ // for GCD to dispatch a new thread, the process will deadlock, because the
+ // pthread_create wrapper needs to acquire the lock as well.
+ BlockingMutexLock l(&syslog_lock);
+ if (common_flags()->log_to_syslog)
+ WriteToSyslog(buffer);
+
+ // The report is added to CrashLog as part of logging all of Printf output.
+#endif
+}
+
+SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
+#if defined(__x86_64__) || defined(__i386__)
+ ucontext_t *ucontext = static_cast<ucontext_t*>(context);
+ return ucontext->uc_mcontext->__es.__err & 2 /*T_PF_WRITE*/ ? WRITE : READ;
+#else
+ return UNKNOWN;
+#endif
+}
+
+static void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
+ ucontext_t *ucontext = (ucontext_t*)context;
+# if defined(__aarch64__)
+ *pc = ucontext->uc_mcontext->__ss.__pc;
+# if defined(__IPHONE_8_0) && __IPHONE_OS_VERSION_MAX_ALLOWED >= __IPHONE_8_0
+ *bp = ucontext->uc_mcontext->__ss.__fp;
+# else
+ *bp = ucontext->uc_mcontext->__ss.__lr;
+# endif
+ *sp = ucontext->uc_mcontext->__ss.__sp;
+# elif defined(__x86_64__)
+ *pc = ucontext->uc_mcontext->__ss.__rip;
+ *bp = ucontext->uc_mcontext->__ss.__rbp;
+ *sp = ucontext->uc_mcontext->__ss.__rsp;
+# elif defined(__arm__)
+ *pc = ucontext->uc_mcontext->__ss.__pc;
+ *bp = ucontext->uc_mcontext->__ss.__r[7];
+ *sp = ucontext->uc_mcontext->__ss.__sp;
+# elif defined(__i386__)
+ *pc = ucontext->uc_mcontext->__ss.__eip;
+ *bp = ucontext->uc_mcontext->__ss.__ebp;
+ *sp = ucontext->uc_mcontext->__ss.__esp;
+# else
+# error "Unknown architecture"
+# endif
+}
+
+void SignalContext::InitPcSpBp() { GetPcSpBp(context, &pc, &sp, &bp); }
+
+void InitializePlatformEarly() {
+ // Only use xnu_fast_mmap on x86_64 and when the OS supports it.
+ use_xnu_fast_mmap =
+#if defined(__x86_64__)
+ GetMacosVersion() >= MACOS_VERSION_HIGH_SIERRA_DOT_RELEASE_4;
+#else
+ false;
+#endif
+}
+
+#if !SANITIZER_GO
+static const char kDyldInsertLibraries[] = "DYLD_INSERT_LIBRARIES";
+LowLevelAllocator allocator_for_env;
+
+// Change the value of the env var |name|, leaking the original value.
+// If |name_value| is NULL, the variable is deleted from the environment,
+// otherwise the corresponding "NAME=value" string is replaced with
+// |name_value|.
+void LeakyResetEnv(const char *name, const char *name_value) {
+ char **env = GetEnviron();
+ uptr name_len = internal_strlen(name);
+ while (*env != 0) {
+ uptr len = internal_strlen(*env);
+ if (len > name_len) {
+ const char *p = *env;
+ if (!internal_memcmp(p, name, name_len) && p[name_len] == '=') {
+ // Match.
+ if (name_value) {
+ // Replace the old value with the new one.
+ *env = const_cast<char*>(name_value);
+ } else {
+ // Shift the subsequent pointers back.
+ char **del = env;
+ do {
+ del[0] = del[1];
+ } while (*del++);
+ }
+ }
+ }
+ env++;
+ }
+}
+
+SANITIZER_WEAK_CXX_DEFAULT_IMPL
+bool ReexecDisabled() {
+ return false;
+}
+
+extern "C" SANITIZER_WEAK_ATTRIBUTE double dyldVersionNumber;
+static const double kMinDyldVersionWithAutoInterposition = 360.0;
+
+bool DyldNeedsEnvVariable() {
+ // Although sanitizer support was added to LLVM on OS X 10.7+, GCC users
+ // may still want to use it on older systems. On older Darwin platforms,
+ // dyld doesn't export the dyldVersionNumber symbol, so we simply return
+ // true.
+ if (!&dyldVersionNumber) return true;
+ // If running on OS X 10.11+ or iOS 9.0+, dyld will interpose even if
+ // DYLD_INSERT_LIBRARIES is not set. However, checking OS version via
+ // GetMacosVersion() doesn't work for the simulator. Let's instead check
+ // `dyldVersionNumber`, which is exported by dyld, against a known version
+ // number from the first OS release where this appeared.
+ return dyldVersionNumber < kMinDyldVersionWithAutoInterposition;
+}
+
+void MaybeReexec() {
+ // FIXME: This should really live in some "InitializePlatform" method.
+ MonotonicNanoTime();
+
+ if (ReexecDisabled()) return;
+
+ // Make sure the dynamic runtime library is preloaded so that the
+ // wrappers work. If it is not, set DYLD_INSERT_LIBRARIES and re-exec
+ // ourselves.
+ Dl_info info;
+ RAW_CHECK(dladdr((void*)((uptr)&__sanitizer_report_error_summary), &info));
+ char *dyld_insert_libraries =
+ const_cast<char*>(GetEnv(kDyldInsertLibraries));
+ uptr old_env_len = dyld_insert_libraries ?
+ internal_strlen(dyld_insert_libraries) : 0;
+ uptr fname_len = internal_strlen(info.dli_fname);
+ const char *dylib_name = StripModuleName(info.dli_fname);
+ uptr dylib_name_len = internal_strlen(dylib_name);
+
+ bool lib_is_in_env = dyld_insert_libraries &&
+ internal_strstr(dyld_insert_libraries, dylib_name);
+ if (DyldNeedsEnvVariable() && !lib_is_in_env) {
+ // DYLD_INSERT_LIBRARIES is not set or does not contain the runtime
+ // library.
+ InternalScopedString program_name(1024);
+ uint32_t buf_size = program_name.size();
+ _NSGetExecutablePath(program_name.data(), &buf_size);
+ char *new_env = const_cast<char*>(info.dli_fname);
+ if (dyld_insert_libraries) {
+ // Append the runtime dylib name to the existing value of
+ // DYLD_INSERT_LIBRARIES.
+ new_env = (char*)allocator_for_env.Allocate(old_env_len + fname_len + 2);
+ internal_strncpy(new_env, dyld_insert_libraries, old_env_len);
+ new_env[old_env_len] = ':';
+ // Copy fname_len and add a trailing zero.
+ internal_strncpy(new_env + old_env_len + 1, info.dli_fname,
+ fname_len + 1);
+ // Ok to use setenv() since the wrappers don't depend on the value of
+ // asan_inited.
+ setenv(kDyldInsertLibraries, new_env, /*overwrite*/1);
+ } else {
+ // Set DYLD_INSERT_LIBRARIES equal to the runtime dylib name.
+ setenv(kDyldInsertLibraries, info.dli_fname, /*overwrite*/0);
+ }
+ VReport(1, "exec()-ing the program with\n");
+ VReport(1, "%s=%s\n", kDyldInsertLibraries, new_env);
+ VReport(1, "to enable wrappers.\n");
+ execv(program_name.data(), *_NSGetArgv());
+
+ // We get here only if execv() failed.
+ Report("ERROR: The process is launched without DYLD_INSERT_LIBRARIES, "
+ "which is required for the sanitizer to work. We tried to set the "
+ "environment variable and re-execute itself, but execv() failed, "
+ "possibly because of sandbox restrictions. Make sure to launch the "
+ "executable with:\n%s=%s\n", kDyldInsertLibraries, new_env);
+ RAW_CHECK("execv failed" && 0);
+ }
+
+ // Verify that interceptors really work. We'll use dlsym to locate
+ // "pthread_create"; if interceptors are working, it should really point
+ // to "wrap_pthread_create" within our own dylib.
+ Dl_info info_pthread_create;
+ void *dlopen_addr = dlsym(RTLD_DEFAULT, "pthread_create");
+ RAW_CHECK(dladdr(dlopen_addr, &info_pthread_create));
+ if (internal_strcmp(info.dli_fname, info_pthread_create.dli_fname) != 0) {
+ Report(
+ "ERROR: Interceptors are not working. This may be because %s is "
+ "loaded too late (e.g. via dlopen). Please launch the executable "
+ "with:\n%s=%s\n",
+ SanitizerToolName, kDyldInsertLibraries, info.dli_fname);
+ RAW_CHECK("interceptors not installed" && 0);
+ }
+
+ if (!lib_is_in_env)
+ return;
+
+ if (!common_flags()->strip_env)
+ return;
+
+ // DYLD_INSERT_LIBRARIES is set and contains the runtime library. Let's remove
+ // the dylib from the environment variable, because interceptors are installed
+ // and we don't want our children to inherit the variable.
+
+ uptr env_name_len = internal_strlen(kDyldInsertLibraries);
+ // Allocate memory to hold the previous env var name, its value, the '='
+ // sign and the '\0' char.
+ char *new_env = (char*)allocator_for_env.Allocate(
+ old_env_len + 2 + env_name_len);
+ RAW_CHECK(new_env);
+ internal_memset(new_env, '\0', old_env_len + 2 + env_name_len);
+ internal_strncpy(new_env, kDyldInsertLibraries, env_name_len);
+ new_env[env_name_len] = '=';
+ char *new_env_pos = new_env + env_name_len + 1;
+
+ // Iterate over colon-separated pieces of |dyld_insert_libraries|.
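+ // For example (hypothetical neighbor names), "liba.dylib:<runtime>:libb.dylib"
+ // is rewritten as "liba.dylib:libb.dylib", dropping only the piece whose
+ // basename matches the runtime dylib name.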
+ char *piece_start = dyld_insert_libraries;
+ char *piece_end = NULL;
+ char *old_env_end = dyld_insert_libraries + old_env_len;
+ do {
+ if (piece_start[0] == ':') piece_start++;
+ piece_end = internal_strchr(piece_start, ':');
+ if (!piece_end) piece_end = dyld_insert_libraries + old_env_len;
+ if ((uptr)(piece_start - dyld_insert_libraries) > old_env_len) break;
+ uptr piece_len = piece_end - piece_start;
+
+ char *filename_start =
+ (char *)internal_memrchr(piece_start, '/', piece_len);
+ uptr filename_len = piece_len;
+ if (filename_start) {
+ filename_start += 1;
+ filename_len = piece_len - (filename_start - piece_start);
+ } else {
+ filename_start = piece_start;
+ }
+
+ // If the current piece isn't the runtime library name,
+ // append it to new_env.
+ if ((dylib_name_len != filename_len) ||
+ (internal_memcmp(filename_start, dylib_name, dylib_name_len) != 0)) {
+ if (new_env_pos != new_env + env_name_len + 1) {
+ new_env_pos[0] = ':';
+ new_env_pos++;
+ }
+ internal_strncpy(new_env_pos, piece_start, piece_len);
+ new_env_pos += piece_len;
+ }
+ // Move on to the next piece.
+ piece_start = piece_end;
+ } while (piece_start < old_env_end);
+
+ // Can't use setenv() here, because it requires the allocator to be
+ // initialized.
+ // FIXME: instead of filtering DYLD_INSERT_LIBRARIES here, do it in
+ // a separate function called after InitializeAllocator().
+ if (new_env_pos == new_env + env_name_len + 1) new_env = NULL;
+ LeakyResetEnv(kDyldInsertLibraries, new_env);
+}
+#endif // SANITIZER_GO
+
+char **GetArgv() {
+ return *_NSGetArgv();
+}
+
+#if SANITIZER_IOS
+// The task_vm_info struct is normally provided by the macOS SDK, but we need
+// fields only available in 10.12+. Declare the struct manually to be able to
+// build against older SDKs.
+struct __sanitizer_task_vm_info {
+ mach_vm_size_t virtual_size;
+ integer_t region_count;
+ integer_t page_size;
+ mach_vm_size_t resident_size;
+ mach_vm_size_t resident_size_peak;
+ mach_vm_size_t device;
+ mach_vm_size_t device_peak;
+ mach_vm_size_t internal;
+ mach_vm_size_t internal_peak;
+ mach_vm_size_t external;
+ mach_vm_size_t external_peak;
+ mach_vm_size_t reusable;
+ mach_vm_size_t reusable_peak;
+ mach_vm_size_t purgeable_volatile_pmap;
+ mach_vm_size_t purgeable_volatile_resident;
+ mach_vm_size_t purgeable_volatile_virtual;
+ mach_vm_size_t compressed;
+ mach_vm_size_t compressed_peak;
+ mach_vm_size_t compressed_lifetime;
+ mach_vm_size_t phys_footprint;
+ mach_vm_address_t min_address;
+ mach_vm_address_t max_address;
+};
+#define __SANITIZER_TASK_VM_INFO_COUNT ((mach_msg_type_number_t) \
+ (sizeof(__sanitizer_task_vm_info) / sizeof(natural_t)))
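+
+// The count is the struct size measured in natural_t words, the unit
+// task_info() expects: it reads the buffer capacity from |count| and writes
+// back the number of words it actually filled in.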
+
+static uptr GetTaskInfoMaxAddress() {
+ __sanitizer_task_vm_info vm_info = {} /* zero initialize */;
+ mach_msg_type_number_t count = __SANITIZER_TASK_VM_INFO_COUNT;
+ int err = task_info(mach_task_self(), TASK_VM_INFO, (int *)&vm_info, &count);
+ return err ? 0 : vm_info.max_address;
+}
+
+uptr GetMaxUserVirtualAddress() {
+ static uptr max_vm = GetTaskInfoMaxAddress();
+ if (max_vm != 0)
+ return max_vm - 1;
+
+ // xnu cannot provide vm address limit
+# if SANITIZER_WORDSIZE == 32
+ return 0xffe00000 - 1;
+# else
+ return 0x200000000 - 1;
+# endif
+}
+
+#else // !SANITIZER_IOS
+
+uptr GetMaxUserVirtualAddress() {
+# if SANITIZER_WORDSIZE == 64
+ return (1ULL << 47) - 1; // 0x00007fffffffffffUL;
+# else // SANITIZER_WORDSIZE == 32
+ static_assert(SANITIZER_WORDSIZE == 32, "Wrong wordsize");
+ return (1ULL << 32) - 1; // 0xffffffff;
+# endif
+}
+#endif
+
+uptr GetMaxVirtualAddress() {
+ return GetMaxUserVirtualAddress();
+}
+
+uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
+ uptr *largest_gap_found,
+ uptr *max_occupied_addr) {
+ typedef vm_region_submap_short_info_data_64_t RegionInfo;
+ enum { kRegionInfoSize = VM_REGION_SUBMAP_SHORT_INFO_COUNT_64 };
+ // Start searching for an available memory region past PAGEZERO, which is
+ // 4 KB on 32-bit and 4 GB on 64-bit.
+ mach_vm_address_t start_address =
+ (SANITIZER_WORDSIZE == 32) ? 0x000000001000 : 0x000100000000;
+
+ mach_vm_address_t address = start_address;
+ mach_vm_address_t free_begin = start_address;
+ kern_return_t kr = KERN_SUCCESS;
+ if (largest_gap_found) *largest_gap_found = 0;
+ if (max_occupied_addr) *max_occupied_addr = 0;
+ while (kr == KERN_SUCCESS) {
+ mach_vm_size_t vmsize = 0;
+ natural_t depth = 0;
+ RegionInfo vminfo;
+ mach_msg_type_number_t count = kRegionInfoSize;
+ kr = mach_vm_region_recurse(mach_task_self(), &address, &vmsize, &depth,
+ (vm_region_info_t)&vminfo, &count);
+ if (kr == KERN_INVALID_ADDRESS) {
+ // No more regions beyond "address"; consider the gap at the end of VM.
+ address = GetMaxVirtualAddress() + 1;
+ vmsize = 0;
+ } else {
+ if (max_occupied_addr) *max_occupied_addr = address + vmsize;
+ }
+ if (free_begin != address) {
+ // We found a free region [free_begin..address-1].
+ uptr gap_start = RoundUpTo((uptr)free_begin + left_padding, alignment);
+ uptr gap_end = RoundDownTo((uptr)address, alignment);
+ uptr gap_size = gap_end > gap_start ? gap_end - gap_start : 0;
+ if (size < gap_size) {
+ return gap_start;
+ }
+
+ if (largest_gap_found && *largest_gap_found < gap_size) {
+ *largest_gap_found = gap_size;
+ }
+ }
+ // Move to the next region.
+ address += vmsize;
+ free_begin = address;
+ }
+
+ // We looked at all free regions and could not find one large enough.
+ return 0;
+}
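+
+// A usage sketch of FindAvailableMemoryRange (hypothetical values): to find
+// space for a 1 GB region with 4 KB alignment and no left padding,
+//   uptr addr = FindAvailableMemoryRange(1ULL << 30, 1 << 12, 0, nullptr,
+//                                        nullptr);
+// which returns the start of the first large-enough gap, or 0 if none.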
+
+// FIXME implement on this platform.
+void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) { }
+
+void SignalContext::DumpAllRegisters(void *context) {
+ Report("Register values:\n");
+
+ ucontext_t *ucontext = (ucontext_t*)context;
+# define DUMPREG64(r) \
+ Printf("%s = 0x%016llx ", #r, ucontext->uc_mcontext->__ss.__ ## r);
+# define DUMPREG32(r) \
+ Printf("%s = 0x%08x ", #r, ucontext->uc_mcontext->__ss.__ ## r);
+# define DUMPREG_(r) Printf(" "); DUMPREG(r);
+# define DUMPREG__(r) Printf(" "); DUMPREG(r);
+# define DUMPREG___(r) Printf(" "); DUMPREG(r);
+
+# if defined(__x86_64__)
+# define DUMPREG(r) DUMPREG64(r)
+ DUMPREG(rax); DUMPREG(rbx); DUMPREG(rcx); DUMPREG(rdx); Printf("\n");
+ DUMPREG(rdi); DUMPREG(rsi); DUMPREG(rbp); DUMPREG(rsp); Printf("\n");
+ DUMPREG_(r8); DUMPREG_(r9); DUMPREG(r10); DUMPREG(r11); Printf("\n");
+ DUMPREG(r12); DUMPREG(r13); DUMPREG(r14); DUMPREG(r15); Printf("\n");
+# elif defined(__i386__)
+# define DUMPREG(r) DUMPREG32(r)
+ DUMPREG(eax); DUMPREG(ebx); DUMPREG(ecx); DUMPREG(edx); Printf("\n");
+ DUMPREG(edi); DUMPREG(esi); DUMPREG(ebp); DUMPREG(esp); Printf("\n");
+# elif defined(__aarch64__)
+# define DUMPREG(r) DUMPREG64(r)
+ DUMPREG_(x[0]); DUMPREG_(x[1]); DUMPREG_(x[2]); DUMPREG_(x[3]); Printf("\n");
+ DUMPREG_(x[4]); DUMPREG_(x[5]); DUMPREG_(x[6]); DUMPREG_(x[7]); Printf("\n");
+ DUMPREG_(x[8]); DUMPREG_(x[9]); DUMPREG(x[10]); DUMPREG(x[11]); Printf("\n");
+ DUMPREG(x[12]); DUMPREG(x[13]); DUMPREG(x[14]); DUMPREG(x[15]); Printf("\n");
+ DUMPREG(x[16]); DUMPREG(x[17]); DUMPREG(x[18]); DUMPREG(x[19]); Printf("\n");
+ DUMPREG(x[20]); DUMPREG(x[21]); DUMPREG(x[22]); DUMPREG(x[23]); Printf("\n");
+ DUMPREG(x[24]); DUMPREG(x[25]); DUMPREG(x[26]); DUMPREG(x[27]); Printf("\n");
+ DUMPREG(x[28]); DUMPREG___(fp); DUMPREG___(lr); DUMPREG___(sp); Printf("\n");
+# elif defined(__arm__)
+# define DUMPREG(r) DUMPREG32(r)
+ DUMPREG_(r[0]); DUMPREG_(r[1]); DUMPREG_(r[2]); DUMPREG_(r[3]); Printf("\n");
+ DUMPREG_(r[4]); DUMPREG_(r[5]); DUMPREG_(r[6]); DUMPREG_(r[7]); Printf("\n");
+ DUMPREG_(r[8]); DUMPREG_(r[9]); DUMPREG(r[10]); DUMPREG(r[11]); Printf("\n");
+ DUMPREG(r[12]); DUMPREG___(sp); DUMPREG___(lr); DUMPREG___(pc); Printf("\n");
+# else
+# error "Unknown architecture"
+# endif
+
+# undef DUMPREG64
+# undef DUMPREG32
+# undef DUMPREG_
+# undef DUMPREG__
+# undef DUMPREG___
+# undef DUMPREG
+}
+
+static inline bool CompareBaseAddress(const LoadedModule &a,
+ const LoadedModule &b) {
+ return a.base_address() < b.base_address();
+}
+
+void FormatUUID(char *out, uptr size, const u8 *uuid) {
+ internal_snprintf(out, size,
+ "<%02X%02X%02X%02X-%02X%02X-%02X%02X-%02X%02X-"
+ "%02X%02X%02X%02X%02X%02X>",
+ uuid[0], uuid[1], uuid[2], uuid[3], uuid[4], uuid[5],
+ uuid[6], uuid[7], uuid[8], uuid[9], uuid[10], uuid[11],
+ uuid[12], uuid[13], uuid[14], uuid[15]);
+}
+
+void PrintModuleMap() {
+ Printf("Process module map:\n");
+ MemoryMappingLayout memory_mapping(false);
+ InternalMmapVector<LoadedModule> modules;
+ modules.reserve(128);
+ memory_mapping.DumpListOfModules(&modules);
+ Sort(modules.data(), modules.size(), CompareBaseAddress);
+ for (uptr i = 0; i < modules.size(); ++i) {
+ char uuid_str[128];
+ FormatUUID(uuid_str, sizeof(uuid_str), modules[i].uuid());
+ Printf("0x%zx-0x%zx %s (%s) %s\n", modules[i].base_address(),
+ modules[i].max_executable_address(), modules[i].full_name(),
+ ModuleArchToString(modules[i].arch()), uuid_str);
+ }
+ Printf("End of module map.\n");
+}
+
+void CheckNoDeepBind(const char *filename, int flag) {
+ // Do nothing.
+}
+
+bool GetRandom(void *buffer, uptr length, bool blocking) {
+ if (!buffer || !length || length > 256)
+ return false;
+ // arc4random never fails.
+ arc4random_buf(buffer, length);
+ return true;
+}
+
+u32 GetNumberOfCPUs() {
+ return (u32)sysconf(_SC_NPROCESSORS_ONLN);
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_MAC
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_mac.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_mac.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_mac.h (revision 351984)
@@ -0,0 +1,77 @@
+//===-- sanitizer_mac.h -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between various sanitizers' runtime libraries and
+// provides definitions for OSX-specific functions.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_MAC_H
+#define SANITIZER_MAC_H
+
+#include "sanitizer_common.h"
+#include "sanitizer_platform.h"
+#if SANITIZER_MAC
+#include "sanitizer_posix.h"
+
+namespace __sanitizer {
+
+struct MemoryMappingLayoutData {
+ int current_image;
+ u32 current_magic;
+ u32 current_filetype;
+ ModuleArch current_arch;
+ u8 current_uuid[kModuleUUIDSize];
+ int current_load_cmd_count;
+ const char *current_load_cmd_addr;
+ bool current_instrumented;
+};
+
+enum MacosVersion {
+ MACOS_VERSION_UNINITIALIZED = 0,
+ MACOS_VERSION_UNKNOWN,
+ MACOS_VERSION_LEOPARD,
+ MACOS_VERSION_SNOW_LEOPARD,
+ MACOS_VERSION_LION,
+ MACOS_VERSION_MOUNTAIN_LION,
+ MACOS_VERSION_MAVERICKS,
+ MACOS_VERSION_YOSEMITE,
+ MACOS_VERSION_EL_CAPITAN,
+ MACOS_VERSION_SIERRA,
+ MACOS_VERSION_HIGH_SIERRA,
+ MACOS_VERSION_HIGH_SIERRA_DOT_RELEASE_4,
+ MACOS_VERSION_MOJAVE,
+ MACOS_VERSION_CATALINA,
+ MACOS_VERSION_UNKNOWN_NEWER
+};
+
+MacosVersion GetMacosVersion();
+
+char **GetEnviron();
+
+void RestrictMemoryToMaxAddress(uptr max_address);
+
+} // namespace __sanitizer
+
+extern "C" {
+static char __crashreporter_info_buff__[__sanitizer::kErrorMessageBufferSize] =
+ {};
+static const char *__crashreporter_info__ __attribute__((__used__)) =
+ &__crashreporter_info_buff__[0];
+asm(".desc ___crashreporter_info__, 0x10");
+} // extern "C"
+
+namespace __sanitizer {
+static BlockingMutex crashreporter_info_mutex(LINKER_INITIALIZED);
+
+INLINE void CRAppendCrashLogMessage(const char *msg) {
+ BlockingMutexLock l(&crashreporter_info_mutex);
+ internal_strlcat(__crashreporter_info_buff__, msg,
+ sizeof(__crashreporter_info_buff__)); }
+} // namespace __sanitizer
+
+#endif // SANITIZER_MAC
+#endif // SANITIZER_MAC_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_mac.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_mac_libcdep.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_mac_libcdep.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_mac_libcdep.cc (revision 351984)
@@ -0,0 +1,29 @@
+//===-- sanitizer_mac_libcdep.cc ------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between various sanitizers' runtime libraries and
+// implements OSX-specific functions.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+#if SANITIZER_MAC
+#include "sanitizer_mac.h"
+
+#include <sys/mman.h>
+
+namespace __sanitizer {
+
+void RestrictMemoryToMaxAddress(uptr max_address) {
+ uptr size_to_mmap = GetMaxUserVirtualAddress() + 1 - max_address;
+ void *res = MmapFixedNoAccess(max_address, size_to_mmap, "high gap");
+ CHECK(res != MAP_FAILED);
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_MAC
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_mac_libcdep.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_malloc_mac.inc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_malloc_mac.inc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_malloc_mac.inc (revision 351984)
@@ -0,0 +1,415 @@
+//===-- sanitizer_malloc_mac.inc --------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains Mac-specific malloc interceptors and a custom zone
+// implementation, which together replace the system allocator.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#if !SANITIZER_MAC
+#error "This file should only be compiled on Darwin."
+#endif
+
+#include <AvailabilityMacros.h>
+#include <CoreFoundation/CFBase.h>
+#include <dlfcn.h>
+#include <malloc/malloc.h>
+#include <sys/mman.h>
+
+#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_mac.h"
+
+// Similar code is used in Google Perftools,
+// https://github.com/gperftools/gperftools.
+
+namespace __sanitizer {
+
+extern malloc_zone_t sanitizer_zone;
+
+struct sanitizer_malloc_introspection_t : public malloc_introspection_t {
+ // IMPORTANT: Do not change the order, alignment, or types of these fields to
+ // maintain binary compatibility. You should only add fields to this struct.
+
+ // Used to track changes to the allocator that will affect
+ // zone enumeration.
+ u64 allocator_enumeration_version;
+ uptr allocator_ptr;
+ uptr allocator_size;
+};
+
+u64 GetMallocZoneAllocatorEnumerationVersion() {
+ // This represents the current allocator ABI version.
+ // This field should be incremented every time the Allocator
+ // ABI changes in a way that breaks allocator enumeration.
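+ // For example, a (hypothetical) change to how allocated chunks are laid
+ // out would break out-of-process enumeration and must bump this version.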
+ return 0;
+}
+
+} // namespace __sanitizer
+
+INTERCEPTOR(malloc_zone_t *, malloc_create_zone,
+ vm_size_t start_size, unsigned zone_flags) {
+ COMMON_MALLOC_ENTER();
+ uptr page_size = GetPageSizeCached();
+ uptr allocated_size = RoundUpTo(sizeof(sanitizer_zone), page_size);
+ COMMON_MALLOC_MEMALIGN(page_size, allocated_size);
+ malloc_zone_t *new_zone = (malloc_zone_t *)p;
+ internal_memcpy(new_zone, &sanitizer_zone, sizeof(sanitizer_zone));
+ new_zone->zone_name = NULL; // The name will be changed anyway.
+ if (GetMacosVersion() >= MACOS_VERSION_LION) {
+ // Prevent the client app from overwriting the zone contents.
+ // Library functions that need to modify the zone will set PROT_WRITE on it.
+ // This matches the behavior of malloc_create_zone() on OSX 10.7 and higher.
+ mprotect(new_zone, allocated_size, PROT_READ);
+ }
+ // We're explicitly *NOT* registering the zone.
+ return new_zone;
+}
+
+INTERCEPTOR(void, malloc_destroy_zone, malloc_zone_t *zone) {
+ COMMON_MALLOC_ENTER();
+ // We don't need to do anything here. We're not registering new zones, so
+ // we don't need to unregister. Just un-mprotect and free() the zone.
+ if (GetMacosVersion() >= MACOS_VERSION_LION) {
+ uptr page_size = GetPageSizeCached();
+ uptr allocated_size = RoundUpTo(sizeof(sanitizer_zone), page_size);
+ mprotect(zone, allocated_size, PROT_READ | PROT_WRITE);
+ }
+ if (zone->zone_name) {
+ COMMON_MALLOC_FREE((void *)zone->zone_name);
+ }
+ COMMON_MALLOC_FREE(zone);
+}
+
+INTERCEPTOR(malloc_zone_t *, malloc_default_zone, void) {
+ COMMON_MALLOC_ENTER();
+ return &sanitizer_zone;
+}
+
+INTERCEPTOR(malloc_zone_t *, malloc_default_purgeable_zone, void) {
+ // FIXME: ASan should support purgeable allocations.
+ // https://github.com/google/sanitizers/issues/139
+ COMMON_MALLOC_ENTER();
+ return &sanitizer_zone;
+}
+
+INTERCEPTOR(void, malloc_make_purgeable, void *ptr) {
+ // FIXME: ASan should support purgeable allocations. Ignoring them is fine
+ // for now.
+ COMMON_MALLOC_ENTER();
+}
+
+INTERCEPTOR(int, malloc_make_nonpurgeable, void *ptr) {
+ // FIXME: ASan should support purgeable allocations. Ignoring them is fine
+ // for now.
+ COMMON_MALLOC_ENTER();
+ // Must return 0 if the contents were not purged since the last call to
+ // malloc_make_purgeable().
+ return 0;
+}
+
+INTERCEPTOR(void, malloc_set_zone_name, malloc_zone_t *zone, const char *name) {
+ COMMON_MALLOC_ENTER();
+ // Allocate |sizeof(COMMON_MALLOC_ZONE_NAME "-") + internal_strlen(name)|
+ // bytes.
+ size_t buflen =
+ sizeof(COMMON_MALLOC_ZONE_NAME "-") + (name ? internal_strlen(name) : 0);
+ InternalScopedString new_name(buflen);
+ if (name && zone->introspect == sanitizer_zone.introspect) {
+ new_name.append(COMMON_MALLOC_ZONE_NAME "-%s", name);
+ name = new_name.data();
+ }
+
+ // Call the system malloc's implementation for both external and our zones,
+ // since that appropriately changes VM region protections on the zone.
+ REAL(malloc_set_zone_name)(zone, name);
+}
+
+INTERCEPTOR(void *, malloc, size_t size) {
+ COMMON_MALLOC_ENTER();
+ COMMON_MALLOC_MALLOC(size);
+ return p;
+}
+
+INTERCEPTOR(void, free, void *ptr) {
+ COMMON_MALLOC_ENTER();
+ if (!ptr) return;
+ COMMON_MALLOC_FREE(ptr);
+}
+
+INTERCEPTOR(void *, realloc, void *ptr, size_t size) {
+ COMMON_MALLOC_ENTER();
+ COMMON_MALLOC_REALLOC(ptr, size);
+ return p;
+}
+
+INTERCEPTOR(void *, calloc, size_t nmemb, size_t size) {
+ COMMON_MALLOC_ENTER();
+ COMMON_MALLOC_CALLOC(nmemb, size);
+ return p;
+}
+
+INTERCEPTOR(void *, valloc, size_t size) {
+ COMMON_MALLOC_ENTER();
+ COMMON_MALLOC_VALLOC(size);
+ return p;
+}
+
+INTERCEPTOR(size_t, malloc_good_size, size_t size) {
+ COMMON_MALLOC_ENTER();
+ return sanitizer_zone.introspect->good_size(&sanitizer_zone, size);
+}
+
+INTERCEPTOR(int, posix_memalign, void **memptr, size_t alignment, size_t size) {
+ COMMON_MALLOC_ENTER();
+ CHECK(memptr);
+ COMMON_MALLOC_POSIX_MEMALIGN(memptr, alignment, size);
+ return res;
+}
+
+namespace {
+
+// TODO(glider): the __sanitizer_mz_* functions should be united with the Linux
+// wrappers, as they are basically copied from there.
+extern "C"
+SANITIZER_INTERFACE_ATTRIBUTE
+size_t __sanitizer_mz_size(malloc_zone_t* zone, const void* ptr) {
+ COMMON_MALLOC_SIZE(ptr);
+ return size;
+}
+
+extern "C"
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__sanitizer_mz_malloc(malloc_zone_t *zone, uptr size) {
+ COMMON_MALLOC_ENTER();
+ COMMON_MALLOC_MALLOC(size);
+ return p;
+}
+
+extern "C"
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__sanitizer_mz_calloc(malloc_zone_t *zone, size_t nmemb, size_t size) {
+ if (UNLIKELY(!COMMON_MALLOC_SANITIZER_INITIALIZED)) {
+ // Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
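+ // Serve those early requests from a static pool instead. The pool memory
+ // is never freed, which is acceptable for the handful of allocations
+ // dlsym performs before interception is ready.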
+ const size_t kCallocPoolSize = 1024;
+ static uptr calloc_memory_for_dlsym[kCallocPoolSize];
+ static size_t allocated;
+ size_t size_in_words = ((nmemb * size) + kWordSize - 1) / kWordSize;
+ void *mem = (void*)&calloc_memory_for_dlsym[allocated];
+ allocated += size_in_words;
+ CHECK(allocated < kCallocPoolSize);
+ return mem;
+ }
+ COMMON_MALLOC_CALLOC(nmemb, size);
+ return p;
+}
+
+extern "C"
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__sanitizer_mz_valloc(malloc_zone_t *zone, size_t size) {
+ COMMON_MALLOC_ENTER();
+ COMMON_MALLOC_VALLOC(size);
+ return p;
+}
+
+// TODO(glider): the allocation callbacks need to be refactored.
+extern "C"
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_mz_free(malloc_zone_t *zone, void *ptr) {
+ if (!ptr) return;
+ COMMON_MALLOC_FREE(ptr);
+}
+
+#define GET_ZONE_FOR_PTR(ptr) \
+ malloc_zone_t *zone_ptr = malloc_zone_from_ptr(ptr); \
+ const char *zone_name = (zone_ptr == 0) ? 0 : zone_ptr->zone_name
+
+extern "C"
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__sanitizer_mz_realloc(malloc_zone_t *zone, void *ptr, size_t new_size) {
+ if (!ptr) {
+ COMMON_MALLOC_MALLOC(new_size);
+ return p;
+ } else {
+ COMMON_MALLOC_SIZE(ptr);
+ if (size) {
+ COMMON_MALLOC_REALLOC(ptr, new_size);
+ return p;
+ } else {
+ // We can't recover from reallocating an unknown address, because
+ // this would require reading at most |new_size| bytes from
+ // potentially inaccessible memory.
+ GET_ZONE_FOR_PTR(ptr);
+ COMMON_MALLOC_REPORT_UNKNOWN_REALLOC(ptr, zone_ptr, zone_name);
+ return nullptr;
+ }
+ }
+}
+
+extern "C"
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_mz_destroy(malloc_zone_t* zone) {
+ // A no-op -- we will not be destroyed!
+ Report("__sanitizer_mz_destroy() called -- ignoring\n");
+}
+
+extern "C"
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__sanitizer_mz_memalign(malloc_zone_t *zone, size_t align, size_t size) {
+ COMMON_MALLOC_ENTER();
+ COMMON_MALLOC_MEMALIGN(align, size);
+ return p;
+}
+
+// This public API exists purely for testing purposes.
+extern "C"
+SANITIZER_INTERFACE_ATTRIBUTE
+malloc_zone_t* __sanitizer_mz_default_zone() {
+ return &sanitizer_zone;
+}
+
+// This function is currently unused, and we build with -Werror.
+#if 0
+void __sanitizer_mz_free_definite_size(
+ malloc_zone_t* zone, void *ptr, size_t size) {
+ // TODO(glider): check that |size| is valid.
+ UNIMPLEMENTED();
+}
+#endif
+
+#ifndef COMMON_MALLOC_HAS_ZONE_ENUMERATOR
+#error "COMMON_MALLOC_HAS_ZONE_ENUMERATOR must be defined"
+#endif
+static_assert((COMMON_MALLOC_HAS_ZONE_ENUMERATOR) == 0 ||
+ (COMMON_MALLOC_HAS_ZONE_ENUMERATOR) == 1,
+ "COMMON_MALLOC_HAS_ZONE_ENUMERATOR must be 0 or 1");
+
+#if COMMON_MALLOC_HAS_ZONE_ENUMERATOR
+// Forward declare and expect the implementation to be provided by the
+// includer.
+kern_return_t mi_enumerator(task_t task, void *, unsigned type_mask,
+ vm_address_t zone_address, memory_reader_t reader,
+ vm_range_recorder_t recorder);
+#else
+// Provide stub implementation that fails.
+kern_return_t mi_enumerator(task_t task, void *, unsigned type_mask,
+ vm_address_t zone_address, memory_reader_t reader,
+ vm_range_recorder_t recorder) {
+ // Not supported.
+ return KERN_FAILURE;
+}
+#endif
+
+#ifndef COMMON_MALLOC_HAS_EXTRA_INTROSPECTION_INIT
+#error "COMMON_MALLOC_HAS_EXTRA_INTROSPECTION_INIT must be defined"
+#endif
+static_assert((COMMON_MALLOC_HAS_EXTRA_INTROSPECTION_INIT) == 0 ||
+ (COMMON_MALLOC_HAS_EXTRA_INTROSPECTION_INIT) == 1,
+ "COMMON_MALLOC_HAS_EXTRA_INTROSPECTION_INIT must be 0 or 1");
+#if COMMON_MALLOC_HAS_EXTRA_INTROSPECTION_INIT
+// Forward declare and expect the implementation to be provided by the
+// includer.
+void mi_extra_init(
+ sanitizer_malloc_introspection_t *mi);
+#else
+void mi_extra_init(
+ sanitizer_malloc_introspection_t *mi) {
+ // Just zero initialize the fields.
+ mi->allocator_ptr = 0;
+ mi->allocator_size = 0;
+}
+#endif
+
+size_t mi_good_size(malloc_zone_t *zone, size_t size) {
+ // I think it's always safe to return size, but we could probably do better.
+ return size;
+}
+
+boolean_t mi_check(malloc_zone_t *zone) {
+ UNIMPLEMENTED();
+}
+
+void mi_print(malloc_zone_t *zone, boolean_t verbose) {
+ UNIMPLEMENTED();
+}
+
+void mi_log(malloc_zone_t *zone, void *address) {
+ // I don't think we support anything like this.
+}
+
+void mi_force_lock(malloc_zone_t *zone) {
+ COMMON_MALLOC_FORCE_LOCK();
+}
+
+void mi_force_unlock(malloc_zone_t *zone) {
+ COMMON_MALLOC_FORCE_UNLOCK();
+}
+
+void mi_statistics(malloc_zone_t *zone, malloc_statistics_t *stats) {
+ COMMON_MALLOC_FILL_STATS(zone, stats);
+}
+
+boolean_t mi_zone_locked(malloc_zone_t *zone) {
+ // UNIMPLEMENTED();
+ return false;
+}
+
+} // unnamed namespace
+
+namespace COMMON_MALLOC_NAMESPACE {
+
+void InitMallocZoneFields() {
+ static sanitizer_malloc_introspection_t sanitizer_zone_introspection;
+ // Ok to use internal_memset, these places are not performance-critical.
+ internal_memset(&sanitizer_zone_introspection, 0,
+ sizeof(sanitizer_zone_introspection));
+
+ sanitizer_zone_introspection.enumerator = &mi_enumerator;
+ sanitizer_zone_introspection.good_size = &mi_good_size;
+ sanitizer_zone_introspection.check = &mi_check;
+ sanitizer_zone_introspection.print = &mi_print;
+ sanitizer_zone_introspection.log = &mi_log;
+ sanitizer_zone_introspection.force_lock = &mi_force_lock;
+ sanitizer_zone_introspection.force_unlock = &mi_force_unlock;
+ sanitizer_zone_introspection.statistics = &mi_statistics;
+ sanitizer_zone_introspection.zone_locked = &mi_zone_locked;
+
+ // Set current allocator enumeration version.
+ sanitizer_zone_introspection.allocator_enumeration_version =
+ GetMallocZoneAllocatorEnumerationVersion();
+
+ // Perform any sanitizer specific initialization.
+ mi_extra_init(&sanitizer_zone_introspection);
+
+ internal_memset(&sanitizer_zone, 0, sizeof(malloc_zone_t));
+
+ // Use version 6 for OSX >= 10.6.
+ sanitizer_zone.version = 6;
+ sanitizer_zone.zone_name = COMMON_MALLOC_ZONE_NAME;
+ sanitizer_zone.size = &__sanitizer_mz_size;
+ sanitizer_zone.malloc = &__sanitizer_mz_malloc;
+ sanitizer_zone.calloc = &__sanitizer_mz_calloc;
+ sanitizer_zone.valloc = &__sanitizer_mz_valloc;
+ sanitizer_zone.free = &__sanitizer_mz_free;
+ sanitizer_zone.realloc = &__sanitizer_mz_realloc;
+ sanitizer_zone.destroy = &__sanitizer_mz_destroy;
+ sanitizer_zone.batch_malloc = 0;
+ sanitizer_zone.batch_free = 0;
+ sanitizer_zone.free_definite_size = 0;
+ sanitizer_zone.memalign = &__sanitizer_mz_memalign;
+ sanitizer_zone.introspect = &sanitizer_zone_introspection;
+}
+
+void ReplaceSystemMalloc() {
+ InitMallocZoneFields();
+
+ // Register the zone.
+ malloc_zone_register(&sanitizer_zone);
+}
+
+} // namespace COMMON_MALLOC_NAMESPACE
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_malloc_mac.inc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_mutex.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_mutex.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_mutex.h (revision 351984)
@@ -0,0 +1,223 @@
+//===-- sanitizer_mutex.h ---------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_MUTEX_H
+#define SANITIZER_MUTEX_H
+
+#include "sanitizer_atomic.h"
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_libc.h"
+
+namespace __sanitizer {
+
+class StaticSpinMutex {
+ public:
+ void Init() {
+ atomic_store(&state_, 0, memory_order_relaxed);
+ }
+
+ void Lock() {
+ if (TryLock())
+ return;
+ LockSlow();
+ }
+
+ bool TryLock() {
+ return atomic_exchange(&state_, 1, memory_order_acquire) == 0;
+ }
+
+ void Unlock() {
+ atomic_store(&state_, 0, memory_order_release);
+ }
+
+ void CheckLocked() {
+ CHECK_EQ(atomic_load(&state_, memory_order_relaxed), 1);
+ }
+
+ private:
+ atomic_uint8_t state_;
+
+ void NOINLINE LockSlow() {
+ for (int i = 0;; i++) {
+ if (i < 10)
+ proc_yield(10);
+ else
+ internal_sched_yield();
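+ // Test-and-test-and-set: re-check with a relaxed load first so that
+ // spinning threads do not bounce the cache line with failed exchanges.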
+ if (atomic_load(&state_, memory_order_relaxed) == 0
+ && atomic_exchange(&state_, 1, memory_order_acquire) == 0)
+ return;
+ }
+ }
+};
+
+class SpinMutex : public StaticSpinMutex {
+ public:
+ SpinMutex() {
+ Init();
+ }
+
+ private:
+ SpinMutex(const SpinMutex&);
+ void operator=(const SpinMutex&);
+};
+
+class BlockingMutex {
+ public:
+ explicit constexpr BlockingMutex(LinkerInitialized)
+ : opaque_storage_ {0, }, owner_ {0} {}
+ BlockingMutex();
+ void Lock();
+ void Unlock();
+
+ // This function does not guarantee an explicit check that the calling thread
+ // is the thread which owns the mutex. This behavior, while more strictly
+ // correct, causes problems in cases like StopTheWorld, where a parent thread
+ // owns the mutex but a child checks that it is locked. Rather than
+ // maintaining complex state to work around those situations, the check only
+ // checks that the mutex is owned, and assumes callers to be generally
+ // well-behaved.
+ void CheckLocked();
+
+ private:
+ // Solaris mutex_t has a member that requires 64-bit alignment.
+ ALIGNED(8) uptr opaque_storage_[10];
+ uptr owner_; // for debugging
+};
+
+// Reader-writer spin mutex.
+class RWMutex {
+ public:
+ RWMutex() {
+ atomic_store(&state_, kUnlocked, memory_order_relaxed);
+ }
+
+ ~RWMutex() {
+ CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
+ }
+
+ void Lock() {
+ u32 cmp = kUnlocked;
+ if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
+ memory_order_acquire))
+ return;
+ LockSlow();
+ }
+
+ void Unlock() {
+ u32 prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
+ DCHECK_NE(prev & kWriteLock, 0);
+ (void)prev;
+ }
+
+ void ReadLock() {
+ u32 prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
+ if ((prev & kWriteLock) == 0)
+ return;
+ ReadLockSlow();
+ }
+
+ void ReadUnlock() {
+ u32 prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
+ DCHECK_EQ(prev & kWriteLock, 0);
+ DCHECK_GT(prev & ~kWriteLock, 0);
+ (void)prev;
+ }
+
+ void CheckLocked() {
+ CHECK_NE(atomic_load(&state_, memory_order_relaxed), kUnlocked);
+ }
+
+ private:
+ atomic_uint32_t state_;
+
+ enum {
+ kUnlocked = 0,
+ kWriteLock = 1,
+ kReadLock = 2
+ };
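+ // state_ thus packs the writer flag into bit 0 and a reader count into the
+ // higher bits: each ReadLock() adds kReadLock (2), so readers never touch
+ // the writer bit.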
+
+ void NOINLINE LockSlow() {
+ for (int i = 0;; i++) {
+ if (i < 10)
+ proc_yield(10);
+ else
+ internal_sched_yield();
+ u32 cmp = atomic_load(&state_, memory_order_relaxed);
+ if (cmp == kUnlocked &&
+ atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
+ memory_order_acquire))
+ return;
+ }
+ }
+
+ void NOINLINE ReadLockSlow() {
+ for (int i = 0;; i++) {
+ if (i < 10)
+ proc_yield(10);
+ else
+ internal_sched_yield();
+ u32 prev = atomic_load(&state_, memory_order_acquire);
+ if ((prev & kWriteLock) == 0)
+ return;
+ }
+ }
+
+ RWMutex(const RWMutex&);
+ void operator = (const RWMutex&);
+};
+
+template<typename MutexType>
+class GenericScopedLock {
+ public:
+ explicit GenericScopedLock(MutexType *mu)
+ : mu_(mu) {
+ mu_->Lock();
+ }
+
+ ~GenericScopedLock() {
+ mu_->Unlock();
+ }
+
+ private:
+ MutexType *mu_;
+
+ GenericScopedLock(const GenericScopedLock&);
+ void operator=(const GenericScopedLock&);
+};
+
+template<typename MutexType>
+class GenericScopedReadLock {
+ public:
+ explicit GenericScopedReadLock(MutexType *mu)
+ : mu_(mu) {
+ mu_->ReadLock();
+ }
+
+ ~GenericScopedReadLock() {
+ mu_->ReadUnlock();
+ }
+
+ private:
+ MutexType *mu_;
+
+ GenericScopedReadLock(const GenericScopedReadLock&);
+ void operator=(const GenericScopedReadLock&);
+};
+
+typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
+typedef GenericScopedLock<BlockingMutex> BlockingMutexLock;
+typedef GenericScopedLock<RWMutex> RWMutexLock;
+typedef GenericScopedReadLock<RWMutex> RWMutexReadLock;
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_MUTEX_H
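A minimal usage sketch for the scoped-lock helpers above (illustrative only;
the two globals and both functions are hypothetical):

    #include "sanitizer_common/sanitizer_mutex.h"

    namespace __sanitizer {
    static SpinMutex g_registry_mutex;   // hypothetical global lock
    static RWMutex g_table_lock;         // hypothetical reader/writer lock

    void UpdateRegistry() {
      SpinMutexLock l(&g_registry_mutex);  // RAII: Lock() now, Unlock() at scope exit
      // ... mutate state guarded by g_registry_mutex ...
    }

    void ReadTable() {
      GenericScopedReadLock<RWMutex> l(&g_table_lock);  // shared read lock
      // ... concurrent readers allowed; a writer is excluded ...
    }
    }  // namespace __sanitizer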
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_netbsd.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_netbsd.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_netbsd.cc (revision 351984)
@@ -0,0 +1,338 @@
+//===-- sanitizer_netbsd.cc -----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between Sanitizer run-time libraries and implements
+// NetBSD-specific functions from sanitizer_libc.h.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+
+#if SANITIZER_NETBSD
+
+#include "sanitizer_common.h"
+#include "sanitizer_flags.h"
+#include "sanitizer_getauxval.h"
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_linux.h"
+#include "sanitizer_mutex.h"
+#include "sanitizer_placement_new.h"
+#include "sanitizer_procmaps.h"
+
+#include <sys/param.h>
+#include <sys/types.h>
+
+#include <sys/exec.h>
+#include <sys/mman.h>
+#include <sys/ptrace.h>
+#include <sys/resource.h>
+#include <sys/stat.h>
+#include <sys/syscall.h>
+#include <sys/sysctl.h>
+#include <sys/time.h>
+
+#include <dlfcn.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <link.h>
+#include <lwp.h>
+#include <pthread.h>
+#include <sched.h>
+#include <signal.h>
+#include <ucontext.h>
+#include <unistd.h>
+
+extern "C" void *__mmap(void *, size_t, int, int, int, int,
+ off_t) SANITIZER_WEAK_ATTRIBUTE;
+extern "C" int __sysctl(const int *, unsigned int, void *, size_t *,
+ const void *, size_t) SANITIZER_WEAK_ATTRIBUTE;
+extern "C" int _sys_close(int) SANITIZER_WEAK_ATTRIBUTE;
+extern "C" int _sys_open(const char *, int, ...) SANITIZER_WEAK_ATTRIBUTE;
+extern "C" ssize_t _sys_read(int, void *, size_t) SANITIZER_WEAK_ATTRIBUTE;
+extern "C" ssize_t _sys_write(int, const void *,
+ size_t) SANITIZER_WEAK_ATTRIBUTE;
+extern "C" int __ftruncate(int, int, off_t) SANITIZER_WEAK_ATTRIBUTE;
+extern "C" ssize_t _sys_readlink(const char *, char *,
+ size_t) SANITIZER_WEAK_ATTRIBUTE;
+extern "C" int _sys_sched_yield() SANITIZER_WEAK_ATTRIBUTE;
+extern "C" int _sys___nanosleep50(const void *,
+ void *) SANITIZER_WEAK_ATTRIBUTE;
+extern "C" int _sys_execve(const char *, char *const[],
+ char *const[]) SANITIZER_WEAK_ATTRIBUTE;
+extern "C" off_t __lseek(int, int, off_t, int) SANITIZER_WEAK_ATTRIBUTE;
+extern "C" int __fork() SANITIZER_WEAK_ATTRIBUTE;
+extern "C" int _sys___sigprocmask14(int, const void *,
+ void *) SANITIZER_WEAK_ATTRIBUTE;
+extern "C" int _sys___wait450(int wpid, int *, int,
+ void *) SANITIZER_WEAK_ATTRIBUTE;
+
+namespace __sanitizer {
+
+static void *GetRealLibcAddress(const char *symbol) {
+ void *real = dlsym(RTLD_NEXT, symbol);
+ if (!real)
+ real = dlsym(RTLD_DEFAULT, symbol);
+ if (!real) {
+ Printf("GetRealLibcAddress failed for symbol=%s", symbol);
+ Die();
+ }
+ return real;
+}
+
+#define _REAL(func, ...) real##_##func(__VA_ARGS__)
+#define DEFINE__REAL(ret_type, func, ...) \
+ static ret_type (*real_##func)(__VA_ARGS__) = NULL; \
+ if (!real_##func) { \
+ real_##func = (ret_type(*)(__VA_ARGS__))GetRealLibcAddress(#func); \
+ } \
+ CHECK(real_##func);
+
+// --------------- sanitizer_libc.h
+uptr internal_mmap(void *addr, uptr length, int prot, int flags, int fd,
+ OFF_T offset) {
+ CHECK(&__mmap);
+ return (uptr)__mmap(addr, length, prot, flags, fd, 0, offset);
+}
+
+uptr internal_munmap(void *addr, uptr length) {
+ DEFINE__REAL(int, munmap, void *a, uptr b);
+ return _REAL(munmap, addr, length);
+}
+
+int internal_mprotect(void *addr, uptr length, int prot) {
+ DEFINE__REAL(int, mprotect, void *a, uptr b, int c);
+ return _REAL(mprotect, addr, length, prot);
+}
+
+uptr internal_close(fd_t fd) {
+ CHECK(&_sys_close);
+ return _sys_close(fd);
+}
+
+uptr internal_open(const char *filename, int flags) {
+ CHECK(&_sys_open);
+ return _sys_open(filename, flags);
+}
+
+uptr internal_open(const char *filename, int flags, u32 mode) {
+ CHECK(&_sys_open);
+ return _sys_open(filename, flags, mode);
+}
+
+uptr internal_read(fd_t fd, void *buf, uptr count) {
+ sptr res;
+ CHECK(&_sys_read);
+ HANDLE_EINTR(res, (sptr)_sys_read(fd, buf, (size_t)count));
+ return res;
+}
+
+uptr internal_write(fd_t fd, const void *buf, uptr count) {
+ sptr res;
+ CHECK(&_sys_write);
+ HANDLE_EINTR(res, (sptr)_sys_write(fd, buf, count));
+ return res;
+}
+
+uptr internal_ftruncate(fd_t fd, uptr size) {
+ sptr res;
+ CHECK(&__ftruncate);
+ HANDLE_EINTR(res, __ftruncate(fd, 0, (s64)size));
+ return res;
+}
+
+uptr internal_stat(const char *path, void *buf) {
+ DEFINE__REAL(int, __stat50, const char *a, void *b);
+ return _REAL(__stat50, path, buf);
+}
+
+uptr internal_lstat(const char *path, void *buf) {
+ DEFINE__REAL(int, __lstat50, const char *a, void *b);
+ return _REAL(__lstat50, path, buf);
+}
+
+uptr internal_fstat(fd_t fd, void *buf) {
+ DEFINE__REAL(int, __fstat50, int a, void *b);
+ return _REAL(__fstat50, fd, buf);
+}
+
+uptr internal_filesize(fd_t fd) {
+ struct stat st;
+ if (internal_fstat(fd, &st))
+ return -1;
+ return (uptr)st.st_size;
+}
+
+uptr internal_dup(int oldfd) {
+ DEFINE__REAL(int, dup, int a);
+ return _REAL(dup, oldfd);
+}
+
+uptr internal_dup2(int oldfd, int newfd) {
+ DEFINE__REAL(int, dup2, int a, int b);
+ return _REAL(dup2, oldfd, newfd);
+}
+
+uptr internal_readlink(const char *path, char *buf, uptr bufsize) {
+ CHECK(&_sys_readlink);
+ return (uptr)_sys_readlink(path, buf, bufsize);
+}
+
+uptr internal_unlink(const char *path) {
+ DEFINE__REAL(int, unlink, const char *a);
+ return _REAL(unlink, path);
+}
+
+uptr internal_rename(const char *oldpath, const char *newpath) {
+ DEFINE__REAL(int, rename, const char *a, const char *b);
+ return _REAL(rename, oldpath, newpath);
+}
+
+uptr internal_sched_yield() {
+ CHECK(&_sys_sched_yield);
+ return _sys_sched_yield();
+}
+
+void internal__exit(int exitcode) {
+ DEFINE__REAL(void, _exit, int a);
+ _REAL(_exit, exitcode);
+ Die(); // Unreachable.
+}
+
+unsigned int internal_sleep(unsigned int seconds) {
+ struct timespec ts;
+ ts.tv_sec = seconds;
+ ts.tv_nsec = 0;
+ CHECK(&_sys___nanosleep50);
+ int res = _sys___nanosleep50(&ts, &ts);
+ if (res)
+ return ts.tv_sec;
+ return 0;
+}
+
+uptr internal_execve(const char *filename, char *const argv[],
+ char *const envp[]) {
+ CHECK(&_sys_execve);
+ return _sys_execve(filename, argv, envp);
+}
+
+tid_t GetTid() {
+ DEFINE__REAL(int, _lwp_self);
+ return _REAL(_lwp_self);
+}
+
+int TgKill(pid_t pid, tid_t tid, int sig) {
+ DEFINE__REAL(int, _lwp_kill, int a, int b);
+ (void)pid;
+ return _REAL(_lwp_kill, tid, sig);
+}
+
+u64 NanoTime() {
+ timeval tv;
+ DEFINE__REAL(int, __gettimeofday50, void *a, void *b);
+ internal_memset(&tv, 0, sizeof(tv));
+ _REAL(__gettimeofday50, &tv, 0);
+ return (u64)tv.tv_sec * 1000 * 1000 * 1000 + tv.tv_usec * 1000;
+}
+
+uptr internal_clock_gettime(__sanitizer_clockid_t clk_id, void *tp) {
+ DEFINE__REAL(int, __clock_gettime50, __sanitizer_clockid_t a, void *b);
+ return _REAL(__clock_gettime50, clk_id, tp);
+}
+
+uptr internal_ptrace(int request, int pid, void *addr, int data) {
+ DEFINE__REAL(int, ptrace, int a, int b, void *c, int d);
+ return _REAL(ptrace, request, pid, addr, data);
+}
+
+uptr internal_waitpid(int pid, int *status, int options) {
+ CHECK(&_sys___wait450);
+ return _sys___wait450(pid, status, options, 0 /* rusage */);
+}
+
+uptr internal_getpid() {
+ DEFINE__REAL(int, getpid);
+ return _REAL(getpid);
+}
+
+uptr internal_getppid() {
+ DEFINE__REAL(int, getppid);
+ return _REAL(getppid);
+}
+
+uptr internal_getdents(fd_t fd, void *dirp, unsigned int count) {
+ DEFINE__REAL(int, __getdents30, int a, void *b, size_t c);
+ return _REAL(__getdents30, fd, dirp, count);
+}
+
+uptr internal_lseek(fd_t fd, OFF_T offset, int whence) {
+ CHECK(&__lseek);
+ return __lseek(fd, 0, offset, whence);
+}
+
+uptr internal_prctl(int option, uptr arg2, uptr arg3, uptr arg4, uptr arg5) {
+ Printf("internal_prctl not implemented for NetBSD");
+ Die();
+ return 0;
+}
+
+uptr internal_sigaltstack(const void *ss, void *oss) {
+ DEFINE__REAL(int, __sigaltstack14, const void *a, void *b);
+ return _REAL(__sigaltstack14, ss, oss);
+}
+
+int internal_fork() {
+ CHECK(&__fork);
+ return __fork();
+}
+
+int internal_sysctl(const int *name, unsigned int namelen, void *oldp,
+ uptr *oldlenp, const void *newp, uptr newlen) {
+ CHECK(&__sysctl);
+ return __sysctl(name, namelen, oldp, (size_t *)oldlenp, newp, (size_t)newlen);
+}
+
+int internal_sysctlbyname(const char *sname, void *oldp, uptr *oldlenp,
+ const void *newp, uptr newlen) {
+ DEFINE__REAL(int, sysctlbyname, const char *a, void *b, size_t *c,
+ const void *d, size_t e);
+ return _REAL(sysctlbyname, sname, oldp, (size_t *)oldlenp, newp,
+ (size_t)newlen);
+}
+
+uptr internal_sigprocmask(int how, __sanitizer_sigset_t *set,
+ __sanitizer_sigset_t *oldset) {
+ CHECK(&_sys___sigprocmask14);
+ return _sys___sigprocmask14(how, set, oldset);
+}
+
+void internal_sigfillset(__sanitizer_sigset_t *set) {
+ DEFINE__REAL(int, __sigfillset14, const void *a);
+ (void)_REAL(__sigfillset14, set);
+}
+
+void internal_sigemptyset(__sanitizer_sigset_t *set) {
+ DEFINE__REAL(int, __sigemptyset14, const void *a);
+ (void)_REAL(__sigemptyset14, set);
+}
+
+void internal_sigdelset(__sanitizer_sigset_t *set, int signo) {
+ DEFINE__REAL(int, __sigdelset14, const void *a, int b);
+ (void)_REAL(__sigdelset14, set, signo);
+}
+
+uptr internal_clone(int (*fn)(void *), void *child_stack, int flags,
+ void *arg) {
+ DEFINE__REAL(int, clone, int (*a)(void *b), void *c, int d, void *e);
+
+ return _REAL(clone, fn, child_stack, flags, arg);
+}
+
+} // namespace __sanitizer
+
+#endif
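For reference, a hand expansion of the DEFINE__REAL/_REAL pair used throughout
this file, shown for the munmap wrapper (an illustrative sketch, not extra
source):

    uptr internal_munmap(void *addr, uptr length) {
      // DEFINE__REAL(int, munmap, void *a, uptr b) becomes:
      static int (*real_munmap)(void *a, uptr b) = NULL;
      if (!real_munmap)
        real_munmap = (int (*)(void *a, uptr b))GetRealLibcAddress("munmap");
      CHECK(real_munmap);
      // ...and _REAL(munmap, addr, length) becomes:
      return real_munmap(addr, length);
    }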
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_openbsd.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_openbsd.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_openbsd.cc (revision 351984)
@@ -0,0 +1,115 @@
+//===-- sanitizer_openbsd.cc ----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between various sanitizers' runtime libraries and
+// implements OpenBSD-specific functions.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+#if SANITIZER_OPENBSD
+
+#include "sanitizer_common.h"
+#include "sanitizer_flags.h"
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_placement_new.h"
+#include "sanitizer_platform_limits_posix.h"
+#include "sanitizer_procmaps.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <pthread.h>
+#include <sched.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/mman.h>
+#include <sys/shm.h>
+#include <sys/sysctl.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+extern char **environ;
+
+namespace __sanitizer {
+
+uptr internal_mmap(void *addr, size_t length, int prot, int flags, int fd,
+ u64 offset) {
+ return (uptr)mmap(addr, length, prot, flags, fd, offset);
+}
+
+uptr internal_munmap(void *addr, uptr length) { return munmap(addr, length); }
+
+int internal_mprotect(void *addr, uptr length, int prot) {
+ return mprotect(addr, length, prot);
+}
+
+int internal_sysctlbyname(const char *sname, void *oldp, uptr *oldlenp,
+ const void *newp, uptr newlen) {
+ Printf("internal_sysctlbyname not implemented for OpenBSD");
+ Die();
+ return 0;
+}
+
+uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) {
+ // On OpenBSD we cannot get the full path
+ struct kinfo_proc kp;
+ uptr kl;
+ const int Mib[4] = {CTL_KERN, KERN_PROC, KERN_PROC_PID, getpid()};
+ if (internal_sysctl(Mib, ARRAY_SIZE(Mib), &kp, &kl, NULL, 0) != -1)
+ return internal_snprintf(buf,
+ (KI_MAXCOMLEN < buf_len ? KI_MAXCOMLEN : buf_len),
+ "%s", kp.p_comm);
+ return (uptr)0;
+}
+
+static void GetArgsAndEnv(char ***argv, char ***envp) {
+ uptr nargv;
+ uptr nenv;
+ int argvmib[4] = {CTL_KERN, KERN_PROC_ARGS, getpid(), KERN_PROC_ARGV};
+ int envmib[4] = {CTL_KERN, KERN_PROC_ARGS, getpid(), KERN_PROC_ENV};
+ if (internal_sysctl(argvmib, 4, NULL, &nargv, NULL, 0) == -1) {
+ Printf("sysctl KERN_PROC_NARGV failed\n");
+ Die();
+ }
+ if (internal_sysctl(envmib, 4, NULL, &nenv, NULL, 0) == -1) {
+ Printf("sysctl KERN_PROC_NENV failed\n");
+ Die();
+ }
+ if (internal_sysctl(argvmib, 4, &argv, &nargv, NULL, 0) == -1) {
+ Printf("sysctl KERN_PROC_ARGV failed\n");
+ Die();
+ }
+ if (internal_sysctl(envmib, 4, &envp, &nenv, NULL, 0) == -1) {
+ Printf("sysctl KERN_PROC_ENV failed\n");
+ Die();
+ }
+}
+
+char **GetArgv() {
+ char **argv, **envp;
+ GetArgsAndEnv(&argv, &envp);
+ return argv;
+}
+
+char **GetEnviron() {
+ char **argv, **envp;
+ GetArgsAndEnv(&argv, &envp);
+ return envp;
+}
+
+void ReExec() {
+ UNIMPLEMENTED();
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_OPENBSD
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_openbsd.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_persistent_allocator.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_persistent_allocator.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_persistent_allocator.cc (revision 351984)
@@ -0,0 +1,18 @@
+//===-- sanitizer_persistent_allocator.cc -----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries.
+//===----------------------------------------------------------------------===//
+#include "sanitizer_persistent_allocator.h"
+
+namespace __sanitizer {
+
+PersistentAllocator thePersistentAllocator;
+
+} // namespace __sanitizer
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_persistent_allocator.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_persistent_allocator.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_persistent_allocator.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_persistent_allocator.h (revision 351984)
@@ -0,0 +1,71 @@
+//===-- sanitizer_persistent_allocator.h ------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// A fast memory allocator that supports neither free() nor realloc().
+// All allocations are forever.
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_PERSISTENT_ALLOCATOR_H
+#define SANITIZER_PERSISTENT_ALLOCATOR_H
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_mutex.h"
+#include "sanitizer_atomic.h"
+#include "sanitizer_common.h"
+
+namespace __sanitizer {
+
+class PersistentAllocator {
+ public:
+ void *alloc(uptr size);
+
+ private:
+ void *tryAlloc(uptr size);
+ StaticSpinMutex mtx; // Protects alloc of new blocks for region allocator.
+ atomic_uintptr_t region_pos; // Region allocator for Node's.
+ atomic_uintptr_t region_end;
+};
+
+inline void *PersistentAllocator::tryAlloc(uptr size) {
+ // Optimistic lock-free allocation: essentially, try to bump the region ptr.
+ for (;;) {
+ uptr cmp = atomic_load(&region_pos, memory_order_acquire);
+ uptr end = atomic_load(&region_end, memory_order_acquire);
+ if (cmp == 0 || cmp + size > end) return nullptr;
+ if (atomic_compare_exchange_weak(&region_pos, &cmp, cmp + size,
+ memory_order_acquire))
+ return (void *)cmp;
+ }
+}
+
+inline void *PersistentAllocator::alloc(uptr size) {
+ // First, try to allocate optimistically.
+ void *s = tryAlloc(size);
+ if (s) return s;
+ // If failed, lock, retry and alloc new superblock.
+ SpinMutexLock l(&mtx);
+ for (;;) {
+ s = tryAlloc(size);
+ if (s) return s;
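+ // Publish an empty region first so that concurrent lock-free tryAlloc calls
+ // fail fast (cmp == 0) while the new superblock is being mapped.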
+ atomic_store(&region_pos, 0, memory_order_relaxed);
+ uptr allocsz = 64 * 1024;
+ if (allocsz < size) allocsz = size;
+ uptr mem = (uptr)MmapOrDie(allocsz, "stack depot");
+ atomic_store(&region_end, mem + allocsz, memory_order_release);
+ atomic_store(&region_pos, mem, memory_order_release);
+ }
+}
+
+extern PersistentAllocator thePersistentAllocator;
+inline void *PersistentAlloc(uptr sz) {
+ return thePersistentAllocator.alloc(sz);
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_PERSISTENT_ALLOCATOR_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_persistent_allocator.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
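A usage sketch for the allocator above (illustrative; PersistentStrDup is a
hypothetical helper, internal_strlen/internal_memcpy come from
sanitizer_libc.h):

    #include "sanitizer_common/sanitizer_libc.h"
    #include "sanitizer_common/sanitizer_persistent_allocator.h"

    namespace __sanitizer {
    // Copy a string into never-freed storage, e.g. to intern stack-depot
    // strings; the fast path is a single lock-free bump of region_pos.
    static char *PersistentStrDup(const char *s) {
      uptr len = internal_strlen(s);
      char *mem = (char *)PersistentAlloc(len + 1);
      internal_memcpy(mem, s, len + 1);
      return mem;
    }
    }  // namespace __sanitizer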
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_placement_new.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_placement_new.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_placement_new.h (revision 351984)
@@ -0,0 +1,24 @@
+//===-- sanitizer_placement_new.h -------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries.
+//
+// The file provides 'placement new'.
+// Do not include it into header files, only into source files.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_PLACEMENT_NEW_H
+#define SANITIZER_PLACEMENT_NEW_H
+
+#include "sanitizer_internal_defs.h"
+
+inline void *operator new(__sanitizer::operator_new_size_type sz, void *p) {
+ return p;
+}
+
+#endif // SANITIZER_PLACEMENT_NEW_H
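A sketch of the intended use (illustrative; Widget and MakeWidget are
hypothetical, MmapOrDie is declared in sanitizer_common.h): the runtime cannot
use the global operator new, so raw internal memory is turned into constructed
objects like this:

    #include "sanitizer_common/sanitizer_common.h"
    #include "sanitizer_common/sanitizer_placement_new.h"

    namespace __sanitizer {
    struct Widget {
      explicit Widget(int v) : value(v) {}
      int value;
    };

    static Widget *MakeWidget(int v) {
      void *mem = MmapOrDie(sizeof(Widget), "Widget");  // raw pages
      return new (mem) Widget(v);  // constructed via the operator above
    }
    }  // namespace __sanitizer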
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_platform.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_platform.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_platform.h (revision 351984)
@@ -0,0 +1,358 @@
+//===-- sanitizer_platform.h ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Common platform macros.
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_PLATFORM_H
+#define SANITIZER_PLATFORM_H
+
+#if !defined(__linux__) && !defined(__FreeBSD__) && !defined(__NetBSD__) && \
+ !defined(__OpenBSD__) && !defined(__APPLE__) && !defined(_WIN32) && \
+ !defined(__Fuchsia__) && !defined(__rtems__) && \
+ !(defined(__sun__) && defined(__svr4__))
+# error "This operating system is not supported"
+#endif
+
+#if defined(__linux__)
+# define SANITIZER_LINUX 1
+#else
+# define SANITIZER_LINUX 0
+#endif
+
+#if defined(__FreeBSD__)
+# define SANITIZER_FREEBSD 1
+#else
+# define SANITIZER_FREEBSD 0
+#endif
+
+#if defined(__NetBSD__)
+# define SANITIZER_NETBSD 1
+#else
+# define SANITIZER_NETBSD 0
+#endif
+
+#if defined(__OpenBSD__)
+# define SANITIZER_OPENBSD 1
+#else
+# define SANITIZER_OPENBSD 0
+#endif
+
+#if defined(__sun__) && defined(__svr4__)
+# define SANITIZER_SOLARIS 1
+#else
+# define SANITIZER_SOLARIS 0
+#endif
+
+#if defined(__APPLE__)
+# define SANITIZER_MAC 1
+# include <TargetConditionals.h>
+# if TARGET_OS_IPHONE
+# define SANITIZER_IOS 1
+# else
+# define SANITIZER_IOS 0
+# endif
+# if TARGET_OS_SIMULATOR
+# define SANITIZER_IOSSIM 1
+# else
+# define SANITIZER_IOSSIM 0
+# endif
+#else
+# define SANITIZER_MAC 0
+# define SANITIZER_IOS 0
+# define SANITIZER_IOSSIM 0
+#endif
+
+#if defined(__APPLE__) && TARGET_OS_IPHONE && TARGET_OS_WATCH
+# define SANITIZER_WATCHOS 1
+#else
+# define SANITIZER_WATCHOS 0
+#endif
+
+#if defined(__APPLE__) && TARGET_OS_IPHONE && TARGET_OS_TV
+# define SANITIZER_TVOS 1
+#else
+# define SANITIZER_TVOS 0
+#endif
+
+#if defined(_WIN32)
+# define SANITIZER_WINDOWS 1
+#else
+# define SANITIZER_WINDOWS 0
+#endif
+
+#if defined(_WIN64)
+# define SANITIZER_WINDOWS64 1
+#else
+# define SANITIZER_WINDOWS64 0
+#endif
+
+#if defined(__ANDROID__)
+# define SANITIZER_ANDROID 1
+#else
+# define SANITIZER_ANDROID 0
+#endif
+
+#if defined(__Fuchsia__)
+# define SANITIZER_FUCHSIA 1
+#else
+# define SANITIZER_FUCHSIA 0
+#endif
+
+#if defined(__rtems__)
+# define SANITIZER_RTEMS 1
+#else
+# define SANITIZER_RTEMS 0
+#endif
+
+#define SANITIZER_POSIX \
+ (SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_MAC || \
+ SANITIZER_NETBSD || SANITIZER_OPENBSD || SANITIZER_SOLARIS)
+
+#if __LP64__ || defined(_WIN64)
+# define SANITIZER_WORDSIZE 64
+#else
+# define SANITIZER_WORDSIZE 32
+#endif
+
+#if SANITIZER_WORDSIZE == 64
+# define FIRST_32_SECOND_64(a, b) (b)
+#else
+# define FIRST_32_SECOND_64(a, b) (a)
+#endif
+
+#if defined(__x86_64__) && !defined(_LP64)
+# define SANITIZER_X32 1
+#else
+# define SANITIZER_X32 0
+#endif
+
+#if defined(__mips__)
+# define SANITIZER_MIPS 1
+# if defined(__mips64)
+# define SANITIZER_MIPS32 0
+# define SANITIZER_MIPS64 1
+# else
+# define SANITIZER_MIPS32 1
+# define SANITIZER_MIPS64 0
+# endif
+#else
+# define SANITIZER_MIPS 0
+# define SANITIZER_MIPS32 0
+# define SANITIZER_MIPS64 0
+#endif
+
+#if defined(__s390__)
+# define SANITIZER_S390 1
+# if defined(__s390x__)
+# define SANITIZER_S390_31 0
+# define SANITIZER_S390_64 1
+# else
+# define SANITIZER_S390_31 1
+# define SANITIZER_S390_64 0
+# endif
+#else
+# define SANITIZER_S390 0
+# define SANITIZER_S390_31 0
+# define SANITIZER_S390_64 0
+#endif
+
+#if defined(__powerpc__)
+# define SANITIZER_PPC 1
+# if defined(__powerpc64__)
+# define SANITIZER_PPC32 0
+# define SANITIZER_PPC64 1
+// 64-bit PPC has two ABIs (v1 and v2). The old powerpc64 target is
+// big-endian, and uses v1 ABI (known for its function descriptors),
+// while the new powerpc64le target is little-endian and uses v2.
+// In theory, you could convince gcc to compile for their evil twins
+// (eg. big-endian v2), but you won't find such combinations in the wild
+// (it'd require bootstrapping a whole system, which would be quite painful
+// - there's no target triple for that). LLVM doesn't support them either.
+# if _CALL_ELF == 2
+# define SANITIZER_PPC64V1 0
+# define SANITIZER_PPC64V2 1
+# else
+# define SANITIZER_PPC64V1 1
+# define SANITIZER_PPC64V2 0
+# endif
+# else
+# define SANITIZER_PPC32 1
+# define SANITIZER_PPC64 0
+# define SANITIZER_PPC64V1 0
+# define SANITIZER_PPC64V2 0
+# endif
+#else
+# define SANITIZER_PPC 0
+# define SANITIZER_PPC32 0
+# define SANITIZER_PPC64 0
+# define SANITIZER_PPC64V1 0
+# define SANITIZER_PPC64V2 0
+#endif
+
+#if defined(__arm__)
+# define SANITIZER_ARM 1
+#else
+# define SANITIZER_ARM 0
+#endif
+
+#if SANITIZER_SOLARIS && SANITIZER_WORDSIZE == 32
+# define SANITIZER_SOLARIS32 1
+#else
+# define SANITIZER_SOLARIS32 0
+#endif
+
+#if defined(__myriad2__)
+# define SANITIZER_MYRIAD2 1
+#else
+# define SANITIZER_MYRIAD2 0
+#endif
+
+// By default, SizeClassAllocator64 may be used on 64-bit platforms.
+// But in some cases (e.g. AArch64's 39-bit address space) SizeClassAllocator64
+// does not work well and we need to fall back to SizeClassAllocator32.
+// For such platforms build this code with -DSANITIZER_CAN_USE_ALLOCATOR64=0 or
+// change the definition of SANITIZER_CAN_USE_ALLOCATOR64 here.
+#ifndef SANITIZER_CAN_USE_ALLOCATOR64
+# if (SANITIZER_ANDROID && defined(__aarch64__)) || SANITIZER_FUCHSIA
+# define SANITIZER_CAN_USE_ALLOCATOR64 1
+# elif defined(__mips64) || defined(__aarch64__)
+# define SANITIZER_CAN_USE_ALLOCATOR64 0
+# else
+# define SANITIZER_CAN_USE_ALLOCATOR64 (SANITIZER_WORDSIZE == 64)
+# endif
+#endif
+
+// The range of addresses which can be returned by mmap.
+// FIXME: this value should be different on different platforms. Larger values
+// will still work but will consume more memory for TwoLevelByteMap.
+#if defined(__mips__)
+# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 40)
+#elif defined(__aarch64__)
+# if SANITIZER_MAC
+// Darwin iOS/ARM64 has a 36-bit VMA, 64GiB VM
+# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 36)
+# else
+# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 48)
+# endif
+#elif defined(__sparc__)
+#define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 52)
+#else
+# define SANITIZER_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47)
+#endif
+
+// Whether the addresses are sign-extended from the VMA range to the word.
+// The SPARC64 Linux port implements this to split the VMA space into two
+// non-contiguous halves with a huge hole in the middle.
+#if defined(__sparc__) && SANITIZER_WORDSIZE == 64
+#define SANITIZER_SIGN_EXTENDED_ADDRESSES 1
+#else
+#define SANITIZER_SIGN_EXTENDED_ADDRESSES 0
+#endif
+
+// The AArch64 linux port uses the canonical syscall set as mandated by
+// the upstream linux community for all new ports. Other ports may still
+// use legacy syscalls.
+#ifndef SANITIZER_USES_CANONICAL_LINUX_SYSCALLS
+# if defined(__aarch64__) && SANITIZER_LINUX
+# define SANITIZER_USES_CANONICAL_LINUX_SYSCALLS 1
+# else
+# define SANITIZER_USES_CANONICAL_LINUX_SYSCALLS 0
+# endif
+#endif
+
+// uid16 syscalls can only be used when the following conditions are
+// met:
+// * target is one of arm32, x86-32, sparc32, sh or m68k
+// * libc version is libc5, glibc-2.0, glibc-2.1 or glibc-2.2 to 2.15
+// built against > linux-2.2 kernel headers
+// Since we don't want to include libc headers here, we check the
+// target only.
+#if defined(__arm__) || SANITIZER_X32 || defined(__sparc__)
+#define SANITIZER_USES_UID16_SYSCALLS 1
+#else
+#define SANITIZER_USES_UID16_SYSCALLS 0
+#endif
+
+#if defined(__mips__)
+# define SANITIZER_POINTER_FORMAT_LENGTH FIRST_32_SECOND_64(8, 10)
+#else
+# define SANITIZER_POINTER_FORMAT_LENGTH FIRST_32_SECOND_64(8, 12)
+#endif
+
+/// \macro MSC_PREREQ
+/// \brief Is the compiler MSVC of at least the specified version?
+/// The common version values to check for are:
+/// * 1800: Microsoft Visual Studio 2013 / 12.0
+/// * 1900: Microsoft Visual Studio 2015 / 14.0
+#ifdef _MSC_VER
+# define MSC_PREREQ(version) (_MSC_VER >= (version))
+#else
+# define MSC_PREREQ(version) 0
+#endif
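+// Usage sketch (illustrative): code requiring Visual Studio 2015 or newer
+// would be guarded with #if MSC_PREREQ(1900).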
+
+#if SANITIZER_MAC && !(defined(__arm64__) && SANITIZER_IOS)
+# define SANITIZER_NON_UNIQUE_TYPEINFO 0
+#else
+# define SANITIZER_NON_UNIQUE_TYPEINFO 1
+#endif
+
+// On linux, some architectures had an ABI transition from 64-bit long double
+// (ie. same as double) to 128-bit long double. On those, glibc symbols
+// involving long doubles come in two versions, and we need to pass the
+// correct one to dlvsym when intercepting them.
+#if SANITIZER_LINUX && (SANITIZER_S390 || SANITIZER_PPC32 || SANITIZER_PPC64V1)
+#define SANITIZER_NLDBL_VERSION "GLIBC_2.4"
+#endif
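+// (Illustratively, an interceptor then resolves the 128-bit variant roughly
+// via dlvsym(RTLD_NEXT, "frexpl", SANITIZER_NLDBL_VERSION).)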
+
+#if SANITIZER_GO == 0
+# define SANITIZER_GO 0
+#endif
+
+// On PowerPC and ARM Thumb, calling pthread_exit() causes LSan to detect leaks.
+// pthread_exit() performs unwinding that leads to dlopen'ing libgcc_s.so.
+// dlopen mallocs the "libgcc_s.so" string, which confuses LSan; it fails to
+// realize that this allocation happens in the dynamic linker and should be
+// ignored.
+#if SANITIZER_PPC || defined(__thumb__)
+# define SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT 1
+#else
+# define SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT 0
+#endif
+
+#if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_NETBSD || \
+ SANITIZER_OPENBSD || SANITIZER_SOLARIS
+# define SANITIZER_MADVISE_DONTNEED MADV_FREE
+#else
+# define SANITIZER_MADVISE_DONTNEED MADV_DONTNEED
+#endif
+
+// Older gcc have issues aligning to a constexpr, and require an integer.
+// See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=56859 among others.
+#if defined(__powerpc__) || defined(__powerpc64__)
+# define SANITIZER_CACHE_LINE_SIZE 128
+#else
+# define SANITIZER_CACHE_LINE_SIZE 64
+#endif
+
+// Enable offline markup symbolizer for Fuchsia and RTEMS.
+#if SANITIZER_FUCHSIA || SANITIZER_RTEMS
+#define SANITIZER_SYMBOLIZER_MARKUP 1
+#else
+#define SANITIZER_SYMBOLIZER_MARKUP 0
+#endif
+
+// Enable support for sanitizer initialization that is compatible with the
+// sanitizer library being loaded via `dlopen()`.
+#if SANITIZER_MAC
+#define SANITIZER_SUPPORTS_INIT_FOR_DLOPEN 1
+#else
+#define SANITIZER_SUPPORTS_INIT_FOR_DLOPEN 0
+#endif
+
+#endif // SANITIZER_PLATFORM_H
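A consumption sketch for the macros above (illustrative; the constant and both
sizes are hypothetical):

    #include "sanitizer_common/sanitizer_platform.h"

    // Pick a word-size-dependent constant: 1 MiB on 32-bit, 1 GiB on 64-bit.
    static const unsigned long long kRegionSize =
        FIRST_32_SECOND_64(1ULL << 20, 1ULL << 30);

    #if SANITIZER_LINUX && !SANITIZER_ANDROID
    // Glibc-only code paths are guarded like this throughout the runtime.
    #endif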
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_platform_interceptors.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_platform_interceptors.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_platform_interceptors.h (revision 351984)
@@ -0,0 +1,569 @@
+//===-- sanitizer_platform_interceptors.h -----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines macros telling whether sanitizer tools can/should intercept
+// given library functions on a given platform.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_PLATFORM_INTERCEPTORS_H
+#define SANITIZER_PLATFORM_INTERCEPTORS_H
+
+#include "sanitizer_internal_defs.h"
+
+#if SANITIZER_POSIX
+# define SI_POSIX 1
+#else
+# define SI_POSIX 0
+#endif
+
+#if !SANITIZER_WINDOWS
+# define SI_WINDOWS 0
+#else
+# define SI_WINDOWS 1
+#endif
+
+#if SI_WINDOWS && SI_POSIX
+# error "Windows is not POSIX!"
+#endif
+
+#if SI_POSIX
+# include "sanitizer_platform_limits_freebsd.h"
+# include "sanitizer_platform_limits_netbsd.h"
+# include "sanitizer_platform_limits_openbsd.h"
+# include "sanitizer_platform_limits_posix.h"
+# include "sanitizer_platform_limits_solaris.h"
+#endif
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+# define SI_LINUX_NOT_ANDROID 1
+#else
+# define SI_LINUX_NOT_ANDROID 0
+#endif
+
+#if SANITIZER_ANDROID
+# define SI_ANDROID 1
+#else
+# define SI_ANDROID 0
+#endif
+
+#if SANITIZER_FREEBSD
+# define SI_FREEBSD 1
+#else
+# define SI_FREEBSD 0
+#endif
+
+#if SANITIZER_NETBSD
+# define SI_NETBSD 1
+#else
+# define SI_NETBSD 0
+#endif
+
+#if SANITIZER_OPENBSD
+#define SI_OPENBSD 1
+#else
+#define SI_OPENBSD 0
+#endif
+
+#if SANITIZER_LINUX
+# define SI_LINUX 1
+#else
+# define SI_LINUX 0
+#endif
+
+#if SANITIZER_MAC
+# define SI_MAC 1
+# define SI_NOT_MAC 0
+#else
+# define SI_MAC 0
+# define SI_NOT_MAC 1
+#endif
+
+#if SANITIZER_IOS
+# define SI_IOS 1
+#else
+# define SI_IOS 0
+#endif
+
+#if SANITIZER_FUCHSIA
+# define SI_NOT_FUCHSIA 0
+#else
+# define SI_NOT_FUCHSIA 1
+#endif
+
+#if SANITIZER_RTEMS
+# define SI_NOT_RTEMS 0
+#else
+# define SI_NOT_RTEMS 1
+#endif
+
+#if SANITIZER_SOLARIS
+# define SI_SOLARIS 1
+#else
+# define SI_SOLARIS 0
+#endif
+
+#if SANITIZER_SOLARIS32
+# define SI_SOLARIS32 1
+#else
+# define SI_SOLARIS32 0
+#endif
+
+#if SANITIZER_POSIX && !SANITIZER_MAC
+# define SI_POSIX_NOT_MAC 1
+#else
+# define SI_POSIX_NOT_MAC 0
+#endif
+
+#if SANITIZER_LINUX && !SANITIZER_FREEBSD
+# define SI_LINUX_NOT_FREEBSD 1
+# else
+# define SI_LINUX_NOT_FREEBSD 0
+#endif
+
+#define SANITIZER_INTERCEPT_STRLEN SI_NOT_FUCHSIA
+#define SANITIZER_INTERCEPT_STRNLEN (SI_NOT_MAC && SI_NOT_FUCHSIA)
+#define SANITIZER_INTERCEPT_STRCMP SI_NOT_FUCHSIA
+#define SANITIZER_INTERCEPT_STRSTR SI_NOT_FUCHSIA
+#define SANITIZER_INTERCEPT_STRCASESTR SI_POSIX
+#define SANITIZER_INTERCEPT_STRTOK SI_NOT_FUCHSIA
+#define SANITIZER_INTERCEPT_STRCHR SI_NOT_FUCHSIA
+#define SANITIZER_INTERCEPT_STRCHRNUL SI_POSIX_NOT_MAC
+#define SANITIZER_INTERCEPT_STRRCHR SI_NOT_FUCHSIA
+#define SANITIZER_INTERCEPT_STRSPN SI_NOT_FUCHSIA
+#define SANITIZER_INTERCEPT_STRPBRK SI_NOT_FUCHSIA
+#define SANITIZER_INTERCEPT_TEXTDOMAIN SI_LINUX_NOT_ANDROID || SI_SOLARIS
+#define SANITIZER_INTERCEPT_STRCASECMP SI_POSIX
+#define SANITIZER_INTERCEPT_MEMSET 1
+#define SANITIZER_INTERCEPT_MEMMOVE 1
+#define SANITIZER_INTERCEPT_MEMCPY 1
+#define SANITIZER_INTERCEPT_MEMCMP SI_NOT_FUCHSIA
+#define SANITIZER_INTERCEPT_BCMP \
+ SANITIZER_INTERCEPT_MEMCMP && \
+ ((SI_POSIX && _GNU_SOURCE) || SI_NETBSD || SI_OPENBSD || SI_FREEBSD)
+#define SANITIZER_INTERCEPT_STRNDUP SI_POSIX
+#define SANITIZER_INTERCEPT___STRNDUP SI_LINUX_NOT_FREEBSD
+#if defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && \
+ __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 1070
+# define SI_MAC_DEPLOYMENT_BELOW_10_7 1
+#else
+# define SI_MAC_DEPLOYMENT_BELOW_10_7 0
+#endif
+// memmem on Darwin doesn't exist on 10.6
+// FIXME: enable memmem on Windows.
+#define SANITIZER_INTERCEPT_MEMMEM (SI_POSIX && !SI_MAC_DEPLOYMENT_BELOW_10_7)
+#define SANITIZER_INTERCEPT_MEMCHR SI_NOT_FUCHSIA
+#define SANITIZER_INTERCEPT_MEMRCHR \
+ (SI_FREEBSD || SI_LINUX || SI_NETBSD || SI_OPENBSD)
+
+#define SANITIZER_INTERCEPT_READ SI_POSIX
+#define SANITIZER_INTERCEPT_PREAD SI_POSIX
+#define SANITIZER_INTERCEPT_WRITE SI_POSIX
+#define SANITIZER_INTERCEPT_PWRITE SI_POSIX
+
+#define SANITIZER_INTERCEPT_FREAD SI_POSIX
+#define SANITIZER_INTERCEPT_FWRITE SI_POSIX
+#define SANITIZER_INTERCEPT_FGETS SI_POSIX
+#define SANITIZER_INTERCEPT_FPUTS SI_POSIX
+#define SANITIZER_INTERCEPT_PUTS SI_POSIX
+
+#define SANITIZER_INTERCEPT_PREAD64 SI_LINUX_NOT_ANDROID || SI_SOLARIS32
+#define SANITIZER_INTERCEPT_PWRITE64 SI_LINUX_NOT_ANDROID || SI_SOLARIS32
+
+#define SANITIZER_INTERCEPT_READV SI_POSIX
+#define SANITIZER_INTERCEPT_WRITEV SI_POSIX
+
+#define SANITIZER_INTERCEPT_PREADV \
+ (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_LINUX_NOT_ANDROID)
+#define SANITIZER_INTERCEPT_PWRITEV SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_PREADV64 SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_PWRITEV64 SI_LINUX_NOT_ANDROID
+
+#define SANITIZER_INTERCEPT_PRCTL SI_LINUX
+
+#define SANITIZER_INTERCEPT_LOCALTIME_AND_FRIENDS SI_POSIX
+#define SANITIZER_INTERCEPT_STRPTIME SI_POSIX
+
+#define SANITIZER_INTERCEPT_SCANF SI_POSIX
+#define SANITIZER_INTERCEPT_ISOC99_SCANF SI_LINUX_NOT_ANDROID
+
+#ifndef SANITIZER_INTERCEPT_PRINTF
+# define SANITIZER_INTERCEPT_PRINTF SI_POSIX
+# define SANITIZER_INTERCEPT_PRINTF_L (SI_FREEBSD || SI_NETBSD)
+# define SANITIZER_INTERCEPT_ISOC99_PRINTF SI_LINUX_NOT_ANDROID
+#endif
+
+#define SANITIZER_INTERCEPT___PRINTF_CHK \
+ (SANITIZER_INTERCEPT_PRINTF && SI_LINUX_NOT_ANDROID)
+
+#define SANITIZER_INTERCEPT_FREXP SI_NOT_FUCHSIA
+#define SANITIZER_INTERCEPT_FREXPF_FREXPL SI_POSIX
+
+#define SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS SI_POSIX
+#define SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS \
+ (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_MAC || SI_LINUX_NOT_ANDROID || \
+ SI_SOLARIS)
+#define SANITIZER_INTERCEPT_GETPWENT \
+ (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_MAC || SI_LINUX_NOT_ANDROID || \
+ SI_SOLARIS)
+#define SANITIZER_INTERCEPT_FGETGRENT_R \
+ (SI_FREEBSD || SI_OPENBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_FGETPWENT SI_LINUX_NOT_ANDROID || SI_SOLARIS
+#define SANITIZER_INTERCEPT_GETPWENT_R \
+ (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_FGETPWENT_R \
+ (SI_FREEBSD || SI_OPENBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_SETPWENT \
+ (SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_CLOCK_GETTIME \
+ (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_LINUX || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_GETITIMER SI_POSIX
+#define SANITIZER_INTERCEPT_TIME SI_POSIX
+#define SANITIZER_INTERCEPT_GLOB SI_LINUX_NOT_ANDROID || SI_SOLARIS
+#define SANITIZER_INTERCEPT_GLOB64 SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_WAIT SI_POSIX
+#define SANITIZER_INTERCEPT_INET SI_POSIX
+#define SANITIZER_INTERCEPT_PTHREAD_GETSCHEDPARAM (SI_POSIX && !SI_OPENBSD)
+#define SANITIZER_INTERCEPT_GETADDRINFO SI_POSIX
+#define SANITIZER_INTERCEPT_GETNAMEINFO SI_POSIX
+#define SANITIZER_INTERCEPT_GETSOCKNAME SI_POSIX
+#define SANITIZER_INTERCEPT_GETHOSTBYNAME SI_POSIX
+#define SANITIZER_INTERCEPT_GETHOSTBYNAME2 SI_POSIX && !SI_SOLARIS
+#define SANITIZER_INTERCEPT_GETHOSTBYNAME_R \
+ (SI_FREEBSD || SI_LINUX || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_GETHOSTBYNAME2_R \
+ (SI_FREEBSD || SI_LINUX_NOT_ANDROID)
+#define SANITIZER_INTERCEPT_GETHOSTBYADDR_R \
+ (SI_FREEBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_GETHOSTENT_R \
+ (SI_FREEBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_GETSOCKOPT SI_POSIX
+#define SANITIZER_INTERCEPT_ACCEPT SI_POSIX
+#define SANITIZER_INTERCEPT_ACCEPT4 \
+ (SI_LINUX_NOT_ANDROID || SI_NETBSD || SI_OPENBSD)
+#define SANITIZER_INTERCEPT_PACCEPT SI_NETBSD
+#define SANITIZER_INTERCEPT_MODF SI_POSIX
+#define SANITIZER_INTERCEPT_RECVMSG SI_POSIX
+#define SANITIZER_INTERCEPT_SENDMSG SI_POSIX
+#define SANITIZER_INTERCEPT_RECVMMSG SI_LINUX
+#define SANITIZER_INTERCEPT_SENDMMSG SI_LINUX
+#define SANITIZER_INTERCEPT_GETPEERNAME SI_POSIX
+#define SANITIZER_INTERCEPT_IOCTL SI_POSIX
+#define SANITIZER_INTERCEPT_INET_ATON SI_POSIX
+#define SANITIZER_INTERCEPT_SYSINFO SI_LINUX
+#define SANITIZER_INTERCEPT_READDIR SI_POSIX
+#define SANITIZER_INTERCEPT_READDIR64 SI_LINUX_NOT_ANDROID || SI_SOLARIS32
+#if SI_LINUX_NOT_ANDROID && \
+ (defined(__i386) || defined(__x86_64) || defined(__mips64) || \
+ defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \
+ defined(__s390__))
+#define SANITIZER_INTERCEPT_PTRACE 1
+#else
+#define SANITIZER_INTERCEPT_PTRACE 0
+#endif
+#define SANITIZER_INTERCEPT_SETLOCALE SI_POSIX
+#define SANITIZER_INTERCEPT_GETCWD SI_POSIX
+#define SANITIZER_INTERCEPT_GET_CURRENT_DIR_NAME SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_STRTOIMAX SI_POSIX
+#define SANITIZER_INTERCEPT_MBSTOWCS SI_POSIX
+#define SANITIZER_INTERCEPT_MBSNRTOWCS \
+ (SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_WCSTOMBS SI_POSIX
+#define SANITIZER_INTERCEPT_STRXFRM SI_POSIX
+#define SANITIZER_INTERCEPT___STRXFRM_L SI_LINUX
+#define SANITIZER_INTERCEPT_WCSXFRM SI_POSIX
+#define SANITIZER_INTERCEPT___WCSXFRM_L SI_LINUX
+#define SANITIZER_INTERCEPT_WCSNRTOMBS \
+ (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_MAC || SI_LINUX_NOT_ANDROID || \
+ SI_SOLARIS)
+#define SANITIZER_INTERCEPT_WCRTOMB \
+ (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_MAC || SI_LINUX_NOT_ANDROID || \
+ SI_SOLARIS)
+#define SANITIZER_INTERCEPT_WCTOMB \
+ (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_MAC || SI_LINUX_NOT_ANDROID || \
+ SI_SOLARIS)
+#define SANITIZER_INTERCEPT_TCGETATTR SI_LINUX_NOT_ANDROID || SI_SOLARIS
+#define SANITIZER_INTERCEPT_REALPATH SI_POSIX
+#define SANITIZER_INTERCEPT_CANONICALIZE_FILE_NAME \
+ (SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_CONFSTR \
+ (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_MAC || SI_LINUX_NOT_ANDROID || \
+ SI_SOLARIS)
+#define SANITIZER_INTERCEPT_SCHED_GETAFFINITY SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_SCHED_GETPARAM SI_LINUX_NOT_ANDROID || SI_SOLARIS
+#define SANITIZER_INTERCEPT_STRERROR SI_POSIX
+#define SANITIZER_INTERCEPT_STRERROR_R SI_POSIX
+#define SANITIZER_INTERCEPT_XPG_STRERROR_R SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_SCANDIR \
+ (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_SCANDIR64 SI_LINUX_NOT_ANDROID || SI_SOLARIS32
+#define SANITIZER_INTERCEPT_GETGROUPS SI_POSIX
+#define SANITIZER_INTERCEPT_POLL SI_POSIX
+#define SANITIZER_INTERCEPT_PPOLL SI_LINUX_NOT_ANDROID || SI_SOLARIS
+#define SANITIZER_INTERCEPT_WORDEXP \
+ (SI_FREEBSD || SI_NETBSD || (SI_MAC && !SI_IOS) || SI_LINUX_NOT_ANDROID || \
+ SI_SOLARIS)
+#define SANITIZER_INTERCEPT_SIGWAIT SI_POSIX
+#define SANITIZER_INTERCEPT_SIGWAITINFO SI_LINUX_NOT_ANDROID || SI_SOLARIS
+#define SANITIZER_INTERCEPT_SIGTIMEDWAIT SI_LINUX_NOT_ANDROID || SI_SOLARIS
+#define SANITIZER_INTERCEPT_SIGSETOPS \
+ (SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_SIGPENDING SI_POSIX
+#define SANITIZER_INTERCEPT_SIGPROCMASK SI_POSIX
+#define SANITIZER_INTERCEPT_PTHREAD_SIGMASK SI_POSIX
+#define SANITIZER_INTERCEPT_BACKTRACE \
+ (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_GETMNTENT SI_LINUX
+#define SANITIZER_INTERCEPT_GETMNTENT_R SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_STATFS \
+ (SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_STATFS64 \
+ ((SI_MAC && !SI_IOS) || SI_LINUX_NOT_ANDROID)
+#define SANITIZER_INTERCEPT_STATVFS \
+ (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_LINUX_NOT_ANDROID)
+#define SANITIZER_INTERCEPT_STATVFS64 SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_INITGROUPS SI_POSIX
+#define SANITIZER_INTERCEPT_ETHER_NTOA_ATON (SI_POSIX && !SI_OPENBSD)
+#define SANITIZER_INTERCEPT_ETHER_HOST \
+ (SI_FREEBSD || SI_MAC || SI_LINUX_NOT_ANDROID)
+#define SANITIZER_INTERCEPT_ETHER_R (SI_FREEBSD || SI_LINUX_NOT_ANDROID)
+#define SANITIZER_INTERCEPT_SHMCTL \
+ (SI_NETBSD || SI_OPENBSD || SI_SOLARIS || \
+ ((SI_FREEBSD || SI_LINUX_NOT_ANDROID) && \
+ SANITIZER_WORDSIZE == 64)) // NOLINT
+#define SANITIZER_INTERCEPT_RANDOM_R SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GET SI_POSIX
+#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETINHERITSCHED \
+ (SI_FREEBSD || SI_NETBSD || SI_MAC || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GETAFFINITY_NP SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_PTHREAD_ATTR_GET_SCHED (SI_POSIX && !SI_OPENBSD)
+#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPSHARED \
+ (SI_POSIX && !SI_NETBSD && !SI_OPENBSD)
+#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETTYPE (SI_POSIX && !SI_OPENBSD)
+#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPROTOCOL \
+ (SI_MAC || SI_NETBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETPRIOCEILING \
+ (SI_MAC || SI_NETBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETROBUST \
+ (SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_PTHREAD_MUTEXATTR_GETROBUST_NP SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GETPSHARED \
+ (SI_POSIX && !SI_NETBSD && !SI_OPENBSD)
+#define SANITIZER_INTERCEPT_PTHREAD_RWLOCKATTR_GETKIND_NP SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GETPSHARED \
+ (SI_POSIX && !SI_NETBSD && !SI_OPENBSD)
+#define SANITIZER_INTERCEPT_PTHREAD_CONDATTR_GETCLOCK \
+ (SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_PTHREAD_BARRIERATTR_GETPSHARED \
+ (SI_LINUX_NOT_ANDROID && !SI_NETBSD && !SI_OPENBSD)
+#define SANITIZER_INTERCEPT_THR_EXIT SI_FREEBSD
+#define SANITIZER_INTERCEPT_TMPNAM SI_POSIX
+#define SANITIZER_INTERCEPT_TMPNAM_R SI_LINUX_NOT_ANDROID || SI_SOLARIS
+#define SANITIZER_INTERCEPT_TTYNAME SI_POSIX
+#define SANITIZER_INTERCEPT_TTYNAME_R SI_POSIX
+#define SANITIZER_INTERCEPT_TEMPNAM SI_POSIX
+#define SANITIZER_INTERCEPT_SINCOS SI_LINUX || SI_SOLARIS
+#define SANITIZER_INTERCEPT_REMQUO SI_POSIX
+#define SANITIZER_INTERCEPT_REMQUOL (SI_POSIX && !SI_NETBSD)
+#define SANITIZER_INTERCEPT_LGAMMA SI_POSIX
+#define SANITIZER_INTERCEPT_LGAMMAL (SI_POSIX && !SI_NETBSD)
+#define SANITIZER_INTERCEPT_LGAMMA_R (SI_FREEBSD || SI_LINUX || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_LGAMMAL_R SI_LINUX_NOT_ANDROID || SI_SOLARIS
+#define SANITIZER_INTERCEPT_DRAND48_R SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_RAND_R \
+ (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_MAC || SI_LINUX_NOT_ANDROID || \
+ SI_SOLARIS)
+#define SANITIZER_INTERCEPT_ICONV \
+ (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_TIMES SI_POSIX
+
+// FIXME: getline seems to be available on OSX 10.7
+#define SANITIZER_INTERCEPT_GETLINE \
+ (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+
+#define SANITIZER_INTERCEPT__EXIT \
+ (SI_LINUX || SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_MAC || SI_SOLARIS)
+
+#define SANITIZER_INTERCEPT_PTHREAD_MUTEX SI_POSIX
+#define SANITIZER_INTERCEPT___PTHREAD_MUTEX SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT___LIBC_MUTEX SI_NETBSD
+#define SANITIZER_INTERCEPT_PTHREAD_SETNAME_NP \
+ (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_PTHREAD_GETNAME_NP \
+ (SI_FREEBSD || SI_NETBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+
+#define SANITIZER_INTERCEPT_TLS_GET_ADDR \
+ (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_LINUX_NOT_ANDROID || SI_SOLARIS)
+
+#define SANITIZER_INTERCEPT_LISTXATTR SI_LINUX
+#define SANITIZER_INTERCEPT_GETXATTR SI_LINUX
+#define SANITIZER_INTERCEPT_GETRESID SI_LINUX
+#define SANITIZER_INTERCEPT_GETIFADDRS \
+ (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_LINUX_NOT_ANDROID || SI_MAC || \
+ SI_SOLARIS)
+#define SANITIZER_INTERCEPT_IF_INDEXTONAME \
+ (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_LINUX_NOT_ANDROID || SI_MAC || \
+ SI_SOLARIS)
+#define SANITIZER_INTERCEPT_CAPGET SI_LINUX_NOT_ANDROID
+#if SI_LINUX && defined(__arm__)
+#define SANITIZER_INTERCEPT_AEABI_MEM 1
+#else
+#define SANITIZER_INTERCEPT_AEABI_MEM 0
+#endif
+#define SANITIZER_INTERCEPT___BZERO SI_MAC || SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_BZERO SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_FTIME \
+ (!SI_FREEBSD && !SI_NETBSD && !SI_OPENBSD && SI_POSIX)
+#define SANITIZER_INTERCEPT_XDR SI_LINUX_NOT_ANDROID || SI_SOLARIS
+#define SANITIZER_INTERCEPT_TSEARCH \
+ (SI_LINUX_NOT_ANDROID || SI_MAC || SI_NETBSD || SI_OPENBSD || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_LIBIO_INTERNALS SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_FOPEN SI_POSIX
+#define SANITIZER_INTERCEPT_FOPEN64 SI_LINUX_NOT_ANDROID || SI_SOLARIS32
+#define SANITIZER_INTERCEPT_OPEN_MEMSTREAM \
+ (SI_LINUX_NOT_ANDROID || SI_NETBSD || SI_OPENBSD || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_OBSTACK SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_FFLUSH SI_POSIX
+#define SANITIZER_INTERCEPT_FCLOSE SI_POSIX
+
+#ifndef SANITIZER_INTERCEPT_DLOPEN_DLCLOSE
+#define SANITIZER_INTERCEPT_DLOPEN_DLCLOSE \
+ (SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_LINUX_NOT_ANDROID || SI_MAC || \
+ SI_SOLARIS)
+#endif
+
+#define SANITIZER_INTERCEPT_GETPASS \
+ (SI_LINUX_NOT_ANDROID || SI_MAC || SI_NETBSD || SI_OPENBSD)
+#define SANITIZER_INTERCEPT_TIMERFD SI_LINUX_NOT_ANDROID
+
+#define SANITIZER_INTERCEPT_MLOCKX SI_POSIX
+#define SANITIZER_INTERCEPT_FOPENCOOKIE SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_SEM \
+ (SI_LINUX || SI_FREEBSD || SI_NETBSD || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_PTHREAD_SETCANCEL SI_POSIX
+#define SANITIZER_INTERCEPT_MINCORE \
+ (SI_LINUX || SI_NETBSD || SI_OPENBSD || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_PROCESS_VM_READV SI_LINUX
+#define SANITIZER_INTERCEPT_CTERMID \
+ (SI_LINUX || SI_MAC || SI_FREEBSD || SI_NETBSD || SI_OPENBSD || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_CTERMID_R (SI_MAC || SI_FREEBSD || SI_SOLARIS)
+
+#define SANITIZER_INTERCEPTOR_HOOKS \
+ (SI_LINUX || SI_MAC || SI_WINDOWS || SI_NETBSD)
+#define SANITIZER_INTERCEPT_RECV_RECVFROM SI_POSIX
+#define SANITIZER_INTERCEPT_SEND_SENDTO SI_POSIX
+#define SANITIZER_INTERCEPT_EVENTFD_READ_WRITE SI_LINUX
+
+#define SANITIZER_INTERCEPT_STAT \
+ (SI_FREEBSD || SI_MAC || SI_ANDROID || SI_NETBSD || SI_OPENBSD || SI_SOLARIS)
+#define SANITIZER_INTERCEPT_LSTAT (SI_NETBSD || SI_FREEBSD)
+#define SANITIZER_INTERCEPT___XSTAT (!SANITIZER_INTERCEPT_STAT && SI_POSIX)
+#define SANITIZER_INTERCEPT___XSTAT64 SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT___LXSTAT SANITIZER_INTERCEPT___XSTAT
+#define SANITIZER_INTERCEPT___LXSTAT64 SI_LINUX_NOT_ANDROID
+
+#define SANITIZER_INTERCEPT_UTMP \
+ (SI_POSIX && !SI_MAC && !SI_FREEBSD && !SI_NETBSD)
+#define SANITIZER_INTERCEPT_UTMPX \
+ (SI_LINUX_NOT_ANDROID || SI_MAC || SI_FREEBSD || SI_NETBSD)
+
+#define SANITIZER_INTERCEPT_GETLOADAVG \
+ (SI_LINUX_NOT_ANDROID || SI_MAC || SI_FREEBSD || SI_NETBSD || SI_OPENBSD)
+
+#define SANITIZER_INTERCEPT_MMAP SI_POSIX
+#define SANITIZER_INTERCEPT_MMAP64 SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO \
+ (!SI_FREEBSD && !SI_MAC && !SI_NETBSD && !SI_OPENBSD && SI_NOT_FUCHSIA && \
+ SI_NOT_RTEMS)
+#define SANITIZER_INTERCEPT_MEMALIGN \
+ (!SI_FREEBSD && !SI_MAC && !SI_NETBSD && !SI_OPENBSD && SI_NOT_RTEMS)
+#define SANITIZER_INTERCEPT_PVALLOC \
+ (!SI_FREEBSD && !SI_MAC && !SI_NETBSD && !SI_OPENBSD && SI_NOT_FUCHSIA && \
+ SI_NOT_RTEMS)
+#define SANITIZER_INTERCEPT_CFREE \
+ (!SI_FREEBSD && !SI_MAC && !SI_NETBSD && !SI_OPENBSD && SI_NOT_FUCHSIA && \
+ SI_NOT_RTEMS)
+#define SANITIZER_INTERCEPT_REALLOCARRAY SI_POSIX
+#define SANITIZER_INTERCEPT_ALIGNED_ALLOC (!SI_MAC && SI_NOT_RTEMS)
+#define SANITIZER_INTERCEPT_MALLOC_USABLE_SIZE (!SI_MAC && !SI_OPENBSD)
+#define SANITIZER_INTERCEPT_MCHECK_MPROBE SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_WCSCAT SI_POSIX
+#define SANITIZER_INTERCEPT_WCSDUP SI_POSIX
+#define SANITIZER_INTERCEPT_SIGNAL_AND_SIGACTION (!SI_WINDOWS && SI_NOT_FUCHSIA)
+#define SANITIZER_INTERCEPT_BSD_SIGNAL SI_ANDROID
+
+#define SANITIZER_INTERCEPT_ACCT (SI_NETBSD || SI_OPENBSD || SI_FREEBSD)
+#define SANITIZER_INTERCEPT_USER_FROM_UID SI_NETBSD
+#define SANITIZER_INTERCEPT_UID_FROM_USER SI_NETBSD
+#define SANITIZER_INTERCEPT_GROUP_FROM_GID SI_NETBSD
+#define SANITIZER_INTERCEPT_GID_FROM_GROUP SI_NETBSD
+#define SANITIZER_INTERCEPT_ACCESS (SI_NETBSD || SI_OPENBSD || SI_FREEBSD)
+#define SANITIZER_INTERCEPT_FACCESSAT (SI_NETBSD || SI_OPENBSD || SI_FREEBSD)
+#define SANITIZER_INTERCEPT_GETGROUPLIST (SI_NETBSD || SI_OPENBSD)
+#define SANITIZER_INTERCEPT_STRLCPY \
+ (SI_NETBSD || SI_FREEBSD || SI_OPENBSD || SI_MAC || SI_ANDROID)
+
+#define SANITIZER_INTERCEPT_NAME_TO_HANDLE_AT SI_LINUX_NOT_ANDROID
+#define SANITIZER_INTERCEPT_OPEN_BY_HANDLE_AT SI_LINUX_NOT_ANDROID
+
+#define SANITIZER_INTERCEPT_READLINK SI_POSIX
+#if defined(__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__) && \
+ __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 101000
+# define SI_MAC_DEPLOYMENT_BELOW_10_10 1
+#else
+# define SI_MAC_DEPLOYMENT_BELOW_10_10 0
+#endif
+#define SANITIZER_INTERCEPT_READLINKAT \
+ (SI_POSIX && !SI_MAC_DEPLOYMENT_BELOW_10_10)
+
+#define SANITIZER_INTERCEPT_DEVNAME (SI_NETBSD || SI_OPENBSD || SI_FREEBSD)
+#define SANITIZER_INTERCEPT_DEVNAME_R (SI_NETBSD || SI_FREEBSD)
+#define SANITIZER_INTERCEPT_FGETLN (SI_NETBSD || SI_FREEBSD)
+#define SANITIZER_INTERCEPT_STRMODE (SI_NETBSD || SI_FREEBSD)
+#define SANITIZER_INTERCEPT_TTYENT SI_NETBSD
+#define SANITIZER_INTERCEPT_PROTOENT SI_NETBSD
+#define SANITIZER_INTERCEPT_NETENT SI_NETBSD
+#define SANITIZER_INTERCEPT_SETVBUF (SI_NETBSD || SI_FREEBSD || \
+ SI_LINUX || SI_MAC)
+#define SANITIZER_INTERCEPT_GETMNTINFO (SI_NETBSD || SI_FREEBSD || SI_MAC)
+#define SANITIZER_INTERCEPT_MI_VECTOR_HASH SI_NETBSD
+#define SANITIZER_INTERCEPT_GETVFSSTAT SI_NETBSD
+#define SANITIZER_INTERCEPT_REGEX (SI_NETBSD || SI_FREEBSD || SI_LINUX)
+#define SANITIZER_INTERCEPT_REGEXSUB SI_NETBSD
+#define SANITIZER_INTERCEPT_FTS (SI_NETBSD || SI_FREEBSD)
+#define SANITIZER_INTERCEPT_SYSCTL (SI_NETBSD || SI_FREEBSD || SI_MAC)
+#define SANITIZER_INTERCEPT_ASYSCTL SI_NETBSD
+#define SANITIZER_INTERCEPT_SYSCTLGETMIBINFO SI_NETBSD
+#define SANITIZER_INTERCEPT_NL_LANGINFO (SI_NETBSD || SI_FREEBSD || SI_MAC)
+#define SANITIZER_INTERCEPT_MODCTL SI_NETBSD
+#define SANITIZER_INTERCEPT_CAPSICUM SI_FREEBSD
+#define SANITIZER_INTERCEPT_STRTONUM (SI_NETBSD || SI_FREEBSD)
+#define SANITIZER_INTERCEPT_FPARSELN SI_NETBSD
+#define SANITIZER_INTERCEPT_STATVFS1 SI_NETBSD
+#define SANITIZER_INTERCEPT_STRTOI SI_NETBSD
+#define SANITIZER_INTERCEPT_SHA1 SI_NETBSD
+#define SANITIZER_INTERCEPT_MD4 SI_NETBSD
+#define SANITIZER_INTERCEPT_RMD160 SI_NETBSD
+#define SANITIZER_INTERCEPT_MD5 SI_NETBSD
+#define SANITIZER_INTERCEPT_FSEEK (SI_NETBSD || SI_FREEBSD)
+#define SANITIZER_INTERCEPT_MD2 SI_NETBSD
+#define SANITIZER_INTERCEPT_SHA2 SI_NETBSD
+#define SANITIZER_INTERCEPT_CDB SI_NETBSD
+#define SANITIZER_INTERCEPT_VIS (SI_NETBSD || SI_FREEBSD)
+#define SANITIZER_INTERCEPT_POPEN SI_POSIX
+#define SANITIZER_INTERCEPT_POPENVE SI_NETBSD
+#define SANITIZER_INTERCEPT_PCLOSE SI_POSIX
+#define SANITIZER_INTERCEPT_FUNOPEN (SI_NETBSD || SI_FREEBSD)
+#define SANITIZER_INTERCEPT_FUNOPEN2 SI_NETBSD
+#define SANITIZER_INTERCEPT_GETFSENT (SI_FREEBSD || SI_NETBSD || SI_MAC)
+#define SANITIZER_INTERCEPT_ARC4RANDOM (SI_FREEBSD || SI_NETBSD)
+#define SANITIZER_INTERCEPT_FDEVNAME SI_FREEBSD
+#define SANITIZER_INTERCEPT_GETUSERSHELL (SI_POSIX && !SI_ANDROID)
+#define SANITIZER_INTERCEPT_SL_INIT (SI_FREEBSD || SI_NETBSD)
+
+#endif // #ifndef SANITIZER_PLATFORM_INTERCEPTORS_H
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_platform_limits_freebsd.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_platform_limits_freebsd.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_platform_limits_freebsd.cc (revision 351984)
@@ -0,0 +1,525 @@
+//===-- sanitizer_platform_limits_freebsd.cc ------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of Sanitizer common code.
+//
+// Sizes and layouts of platform-specific FreeBSD data structures.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+
+#if SANITIZER_FREEBSD
+
+#include <arpa/inet.h>
+#include <dirent.h>
+#include <fts.h>
+#include <fstab.h>
+#include <grp.h>
+#include <limits.h>
+#include <net/if.h>
+#include <netdb.h>
+#include <poll.h>
+#include <pthread.h>
+#include <pwd.h>
+#include <regex.h>
+#include <signal.h>
+#include <stddef.h>
+#include <sys/mman.h>
+#include <sys/capsicum.h>
+#include <sys/resource.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <sys/times.h>
+#include <sys/types.h>
+#include <sys/utsname.h>
+#include <termios.h>
+#include <time.h>
+
+#include <net/route.h>
+#include <sys/mount.h>
+#include <sys/sockio.h>
+#include <sys/socket.h>
+#include <sys/filio.h>
+#include <sys/signal.h>
+#include <sys/timespec.h>
+#include <sys/timeb.h>
+#include <sys/mqueue.h>
+#include <sys/msg.h>
+#include <sys/ipc.h>
+#include <sys/msg.h>
+#include <sys/statvfs.h>
+#include <sys/soundcard.h>
+#include <sys/mtio.h>
+#include <sys/consio.h>
+#include <sys/kbio.h>
+#include <sys/link_elf.h>
+#include <netinet/ip_mroute.h>
+#include <netinet/in.h>
+#include <net/ethernet.h>
+#include <net/ppp_defs.h>
+#include <glob.h>
+#include <stdio.h>
+#include <stringlist.h>
+#include <term.h>
+#include <utmpx.h>
+#include <wchar.h>
+#include <vis.h>
+
+#define _KERNEL // to declare 'shminfo' structure
+# include <sys/shm.h>
+#undef _KERNEL
+
+#undef INLINE // to avoid clashes with sanitizers' definitions
+
+#undef IOC_DIRMASK
+
+# include <utime.h>
+# include <sys/ptrace.h>
+# include <semaphore.h>
+
+#include <ifaddrs.h>
+#include <sys/ucontext.h>
+#include <wordexp.h>
+
+// Include these after system headers to avoid name clashes and ambiguities.
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_platform_limits_freebsd.h"
+
+namespace __sanitizer {
+ unsigned struct_cap_rights_sz = sizeof(cap_rights_t);
+ unsigned struct_utsname_sz = sizeof(struct utsname);
+ unsigned struct_stat_sz = sizeof(struct stat);
+ unsigned struct_rusage_sz = sizeof(struct rusage);
+ unsigned struct_tm_sz = sizeof(struct tm);
+ unsigned struct_passwd_sz = sizeof(struct passwd);
+ unsigned struct_group_sz = sizeof(struct group);
+ unsigned siginfo_t_sz = sizeof(siginfo_t);
+ unsigned struct_sigaction_sz = sizeof(struct sigaction);
+ unsigned struct_itimerval_sz = sizeof(struct itimerval);
+ unsigned pthread_t_sz = sizeof(pthread_t);
+ unsigned pthread_mutex_t_sz = sizeof(pthread_mutex_t);
+ unsigned pthread_cond_t_sz = sizeof(pthread_cond_t);
+ unsigned pid_t_sz = sizeof(pid_t);
+ unsigned timeval_sz = sizeof(timeval);
+ unsigned uid_t_sz = sizeof(uid_t);
+ unsigned gid_t_sz = sizeof(gid_t);
+ unsigned fpos_t_sz = sizeof(fpos_t);
+ unsigned mbstate_t_sz = sizeof(mbstate_t);
+ unsigned sigset_t_sz = sizeof(sigset_t);
+ unsigned struct_timezone_sz = sizeof(struct timezone);
+ unsigned struct_tms_sz = sizeof(struct tms);
+ unsigned struct_sigevent_sz = sizeof(struct sigevent);
+ unsigned struct_sched_param_sz = sizeof(struct sched_param);
+ unsigned struct_statfs_sz = sizeof(struct statfs);
+ unsigned struct_sockaddr_sz = sizeof(struct sockaddr);
+ unsigned ucontext_t_sz = sizeof(ucontext_t);
+ unsigned struct_rlimit_sz = sizeof(struct rlimit);
+ unsigned struct_timespec_sz = sizeof(struct timespec);
+ unsigned struct_utimbuf_sz = sizeof(struct utimbuf);
+ unsigned struct_itimerspec_sz = sizeof(struct itimerspec);
+ unsigned struct_timeb_sz = sizeof(struct timeb);
+ unsigned struct_msqid_ds_sz = sizeof(struct msqid_ds);
+ unsigned struct_mq_attr_sz = sizeof(struct mq_attr);
+ unsigned struct_statvfs_sz = sizeof(struct statvfs);
+ unsigned struct_shminfo_sz = sizeof(struct shminfo);
+ unsigned struct_shm_info_sz = sizeof(struct shm_info);
+ unsigned struct_regmatch_sz = sizeof(regmatch_t);
+ unsigned struct_regex_sz = sizeof(regex_t);
+ unsigned struct_fstab_sz = sizeof(struct fstab);
+ unsigned struct_FTS_sz = sizeof(FTS);
+ unsigned struct_FTSENT_sz = sizeof(FTSENT);
+ unsigned struct_StringList_sz = sizeof(StringList);
+
+ const uptr sig_ign = (uptr)SIG_IGN;
+ const uptr sig_dfl = (uptr)SIG_DFL;
+ const uptr sig_err = (uptr)SIG_ERR;
+ const uptr sa_siginfo = (uptr)SA_SIGINFO;
+
+ int shmctl_ipc_stat = (int)IPC_STAT;
+ int shmctl_ipc_info = (int)IPC_INFO;
+ int shmctl_shm_info = (int)SHM_INFO;
+ int shmctl_shm_stat = (int)SHM_STAT;
+ unsigned struct_utmpx_sz = sizeof(struct utmpx);
+
+ int map_fixed = MAP_FIXED;
+
+ int af_inet = (int)AF_INET;
+ int af_inet6 = (int)AF_INET6;
+
+ uptr __sanitizer_in_addr_sz(int af) {
+ if (af == AF_INET)
+ return sizeof(struct in_addr);
+ else if (af == AF_INET6)
+ return sizeof(struct in6_addr);
+ else
+ return 0;
+ }
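+  // Illustrative sketch (an assumption, not part of this import): interceptors
+  // such as the one for inet_ntop() can bound their address-argument checks
+  // with this helper, e.g.:
+  //   if (uptr sz = __sanitizer_in_addr_sz(af))
+  //     COMMON_INTERCEPTOR_READ_RANGE(ctx, src, sz);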
+
+ unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
+ int glob_nomatch = GLOB_NOMATCH;
+ int glob_altdirfunc = GLOB_ALTDIRFUNC;
+
+ unsigned path_max = PATH_MAX;
+
+ // ioctl arguments
+ unsigned struct_ifreq_sz = sizeof(struct ifreq);
+ unsigned struct_termios_sz = sizeof(struct termios);
+ unsigned struct_winsize_sz = sizeof(struct winsize);
+#if SOUND_VERSION >= 0x040000
+ unsigned struct_copr_buffer_sz = 0;
+ unsigned struct_copr_debug_buf_sz = 0;
+ unsigned struct_copr_msg_sz = 0;
+#else
+ unsigned struct_copr_buffer_sz = sizeof(struct copr_buffer);
+ unsigned struct_copr_debug_buf_sz = sizeof(struct copr_debug_buf);
+ unsigned struct_copr_msg_sz = sizeof(struct copr_msg);
+#endif
+ unsigned struct_midi_info_sz = sizeof(struct midi_info);
+ unsigned struct_mtget_sz = sizeof(struct mtget);
+ unsigned struct_mtop_sz = sizeof(struct mtop);
+ unsigned struct_sbi_instrument_sz = sizeof(struct sbi_instrument);
+ unsigned struct_seq_event_rec_sz = sizeof(struct seq_event_rec);
+ unsigned struct_synth_info_sz = sizeof(struct synth_info);
+ unsigned struct_audio_buf_info_sz = sizeof(struct audio_buf_info);
+ unsigned struct_ppp_stats_sz = sizeof(struct ppp_stats);
+ unsigned struct_sioc_sg_req_sz = sizeof(struct sioc_sg_req);
+ unsigned struct_sioc_vif_req_sz = sizeof(struct sioc_vif_req);
+ const unsigned long __sanitizer_bufsiz = BUFSIZ;
+
+ const unsigned IOCTL_NOT_PRESENT = 0;
+
+ unsigned IOCTL_FIOASYNC = FIOASYNC;
+ unsigned IOCTL_FIOCLEX = FIOCLEX;
+ unsigned IOCTL_FIOGETOWN = FIOGETOWN;
+ unsigned IOCTL_FIONBIO = FIONBIO;
+ unsigned IOCTL_FIONCLEX = FIONCLEX;
+ unsigned IOCTL_FIOSETOWN = FIOSETOWN;
+ unsigned IOCTL_SIOCADDMULTI = SIOCADDMULTI;
+ unsigned IOCTL_SIOCATMARK = SIOCATMARK;
+ unsigned IOCTL_SIOCDELMULTI = SIOCDELMULTI;
+ unsigned IOCTL_SIOCGIFADDR = SIOCGIFADDR;
+ unsigned IOCTL_SIOCGIFBRDADDR = SIOCGIFBRDADDR;
+ unsigned IOCTL_SIOCGIFCONF = SIOCGIFCONF;
+ unsigned IOCTL_SIOCGIFDSTADDR = SIOCGIFDSTADDR;
+ unsigned IOCTL_SIOCGIFFLAGS = SIOCGIFFLAGS;
+ unsigned IOCTL_SIOCGIFMETRIC = SIOCGIFMETRIC;
+ unsigned IOCTL_SIOCGIFMTU = SIOCGIFMTU;
+ unsigned IOCTL_SIOCGIFNETMASK = SIOCGIFNETMASK;
+ unsigned IOCTL_SIOCGPGRP = SIOCGPGRP;
+ unsigned IOCTL_SIOCSIFADDR = SIOCSIFADDR;
+ unsigned IOCTL_SIOCSIFBRDADDR = SIOCSIFBRDADDR;
+ unsigned IOCTL_SIOCSIFDSTADDR = SIOCSIFDSTADDR;
+ unsigned IOCTL_SIOCSIFFLAGS = SIOCSIFFLAGS;
+ unsigned IOCTL_SIOCSIFMETRIC = SIOCSIFMETRIC;
+ unsigned IOCTL_SIOCSIFMTU = SIOCSIFMTU;
+ unsigned IOCTL_SIOCSIFNETMASK = SIOCSIFNETMASK;
+ unsigned IOCTL_SIOCSPGRP = SIOCSPGRP;
+ unsigned IOCTL_TIOCCONS = TIOCCONS;
+ unsigned IOCTL_TIOCEXCL = TIOCEXCL;
+ unsigned IOCTL_TIOCGETD = TIOCGETD;
+ unsigned IOCTL_TIOCGPGRP = TIOCGPGRP;
+ unsigned IOCTL_TIOCGWINSZ = TIOCGWINSZ;
+ unsigned IOCTL_TIOCMBIC = TIOCMBIC;
+ unsigned IOCTL_TIOCMBIS = TIOCMBIS;
+ unsigned IOCTL_TIOCMGET = TIOCMGET;
+ unsigned IOCTL_TIOCMSET = TIOCMSET;
+ unsigned IOCTL_TIOCNOTTY = TIOCNOTTY;
+ unsigned IOCTL_TIOCNXCL = TIOCNXCL;
+ unsigned IOCTL_TIOCOUTQ = TIOCOUTQ;
+ unsigned IOCTL_TIOCPKT = TIOCPKT;
+ unsigned IOCTL_TIOCSCTTY = TIOCSCTTY;
+ unsigned IOCTL_TIOCSETD = TIOCSETD;
+ unsigned IOCTL_TIOCSPGRP = TIOCSPGRP;
+ unsigned IOCTL_TIOCSTI = TIOCSTI;
+ unsigned IOCTL_TIOCSWINSZ = TIOCSWINSZ;
+ unsigned IOCTL_SIOCGETSGCNT = SIOCGETSGCNT;
+ unsigned IOCTL_SIOCGETVIFCNT = SIOCGETVIFCNT;
+ unsigned IOCTL_MTIOCGET = MTIOCGET;
+ unsigned IOCTL_MTIOCTOP = MTIOCTOP;
+ unsigned IOCTL_SNDCTL_DSP_GETBLKSIZE = SNDCTL_DSP_GETBLKSIZE;
+ unsigned IOCTL_SNDCTL_DSP_GETFMTS = SNDCTL_DSP_GETFMTS;
+ unsigned IOCTL_SNDCTL_DSP_NONBLOCK = SNDCTL_DSP_NONBLOCK;
+ unsigned IOCTL_SNDCTL_DSP_POST = SNDCTL_DSP_POST;
+ unsigned IOCTL_SNDCTL_DSP_RESET = SNDCTL_DSP_RESET;
+ unsigned IOCTL_SNDCTL_DSP_SETFMT = SNDCTL_DSP_SETFMT;
+ unsigned IOCTL_SNDCTL_DSP_SETFRAGMENT = SNDCTL_DSP_SETFRAGMENT;
+ unsigned IOCTL_SNDCTL_DSP_SPEED = SNDCTL_DSP_SPEED;
+ unsigned IOCTL_SNDCTL_DSP_STEREO = SNDCTL_DSP_STEREO;
+ unsigned IOCTL_SNDCTL_DSP_SUBDIVIDE = SNDCTL_DSP_SUBDIVIDE;
+ unsigned IOCTL_SNDCTL_DSP_SYNC = SNDCTL_DSP_SYNC;
+ unsigned IOCTL_SNDCTL_FM_4OP_ENABLE = SNDCTL_FM_4OP_ENABLE;
+ unsigned IOCTL_SNDCTL_FM_LOAD_INSTR = SNDCTL_FM_LOAD_INSTR;
+ unsigned IOCTL_SNDCTL_MIDI_INFO = SNDCTL_MIDI_INFO;
+ unsigned IOCTL_SNDCTL_MIDI_PRETIME = SNDCTL_MIDI_PRETIME;
+ unsigned IOCTL_SNDCTL_SEQ_CTRLRATE = SNDCTL_SEQ_CTRLRATE;
+ unsigned IOCTL_SNDCTL_SEQ_GETINCOUNT = SNDCTL_SEQ_GETINCOUNT;
+ unsigned IOCTL_SNDCTL_SEQ_GETOUTCOUNT = SNDCTL_SEQ_GETOUTCOUNT;
+ unsigned IOCTL_SNDCTL_SEQ_NRMIDIS = SNDCTL_SEQ_NRMIDIS;
+ unsigned IOCTL_SNDCTL_SEQ_NRSYNTHS = SNDCTL_SEQ_NRSYNTHS;
+ unsigned IOCTL_SNDCTL_SEQ_OUTOFBAND = SNDCTL_SEQ_OUTOFBAND;
+ unsigned IOCTL_SNDCTL_SEQ_PANIC = SNDCTL_SEQ_PANIC;
+ unsigned IOCTL_SNDCTL_SEQ_PERCMODE = SNDCTL_SEQ_PERCMODE;
+ unsigned IOCTL_SNDCTL_SEQ_RESET = SNDCTL_SEQ_RESET;
+ unsigned IOCTL_SNDCTL_SEQ_RESETSAMPLES = SNDCTL_SEQ_RESETSAMPLES;
+ unsigned IOCTL_SNDCTL_SEQ_SYNC = SNDCTL_SEQ_SYNC;
+ unsigned IOCTL_SNDCTL_SEQ_TESTMIDI = SNDCTL_SEQ_TESTMIDI;
+ unsigned IOCTL_SNDCTL_SEQ_THRESHOLD = SNDCTL_SEQ_THRESHOLD;
+ unsigned IOCTL_SNDCTL_SYNTH_INFO = SNDCTL_SYNTH_INFO;
+ unsigned IOCTL_SNDCTL_SYNTH_MEMAVL = SNDCTL_SYNTH_MEMAVL;
+ unsigned IOCTL_SNDCTL_TMR_CONTINUE = SNDCTL_TMR_CONTINUE;
+ unsigned IOCTL_SNDCTL_TMR_METRONOME = SNDCTL_TMR_METRONOME;
+ unsigned IOCTL_SNDCTL_TMR_SELECT = SNDCTL_TMR_SELECT;
+ unsigned IOCTL_SNDCTL_TMR_SOURCE = SNDCTL_TMR_SOURCE;
+ unsigned IOCTL_SNDCTL_TMR_START = SNDCTL_TMR_START;
+ unsigned IOCTL_SNDCTL_TMR_STOP = SNDCTL_TMR_STOP;
+ unsigned IOCTL_SNDCTL_TMR_TEMPO = SNDCTL_TMR_TEMPO;
+ unsigned IOCTL_SNDCTL_TMR_TIMEBASE = SNDCTL_TMR_TIMEBASE;
+ unsigned IOCTL_SOUND_MIXER_READ_ALTPCM = SOUND_MIXER_READ_ALTPCM;
+ unsigned IOCTL_SOUND_MIXER_READ_BASS = SOUND_MIXER_READ_BASS;
+ unsigned IOCTL_SOUND_MIXER_READ_CAPS = SOUND_MIXER_READ_CAPS;
+ unsigned IOCTL_SOUND_MIXER_READ_CD = SOUND_MIXER_READ_CD;
+ unsigned IOCTL_SOUND_MIXER_READ_DEVMASK = SOUND_MIXER_READ_DEVMASK;
+ unsigned IOCTL_SOUND_MIXER_READ_ENHANCE = SOUND_MIXER_READ_ENHANCE;
+ unsigned IOCTL_SOUND_MIXER_READ_IGAIN = SOUND_MIXER_READ_IGAIN;
+ unsigned IOCTL_SOUND_MIXER_READ_IMIX = SOUND_MIXER_READ_IMIX;
+ unsigned IOCTL_SOUND_MIXER_READ_LINE = SOUND_MIXER_READ_LINE;
+ unsigned IOCTL_SOUND_MIXER_READ_LINE1 = SOUND_MIXER_READ_LINE1;
+ unsigned IOCTL_SOUND_MIXER_READ_LINE2 = SOUND_MIXER_READ_LINE2;
+ unsigned IOCTL_SOUND_MIXER_READ_LINE3 = SOUND_MIXER_READ_LINE3;
+ unsigned IOCTL_SOUND_MIXER_READ_LOUD = SOUND_MIXER_READ_LOUD;
+ unsigned IOCTL_SOUND_MIXER_READ_MIC = SOUND_MIXER_READ_MIC;
+ unsigned IOCTL_SOUND_MIXER_READ_MUTE = SOUND_MIXER_READ_MUTE;
+ unsigned IOCTL_SOUND_MIXER_READ_OGAIN = SOUND_MIXER_READ_OGAIN;
+ unsigned IOCTL_SOUND_MIXER_READ_PCM = SOUND_MIXER_READ_PCM;
+ unsigned IOCTL_SOUND_MIXER_READ_RECLEV = SOUND_MIXER_READ_RECLEV;
+ unsigned IOCTL_SOUND_MIXER_READ_RECMASK = SOUND_MIXER_READ_RECMASK;
+ unsigned IOCTL_SOUND_MIXER_READ_RECSRC = SOUND_MIXER_READ_RECSRC;
+ unsigned IOCTL_SOUND_MIXER_READ_SPEAKER = SOUND_MIXER_READ_SPEAKER;
+ unsigned IOCTL_SOUND_MIXER_READ_STEREODEVS = SOUND_MIXER_READ_STEREODEVS;
+ unsigned IOCTL_SOUND_MIXER_READ_SYNTH = SOUND_MIXER_READ_SYNTH;
+ unsigned IOCTL_SOUND_MIXER_READ_TREBLE = SOUND_MIXER_READ_TREBLE;
+ unsigned IOCTL_SOUND_MIXER_READ_VOLUME = SOUND_MIXER_READ_VOLUME;
+ unsigned IOCTL_SOUND_MIXER_WRITE_ALTPCM = SOUND_MIXER_WRITE_ALTPCM;
+ unsigned IOCTL_SOUND_MIXER_WRITE_BASS = SOUND_MIXER_WRITE_BASS;
+ unsigned IOCTL_SOUND_MIXER_WRITE_CD = SOUND_MIXER_WRITE_CD;
+ unsigned IOCTL_SOUND_MIXER_WRITE_ENHANCE = SOUND_MIXER_WRITE_ENHANCE;
+ unsigned IOCTL_SOUND_MIXER_WRITE_IGAIN = SOUND_MIXER_WRITE_IGAIN;
+ unsigned IOCTL_SOUND_MIXER_WRITE_IMIX = SOUND_MIXER_WRITE_IMIX;
+ unsigned IOCTL_SOUND_MIXER_WRITE_LINE = SOUND_MIXER_WRITE_LINE;
+ unsigned IOCTL_SOUND_MIXER_WRITE_LINE1 = SOUND_MIXER_WRITE_LINE1;
+ unsigned IOCTL_SOUND_MIXER_WRITE_LINE2 = SOUND_MIXER_WRITE_LINE2;
+ unsigned IOCTL_SOUND_MIXER_WRITE_LINE3 = SOUND_MIXER_WRITE_LINE3;
+ unsigned IOCTL_SOUND_MIXER_WRITE_LOUD = SOUND_MIXER_WRITE_LOUD;
+ unsigned IOCTL_SOUND_MIXER_WRITE_MIC = SOUND_MIXER_WRITE_MIC;
+ unsigned IOCTL_SOUND_MIXER_WRITE_MUTE = SOUND_MIXER_WRITE_MUTE;
+ unsigned IOCTL_SOUND_MIXER_WRITE_OGAIN = SOUND_MIXER_WRITE_OGAIN;
+ unsigned IOCTL_SOUND_MIXER_WRITE_PCM = SOUND_MIXER_WRITE_PCM;
+ unsigned IOCTL_SOUND_MIXER_WRITE_RECLEV = SOUND_MIXER_WRITE_RECLEV;
+ unsigned IOCTL_SOUND_MIXER_WRITE_RECSRC = SOUND_MIXER_WRITE_RECSRC;
+ unsigned IOCTL_SOUND_MIXER_WRITE_SPEAKER = SOUND_MIXER_WRITE_SPEAKER;
+ unsigned IOCTL_SOUND_MIXER_WRITE_SYNTH = SOUND_MIXER_WRITE_SYNTH;
+ unsigned IOCTL_SOUND_MIXER_WRITE_TREBLE = SOUND_MIXER_WRITE_TREBLE;
+ unsigned IOCTL_SOUND_MIXER_WRITE_VOLUME = SOUND_MIXER_WRITE_VOLUME;
+ unsigned IOCTL_VT_ACTIVATE = VT_ACTIVATE;
+ unsigned IOCTL_VT_GETMODE = VT_GETMODE;
+ unsigned IOCTL_VT_OPENQRY = VT_OPENQRY;
+ unsigned IOCTL_VT_RELDISP = VT_RELDISP;
+ unsigned IOCTL_VT_SETMODE = VT_SETMODE;
+ unsigned IOCTL_VT_WAITACTIVE = VT_WAITACTIVE;
+ unsigned IOCTL_GIO_SCRNMAP = GIO_SCRNMAP;
+ unsigned IOCTL_KDDISABIO = KDDISABIO;
+ unsigned IOCTL_KDENABIO = KDENABIO;
+ unsigned IOCTL_KDGETLED = KDGETLED;
+ unsigned IOCTL_KDGETMODE = KDGETMODE;
+ unsigned IOCTL_KDGKBMODE = KDGKBMODE;
+ unsigned IOCTL_KDGKBTYPE = KDGKBTYPE;
+ unsigned IOCTL_KDMKTONE = KDMKTONE;
+ unsigned IOCTL_KDSETLED = KDSETLED;
+ unsigned IOCTL_KDSETMODE = KDSETMODE;
+ unsigned IOCTL_KDSKBMODE = KDSKBMODE;
+ unsigned IOCTL_KIOCSOUND = KIOCSOUND;
+ unsigned IOCTL_PIO_SCRNMAP = PIO_SCRNMAP;
+ unsigned IOCTL_SNDCTL_DSP_GETISPACE = SNDCTL_DSP_GETISPACE;
+
+ const int si_SEGV_MAPERR = SEGV_MAPERR;
+ const int si_SEGV_ACCERR = SEGV_ACCERR;
+ const int unvis_valid = UNVIS_VALID;
+ const int unvis_validpush = UNVIS_VALIDPUSH;
+} // namespace __sanitizer
+
+using namespace __sanitizer;
+
+COMPILER_CHECK(sizeof(__sanitizer_pthread_attr_t) >= sizeof(pthread_attr_t));
+
+COMPILER_CHECK(sizeof(socklen_t) == sizeof(unsigned));
+CHECK_TYPE_SIZE(pthread_key_t);
+
+// There are more undocumented fields in dl_phdr_info that we are not interested
+// in.
+COMPILER_CHECK(sizeof(__sanitizer_dl_phdr_info) <= sizeof(dl_phdr_info));
+CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_addr);
+CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_name);
+CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_phdr);
+CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_phnum);
+
+CHECK_TYPE_SIZE(glob_t);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_pathc);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_pathv);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_offs);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_flags);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_closedir);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_readdir);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_opendir);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_lstat);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_stat);
+
+CHECK_TYPE_SIZE(addrinfo);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_flags);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_family);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_socktype);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_protocol);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_addrlen);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_canonname);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_addr);
+
+CHECK_TYPE_SIZE(hostent);
+CHECK_SIZE_AND_OFFSET(hostent, h_name);
+CHECK_SIZE_AND_OFFSET(hostent, h_aliases);
+CHECK_SIZE_AND_OFFSET(hostent, h_addrtype);
+CHECK_SIZE_AND_OFFSET(hostent, h_length);
+CHECK_SIZE_AND_OFFSET(hostent, h_addr_list);
+
+CHECK_TYPE_SIZE(iovec);
+CHECK_SIZE_AND_OFFSET(iovec, iov_base);
+CHECK_SIZE_AND_OFFSET(iovec, iov_len);
+
+CHECK_TYPE_SIZE(msghdr);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_name);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_namelen);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_iov);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_iovlen);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_control);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_controllen);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_flags);
+
+CHECK_TYPE_SIZE(cmsghdr);
+CHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_len);
+CHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_level);
+CHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_type);
+
+COMPILER_CHECK(sizeof(__sanitizer_dirent) <= sizeof(dirent));
+CHECK_SIZE_AND_OFFSET(dirent, d_ino);
+CHECK_SIZE_AND_OFFSET(dirent, d_reclen);
+
+CHECK_TYPE_SIZE(ifconf);
+CHECK_SIZE_AND_OFFSET(ifconf, ifc_len);
+CHECK_SIZE_AND_OFFSET(ifconf, ifc_ifcu);
+
+CHECK_TYPE_SIZE(pollfd);
+CHECK_SIZE_AND_OFFSET(pollfd, fd);
+CHECK_SIZE_AND_OFFSET(pollfd, events);
+CHECK_SIZE_AND_OFFSET(pollfd, revents);
+
+CHECK_TYPE_SIZE(nfds_t);
+
+CHECK_TYPE_SIZE(sigset_t);
+
+COMPILER_CHECK(sizeof(__sanitizer_sigaction) == sizeof(struct sigaction));
+// Can't write checks for sa_handler and sa_sigaction due to them being
+// preprocessor macros.
+CHECK_STRUCT_SIZE_AND_OFFSET(sigaction, sa_mask);
+
+CHECK_TYPE_SIZE(wordexp_t);
+CHECK_SIZE_AND_OFFSET(wordexp_t, we_wordc);
+CHECK_SIZE_AND_OFFSET(wordexp_t, we_wordv);
+CHECK_SIZE_AND_OFFSET(wordexp_t, we_offs);
+
+CHECK_TYPE_SIZE(tm);
+CHECK_SIZE_AND_OFFSET(tm, tm_sec);
+CHECK_SIZE_AND_OFFSET(tm, tm_min);
+CHECK_SIZE_AND_OFFSET(tm, tm_hour);
+CHECK_SIZE_AND_OFFSET(tm, tm_mday);
+CHECK_SIZE_AND_OFFSET(tm, tm_mon);
+CHECK_SIZE_AND_OFFSET(tm, tm_year);
+CHECK_SIZE_AND_OFFSET(tm, tm_wday);
+CHECK_SIZE_AND_OFFSET(tm, tm_yday);
+CHECK_SIZE_AND_OFFSET(tm, tm_isdst);
+CHECK_SIZE_AND_OFFSET(tm, tm_gmtoff);
+CHECK_SIZE_AND_OFFSET(tm, tm_zone);
+
+CHECK_TYPE_SIZE(ether_addr);
+
+CHECK_TYPE_SIZE(ipc_perm);
+CHECK_SIZE_AND_OFFSET(ipc_perm, key);
+CHECK_SIZE_AND_OFFSET(ipc_perm, seq);
+CHECK_SIZE_AND_OFFSET(ipc_perm, uid);
+CHECK_SIZE_AND_OFFSET(ipc_perm, gid);
+CHECK_SIZE_AND_OFFSET(ipc_perm, cuid);
+CHECK_SIZE_AND_OFFSET(ipc_perm, cgid);
+
+CHECK_TYPE_SIZE(shmid_ds);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_perm);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_segsz);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_atime);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_dtime);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_ctime);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_cpid);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_lpid);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_nattch);
+
+CHECK_TYPE_SIZE(clock_t);
+
+CHECK_TYPE_SIZE(ifaddrs);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_next);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_name);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_addr);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_netmask);
+#undef ifa_dstaddr
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_dstaddr);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_data);
+
+CHECK_TYPE_SIZE(timeb);
+CHECK_SIZE_AND_OFFSET(timeb, time);
+CHECK_SIZE_AND_OFFSET(timeb, millitm);
+CHECK_SIZE_AND_OFFSET(timeb, timezone);
+CHECK_SIZE_AND_OFFSET(timeb, dstflag);
+
+CHECK_TYPE_SIZE(passwd);
+CHECK_SIZE_AND_OFFSET(passwd, pw_name);
+CHECK_SIZE_AND_OFFSET(passwd, pw_passwd);
+CHECK_SIZE_AND_OFFSET(passwd, pw_uid);
+CHECK_SIZE_AND_OFFSET(passwd, pw_gid);
+CHECK_SIZE_AND_OFFSET(passwd, pw_dir);
+CHECK_SIZE_AND_OFFSET(passwd, pw_shell);
+
+CHECK_SIZE_AND_OFFSET(passwd, pw_gecos);
+
+CHECK_TYPE_SIZE(group);
+CHECK_SIZE_AND_OFFSET(group, gr_name);
+CHECK_SIZE_AND_OFFSET(group, gr_passwd);
+CHECK_SIZE_AND_OFFSET(group, gr_gid);
+CHECK_SIZE_AND_OFFSET(group, gr_mem);
+
+#if HAVE_RPC_XDR_H
+CHECK_TYPE_SIZE(XDR);
+CHECK_SIZE_AND_OFFSET(XDR, x_op);
+CHECK_SIZE_AND_OFFSET(XDR, x_ops);
+CHECK_SIZE_AND_OFFSET(XDR, x_public);
+CHECK_SIZE_AND_OFFSET(XDR, x_private);
+CHECK_SIZE_AND_OFFSET(XDR, x_base);
+CHECK_SIZE_AND_OFFSET(XDR, x_handy);
+COMPILER_CHECK(__sanitizer_XDR_ENCODE == XDR_ENCODE);
+COMPILER_CHECK(__sanitizer_XDR_DECODE == XDR_DECODE);
+COMPILER_CHECK(__sanitizer_XDR_FREE == XDR_FREE);
+#endif
+
+CHECK_TYPE_SIZE(sem_t);
+
+COMPILER_CHECK(sizeof(__sanitizer_cap_rights_t) >= sizeof(cap_rights_t));
+#endif // SANITIZER_FREEBSD
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_platform_limits_freebsd.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_platform_limits_freebsd.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_platform_limits_freebsd.h (revision 351984)
@@ -0,0 +1,656 @@
+//===-- sanitizer_platform_limits_freebsd.h -------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of Sanitizer common code.
+//
+// Sizes and layouts of platform-specific FreeBSD data structures.
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_PLATFORM_LIMITS_FREEBSD_H
+#define SANITIZER_PLATFORM_LIMITS_FREEBSD_H
+
+#if SANITIZER_FREEBSD
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_platform.h"
+
+#include "sanitizer_platform_limits_posix.h"
+
+// FreeBSD's dlopen() returns a pointer to an Obj_Entry structure that
+// incorporates the map structure.
+# define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) \
+ ((link_map*)((handle) == nullptr ? nullptr : ((char*)(handle) + 560)))
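+// Illustrative use (an editor's sketch, not part of this import):
+//   void *h = dlopen("libfoo.so", RTLD_NOW);          // hypothetical library
+//   link_map *lm = GET_LINK_MAP_BY_DLOPEN_HANDLE(h);  // 560 bytes into the
+//                                                     // rtld Obj_Entry
+// The 560-byte offset is specific to the FreeBSD rtld layout targeted here.
+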
+// Get sys/_types.h, because that tells us whether 64-bit inodes are
+// used in struct dirent below.
+#include <sys/_types.h>
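+// (struct __sanitizer_dirent below switches its d_fileno/d_off layout on the
+// __INO64 macro that this header provides.)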
+
+namespace __sanitizer {
+ extern unsigned struct_utsname_sz;
+ extern unsigned struct_stat_sz;
+#if defined(__powerpc64__)
+ const unsigned struct___old_kernel_stat_sz = 0;
+#else
+ const unsigned struct___old_kernel_stat_sz = 32;
+#endif
+ extern unsigned struct_rusage_sz;
+ extern unsigned siginfo_t_sz;
+ extern unsigned struct_itimerval_sz;
+ extern unsigned pthread_t_sz;
+ extern unsigned pthread_mutex_t_sz;
+ extern unsigned pthread_cond_t_sz;
+ extern unsigned pid_t_sz;
+ extern unsigned timeval_sz;
+ extern unsigned uid_t_sz;
+ extern unsigned gid_t_sz;
+ extern unsigned fpos_t_sz;
+ extern unsigned mbstate_t_sz;
+ extern unsigned struct_timezone_sz;
+ extern unsigned struct_tms_sz;
+ extern unsigned struct_itimerspec_sz;
+ extern unsigned struct_sigevent_sz;
+ extern unsigned struct_sched_param_sz;
+ extern unsigned struct_statfs64_sz;
+ extern unsigned struct_statfs_sz;
+ extern unsigned struct_sockaddr_sz;
+ extern unsigned ucontext_t_sz;
+ extern unsigned struct_rlimit_sz;
+ extern unsigned struct_utimbuf_sz;
+ extern unsigned struct_timespec_sz;
+ extern unsigned struct_regmatch_sz;
+ extern unsigned struct_regex_sz;
+ extern unsigned struct_FTS_sz;
+ extern unsigned struct_FTSENT_sz;
+ extern const int unvis_valid;
+ extern const int unvis_validpush;
+
+ struct __sanitizer_iocb {
+ u64 aio_data;
+ u32 aio_key_or_aio_reserved1; // Simply crazy.
+ u32 aio_reserved1_or_aio_key; // Luckily, we don't need these.
+ u16 aio_lio_opcode;
+ s16 aio_reqprio;
+ u32 aio_fildes;
+ u64 aio_buf;
+ u64 aio_nbytes;
+ s64 aio_offset;
+ u64 aio_reserved2;
+ u64 aio_reserved3;
+ };
+
+ struct __sanitizer_io_event {
+ u64 data;
+ u64 obj;
+ u64 res;
+ u64 res2;
+ };
+
+ const unsigned iocb_cmd_pread = 0;
+ const unsigned iocb_cmd_pwrite = 1;
+ const unsigned iocb_cmd_preadv = 7;
+ const unsigned iocb_cmd_pwritev = 8;
+
+ struct __sanitizer___sysctl_args {
+ int *name;
+ int nlen;
+ void *oldval;
+ uptr *oldlenp;
+ void *newval;
+ uptr newlen;
+ unsigned long ___unused[4];
+ };
+
+ struct __sanitizer_ipc_perm {
+ unsigned int cuid;
+ unsigned int cgid;
+ unsigned int uid;
+ unsigned int gid;
+ unsigned short mode;
+ unsigned short seq;
+ long key;
+ };
+
+ struct __sanitizer_shmid_ds {
+ __sanitizer_ipc_perm shm_perm;
+ unsigned long shm_segsz;
+ unsigned int shm_lpid;
+ unsigned int shm_cpid;
+ int shm_nattch;
+ unsigned long shm_atime;
+ unsigned long shm_dtime;
+ unsigned long shm_ctime;
+ };
+
+ extern unsigned struct_msqid_ds_sz;
+ extern unsigned struct_mq_attr_sz;
+ extern unsigned struct_timeb_sz;
+ extern unsigned struct_statvfs_sz;
+
+ struct __sanitizer_iovec {
+ void *iov_base;
+ uptr iov_len;
+ };
+
+ struct __sanitizer_ifaddrs {
+ struct __sanitizer_ifaddrs *ifa_next;
+ char *ifa_name;
+ unsigned int ifa_flags;
+ void *ifa_addr; // (struct sockaddr *)
+ void *ifa_netmask; // (struct sockaddr *)
+# undef ifa_dstaddr
+ void *ifa_dstaddr; // (struct sockaddr *)
+ void *ifa_data;
+ };
+
+ typedef unsigned __sanitizer_pthread_key_t;
+
+ struct __sanitizer_passwd {
+ char *pw_name;
+ char *pw_passwd;
+ int pw_uid;
+ int pw_gid;
+ long pw_change;
+ char *pw_class;
+ char *pw_gecos;
+ char *pw_dir;
+ char *pw_shell;
+ long pw_expire;
+ int pw_fields;
+ };
+
+ struct __sanitizer_group {
+ char *gr_name;
+ char *gr_passwd;
+ int gr_gid;
+ char **gr_mem;
+ };
+
+#if defined(__LP64__)
+ typedef long long __sanitizer_time_t;
+#else
+ typedef long __sanitizer_time_t;
+#endif
+
+ typedef long __sanitizer_suseconds_t;
+
+ struct __sanitizer_timeval {
+ __sanitizer_time_t tv_sec;
+ __sanitizer_suseconds_t tv_usec;
+ };
+
+ struct __sanitizer_itimerval {
+ struct __sanitizer_timeval it_interval;
+ struct __sanitizer_timeval it_value;
+ };
+
+ struct __sanitizer_timeb {
+ __sanitizer_time_t time;
+ unsigned short millitm;
+ short timezone;
+ short dstflag;
+ };
+
+ struct __sanitizer_ether_addr {
+ u8 octet[6];
+ };
+
+ struct __sanitizer_tm {
+ int tm_sec;
+ int tm_min;
+ int tm_hour;
+ int tm_mday;
+ int tm_mon;
+ int tm_year;
+ int tm_wday;
+ int tm_yday;
+ int tm_isdst;
+ long int tm_gmtoff;
+ const char *tm_zone;
+ };
+
+ struct __sanitizer_msghdr {
+ void *msg_name;
+ unsigned msg_namelen;
+ struct __sanitizer_iovec *msg_iov;
+ unsigned msg_iovlen;
+ void *msg_control;
+ unsigned msg_controllen;
+ int msg_flags;
+ };
+
+ struct __sanitizer_cmsghdr {
+ unsigned cmsg_len;
+ int cmsg_level;
+ int cmsg_type;
+ };
+
+ struct __sanitizer_dirent {
+#if defined(__INO64)
+ unsigned long long d_fileno;
+ unsigned long long d_off;
+#else
+ unsigned int d_fileno;
+#endif
+ unsigned short d_reclen;
+ // more fields that we don't care about
+ };
+
+  // 'clock_t' is 32 bits wide on x64 FreeBSD
+ typedef int __sanitizer_clock_t;
+ typedef int __sanitizer_clockid_t;
+
+#if defined(_LP64) || defined(__x86_64__) || defined(__powerpc__)\
+ || defined(__mips__)
+ typedef unsigned __sanitizer___kernel_uid_t;
+ typedef unsigned __sanitizer___kernel_gid_t;
+#else
+ typedef unsigned short __sanitizer___kernel_uid_t;
+ typedef unsigned short __sanitizer___kernel_gid_t;
+#endif
+ typedef long long __sanitizer___kernel_off_t;
+
+#if defined(__powerpc__) || defined(__mips__)
+ typedef unsigned int __sanitizer___kernel_old_uid_t;
+ typedef unsigned int __sanitizer___kernel_old_gid_t;
+#else
+ typedef unsigned short __sanitizer___kernel_old_uid_t;
+ typedef unsigned short __sanitizer___kernel_old_gid_t;
+#endif
+
+ typedef long long __sanitizer___kernel_loff_t;
+ typedef struct {
+ unsigned long fds_bits[1024 / (8 * sizeof(long))];
+ } __sanitizer___kernel_fd_set;
+
+ // This thing depends on the platform. We are only interested in the upper
+ // limit. Verified with a compiler assert in .cc.
+ const int pthread_attr_t_max_sz = 128;
+ union __sanitizer_pthread_attr_t {
+ char size[pthread_attr_t_max_sz]; // NOLINT
+ void *align;
+ };
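+  // The char member above reserves at least as much storage as any real
+  // pthread_attr_t and the void* member forces pointer alignment; the .cc
+  // file asserts the bound:
+  //   COMPILER_CHECK(sizeof(__sanitizer_pthread_attr_t) >=
+  //                  sizeof(pthread_attr_t));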
+
+ const unsigned old_sigset_t_sz = sizeof(unsigned long);
+
+ struct __sanitizer_sigset_t {
+ // uint32_t * 4
+ unsigned int __bits[4];
+ };
+
+ typedef __sanitizer_sigset_t __sanitizer_kernel_sigset_t;
+
+ struct __sanitizer_siginfo {
+ // The size is determined by looking at sizeof of real siginfo_t on linux.
+ u64 opaque[128 / sizeof(u64)];
+ };
+
+ using __sanitizer_sighandler_ptr = void (*)(int sig);
+ using __sanitizer_sigactionhandler_ptr =
+ void (*)(int sig, __sanitizer_siginfo *siginfo, void *uctx);
+
+ struct __sanitizer_sigaction {
+ union {
+ __sanitizer_sigactionhandler_ptr sigaction;
+ __sanitizer_sighandler_ptr handler;
+ };
+ int sa_flags;
+ __sanitizer_sigset_t sa_mask;
+ };
+
+ struct __sanitizer_sem_t {
+ u32 data[4];
+ };
+
+ extern const uptr sig_ign;
+ extern const uptr sig_dfl;
+ extern const uptr sig_err;
+ extern const uptr sa_siginfo;
+
+ extern int af_inet;
+ extern int af_inet6;
+ uptr __sanitizer_in_addr_sz(int af);
+
+ struct __sanitizer_dl_phdr_info {
+ uptr dlpi_addr;
+ const char *dlpi_name;
+ const void *dlpi_phdr;
+ short dlpi_phnum;
+ };
+
+ extern unsigned struct_ElfW_Phdr_sz;
+
+ struct __sanitizer_addrinfo {
+ int ai_flags;
+ int ai_family;
+ int ai_socktype;
+ int ai_protocol;
+ unsigned ai_addrlen;
+ char *ai_canonname;
+ void *ai_addr;
+ struct __sanitizer_addrinfo *ai_next;
+ };
+
+ struct __sanitizer_hostent {
+ char *h_name;
+ char **h_aliases;
+ int h_addrtype;
+ int h_length;
+ char **h_addr_list;
+ };
+
+ struct __sanitizer_pollfd {
+ int fd;
+ short events;
+ short revents;
+ };
+
+ typedef unsigned __sanitizer_nfds_t;
+
+ struct __sanitizer_glob_t {
+ uptr gl_pathc;
+ uptr gl_matchc;
+ uptr gl_offs;
+ int gl_flags;
+ char **gl_pathv;
+ int (*gl_errfunc)(const char*, int);
+ void (*gl_closedir)(void *dirp);
+ struct dirent *(*gl_readdir)(void *dirp);
+ void *(*gl_opendir)(const char*);
+ int (*gl_lstat)(const char*, void* /* struct stat* */);
+ int (*gl_stat)(const char*, void* /* struct stat* */);
+ };
+
+ extern int glob_nomatch;
+ extern int glob_altdirfunc;
+
+ extern unsigned path_max;
+
+ struct __sanitizer_wordexp_t {
+ uptr we_wordc;
+ char **we_wordv;
+ uptr we_offs;
+ char *we_strings;
+ uptr we_nbytes;
+ };
+
+ typedef void __sanitizer_FILE;
+
+ extern unsigned struct_shminfo_sz;
+ extern unsigned struct_shm_info_sz;
+ extern int shmctl_ipc_stat;
+ extern int shmctl_ipc_info;
+ extern int shmctl_shm_info;
+ extern int shmctl_shm_stat;
+
+ extern unsigned struct_utmpx_sz;
+
+ extern int map_fixed;
+
+ // ioctl arguments
+ struct __sanitizer_ifconf {
+ int ifc_len;
+ union {
+ void *ifcu_req;
+ } ifc_ifcu;
+ };
+
+#define IOC_NRBITS 8
+#define IOC_TYPEBITS 8
+#if defined(__powerpc__) || defined(__powerpc64__) || defined(__mips__)
+#define IOC_SIZEBITS 13
+#define IOC_DIRBITS 3
+#define IOC_NONE 1U
+#define IOC_WRITE 4U
+#define IOC_READ 2U
+#else
+#define IOC_SIZEBITS 14
+#define IOC_DIRBITS 2
+#define IOC_NONE 0U
+#define IOC_WRITE 1U
+#define IOC_READ 2U
+#endif
+#define IOC_NRMASK ((1 << IOC_NRBITS) - 1)
+#define IOC_TYPEMASK ((1 << IOC_TYPEBITS) - 1)
+#define IOC_SIZEMASK ((1 << IOC_SIZEBITS) - 1)
+#if defined(IOC_DIRMASK)
+#undef IOC_DIRMASK
+#endif
+#define IOC_DIRMASK ((1 << IOC_DIRBITS) - 1)
+#define IOC_NRSHIFT 0
+#define IOC_TYPESHIFT (IOC_NRSHIFT + IOC_NRBITS)
+#define IOC_SIZESHIFT (IOC_TYPESHIFT + IOC_TYPEBITS)
+#define IOC_DIRSHIFT (IOC_SIZESHIFT + IOC_SIZEBITS)
+#define EVIOC_EV_MAX 0x1f
+#define EVIOC_ABS_MAX 0x3f
+
+#define IOC_DIR(nr) (((nr) >> IOC_DIRSHIFT) & IOC_DIRMASK)
+#define IOC_TYPE(nr) (((nr) >> IOC_TYPESHIFT) & IOC_TYPEMASK)
+#define IOC_NR(nr) (((nr) >> IOC_NRSHIFT) & IOC_NRMASK)
+#define IOC_SIZE(nr) (((nr) >> IOC_SIZESHIFT) & IOC_SIZEMASK)
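+
+// Worked example (hypothetical request value, default non-powerpc layout):
+// for nr = 0x40045702,
+//   IOC_NR(nr)   = 0x02  (bits 0-7)
+//   IOC_TYPE(nr) = 0x57  (bits 8-15)
+//   IOC_SIZE(nr) = 4     (bits 16-29: a 4-byte argument)
+//   IOC_DIR(nr)  = 1     (bits 30-31: IOC_WRITE)
+// The ioctl interceptors use the size and direction to decide how many bytes
+// of the argument to check and whether it is read or written.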
+
+ extern unsigned struct_ifreq_sz;
+ extern unsigned struct_termios_sz;
+ extern unsigned struct_winsize_sz;
+
+ extern unsigned struct_copr_buffer_sz;
+ extern unsigned struct_copr_debug_buf_sz;
+ extern unsigned struct_copr_msg_sz;
+ extern unsigned struct_midi_info_sz;
+ extern unsigned struct_mtget_sz;
+ extern unsigned struct_mtop_sz;
+ extern unsigned struct_rtentry_sz;
+ extern unsigned struct_sbi_instrument_sz;
+ extern unsigned struct_seq_event_rec_sz;
+ extern unsigned struct_synth_info_sz;
+ extern unsigned struct_vt_mode_sz;
+
+ extern const unsigned long __sanitizer_bufsiz;
+ extern unsigned struct_audio_buf_info_sz;
+ extern unsigned struct_ppp_stats_sz;
+ extern unsigned struct_sioc_sg_req_sz;
+ extern unsigned struct_sioc_vif_req_sz;
+
+ // ioctl request identifiers
+
+ // A special value to mark ioctls that are not present on the target platform,
+ // when it can not be determined without including any system headers.
+ extern const unsigned IOCTL_NOT_PRESENT;
+
+ extern unsigned IOCTL_FIOASYNC;
+ extern unsigned IOCTL_FIOCLEX;
+ extern unsigned IOCTL_FIOGETOWN;
+ extern unsigned IOCTL_FIONBIO;
+ extern unsigned IOCTL_FIONCLEX;
+ extern unsigned IOCTL_FIOSETOWN;
+ extern unsigned IOCTL_SIOCADDMULTI;
+ extern unsigned IOCTL_SIOCATMARK;
+ extern unsigned IOCTL_SIOCDELMULTI;
+ extern unsigned IOCTL_SIOCGIFADDR;
+ extern unsigned IOCTL_SIOCGIFBRDADDR;
+ extern unsigned IOCTL_SIOCGIFCONF;
+ extern unsigned IOCTL_SIOCGIFDSTADDR;
+ extern unsigned IOCTL_SIOCGIFFLAGS;
+ extern unsigned IOCTL_SIOCGIFMETRIC;
+ extern unsigned IOCTL_SIOCGIFMTU;
+ extern unsigned IOCTL_SIOCGIFNETMASK;
+ extern unsigned IOCTL_SIOCGPGRP;
+ extern unsigned IOCTL_SIOCSIFADDR;
+ extern unsigned IOCTL_SIOCSIFBRDADDR;
+ extern unsigned IOCTL_SIOCSIFDSTADDR;
+ extern unsigned IOCTL_SIOCSIFFLAGS;
+ extern unsigned IOCTL_SIOCSIFMETRIC;
+ extern unsigned IOCTL_SIOCSIFMTU;
+ extern unsigned IOCTL_SIOCSIFNETMASK;
+ extern unsigned IOCTL_SIOCSPGRP;
+ extern unsigned IOCTL_TIOCCONS;
+ extern unsigned IOCTL_TIOCEXCL;
+ extern unsigned IOCTL_TIOCGETD;
+ extern unsigned IOCTL_TIOCGPGRP;
+ extern unsigned IOCTL_TIOCGWINSZ;
+ extern unsigned IOCTL_TIOCMBIC;
+ extern unsigned IOCTL_TIOCMBIS;
+ extern unsigned IOCTL_TIOCMGET;
+ extern unsigned IOCTL_TIOCMSET;
+ extern unsigned IOCTL_TIOCNOTTY;
+ extern unsigned IOCTL_TIOCNXCL;
+ extern unsigned IOCTL_TIOCOUTQ;
+ extern unsigned IOCTL_TIOCPKT;
+ extern unsigned IOCTL_TIOCSCTTY;
+ extern unsigned IOCTL_TIOCSETD;
+ extern unsigned IOCTL_TIOCSPGRP;
+ extern unsigned IOCTL_TIOCSTI;
+ extern unsigned IOCTL_TIOCSWINSZ;
+ extern unsigned IOCTL_SIOCGETSGCNT;
+ extern unsigned IOCTL_SIOCGETVIFCNT;
+ extern unsigned IOCTL_MTIOCGET;
+ extern unsigned IOCTL_MTIOCTOP;
+ extern unsigned IOCTL_SIOCADDRT;
+ extern unsigned IOCTL_SIOCDELRT;
+ extern unsigned IOCTL_SNDCTL_DSP_GETBLKSIZE;
+ extern unsigned IOCTL_SNDCTL_DSP_GETFMTS;
+ extern unsigned IOCTL_SNDCTL_DSP_NONBLOCK;
+ extern unsigned IOCTL_SNDCTL_DSP_POST;
+ extern unsigned IOCTL_SNDCTL_DSP_RESET;
+ extern unsigned IOCTL_SNDCTL_DSP_SETFMT;
+ extern unsigned IOCTL_SNDCTL_DSP_SETFRAGMENT;
+ extern unsigned IOCTL_SNDCTL_DSP_SPEED;
+ extern unsigned IOCTL_SNDCTL_DSP_STEREO;
+ extern unsigned IOCTL_SNDCTL_DSP_SUBDIVIDE;
+ extern unsigned IOCTL_SNDCTL_DSP_SYNC;
+ extern unsigned IOCTL_SNDCTL_FM_4OP_ENABLE;
+ extern unsigned IOCTL_SNDCTL_FM_LOAD_INSTR;
+ extern unsigned IOCTL_SNDCTL_MIDI_INFO;
+ extern unsigned IOCTL_SNDCTL_MIDI_PRETIME;
+ extern unsigned IOCTL_SNDCTL_SEQ_CTRLRATE;
+ extern unsigned IOCTL_SNDCTL_SEQ_GETINCOUNT;
+ extern unsigned IOCTL_SNDCTL_SEQ_GETOUTCOUNT;
+ extern unsigned IOCTL_SNDCTL_SEQ_NRMIDIS;
+ extern unsigned IOCTL_SNDCTL_SEQ_NRSYNTHS;
+ extern unsigned IOCTL_SNDCTL_SEQ_OUTOFBAND;
+ extern unsigned IOCTL_SNDCTL_SEQ_PANIC;
+ extern unsigned IOCTL_SNDCTL_SEQ_PERCMODE;
+ extern unsigned IOCTL_SNDCTL_SEQ_RESET;
+ extern unsigned IOCTL_SNDCTL_SEQ_RESETSAMPLES;
+ extern unsigned IOCTL_SNDCTL_SEQ_SYNC;
+ extern unsigned IOCTL_SNDCTL_SEQ_TESTMIDI;
+ extern unsigned IOCTL_SNDCTL_SEQ_THRESHOLD;
+ extern unsigned IOCTL_SNDCTL_SYNTH_INFO;
+ extern unsigned IOCTL_SNDCTL_SYNTH_MEMAVL;
+ extern unsigned IOCTL_SNDCTL_TMR_CONTINUE;
+ extern unsigned IOCTL_SNDCTL_TMR_METRONOME;
+ extern unsigned IOCTL_SNDCTL_TMR_SELECT;
+ extern unsigned IOCTL_SNDCTL_TMR_SOURCE;
+ extern unsigned IOCTL_SNDCTL_TMR_START;
+ extern unsigned IOCTL_SNDCTL_TMR_STOP;
+ extern unsigned IOCTL_SNDCTL_TMR_TEMPO;
+ extern unsigned IOCTL_SNDCTL_TMR_TIMEBASE;
+ extern unsigned IOCTL_SOUND_MIXER_READ_ALTPCM;
+ extern unsigned IOCTL_SOUND_MIXER_READ_BASS;
+ extern unsigned IOCTL_SOUND_MIXER_READ_CAPS;
+ extern unsigned IOCTL_SOUND_MIXER_READ_CD;
+ extern unsigned IOCTL_SOUND_MIXER_READ_DEVMASK;
+ extern unsigned IOCTL_SOUND_MIXER_READ_ENHANCE;
+ extern unsigned IOCTL_SOUND_MIXER_READ_IGAIN;
+ extern unsigned IOCTL_SOUND_MIXER_READ_IMIX;
+ extern unsigned IOCTL_SOUND_MIXER_READ_LINE1;
+ extern unsigned IOCTL_SOUND_MIXER_READ_LINE2;
+ extern unsigned IOCTL_SOUND_MIXER_READ_LINE3;
+ extern unsigned IOCTL_SOUND_MIXER_READ_LINE;
+ extern unsigned IOCTL_SOUND_MIXER_READ_LOUD;
+ extern unsigned IOCTL_SOUND_MIXER_READ_MIC;
+ extern unsigned IOCTL_SOUND_MIXER_READ_MUTE;
+ extern unsigned IOCTL_SOUND_MIXER_READ_OGAIN;
+ extern unsigned IOCTL_SOUND_MIXER_READ_PCM;
+ extern unsigned IOCTL_SOUND_MIXER_READ_RECLEV;
+ extern unsigned IOCTL_SOUND_MIXER_READ_RECMASK;
+ extern unsigned IOCTL_SOUND_MIXER_READ_RECSRC;
+ extern unsigned IOCTL_SOUND_MIXER_READ_SPEAKER;
+ extern unsigned IOCTL_SOUND_MIXER_READ_STEREODEVS;
+ extern unsigned IOCTL_SOUND_MIXER_READ_SYNTH;
+ extern unsigned IOCTL_SOUND_MIXER_READ_TREBLE;
+ extern unsigned IOCTL_SOUND_MIXER_READ_VOLUME;
+ extern unsigned IOCTL_SOUND_MIXER_WRITE_ALTPCM;
+ extern unsigned IOCTL_SOUND_MIXER_WRITE_BASS;
+ extern unsigned IOCTL_SOUND_MIXER_WRITE_CD;
+ extern unsigned IOCTL_SOUND_MIXER_WRITE_ENHANCE;
+ extern unsigned IOCTL_SOUND_MIXER_WRITE_IGAIN;
+ extern unsigned IOCTL_SOUND_MIXER_WRITE_IMIX;
+ extern unsigned IOCTL_SOUND_MIXER_WRITE_LINE1;
+ extern unsigned IOCTL_SOUND_MIXER_WRITE_LINE2;
+ extern unsigned IOCTL_SOUND_MIXER_WRITE_LINE3;
+ extern unsigned IOCTL_SOUND_MIXER_WRITE_LINE;
+ extern unsigned IOCTL_SOUND_MIXER_WRITE_LOUD;
+ extern unsigned IOCTL_SOUND_MIXER_WRITE_MIC;
+ extern unsigned IOCTL_SOUND_MIXER_WRITE_MUTE;
+ extern unsigned IOCTL_SOUND_MIXER_WRITE_OGAIN;
+ extern unsigned IOCTL_SOUND_MIXER_WRITE_PCM;
+ extern unsigned IOCTL_SOUND_MIXER_WRITE_RECLEV;
+ extern unsigned IOCTL_SOUND_MIXER_WRITE_RECSRC;
+ extern unsigned IOCTL_SOUND_MIXER_WRITE_SPEAKER;
+ extern unsigned IOCTL_SOUND_MIXER_WRITE_SYNTH;
+ extern unsigned IOCTL_SOUND_MIXER_WRITE_TREBLE;
+ extern unsigned IOCTL_SOUND_MIXER_WRITE_VOLUME;
+ extern unsigned IOCTL_SOUND_PCM_READ_BITS;
+ extern unsigned IOCTL_SOUND_PCM_READ_CHANNELS;
+ extern unsigned IOCTL_SOUND_PCM_READ_FILTER;
+ extern unsigned IOCTL_SOUND_PCM_READ_RATE;
+ extern unsigned IOCTL_SOUND_PCM_WRITE_CHANNELS;
+ extern unsigned IOCTL_SOUND_PCM_WRITE_FILTER;
+ extern unsigned IOCTL_VT_ACTIVATE;
+ extern unsigned IOCTL_VT_GETMODE;
+ extern unsigned IOCTL_VT_OPENQRY;
+ extern unsigned IOCTL_VT_RELDISP;
+ extern unsigned IOCTL_VT_SETMODE;
+ extern unsigned IOCTL_VT_WAITACTIVE;
+ extern unsigned IOCTL_GIO_SCRNMAP;
+ extern unsigned IOCTL_KDDISABIO;
+ extern unsigned IOCTL_KDENABIO;
+ extern unsigned IOCTL_KDGETLED;
+ extern unsigned IOCTL_KDGETMODE;
+ extern unsigned IOCTL_KDGKBMODE;
+ extern unsigned IOCTL_KDGKBTYPE;
+ extern unsigned IOCTL_KDMKTONE;
+ extern unsigned IOCTL_KDSETLED;
+ extern unsigned IOCTL_KDSETMODE;
+ extern unsigned IOCTL_KDSKBMODE;
+
+ extern const int si_SEGV_MAPERR;
+ extern const int si_SEGV_ACCERR;
+
+ struct __sanitizer_cap_rights {
+ u64 cr_rights[2];
+ };
+
+ typedef struct __sanitizer_cap_rights __sanitizer_cap_rights_t;
+ extern unsigned struct_cap_rights_sz;
+
+ extern unsigned struct_fstab_sz;
+ extern unsigned struct_StringList_sz;
+} // namespace __sanitizer
+
+#define CHECK_TYPE_SIZE(TYPE) \
+ COMPILER_CHECK(sizeof(__sanitizer_##TYPE) == sizeof(TYPE))
+
+#define CHECK_SIZE_AND_OFFSET(CLASS, MEMBER) \
+ COMPILER_CHECK(sizeof(((__sanitizer_##CLASS *) NULL)->MEMBER) == \
+ sizeof(((CLASS *) NULL)->MEMBER)); \
+ COMPILER_CHECK(offsetof(__sanitizer_##CLASS, MEMBER) == \
+ offsetof(CLASS, MEMBER))
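+
+// For example, CHECK_SIZE_AND_OFFSET(timeval, tv_sec) expands (roughly) to:
+//   COMPILER_CHECK(sizeof(((__sanitizer_timeval *)NULL)->tv_sec) ==
+//                  sizeof(((timeval *)NULL)->tv_sec));
+//   COMPILER_CHECK(offsetof(__sanitizer_timeval, tv_sec) ==
+//                  offsetof(timeval, tv_sec));
+// so any drift between a shadow struct and the system header breaks the build.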
+
+// For sigaction, which is a function and struct at the same time,
+// and thus requires explicit "struct" in sizeof() expression.
+#define CHECK_STRUCT_SIZE_AND_OFFSET(CLASS, MEMBER) \
+ COMPILER_CHECK(sizeof(((struct __sanitizer_##CLASS *) NULL)->MEMBER) == \
+ sizeof(((struct CLASS *) NULL)->MEMBER)); \
+ COMPILER_CHECK(offsetof(struct __sanitizer_##CLASS, MEMBER) == \
+ offsetof(struct CLASS, MEMBER))
+
+#define SIGACTION_SYMNAME sigaction
+
+#endif // SANITIZER_FREEBSD
+
+#endif // SANITIZER_PLATFORM_LIMITS_FREEBSD_H
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_platform_limits_linux.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_platform_limits_linux.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_platform_limits_linux.cc (revision 351984)
@@ -0,0 +1,108 @@
+//===-- sanitizer_platform_limits_linux.cc --------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of Sanitizer common code.
+//
+// Sizes and layouts of linux kernel data structures.
+//===----------------------------------------------------------------------===//
+
+// This is a separate compilation unit for linux headers that conflict with
+// userspace headers.
+// Most "normal" includes go in sanitizer_platform_limits_posix.cc
+
+#include "sanitizer_platform.h"
+#if SANITIZER_LINUX
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_platform_limits_posix.h"
+
+// For offsetof -> __builtin_offsetof definition.
+#include <stddef.h>
+
+// With old kernels (and even new kernels on powerpc) asm/stat.h uses types that
+// are not defined anywhere in userspace headers. Fake them. This seems to work
+// fine with newer headers, too.
+#include <linux/posix_types.h>
+#if defined(__x86_64__) || defined(__mips__)
+#include <sys/stat.h>
+#else
+#define ino_t __kernel_ino_t
+#define mode_t __kernel_mode_t
+#define nlink_t __kernel_nlink_t
+#define uid_t __kernel_uid_t
+#define gid_t __kernel_gid_t
+#define off_t __kernel_off_t
+#define time_t __kernel_time_t
+// This header seems to contain the definitions of _kernel_ stat* structs.
+#include <asm/stat.h>
+#undef ino_t
+#undef mode_t
+#undef nlink_t
+#undef uid_t
+#undef gid_t
+#undef off_t
+#undef time_t
+#endif
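+// (The #define/#undef bracket above lets asm/stat.h parse with kernel types
+// while leaving the userspace typedefs untouched afterwards.)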
+
+#include <linux/aio_abi.h>
+
+#if !SANITIZER_ANDROID
+#include <sys/statfs.h>
+#include <linux/perf_event.h>
+#endif
+
+using namespace __sanitizer;
+
+namespace __sanitizer {
+#if !SANITIZER_ANDROID
+ unsigned struct_statfs64_sz = sizeof(struct statfs64);
+#endif
+} // namespace __sanitizer
+
+#if !defined(__powerpc64__) && !defined(__x86_64__) && !defined(__aarch64__)\
+ && !defined(__mips__) && !defined(__s390__)\
+ && !defined(__sparc__)
+COMPILER_CHECK(struct___old_kernel_stat_sz == sizeof(struct __old_kernel_stat));
+#endif
+
+COMPILER_CHECK(struct_kernel_stat_sz == sizeof(struct stat));
+
+#if defined(__i386__)
+COMPILER_CHECK(struct_kernel_stat64_sz == sizeof(struct stat64));
+#endif
+
+CHECK_TYPE_SIZE(io_event);
+CHECK_SIZE_AND_OFFSET(io_event, data);
+CHECK_SIZE_AND_OFFSET(io_event, obj);
+CHECK_SIZE_AND_OFFSET(io_event, res);
+CHECK_SIZE_AND_OFFSET(io_event, res2);
+
+#if !SANITIZER_ANDROID
+COMPILER_CHECK(sizeof(struct __sanitizer_perf_event_attr) <=
+ sizeof(struct perf_event_attr));
+CHECK_SIZE_AND_OFFSET(perf_event_attr, type);
+CHECK_SIZE_AND_OFFSET(perf_event_attr, size);
+#endif
+
+COMPILER_CHECK(iocb_cmd_pread == IOCB_CMD_PREAD);
+COMPILER_CHECK(iocb_cmd_pwrite == IOCB_CMD_PWRITE);
+#if !SANITIZER_ANDROID
+COMPILER_CHECK(iocb_cmd_preadv == IOCB_CMD_PREADV);
+COMPILER_CHECK(iocb_cmd_pwritev == IOCB_CMD_PWRITEV);
+#endif
+
+CHECK_TYPE_SIZE(iocb);
+CHECK_SIZE_AND_OFFSET(iocb, aio_data);
+// Skip aio_key, it's weird.
+CHECK_SIZE_AND_OFFSET(iocb, aio_lio_opcode);
+CHECK_SIZE_AND_OFFSET(iocb, aio_reqprio);
+CHECK_SIZE_AND_OFFSET(iocb, aio_fildes);
+CHECK_SIZE_AND_OFFSET(iocb, aio_buf);
+CHECK_SIZE_AND_OFFSET(iocb, aio_nbytes);
+CHECK_SIZE_AND_OFFSET(iocb, aio_offset);
+
+#endif // SANITIZER_LINUX
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_platform_limits_linux.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_platform_limits_openbsd.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_platform_limits_openbsd.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_platform_limits_openbsd.cc (revision 351984)
@@ -0,0 +1,278 @@
+//===-- sanitizer_platform_limits_openbsd.cc ------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of Sanitizer common code.
+//
+// Sizes and layouts of platform-specific OpenBSD data structures.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+
+#if SANITIZER_OPENBSD
+#include <arpa/inet.h>
+#include <dirent.h>
+#include <glob.h>
+#include <grp.h>
+#include <ifaddrs.h>
+#include <limits.h>
+#include <link_elf.h>
+#include <sys/socket.h>
+#include <net/if.h>
+#include <net/ppp_defs.h>
+#include <net/route.h>
+#include <netdb.h>
+#include <netinet/in.h>
+#include <netinet/ip_mroute.h>
+#include <poll.h>
+#include <pthread.h>
+#include <pwd.h>
+#include <semaphore.h>
+#include <signal.h>
+#include <soundcard.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <sys/filio.h>
+#include <sys/ipc.h>
+#include <sys/mman.h>
+#include <sys/mount.h>
+#include <sys/msg.h>
+#include <sys/mtio.h>
+#include <sys/ptrace.h>
+#include <sys/resource.h>
+#include <sys/shm.h>
+#include <sys/signal.h>
+#include <sys/sockio.h>
+#include <sys/stat.h>
+#include <sys/statvfs.h>
+#include <sys/time.h>
+#include <sys/times.h>
+#include <sys/types.h>
+#include <sys/utsname.h>
+#include <term.h>
+#include <time.h>
+#include <utime.h>
+#include <utmp.h>
+#include <wchar.h>
+
+// Include these after system headers to avoid name clashes and ambiguities.
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_platform_limits_openbsd.h"
+
+namespace __sanitizer {
+unsigned struct_utsname_sz = sizeof(struct utsname);
+unsigned struct_stat_sz = sizeof(struct stat);
+unsigned struct_rusage_sz = sizeof(struct rusage);
+unsigned struct_tm_sz = sizeof(struct tm);
+unsigned struct_passwd_sz = sizeof(struct passwd);
+unsigned struct_group_sz = sizeof(struct group);
+unsigned siginfo_t_sz = sizeof(siginfo_t);
+unsigned struct_sigaction_sz = sizeof(struct sigaction);
+unsigned struct_itimerval_sz = sizeof(struct itimerval);
+unsigned pthread_t_sz = sizeof(pthread_t);
+unsigned pthread_mutex_t_sz = sizeof(pthread_mutex_t);
+unsigned pthread_cond_t_sz = sizeof(pthread_cond_t);
+unsigned pid_t_sz = sizeof(pid_t);
+unsigned timeval_sz = sizeof(timeval);
+unsigned uid_t_sz = sizeof(uid_t);
+unsigned gid_t_sz = sizeof(gid_t);
+unsigned mbstate_t_sz = sizeof(mbstate_t);
+unsigned sigset_t_sz = sizeof(sigset_t);
+unsigned struct_timezone_sz = sizeof(struct timezone);
+unsigned struct_tms_sz = sizeof(struct tms);
+unsigned struct_sched_param_sz = sizeof(struct sched_param);
+unsigned struct_sockaddr_sz = sizeof(struct sockaddr);
+unsigned struct_rlimit_sz = sizeof(struct rlimit);
+unsigned struct_timespec_sz = sizeof(struct timespec);
+unsigned struct_utimbuf_sz = sizeof(struct utimbuf);
+unsigned struct_itimerspec_sz = sizeof(struct itimerspec);
+unsigned struct_msqid_ds_sz = sizeof(struct msqid_ds);
+unsigned struct_statvfs_sz = sizeof(struct statvfs);
+
+const uptr sig_ign = (uptr)SIG_IGN;
+const uptr sig_dfl = (uptr)SIG_DFL;
+const uptr sig_err = (uptr)SIG_ERR;
+const uptr sa_siginfo = (uptr)SA_SIGINFO;
+
+int shmctl_ipc_stat = (int)IPC_STAT;
+
+unsigned struct_utmp_sz = sizeof(struct utmp);
+
+int map_fixed = MAP_FIXED;
+
+int af_inet = (int)AF_INET;
+int af_inet6 = (int)AF_INET6;
+
+uptr __sanitizer_in_addr_sz(int af) {
+ if (af == AF_INET)
+ return sizeof(struct in_addr);
+ else if (af == AF_INET6)
+ return sizeof(struct in6_addr);
+ else
+ return 0;
+}
+
+unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
+
+int glob_nomatch = GLOB_NOMATCH;
+int glob_altdirfunc = GLOB_ALTDIRFUNC;
+
+unsigned path_max = PATH_MAX;
+
+const int si_SEGV_MAPERR = SEGV_MAPERR;
+const int si_SEGV_ACCERR = SEGV_ACCERR;
+} // namespace __sanitizer
+
+using namespace __sanitizer;
+
+COMPILER_CHECK(sizeof(__sanitizer_pthread_attr_t) >= sizeof(pthread_attr_t));
+
+COMPILER_CHECK(sizeof(socklen_t) == sizeof(unsigned));
+CHECK_TYPE_SIZE(pthread_key_t);
+
+CHECK_TYPE_SIZE(dl_phdr_info);
+CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_addr);
+CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_name);
+CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_phdr);
+CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_phnum);
+
+CHECK_TYPE_SIZE(glob_t);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_pathc);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_pathv);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_offs);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_flags);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_closedir);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_readdir);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_opendir);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_lstat);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_stat);
+
+CHECK_TYPE_SIZE(addrinfo);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_flags);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_family);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_socktype);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_protocol);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_addrlen);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_addr);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_canonname);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_next);
+
+CHECK_TYPE_SIZE(hostent);
+CHECK_SIZE_AND_OFFSET(hostent, h_name);
+CHECK_SIZE_AND_OFFSET(hostent, h_aliases);
+CHECK_SIZE_AND_OFFSET(hostent, h_addrtype);
+CHECK_SIZE_AND_OFFSET(hostent, h_length);
+CHECK_SIZE_AND_OFFSET(hostent, h_addr_list);
+
+CHECK_TYPE_SIZE(iovec);
+CHECK_SIZE_AND_OFFSET(iovec, iov_base);
+CHECK_SIZE_AND_OFFSET(iovec, iov_len);
+
+CHECK_TYPE_SIZE(msghdr);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_name);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_namelen);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_iov);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_iovlen);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_control);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_controllen);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_flags);
+
+CHECK_TYPE_SIZE(cmsghdr);
+CHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_len);
+CHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_level);
+CHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_type);
+
+COMPILER_CHECK(sizeof(__sanitizer_dirent) <= sizeof(dirent));
+CHECK_SIZE_AND_OFFSET(dirent, d_fileno);
+CHECK_SIZE_AND_OFFSET(dirent, d_off);
+CHECK_SIZE_AND_OFFSET(dirent, d_reclen);
+
+CHECK_TYPE_SIZE(ifconf);
+CHECK_SIZE_AND_OFFSET(ifconf, ifc_len);
+CHECK_SIZE_AND_OFFSET(ifconf, ifc_ifcu);
+
+CHECK_TYPE_SIZE(pollfd);
+CHECK_SIZE_AND_OFFSET(pollfd, fd);
+CHECK_SIZE_AND_OFFSET(pollfd, events);
+CHECK_SIZE_AND_OFFSET(pollfd, revents);
+
+CHECK_TYPE_SIZE(nfds_t);
+
+CHECK_TYPE_SIZE(sigset_t);
+
+COMPILER_CHECK(sizeof(__sanitizer_sigaction) == sizeof(struct sigaction));
+// Can't write checks for sa_handler and sa_sigaction due to them being
+// preprocessor macros.
+CHECK_STRUCT_SIZE_AND_OFFSET(sigaction, sa_mask);
+
+CHECK_TYPE_SIZE(tm);
+CHECK_SIZE_AND_OFFSET(tm, tm_sec);
+CHECK_SIZE_AND_OFFSET(tm, tm_min);
+CHECK_SIZE_AND_OFFSET(tm, tm_hour);
+CHECK_SIZE_AND_OFFSET(tm, tm_mday);
+CHECK_SIZE_AND_OFFSET(tm, tm_mon);
+CHECK_SIZE_AND_OFFSET(tm, tm_year);
+CHECK_SIZE_AND_OFFSET(tm, tm_wday);
+CHECK_SIZE_AND_OFFSET(tm, tm_yday);
+CHECK_SIZE_AND_OFFSET(tm, tm_isdst);
+CHECK_SIZE_AND_OFFSET(tm, tm_gmtoff);
+CHECK_SIZE_AND_OFFSET(tm, tm_zone);
+
+CHECK_TYPE_SIZE(ipc_perm);
+CHECK_SIZE_AND_OFFSET(ipc_perm, cuid);
+CHECK_SIZE_AND_OFFSET(ipc_perm, cgid);
+CHECK_SIZE_AND_OFFSET(ipc_perm, uid);
+CHECK_SIZE_AND_OFFSET(ipc_perm, gid);
+CHECK_SIZE_AND_OFFSET(ipc_perm, mode);
+CHECK_SIZE_AND_OFFSET(ipc_perm, seq);
+CHECK_SIZE_AND_OFFSET(ipc_perm, key);
+
+CHECK_TYPE_SIZE(shmid_ds);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_perm);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_segsz);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_atime);
+CHECK_SIZE_AND_OFFSET(shmid_ds, __shm_atimensec);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_dtime);
+CHECK_SIZE_AND_OFFSET(shmid_ds, __shm_dtimensec);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_ctime);
+CHECK_SIZE_AND_OFFSET(shmid_ds, __shm_ctimensec);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_cpid);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_lpid);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_nattch);
+
+CHECK_TYPE_SIZE(clock_t);
+
+CHECK_TYPE_SIZE(ifaddrs);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_next);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_name);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_addr);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_netmask);
+// Compare against the union, because we can't reach into the union in a
+// compliant way.
+#ifdef ifa_dstaddr
+#undef ifa_dstaddr
+#endif
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_dstaddr);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_data);
+
+CHECK_TYPE_SIZE(passwd);
+CHECK_SIZE_AND_OFFSET(passwd, pw_name);
+CHECK_SIZE_AND_OFFSET(passwd, pw_passwd);
+CHECK_SIZE_AND_OFFSET(passwd, pw_uid);
+CHECK_SIZE_AND_OFFSET(passwd, pw_gid);
+CHECK_SIZE_AND_OFFSET(passwd, pw_dir);
+CHECK_SIZE_AND_OFFSET(passwd, pw_shell);
+
+CHECK_SIZE_AND_OFFSET(passwd, pw_gecos);
+
+CHECK_TYPE_SIZE(group);
+CHECK_SIZE_AND_OFFSET(group, gr_name);
+CHECK_SIZE_AND_OFFSET(group, gr_passwd);
+CHECK_SIZE_AND_OFFSET(group, gr_gid);
+CHECK_SIZE_AND_OFFSET(group, gr_mem);
+
+#endif // SANITIZER_OPENBSD
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_platform_limits_openbsd.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_platform_limits_openbsd.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_platform_limits_openbsd.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_platform_limits_openbsd.h (revision 351984)
@@ -0,0 +1,381 @@
+//===-- sanitizer_platform_limits_openbsd.h -------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of Sanitizer common code.
+//
+// Sizes and layouts of platform-specific OpenBSD data structures.
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_PLATFORM_LIMITS_OPENBSD_H
+#define SANITIZER_PLATFORM_LIMITS_OPENBSD_H
+
+#if SANITIZER_OPENBSD
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_platform.h"
+
+#define _GET_LINK_MAP_BY_DLOPEN_HANDLE(handle, shift) \
+ ((link_map *)((handle) == nullptr ? nullptr : ((char *)(handle) + (shift))))
+
+#if defined(__x86_64__)
+#define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) \
+ _GET_LINK_MAP_BY_DLOPEN_HANDLE(handle, 312)
+#elif defined(__i386__)
+#define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) \
+ _GET_LINK_MAP_BY_DLOPEN_HANDLE(handle, 164)
+#endif
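+// For illustration (a sketch based on the shifts above, not an exported
+// interface): on x86_64, GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) expands to
+//   (link_map *)((handle) == nullptr ? nullptr : ((char *)(handle) + 312))
+// i.e. the link_map of a dlopen()ed object lives at a fixed, ABI-specific
+// byte offset inside the otherwise opaque handle.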
+
+#define RLIMIT_AS RLIMIT_DATA
+
+namespace __sanitizer {
+extern unsigned struct_utsname_sz;
+extern unsigned struct_stat_sz;
+extern unsigned struct_rusage_sz;
+extern unsigned siginfo_t_sz;
+extern unsigned struct_itimerval_sz;
+extern unsigned pthread_t_sz;
+extern unsigned pthread_mutex_t_sz;
+extern unsigned pthread_cond_t_sz;
+extern unsigned pid_t_sz;
+extern unsigned timeval_sz;
+extern unsigned uid_t_sz;
+extern unsigned gid_t_sz;
+extern unsigned mbstate_t_sz;
+extern unsigned struct_timezone_sz;
+extern unsigned struct_tms_sz;
+extern unsigned struct_itimerspec_sz;
+extern unsigned struct_sigevent_sz;
+extern unsigned struct_statfs_sz;
+extern unsigned struct_sockaddr_sz;
+
+extern unsigned struct_rlimit_sz;
+extern unsigned struct_utimbuf_sz;
+extern unsigned struct_timespec_sz;
+
+struct __sanitizer_iocb {
+ u64 aio_offset;
+ uptr aio_buf;
+ long aio_nbytes;
+ u32 aio_fildes;
+ u32 aio_lio_opcode;
+ long aio_reqprio;
+#if SANITIZER_WORDSIZE == 64
+ u8 aio_sigevent[32];
+#else
+ u8 aio_sigevent[20];
+#endif
+ u32 _state;
+ u32 _errno;
+ long _retval;
+};
+
+struct __sanitizer___sysctl_args {
+ int *name;
+ int nlen;
+ void *oldval;
+ uptr *oldlenp;
+ void *newval;
+ uptr newlen;
+};
+
+struct __sanitizer_sem_t {
+ uptr data[5];
+};
+
+struct __sanitizer_ipc_perm {
+ u32 cuid;
+ u32 cgid;
+ u32 uid;
+ u32 gid;
+ u32 mode;
+ unsigned short seq;
+ long key;
+};
+
+struct __sanitizer_shmid_ds {
+ __sanitizer_ipc_perm shm_perm;
+ int shm_segsz;
+ u32 shm_lpid;
+ u32 shm_cpid;
+ short shm_nattch;
+ u64 shm_atime;
+ long __shm_atimensec;
+ u64 shm_dtime;
+ long __shm_dtimensec;
+ u64 shm_ctime;
+ long __shm_ctimensec;
+ void *_shm_internal;
+};
+
+extern unsigned struct_msqid_ds_sz;
+extern unsigned struct_mq_attr_sz;
+extern unsigned struct_timex_sz;
+extern unsigned struct_statvfs_sz;
+
+struct __sanitizer_iovec {
+ void *iov_base;
+ uptr iov_len;
+};
+
+struct __sanitizer_ifaddrs {
+ struct __sanitizer_ifaddrs *ifa_next;
+ char *ifa_name;
+ unsigned int ifa_flags;
+ struct __sanitizer_sockaddr *ifa_addr; // (struct sockaddr *)
+ struct __sanitizer_sockaddr *ifa_netmask; // (struct sockaddr *)
+ struct __sanitizer_sockaddr *ifa_dstaddr; // (struct sockaddr *)
+ void *ifa_data;
+};
+
+typedef unsigned __sanitizer_pthread_key_t;
+
+typedef long long __sanitizer_time_t;
+typedef int __sanitizer_suseconds_t;
+
+struct __sanitizer_timeval {
+ __sanitizer_time_t tv_sec;
+ __sanitizer_suseconds_t tv_usec;
+};
+
+struct __sanitizer_itimerval {
+ struct __sanitizer_timeval it_interval;
+ struct __sanitizer_timeval it_value;
+};
+
+struct __sanitizer_passwd {
+ char *pw_name;
+ char *pw_passwd;
+ int pw_uid;
+ int pw_gid;
+ __sanitizer_time_t pw_change;
+ char *pw_class;
+ char *pw_gecos;
+ char *pw_dir;
+ char *pw_shell;
+ __sanitizer_time_t pw_expire;
+};
+
+struct __sanitizer_group {
+ char *gr_name;
+ char *gr_passwd;
+ int gr_gid;
+ char **gr_mem;
+};
+
+struct __sanitizer_ether_addr {
+ u8 octet[6];
+};
+
+struct __sanitizer_tm {
+ int tm_sec;
+ int tm_min;
+ int tm_hour;
+ int tm_mday;
+ int tm_mon;
+ int tm_year;
+ int tm_wday;
+ int tm_yday;
+ int tm_isdst;
+ long int tm_gmtoff;
+ const char *tm_zone;
+};
+
+struct __sanitizer_msghdr {
+ void *msg_name;
+ unsigned msg_namelen;
+ struct __sanitizer_iovec *msg_iov;
+ unsigned msg_iovlen;
+ void *msg_control;
+ unsigned msg_controllen;
+ int msg_flags;
+};
+struct __sanitizer_cmsghdr {
+ unsigned cmsg_len;
+ int cmsg_level;
+ int cmsg_type;
+};
+
+struct __sanitizer_dirent {
+ u64 d_fileno;
+ u64 d_off;
+ u16 d_reclen;
+};
+
+typedef u64 __sanitizer_clock_t;
+typedef u32 __sanitizer_clockid_t;
+
+typedef u32 __sanitizer___kernel_uid_t;
+typedef u32 __sanitizer___kernel_gid_t;
+typedef u64 __sanitizer___kernel_off_t;
+typedef struct {
+ u32 fds_bits[8];
+} __sanitizer___kernel_fd_set;
+
+typedef struct {
+ unsigned int pta_magic;
+ int pta_flags;
+ void *pta_private;
+} __sanitizer_pthread_attr_t;
+
+typedef unsigned int __sanitizer_sigset_t;
+
+struct __sanitizer_siginfo {
+ // The size matches sizeof(siginfo_t) of the real siginfo_t on Linux
+ // (128 bytes).
+ u64 opaque[128 / sizeof(u64)];
+};
+
+using __sanitizer_sighandler_ptr = void (*)(int sig);
+using __sanitizer_sigactionhandler_ptr = void (*)(int sig,
+ __sanitizer_siginfo *siginfo,
+ void *uctx);
+
+struct __sanitizer_sigaction {
+ union {
+ __sanitizer_sighandler_ptr handler;
+ __sanitizer_sigactionhandler_ptr sigaction;
+ };
+ __sanitizer_sigset_t sa_mask;
+ int sa_flags;
+};
+
+typedef __sanitizer_sigset_t __sanitizer_kernel_sigset_t;
+
+struct __sanitizer_kernel_sigaction_t {
+ union {
+ void (*handler)(int signo);
+ void (*sigaction)(int signo, void *info, void *ctx);
+ };
+ unsigned long sa_flags;
+ void (*sa_restorer)(void);
+ __sanitizer_kernel_sigset_t sa_mask;
+};
+
+extern const uptr sig_ign;
+extern const uptr sig_dfl;
+extern const uptr sig_err;
+extern const uptr sa_siginfo;
+
+extern int af_inet;
+extern int af_inet6;
+uptr __sanitizer_in_addr_sz(int af);
+
+struct __sanitizer_dl_phdr_info {
+#if SANITIZER_WORDSIZE == 64
+ u64 dlpi_addr;
+#else
+ u32 dlpi_addr;
+#endif
+ const char *dlpi_name;
+ const void *dlpi_phdr;
+#if SANITIZER_WORDSIZE == 64
+ u32 dlpi_phnum;
+#else
+ u16 dlpi_phnum;
+#endif
+};
+
+extern unsigned struct_ElfW_Phdr_sz;
+
+struct __sanitizer_addrinfo {
+ int ai_flags;
+ int ai_family;
+ int ai_socktype;
+ int ai_protocol;
+ unsigned ai_addrlen;
+ struct __sanitizer_sockaddr *ai_addr;
+ char *ai_canonname;
+ struct __sanitizer_addrinfo *ai_next;
+};
+
+struct __sanitizer_hostent {
+ char *h_name;
+ char **h_aliases;
+ int h_addrtype;
+ int h_length;
+ char **h_addr_list;
+};
+
+struct __sanitizer_pollfd {
+ int fd;
+ short events;
+ short revents;
+};
+
+typedef unsigned __sanitizer_nfds_t;
+
+struct __sanitizer_glob_t {
+ int gl_pathc;
+ int gl_matchc;
+ int gl_offs;
+ int gl_flags;
+ char **gl_pathv;
+ void **gl_statv;
+ int (*gl_errfunc)(const char *, int);
+ void (*gl_closedir)(void *dirp);
+ struct dirent *(*gl_readdir)(void *dirp);
+ void *(*gl_opendir)(const char *);
+ int (*gl_lstat)(const char *, void * /* struct stat* */);
+ int (*gl_stat)(const char *, void * /* struct stat* */);
+};
+
+extern int glob_nomatch;
+extern int glob_altdirfunc;
+
+extern unsigned path_max;
+
+typedef char __sanitizer_FILE;
+#define SANITIZER_HAS_STRUCT_FILE 0
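+// OpenBSD does not expose the internals of FILE, so it is modeled as an
+// opaque byte here; SANITIZER_HAS_STRUCT_FILE 0 signals to common code that
+// FILE's layout is not known on this platform (contrast the glibc
+// configuration, where _IO_read_ptr and friends are checked field by field).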
+
+extern int shmctl_ipc_stat;
+
+// These shmctl commands and structs are not available on OpenBSD; defining
+// them as -1 simplifies the generic code.
+#define struct_shminfo_sz -1
+#define struct_shm_info_sz -1
+#define shmctl_shm_stat -1
+#define shmctl_ipc_info -1
+#define shmctl_shm_info -1
+
+extern unsigned struct_utmp_sz;
+extern unsigned struct_utmpx_sz;
+
+extern int map_fixed;
+
+// ioctl arguments
+struct __sanitizer_ifconf {
+ int ifc_len;
+ union {
+ void *ifcu_req;
+ } ifc_ifcu;
+};
+
+extern const int si_SEGV_MAPERR;
+extern const int si_SEGV_ACCERR;
+} // namespace __sanitizer
+
+#define CHECK_TYPE_SIZE(TYPE) \
+ COMPILER_CHECK(sizeof(__sanitizer_##TYPE) == sizeof(TYPE))
+
+#define CHECK_SIZE_AND_OFFSET(CLASS, MEMBER) \
+ COMPILER_CHECK(sizeof(((__sanitizer_##CLASS *)NULL)->MEMBER) == \
+ sizeof(((CLASS *)NULL)->MEMBER)); \
+ COMPILER_CHECK(offsetof(__sanitizer_##CLASS, MEMBER) == \
+ offsetof(CLASS, MEMBER))
+
+// For sigaction, which is both a function and a struct, and thus requires an
+// explicit "struct" in the sizeof() expression.
+#define CHECK_STRUCT_SIZE_AND_OFFSET(CLASS, MEMBER) \
+ COMPILER_CHECK(sizeof(((struct __sanitizer_##CLASS *)NULL)->MEMBER) == \
+ sizeof(((struct CLASS *)NULL)->MEMBER)); \
+ COMPILER_CHECK(offsetof(struct __sanitizer_##CLASS, MEMBER) == \
+ offsetof(struct CLASS, MEMBER))
+
+#define SIGACTION_SYMNAME __sigaction14
+
+#endif // SANITIZER_OPENBSD
+
+#endif
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_platform_limits_posix.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_platform_limits_posix.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_platform_limits_posix.cc (revision 351984)
@@ -0,0 +1,1271 @@
+//===-- sanitizer_platform_limits_posix.cc --------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of Sanitizer common code.
+//
+// Sizes and layouts of platform-specific POSIX data structures.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+
+#if SANITIZER_LINUX || SANITIZER_MAC
+// Tests in this file assume that off_t-dependent data structures match the
+// libc ABI. For example, struct dirent here is what the readdir() function
+// (as exported from libc) returns, and not the user-facing "dirent", which
+// depends on the _FILE_OFFSET_BITS setting.
+// To get this "true" dirent definition, we undefine _FILE_OFFSET_BITS below.
+#ifdef _FILE_OFFSET_BITS
+#undef _FILE_OFFSET_BITS
+#endif
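+// For example (illustrative, glibc on 32-bit targets): building user code
+// with -D_FILE_OFFSET_BITS=64 redirects readdir() to readdir64() and gives
+// "struct dirent" a 64-bit d_ino/d_off layout, while the plain readdir symbol
+// exported from libc still fills the native layout that is checked here.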
+#include <arpa/inet.h>
+#include <dirent.h>
+#include <grp.h>
+#include <limits.h>
+#include <net/if.h>
+#include <netdb.h>
+#include <poll.h>
+#include <pthread.h>
+#include <pwd.h>
+#include <signal.h>
+#include <stddef.h>
+#include <sys/mman.h>
+#include <sys/resource.h>
+#include <sys/socket.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <sys/times.h>
+#include <sys/types.h>
+#include <sys/utsname.h>
+#include <termios.h>
+#include <time.h>
+#include <wchar.h>
+#include <regex.h>
+#if !SANITIZER_MAC
+#include <utmp.h>
+#endif
+
+#if !SANITIZER_IOS
+#include <net/route.h>
+#endif
+
+#if !SANITIZER_ANDROID
+#include <fstab.h>
+#include <sys/mount.h>
+#include <sys/timeb.h>
+#include <utmpx.h>
+#endif
+
+#if SANITIZER_LINUX
+#include <malloc.h>
+#include <mntent.h>
+#include <netinet/ether.h>
+#include <sys/sysinfo.h>
+#include <sys/vt.h>
+#include <linux/cdrom.h>
+#include <linux/fd.h>
+#include <linux/fs.h>
+#include <linux/hdreg.h>
+#include <linux/input.h>
+#include <linux/ioctl.h>
+#include <linux/soundcard.h>
+#include <linux/sysctl.h>
+#include <linux/utsname.h>
+#include <linux/posix_types.h>
+#include <net/if_arp.h>
+#endif
+
+#if SANITIZER_IOS
+#undef IOC_DIRMASK
+#endif
+
+#if SANITIZER_LINUX
+# include <utime.h>
+# include <sys/ptrace.h>
+# if defined(__mips64) || defined(__aarch64__) || defined(__arm__)
+# include <asm/ptrace.h>
+# ifdef __arm__
+typedef struct user_fpregs elf_fpregset_t;
+# define ARM_VFPREGS_SIZE_ASAN (32 * 8 /*fpregs*/ + 4 /*fpscr*/)
+# if !defined(ARM_VFPREGS_SIZE)
+# define ARM_VFPREGS_SIZE ARM_VFPREGS_SIZE_ASAN
+# endif
+# endif
+# endif
+# include <semaphore.h>
+#endif
+
+#if !SANITIZER_ANDROID
+#include <ifaddrs.h>
+#include <sys/ucontext.h>
+#include <wordexp.h>
+#endif
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+#include <glob.h>
+#include <obstack.h>
+#include <mqueue.h>
+#include <net/if_ppp.h>
+#include <netax25/ax25.h>
+#include <netipx/ipx.h>
+#include <netrom/netrom.h>
+#if HAVE_RPC_XDR_H
+# include <rpc/xdr.h>
+#endif
+#include <scsi/scsi.h>
+#include <sys/mtio.h>
+#include <sys/kd.h>
+#include <sys/shm.h>
+#include <sys/statvfs.h>
+#include <sys/timex.h>
+#if defined(__mips64)
+# include <sys/procfs.h>
+#endif
+#include <sys/user.h>
+#include <linux/cyclades.h>
+#include <linux/if_eql.h>
+#include <linux/if_plip.h>
+#include <linux/lp.h>
+#include <linux/mroute.h>
+#include <linux/mroute6.h>
+#include <linux/scc.h>
+#include <linux/serial.h>
+#include <sys/msg.h>
+#include <sys/ipc.h>
+#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
+
+#if SANITIZER_ANDROID
+#include <linux/kd.h>
+#include <linux/mtio.h>
+#include <linux/ppp_defs.h>
+#include <linux/if_ppp.h>
+#endif
+
+#if SANITIZER_LINUX
+#include <link.h>
+#include <sys/vfs.h>
+#include <sys/epoll.h>
+#include <linux/capability.h>
+#endif // SANITIZER_LINUX
+
+#if SANITIZER_MAC
+#include <net/ethernet.h>
+#include <sys/filio.h>
+#include <sys/sockio.h>
+#endif
+
+// Include these after system headers to avoid name clashes and ambiguities.
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_platform_limits_posix.h"
+
+namespace __sanitizer {
+ unsigned struct_utsname_sz = sizeof(struct utsname);
+ unsigned struct_stat_sz = sizeof(struct stat);
+#if !SANITIZER_IOS
+ unsigned struct_stat64_sz = sizeof(struct stat64);
+#endif // !SANITIZER_IOS
+ unsigned struct_rusage_sz = sizeof(struct rusage);
+ unsigned struct_tm_sz = sizeof(struct tm);
+ unsigned struct_passwd_sz = sizeof(struct passwd);
+ unsigned struct_group_sz = sizeof(struct group);
+ unsigned siginfo_t_sz = sizeof(siginfo_t);
+ unsigned struct_sigaction_sz = sizeof(struct sigaction);
+ unsigned struct_itimerval_sz = sizeof(struct itimerval);
+ unsigned pthread_t_sz = sizeof(pthread_t);
+ unsigned pthread_mutex_t_sz = sizeof(pthread_mutex_t);
+ unsigned pthread_cond_t_sz = sizeof(pthread_cond_t);
+ unsigned pid_t_sz = sizeof(pid_t);
+ unsigned timeval_sz = sizeof(timeval);
+ unsigned uid_t_sz = sizeof(uid_t);
+ unsigned gid_t_sz = sizeof(gid_t);
+ unsigned mbstate_t_sz = sizeof(mbstate_t);
+ unsigned sigset_t_sz = sizeof(sigset_t);
+ unsigned struct_timezone_sz = sizeof(struct timezone);
+ unsigned struct_tms_sz = sizeof(struct tms);
+ unsigned struct_sigevent_sz = sizeof(struct sigevent);
+ unsigned struct_sched_param_sz = sizeof(struct sched_param);
+ unsigned struct_regex_sz = sizeof(regex_t);
+ unsigned struct_regmatch_sz = sizeof(regmatch_t);
+
+#if SANITIZER_MAC && !SANITIZER_IOS
+ unsigned struct_statfs64_sz = sizeof(struct statfs64);
+#endif // SANITIZER_MAC && !SANITIZER_IOS
+
+#if !SANITIZER_ANDROID
+ unsigned struct_fstab_sz = sizeof(struct fstab);
+ unsigned struct_statfs_sz = sizeof(struct statfs);
+ unsigned struct_sockaddr_sz = sizeof(struct sockaddr);
+ unsigned ucontext_t_sz = sizeof(ucontext_t);
+#endif // !SANITIZER_ANDROID
+
+#if SANITIZER_LINUX
+ unsigned struct_epoll_event_sz = sizeof(struct epoll_event);
+ unsigned struct_sysinfo_sz = sizeof(struct sysinfo);
+ unsigned __user_cap_header_struct_sz =
+ sizeof(struct __user_cap_header_struct);
+ unsigned __user_cap_data_struct_sz = sizeof(struct __user_cap_data_struct);
+ unsigned struct_new_utsname_sz = sizeof(struct new_utsname);
+ unsigned struct_old_utsname_sz = sizeof(struct old_utsname);
+ unsigned struct_oldold_utsname_sz = sizeof(struct oldold_utsname);
+#endif // SANITIZER_LINUX
+
+#if SANITIZER_LINUX
+ unsigned struct_rlimit_sz = sizeof(struct rlimit);
+ unsigned struct_timespec_sz = sizeof(struct timespec);
+ unsigned struct_utimbuf_sz = sizeof(struct utimbuf);
+ unsigned struct_itimerspec_sz = sizeof(struct itimerspec);
+#endif // SANITIZER_LINUX
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+ // Use the pre-computed size of struct ustat to avoid <sys/ustat.h>, which
+ // was removed in glibc 2.28.
+#if defined(__aarch64__) || defined(__s390x__) || defined (__mips64) \
+ || defined(__powerpc64__) || defined(__arch64__) || defined(__sparcv9) \
+ || defined(__x86_64__)
+#define SIZEOF_STRUCT_USTAT 32
+#elif defined(__arm__) || defined(__i386__) || defined(__mips__) \
+ || defined(__powerpc__) || defined(__s390__) || defined(__sparc__)
+#define SIZEOF_STRUCT_USTAT 20
+#else
+#error Unknown size of struct ustat
+#endif
+ unsigned struct_ustat_sz = SIZEOF_STRUCT_USTAT;
+ unsigned struct_rlimit64_sz = sizeof(struct rlimit64);
+ unsigned struct_statvfs64_sz = sizeof(struct statvfs64);
+#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+ unsigned struct_timex_sz = sizeof(struct timex);
+ unsigned struct_msqid_ds_sz = sizeof(struct msqid_ds);
+ unsigned struct_mq_attr_sz = sizeof(struct mq_attr);
+ unsigned struct_statvfs_sz = sizeof(struct statvfs);
+#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
+
+ const uptr sig_ign = (uptr)SIG_IGN;
+ const uptr sig_dfl = (uptr)SIG_DFL;
+ const uptr sig_err = (uptr)SIG_ERR;
+ const uptr sa_siginfo = (uptr)SA_SIGINFO;
+
+#if SANITIZER_LINUX
+ int e_tabsz = (int)E_TABSZ;
+#endif
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+ unsigned struct_shminfo_sz = sizeof(struct shminfo);
+ unsigned struct_shm_info_sz = sizeof(struct shm_info);
+ int shmctl_ipc_stat = (int)IPC_STAT;
+ int shmctl_ipc_info = (int)IPC_INFO;
+ int shmctl_shm_info = (int)SHM_INFO;
+ int shmctl_shm_stat = (int)SHM_STAT;
+#endif
+
+#if !SANITIZER_MAC && !SANITIZER_FREEBSD
+ unsigned struct_utmp_sz = sizeof(struct utmp);
+#endif
+#if !SANITIZER_ANDROID
+ unsigned struct_utmpx_sz = sizeof(struct utmpx);
+#endif
+
+ int map_fixed = MAP_FIXED;
+
+ int af_inet = (int)AF_INET;
+ int af_inet6 = (int)AF_INET6;
+
+ uptr __sanitizer_in_addr_sz(int af) {
+ if (af == AF_INET)
+ return sizeof(struct in_addr);
+ else if (af == AF_INET6)
+ return sizeof(struct in6_addr);
+ else
+ return 0;
+ }
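+ // For example: __sanitizer_in_addr_sz(AF_INET) == sizeof(struct in_addr)
+ // (4 bytes) and __sanitizer_in_addr_sz(AF_INET6) == sizeof(struct in6_addr)
+ // (16 bytes); unknown families yield 0 so callers can skip the access check.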
+
+#if SANITIZER_LINUX
+unsigned struct_ElfW_Phdr_sz = sizeof(ElfW(Phdr));
+#elif SANITIZER_FREEBSD
+unsigned struct_ElfW_Phdr_sz = sizeof(Elf_Phdr);
+#endif
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+ int glob_nomatch = GLOB_NOMATCH;
+ int glob_altdirfunc = GLOB_ALTDIRFUNC;
+#endif
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID && \
+ (defined(__i386) || defined(__x86_64) || defined(__mips64) || \
+ defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \
+ defined(__s390__))
+#if defined(__mips64) || defined(__powerpc64__) || defined(__arm__)
+ unsigned struct_user_regs_struct_sz = sizeof(struct pt_regs);
+ unsigned struct_user_fpregs_struct_sz = sizeof(elf_fpregset_t);
+#elif defined(__aarch64__)
+ unsigned struct_user_regs_struct_sz = sizeof(struct user_pt_regs);
+ unsigned struct_user_fpregs_struct_sz = sizeof(struct user_fpsimd_state);
+#elif defined(__s390__)
+ unsigned struct_user_regs_struct_sz = sizeof(struct _user_regs_struct);
+ unsigned struct_user_fpregs_struct_sz = sizeof(struct _user_fpregs_struct);
+#else
+ unsigned struct_user_regs_struct_sz = sizeof(struct user_regs_struct);
+ unsigned struct_user_fpregs_struct_sz = sizeof(struct user_fpregs_struct);
+#endif // __mips64 || __powerpc64__ || __aarch64__
+#if defined(__x86_64) || defined(__mips64) || defined(__powerpc64__) || \
+ defined(__aarch64__) || defined(__arm__) || defined(__s390__)
+ unsigned struct_user_fpxregs_struct_sz = 0;
+#else
+ unsigned struct_user_fpxregs_struct_sz = sizeof(struct user_fpxregs_struct);
+#endif // __x86_64 || __mips64 || __powerpc64__ || __aarch64__ || __arm__
+// || __s390__
+#ifdef __arm__
+ unsigned struct_user_vfpregs_struct_sz = ARM_VFPREGS_SIZE;
+#else
+ unsigned struct_user_vfpregs_struct_sz = 0;
+#endif
+
+ int ptrace_peektext = PTRACE_PEEKTEXT;
+ int ptrace_peekdata = PTRACE_PEEKDATA;
+ int ptrace_peekuser = PTRACE_PEEKUSER;
+#if (defined(PTRACE_GETREGS) && defined(PTRACE_SETREGS)) || \
+ (defined(PT_GETREGS) && defined(PT_SETREGS))
+ int ptrace_getregs = PTRACE_GETREGS;
+ int ptrace_setregs = PTRACE_SETREGS;
+#else
+ int ptrace_getregs = -1;
+ int ptrace_setregs = -1;
+#endif
+#if (defined(PTRACE_GETFPREGS) && defined(PTRACE_SETFPREGS)) || \
+ (defined(PT_GETFPREGS) && defined(PT_SETFPREGS))
+ int ptrace_getfpregs = PTRACE_GETFPREGS;
+ int ptrace_setfpregs = PTRACE_SETFPREGS;
+#else
+ int ptrace_getfpregs = -1;
+ int ptrace_setfpregs = -1;
+#endif
+#if (defined(PTRACE_GETFPXREGS) && defined(PTRACE_SETFPXREGS)) || \
+ (defined(PT_GETFPXREGS) && defined(PT_SETFPXREGS))
+ int ptrace_getfpxregs = PTRACE_GETFPXREGS;
+ int ptrace_setfpxregs = PTRACE_SETFPXREGS;
+#else
+ int ptrace_getfpxregs = -1;
+ int ptrace_setfpxregs = -1;
+#endif // PTRACE_GETFPXREGS/PTRACE_SETFPXREGS
+#if defined(PTRACE_GETVFPREGS) && defined(PTRACE_SETVFPREGS)
+ int ptrace_getvfpregs = PTRACE_GETVFPREGS;
+ int ptrace_setvfpregs = PTRACE_SETVFPREGS;
+#else
+ int ptrace_getvfpregs = -1;
+ int ptrace_setvfpregs = -1;
+#endif
+ int ptrace_geteventmsg = PTRACE_GETEVENTMSG;
+#if (defined(PTRACE_GETSIGINFO) && defined(PTRACE_SETSIGINFO)) || \
+ (defined(PT_GETSIGINFO) && defined(PT_SETSIGINFO))
+ int ptrace_getsiginfo = PTRACE_GETSIGINFO;
+ int ptrace_setsiginfo = PTRACE_SETSIGINFO;
+#else
+ int ptrace_getsiginfo = -1;
+ int ptrace_setsiginfo = -1;
+#endif // PTRACE_GETSIGINFO/PTRACE_SETSIGINFO
+#if defined(PTRACE_GETREGSET) && defined(PTRACE_SETREGSET)
+ int ptrace_getregset = PTRACE_GETREGSET;
+ int ptrace_setregset = PTRACE_SETREGSET;
+#else
+ int ptrace_getregset = -1;
+ int ptrace_setregset = -1;
+#endif // PTRACE_GETREGSET/PTRACE_SETREGSET
+#endif
+
+ unsigned path_max = PATH_MAX;
+
+ // ioctl arguments
+ unsigned struct_ifreq_sz = sizeof(struct ifreq);
+ unsigned struct_termios_sz = sizeof(struct termios);
+ unsigned struct_winsize_sz = sizeof(struct winsize);
+
+#if SANITIZER_LINUX
+ unsigned struct_arpreq_sz = sizeof(struct arpreq);
+ unsigned struct_cdrom_msf_sz = sizeof(struct cdrom_msf);
+ unsigned struct_cdrom_multisession_sz = sizeof(struct cdrom_multisession);
+ unsigned struct_cdrom_read_audio_sz = sizeof(struct cdrom_read_audio);
+ unsigned struct_cdrom_subchnl_sz = sizeof(struct cdrom_subchnl);
+ unsigned struct_cdrom_ti_sz = sizeof(struct cdrom_ti);
+ unsigned struct_cdrom_tocentry_sz = sizeof(struct cdrom_tocentry);
+ unsigned struct_cdrom_tochdr_sz = sizeof(struct cdrom_tochdr);
+ unsigned struct_cdrom_volctrl_sz = sizeof(struct cdrom_volctrl);
+ unsigned struct_ff_effect_sz = sizeof(struct ff_effect);
+ unsigned struct_floppy_drive_params_sz = sizeof(struct floppy_drive_params);
+ unsigned struct_floppy_drive_struct_sz = sizeof(struct floppy_drive_struct);
+ unsigned struct_floppy_fdc_state_sz = sizeof(struct floppy_fdc_state);
+ unsigned struct_floppy_max_errors_sz = sizeof(struct floppy_max_errors);
+ unsigned struct_floppy_raw_cmd_sz = sizeof(struct floppy_raw_cmd);
+ unsigned struct_floppy_struct_sz = sizeof(struct floppy_struct);
+ unsigned struct_floppy_write_errors_sz = sizeof(struct floppy_write_errors);
+ unsigned struct_format_descr_sz = sizeof(struct format_descr);
+ unsigned struct_hd_driveid_sz = sizeof(struct hd_driveid);
+ unsigned struct_hd_geometry_sz = sizeof(struct hd_geometry);
+ unsigned struct_input_absinfo_sz = sizeof(struct input_absinfo);
+ unsigned struct_input_id_sz = sizeof(struct input_id);
+ unsigned struct_mtpos_sz = sizeof(struct mtpos);
+ unsigned struct_rtentry_sz = sizeof(struct rtentry);
+ unsigned struct_termio_sz = sizeof(struct termio);
+ unsigned struct_vt_consize_sz = sizeof(struct vt_consize);
+ unsigned struct_vt_sizes_sz = sizeof(struct vt_sizes);
+ unsigned struct_vt_stat_sz = sizeof(struct vt_stat);
+#endif // SANITIZER_LINUX
+
+#if SANITIZER_LINUX
+#if SOUND_VERSION >= 0x040000
+ unsigned struct_copr_buffer_sz = 0;
+ unsigned struct_copr_debug_buf_sz = 0;
+ unsigned struct_copr_msg_sz = 0;
+#else
+ unsigned struct_copr_buffer_sz = sizeof(struct copr_buffer);
+ unsigned struct_copr_debug_buf_sz = sizeof(struct copr_debug_buf);
+ unsigned struct_copr_msg_sz = sizeof(struct copr_msg);
+#endif
+ unsigned struct_midi_info_sz = sizeof(struct midi_info);
+ unsigned struct_mtget_sz = sizeof(struct mtget);
+ unsigned struct_mtop_sz = sizeof(struct mtop);
+ unsigned struct_sbi_instrument_sz = sizeof(struct sbi_instrument);
+ unsigned struct_seq_event_rec_sz = sizeof(struct seq_event_rec);
+ unsigned struct_synth_info_sz = sizeof(struct synth_info);
+ unsigned struct_vt_mode_sz = sizeof(struct vt_mode);
+#endif // SANITIZER_LINUX
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+ unsigned struct_ax25_parms_struct_sz = sizeof(struct ax25_parms_struct);
+ unsigned struct_cyclades_monitor_sz = sizeof(struct cyclades_monitor);
+#if EV_VERSION > (0x010000)
+ unsigned struct_input_keymap_entry_sz = sizeof(struct input_keymap_entry);
+#else
+ unsigned struct_input_keymap_entry_sz = 0;
+#endif
+ unsigned struct_ipx_config_data_sz = sizeof(struct ipx_config_data);
+ unsigned struct_kbdiacrs_sz = sizeof(struct kbdiacrs);
+ unsigned struct_kbentry_sz = sizeof(struct kbentry);
+ unsigned struct_kbkeycode_sz = sizeof(struct kbkeycode);
+ unsigned struct_kbsentry_sz = sizeof(struct kbsentry);
+ unsigned struct_mtconfiginfo_sz = sizeof(struct mtconfiginfo);
+ unsigned struct_nr_parms_struct_sz = sizeof(struct nr_parms_struct);
+ unsigned struct_scc_modem_sz = sizeof(struct scc_modem);
+ unsigned struct_scc_stat_sz = sizeof(struct scc_stat);
+ unsigned struct_serial_multiport_struct_sz
+ = sizeof(struct serial_multiport_struct);
+ unsigned struct_serial_struct_sz = sizeof(struct serial_struct);
+ unsigned struct_sockaddr_ax25_sz = sizeof(struct sockaddr_ax25);
+ unsigned struct_unimapdesc_sz = sizeof(struct unimapdesc);
+ unsigned struct_unimapinit_sz = sizeof(struct unimapinit);
+#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+ unsigned struct_audio_buf_info_sz = sizeof(struct audio_buf_info);
+ unsigned struct_ppp_stats_sz = sizeof(struct ppp_stats);
+#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
+
+#if !SANITIZER_ANDROID && !SANITIZER_MAC
+ unsigned struct_sioc_sg_req_sz = sizeof(struct sioc_sg_req);
+ unsigned struct_sioc_vif_req_sz = sizeof(struct sioc_vif_req);
+#endif
+
+ const unsigned long __sanitizer_bufsiz = BUFSIZ;
+
+ const unsigned IOCTL_NOT_PRESENT = 0;
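+ // When a configuration lacks a given ioctl (see e.g. the SOUND_VERSION
+ // branches below), its IOCTL_* global is set to IOCTL_NOT_PRESENT so that
+ // generic tables can recognize and skip it, roughly (a sketch of the
+ // intended use, not code from this file):
+ //   if (IOCTL_SNDCTL_COPR_HALT != IOCTL_NOT_PRESENT) { /* handle it */ }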
+
+ unsigned IOCTL_FIOASYNC = FIOASYNC;
+ unsigned IOCTL_FIOCLEX = FIOCLEX;
+ unsigned IOCTL_FIOGETOWN = FIOGETOWN;
+ unsigned IOCTL_FIONBIO = FIONBIO;
+ unsigned IOCTL_FIONCLEX = FIONCLEX;
+ unsigned IOCTL_FIOSETOWN = FIOSETOWN;
+ unsigned IOCTL_SIOCADDMULTI = SIOCADDMULTI;
+ unsigned IOCTL_SIOCATMARK = SIOCATMARK;
+ unsigned IOCTL_SIOCDELMULTI = SIOCDELMULTI;
+ unsigned IOCTL_SIOCGIFADDR = SIOCGIFADDR;
+ unsigned IOCTL_SIOCGIFBRDADDR = SIOCGIFBRDADDR;
+ unsigned IOCTL_SIOCGIFCONF = SIOCGIFCONF;
+ unsigned IOCTL_SIOCGIFDSTADDR = SIOCGIFDSTADDR;
+ unsigned IOCTL_SIOCGIFFLAGS = SIOCGIFFLAGS;
+ unsigned IOCTL_SIOCGIFMETRIC = SIOCGIFMETRIC;
+ unsigned IOCTL_SIOCGIFMTU = SIOCGIFMTU;
+ unsigned IOCTL_SIOCGIFNETMASK = SIOCGIFNETMASK;
+ unsigned IOCTL_SIOCGPGRP = SIOCGPGRP;
+ unsigned IOCTL_SIOCSIFADDR = SIOCSIFADDR;
+ unsigned IOCTL_SIOCSIFBRDADDR = SIOCSIFBRDADDR;
+ unsigned IOCTL_SIOCSIFDSTADDR = SIOCSIFDSTADDR;
+ unsigned IOCTL_SIOCSIFFLAGS = SIOCSIFFLAGS;
+ unsigned IOCTL_SIOCSIFMETRIC = SIOCSIFMETRIC;
+ unsigned IOCTL_SIOCSIFMTU = SIOCSIFMTU;
+ unsigned IOCTL_SIOCSIFNETMASK = SIOCSIFNETMASK;
+ unsigned IOCTL_SIOCSPGRP = SIOCSPGRP;
+ unsigned IOCTL_TIOCCONS = TIOCCONS;
+ unsigned IOCTL_TIOCEXCL = TIOCEXCL;
+ unsigned IOCTL_TIOCGETD = TIOCGETD;
+ unsigned IOCTL_TIOCGPGRP = TIOCGPGRP;
+ unsigned IOCTL_TIOCGWINSZ = TIOCGWINSZ;
+ unsigned IOCTL_TIOCMBIC = TIOCMBIC;
+ unsigned IOCTL_TIOCMBIS = TIOCMBIS;
+ unsigned IOCTL_TIOCMGET = TIOCMGET;
+ unsigned IOCTL_TIOCMSET = TIOCMSET;
+ unsigned IOCTL_TIOCNOTTY = TIOCNOTTY;
+ unsigned IOCTL_TIOCNXCL = TIOCNXCL;
+ unsigned IOCTL_TIOCOUTQ = TIOCOUTQ;
+ unsigned IOCTL_TIOCPKT = TIOCPKT;
+ unsigned IOCTL_TIOCSCTTY = TIOCSCTTY;
+ unsigned IOCTL_TIOCSETD = TIOCSETD;
+ unsigned IOCTL_TIOCSPGRP = TIOCSPGRP;
+ unsigned IOCTL_TIOCSTI = TIOCSTI;
+ unsigned IOCTL_TIOCSWINSZ = TIOCSWINSZ;
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+ unsigned IOCTL_SIOCGETSGCNT = SIOCGETSGCNT;
+ unsigned IOCTL_SIOCGETVIFCNT = SIOCGETVIFCNT;
+#endif
+
+#if SANITIZER_LINUX
+ unsigned IOCTL_EVIOCGABS = EVIOCGABS(0);
+ unsigned IOCTL_EVIOCGBIT = EVIOCGBIT(0, 0);
+ unsigned IOCTL_EVIOCGEFFECTS = EVIOCGEFFECTS;
+ unsigned IOCTL_EVIOCGID = EVIOCGID;
+ unsigned IOCTL_EVIOCGKEY = EVIOCGKEY(0);
+ unsigned IOCTL_EVIOCGKEYCODE = EVIOCGKEYCODE;
+ unsigned IOCTL_EVIOCGLED = EVIOCGLED(0);
+ unsigned IOCTL_EVIOCGNAME = EVIOCGNAME(0);
+ unsigned IOCTL_EVIOCGPHYS = EVIOCGPHYS(0);
+ unsigned IOCTL_EVIOCGRAB = EVIOCGRAB;
+ unsigned IOCTL_EVIOCGREP = EVIOCGREP;
+ unsigned IOCTL_EVIOCGSND = EVIOCGSND(0);
+ unsigned IOCTL_EVIOCGSW = EVIOCGSW(0);
+ unsigned IOCTL_EVIOCGUNIQ = EVIOCGUNIQ(0);
+ unsigned IOCTL_EVIOCGVERSION = EVIOCGVERSION;
+ unsigned IOCTL_EVIOCRMFF = EVIOCRMFF;
+ unsigned IOCTL_EVIOCSABS = EVIOCSABS(0);
+ unsigned IOCTL_EVIOCSFF = EVIOCSFF;
+ unsigned IOCTL_EVIOCSKEYCODE = EVIOCSKEYCODE;
+ unsigned IOCTL_EVIOCSREP = EVIOCSREP;
+ unsigned IOCTL_BLKFLSBUF = BLKFLSBUF;
+ unsigned IOCTL_BLKGETSIZE = BLKGETSIZE;
+ unsigned IOCTL_BLKRAGET = BLKRAGET;
+ unsigned IOCTL_BLKRASET = BLKRASET;
+ unsigned IOCTL_BLKROGET = BLKROGET;
+ unsigned IOCTL_BLKROSET = BLKROSET;
+ unsigned IOCTL_BLKRRPART = BLKRRPART;
+ unsigned IOCTL_CDROMAUDIOBUFSIZ = CDROMAUDIOBUFSIZ;
+ unsigned IOCTL_CDROMEJECT = CDROMEJECT;
+ unsigned IOCTL_CDROMEJECT_SW = CDROMEJECT_SW;
+ unsigned IOCTL_CDROMMULTISESSION = CDROMMULTISESSION;
+ unsigned IOCTL_CDROMPAUSE = CDROMPAUSE;
+ unsigned IOCTL_CDROMPLAYMSF = CDROMPLAYMSF;
+ unsigned IOCTL_CDROMPLAYTRKIND = CDROMPLAYTRKIND;
+ unsigned IOCTL_CDROMREADAUDIO = CDROMREADAUDIO;
+ unsigned IOCTL_CDROMREADCOOKED = CDROMREADCOOKED;
+ unsigned IOCTL_CDROMREADMODE1 = CDROMREADMODE1;
+ unsigned IOCTL_CDROMREADMODE2 = CDROMREADMODE2;
+ unsigned IOCTL_CDROMREADRAW = CDROMREADRAW;
+ unsigned IOCTL_CDROMREADTOCENTRY = CDROMREADTOCENTRY;
+ unsigned IOCTL_CDROMREADTOCHDR = CDROMREADTOCHDR;
+ unsigned IOCTL_CDROMRESET = CDROMRESET;
+ unsigned IOCTL_CDROMRESUME = CDROMRESUME;
+ unsigned IOCTL_CDROMSEEK = CDROMSEEK;
+ unsigned IOCTL_CDROMSTART = CDROMSTART;
+ unsigned IOCTL_CDROMSTOP = CDROMSTOP;
+ unsigned IOCTL_CDROMSUBCHNL = CDROMSUBCHNL;
+ unsigned IOCTL_CDROMVOLCTRL = CDROMVOLCTRL;
+ unsigned IOCTL_CDROMVOLREAD = CDROMVOLREAD;
+ unsigned IOCTL_CDROM_GET_UPC = CDROM_GET_UPC;
+ unsigned IOCTL_FDCLRPRM = FDCLRPRM;
+ unsigned IOCTL_FDDEFPRM = FDDEFPRM;
+ unsigned IOCTL_FDFLUSH = FDFLUSH;
+ unsigned IOCTL_FDFMTBEG = FDFMTBEG;
+ unsigned IOCTL_FDFMTEND = FDFMTEND;
+ unsigned IOCTL_FDFMTTRK = FDFMTTRK;
+ unsigned IOCTL_FDGETDRVPRM = FDGETDRVPRM;
+ unsigned IOCTL_FDGETDRVSTAT = FDGETDRVSTAT;
+ unsigned IOCTL_FDGETDRVTYP = FDGETDRVTYP;
+ unsigned IOCTL_FDGETFDCSTAT = FDGETFDCSTAT;
+ unsigned IOCTL_FDGETMAXERRS = FDGETMAXERRS;
+ unsigned IOCTL_FDGETPRM = FDGETPRM;
+ unsigned IOCTL_FDMSGOFF = FDMSGOFF;
+ unsigned IOCTL_FDMSGON = FDMSGON;
+ unsigned IOCTL_FDPOLLDRVSTAT = FDPOLLDRVSTAT;
+ unsigned IOCTL_FDRAWCMD = FDRAWCMD;
+ unsigned IOCTL_FDRESET = FDRESET;
+ unsigned IOCTL_FDSETDRVPRM = FDSETDRVPRM;
+ unsigned IOCTL_FDSETEMSGTRESH = FDSETEMSGTRESH;
+ unsigned IOCTL_FDSETMAXERRS = FDSETMAXERRS;
+ unsigned IOCTL_FDSETPRM = FDSETPRM;
+ unsigned IOCTL_FDTWADDLE = FDTWADDLE;
+ unsigned IOCTL_FDWERRORCLR = FDWERRORCLR;
+ unsigned IOCTL_FDWERRORGET = FDWERRORGET;
+ unsigned IOCTL_HDIO_DRIVE_CMD = HDIO_DRIVE_CMD;
+ unsigned IOCTL_HDIO_GETGEO = HDIO_GETGEO;
+ unsigned IOCTL_HDIO_GET_32BIT = HDIO_GET_32BIT;
+ unsigned IOCTL_HDIO_GET_DMA = HDIO_GET_DMA;
+ unsigned IOCTL_HDIO_GET_IDENTITY = HDIO_GET_IDENTITY;
+ unsigned IOCTL_HDIO_GET_KEEPSETTINGS = HDIO_GET_KEEPSETTINGS;
+ unsigned IOCTL_HDIO_GET_MULTCOUNT = HDIO_GET_MULTCOUNT;
+ unsigned IOCTL_HDIO_GET_NOWERR = HDIO_GET_NOWERR;
+ unsigned IOCTL_HDIO_GET_UNMASKINTR = HDIO_GET_UNMASKINTR;
+ unsigned IOCTL_HDIO_SET_32BIT = HDIO_SET_32BIT;
+ unsigned IOCTL_HDIO_SET_DMA = HDIO_SET_DMA;
+ unsigned IOCTL_HDIO_SET_KEEPSETTINGS = HDIO_SET_KEEPSETTINGS;
+ unsigned IOCTL_HDIO_SET_MULTCOUNT = HDIO_SET_MULTCOUNT;
+ unsigned IOCTL_HDIO_SET_NOWERR = HDIO_SET_NOWERR;
+ unsigned IOCTL_HDIO_SET_UNMASKINTR = HDIO_SET_UNMASKINTR;
+ unsigned IOCTL_MTIOCPOS = MTIOCPOS;
+ unsigned IOCTL_PPPIOCGASYNCMAP = PPPIOCGASYNCMAP;
+ unsigned IOCTL_PPPIOCGDEBUG = PPPIOCGDEBUG;
+ unsigned IOCTL_PPPIOCGFLAGS = PPPIOCGFLAGS;
+ unsigned IOCTL_PPPIOCGUNIT = PPPIOCGUNIT;
+ unsigned IOCTL_PPPIOCGXASYNCMAP = PPPIOCGXASYNCMAP;
+ unsigned IOCTL_PPPIOCSASYNCMAP = PPPIOCSASYNCMAP;
+ unsigned IOCTL_PPPIOCSDEBUG = PPPIOCSDEBUG;
+ unsigned IOCTL_PPPIOCSFLAGS = PPPIOCSFLAGS;
+ unsigned IOCTL_PPPIOCSMAXCID = PPPIOCSMAXCID;
+ unsigned IOCTL_PPPIOCSMRU = PPPIOCSMRU;
+ unsigned IOCTL_PPPIOCSXASYNCMAP = PPPIOCSXASYNCMAP;
+ unsigned IOCTL_SIOCADDRT = SIOCADDRT;
+ unsigned IOCTL_SIOCDARP = SIOCDARP;
+ unsigned IOCTL_SIOCDELRT = SIOCDELRT;
+ unsigned IOCTL_SIOCDRARP = SIOCDRARP;
+ unsigned IOCTL_SIOCGARP = SIOCGARP;
+ unsigned IOCTL_SIOCGIFENCAP = SIOCGIFENCAP;
+ unsigned IOCTL_SIOCGIFHWADDR = SIOCGIFHWADDR;
+ unsigned IOCTL_SIOCGIFMAP = SIOCGIFMAP;
+ unsigned IOCTL_SIOCGIFMEM = SIOCGIFMEM;
+ unsigned IOCTL_SIOCGIFNAME = SIOCGIFNAME;
+ unsigned IOCTL_SIOCGIFSLAVE = SIOCGIFSLAVE;
+ unsigned IOCTL_SIOCGRARP = SIOCGRARP;
+ unsigned IOCTL_SIOCGSTAMP = SIOCGSTAMP;
+ unsigned IOCTL_SIOCSARP = SIOCSARP;
+ unsigned IOCTL_SIOCSIFENCAP = SIOCSIFENCAP;
+ unsigned IOCTL_SIOCSIFHWADDR = SIOCSIFHWADDR;
+ unsigned IOCTL_SIOCSIFLINK = SIOCSIFLINK;
+ unsigned IOCTL_SIOCSIFMAP = SIOCSIFMAP;
+ unsigned IOCTL_SIOCSIFMEM = SIOCSIFMEM;
+ unsigned IOCTL_SIOCSIFSLAVE = SIOCSIFSLAVE;
+ unsigned IOCTL_SIOCSRARP = SIOCSRARP;
+# if SOUND_VERSION >= 0x040000
+ unsigned IOCTL_SNDCTL_COPR_HALT = IOCTL_NOT_PRESENT;
+ unsigned IOCTL_SNDCTL_COPR_LOAD = IOCTL_NOT_PRESENT;
+ unsigned IOCTL_SNDCTL_COPR_RCODE = IOCTL_NOT_PRESENT;
+ unsigned IOCTL_SNDCTL_COPR_RCVMSG = IOCTL_NOT_PRESENT;
+ unsigned IOCTL_SNDCTL_COPR_RDATA = IOCTL_NOT_PRESENT;
+ unsigned IOCTL_SNDCTL_COPR_RESET = IOCTL_NOT_PRESENT;
+ unsigned IOCTL_SNDCTL_COPR_RUN = IOCTL_NOT_PRESENT;
+ unsigned IOCTL_SNDCTL_COPR_SENDMSG = IOCTL_NOT_PRESENT;
+ unsigned IOCTL_SNDCTL_COPR_WCODE = IOCTL_NOT_PRESENT;
+ unsigned IOCTL_SNDCTL_COPR_WDATA = IOCTL_NOT_PRESENT;
+ unsigned IOCTL_SOUND_PCM_READ_BITS = IOCTL_NOT_PRESENT;
+ unsigned IOCTL_SOUND_PCM_READ_CHANNELS = IOCTL_NOT_PRESENT;
+ unsigned IOCTL_SOUND_PCM_READ_FILTER = IOCTL_NOT_PRESENT;
+ unsigned IOCTL_SOUND_PCM_READ_RATE = IOCTL_NOT_PRESENT;
+ unsigned IOCTL_SOUND_PCM_WRITE_CHANNELS = IOCTL_NOT_PRESENT;
+ unsigned IOCTL_SOUND_PCM_WRITE_FILTER = IOCTL_NOT_PRESENT;
+# else // SOUND_VERSION
+ unsigned IOCTL_SNDCTL_COPR_HALT = SNDCTL_COPR_HALT;
+ unsigned IOCTL_SNDCTL_COPR_LOAD = SNDCTL_COPR_LOAD;
+ unsigned IOCTL_SNDCTL_COPR_RCODE = SNDCTL_COPR_RCODE;
+ unsigned IOCTL_SNDCTL_COPR_RCVMSG = SNDCTL_COPR_RCVMSG;
+ unsigned IOCTL_SNDCTL_COPR_RDATA = SNDCTL_COPR_RDATA;
+ unsigned IOCTL_SNDCTL_COPR_RESET = SNDCTL_COPR_RESET;
+ unsigned IOCTL_SNDCTL_COPR_RUN = SNDCTL_COPR_RUN;
+ unsigned IOCTL_SNDCTL_COPR_SENDMSG = SNDCTL_COPR_SENDMSG;
+ unsigned IOCTL_SNDCTL_COPR_WCODE = SNDCTL_COPR_WCODE;
+ unsigned IOCTL_SNDCTL_COPR_WDATA = SNDCTL_COPR_WDATA;
+ unsigned IOCTL_SOUND_PCM_READ_BITS = SOUND_PCM_READ_BITS;
+ unsigned IOCTL_SOUND_PCM_READ_CHANNELS = SOUND_PCM_READ_CHANNELS;
+ unsigned IOCTL_SOUND_PCM_READ_FILTER = SOUND_PCM_READ_FILTER;
+ unsigned IOCTL_SOUND_PCM_READ_RATE = SOUND_PCM_READ_RATE;
+ unsigned IOCTL_SOUND_PCM_WRITE_CHANNELS = SOUND_PCM_WRITE_CHANNELS;
+ unsigned IOCTL_SOUND_PCM_WRITE_FILTER = SOUND_PCM_WRITE_FILTER;
+#endif // SOUND_VERSION
+ unsigned IOCTL_TCFLSH = TCFLSH;
+ unsigned IOCTL_TCGETA = TCGETA;
+ unsigned IOCTL_TCGETS = TCGETS;
+ unsigned IOCTL_TCSBRK = TCSBRK;
+ unsigned IOCTL_TCSBRKP = TCSBRKP;
+ unsigned IOCTL_TCSETA = TCSETA;
+ unsigned IOCTL_TCSETAF = TCSETAF;
+ unsigned IOCTL_TCSETAW = TCSETAW;
+ unsigned IOCTL_TCSETS = TCSETS;
+ unsigned IOCTL_TCSETSF = TCSETSF;
+ unsigned IOCTL_TCSETSW = TCSETSW;
+ unsigned IOCTL_TCXONC = TCXONC;
+ unsigned IOCTL_TIOCGLCKTRMIOS = TIOCGLCKTRMIOS;
+ unsigned IOCTL_TIOCGSOFTCAR = TIOCGSOFTCAR;
+ unsigned IOCTL_TIOCINQ = TIOCINQ;
+ unsigned IOCTL_TIOCLINUX = TIOCLINUX;
+ unsigned IOCTL_TIOCSERCONFIG = TIOCSERCONFIG;
+ unsigned IOCTL_TIOCSERGETLSR = TIOCSERGETLSR;
+ unsigned IOCTL_TIOCSERGWILD = TIOCSERGWILD;
+ unsigned IOCTL_TIOCSERSWILD = TIOCSERSWILD;
+ unsigned IOCTL_TIOCSLCKTRMIOS = TIOCSLCKTRMIOS;
+ unsigned IOCTL_TIOCSSOFTCAR = TIOCSSOFTCAR;
+ unsigned IOCTL_VT_DISALLOCATE = VT_DISALLOCATE;
+ unsigned IOCTL_VT_GETSTATE = VT_GETSTATE;
+ unsigned IOCTL_VT_RESIZE = VT_RESIZE;
+ unsigned IOCTL_VT_RESIZEX = VT_RESIZEX;
+ unsigned IOCTL_VT_SENDSIG = VT_SENDSIG;
+ unsigned IOCTL_MTIOCGET = MTIOCGET;
+ unsigned IOCTL_MTIOCTOP = MTIOCTOP;
+ unsigned IOCTL_SNDCTL_DSP_GETBLKSIZE = SNDCTL_DSP_GETBLKSIZE;
+ unsigned IOCTL_SNDCTL_DSP_GETFMTS = SNDCTL_DSP_GETFMTS;
+ unsigned IOCTL_SNDCTL_DSP_NONBLOCK = SNDCTL_DSP_NONBLOCK;
+ unsigned IOCTL_SNDCTL_DSP_POST = SNDCTL_DSP_POST;
+ unsigned IOCTL_SNDCTL_DSP_RESET = SNDCTL_DSP_RESET;
+ unsigned IOCTL_SNDCTL_DSP_SETFMT = SNDCTL_DSP_SETFMT;
+ unsigned IOCTL_SNDCTL_DSP_SETFRAGMENT = SNDCTL_DSP_SETFRAGMENT;
+ unsigned IOCTL_SNDCTL_DSP_SPEED = SNDCTL_DSP_SPEED;
+ unsigned IOCTL_SNDCTL_DSP_STEREO = SNDCTL_DSP_STEREO;
+ unsigned IOCTL_SNDCTL_DSP_SUBDIVIDE = SNDCTL_DSP_SUBDIVIDE;
+ unsigned IOCTL_SNDCTL_DSP_SYNC = SNDCTL_DSP_SYNC;
+ unsigned IOCTL_SNDCTL_FM_4OP_ENABLE = SNDCTL_FM_4OP_ENABLE;
+ unsigned IOCTL_SNDCTL_FM_LOAD_INSTR = SNDCTL_FM_LOAD_INSTR;
+ unsigned IOCTL_SNDCTL_MIDI_INFO = SNDCTL_MIDI_INFO;
+ unsigned IOCTL_SNDCTL_MIDI_PRETIME = SNDCTL_MIDI_PRETIME;
+ unsigned IOCTL_SNDCTL_SEQ_CTRLRATE = SNDCTL_SEQ_CTRLRATE;
+ unsigned IOCTL_SNDCTL_SEQ_GETINCOUNT = SNDCTL_SEQ_GETINCOUNT;
+ unsigned IOCTL_SNDCTL_SEQ_GETOUTCOUNT = SNDCTL_SEQ_GETOUTCOUNT;
+ unsigned IOCTL_SNDCTL_SEQ_NRMIDIS = SNDCTL_SEQ_NRMIDIS;
+ unsigned IOCTL_SNDCTL_SEQ_NRSYNTHS = SNDCTL_SEQ_NRSYNTHS;
+ unsigned IOCTL_SNDCTL_SEQ_OUTOFBAND = SNDCTL_SEQ_OUTOFBAND;
+ unsigned IOCTL_SNDCTL_SEQ_PANIC = SNDCTL_SEQ_PANIC;
+ unsigned IOCTL_SNDCTL_SEQ_PERCMODE = SNDCTL_SEQ_PERCMODE;
+ unsigned IOCTL_SNDCTL_SEQ_RESET = SNDCTL_SEQ_RESET;
+ unsigned IOCTL_SNDCTL_SEQ_RESETSAMPLES = SNDCTL_SEQ_RESETSAMPLES;
+ unsigned IOCTL_SNDCTL_SEQ_SYNC = SNDCTL_SEQ_SYNC;
+ unsigned IOCTL_SNDCTL_SEQ_TESTMIDI = SNDCTL_SEQ_TESTMIDI;
+ unsigned IOCTL_SNDCTL_SEQ_THRESHOLD = SNDCTL_SEQ_THRESHOLD;
+ unsigned IOCTL_SNDCTL_SYNTH_INFO = SNDCTL_SYNTH_INFO;
+ unsigned IOCTL_SNDCTL_SYNTH_MEMAVL = SNDCTL_SYNTH_MEMAVL;
+ unsigned IOCTL_SNDCTL_TMR_CONTINUE = SNDCTL_TMR_CONTINUE;
+ unsigned IOCTL_SNDCTL_TMR_METRONOME = SNDCTL_TMR_METRONOME;
+ unsigned IOCTL_SNDCTL_TMR_SELECT = SNDCTL_TMR_SELECT;
+ unsigned IOCTL_SNDCTL_TMR_SOURCE = SNDCTL_TMR_SOURCE;
+ unsigned IOCTL_SNDCTL_TMR_START = SNDCTL_TMR_START;
+ unsigned IOCTL_SNDCTL_TMR_STOP = SNDCTL_TMR_STOP;
+ unsigned IOCTL_SNDCTL_TMR_TEMPO = SNDCTL_TMR_TEMPO;
+ unsigned IOCTL_SNDCTL_TMR_TIMEBASE = SNDCTL_TMR_TIMEBASE;
+ unsigned IOCTL_SOUND_MIXER_READ_ALTPCM = SOUND_MIXER_READ_ALTPCM;
+ unsigned IOCTL_SOUND_MIXER_READ_BASS = SOUND_MIXER_READ_BASS;
+ unsigned IOCTL_SOUND_MIXER_READ_CAPS = SOUND_MIXER_READ_CAPS;
+ unsigned IOCTL_SOUND_MIXER_READ_CD = SOUND_MIXER_READ_CD;
+ unsigned IOCTL_SOUND_MIXER_READ_DEVMASK = SOUND_MIXER_READ_DEVMASK;
+ unsigned IOCTL_SOUND_MIXER_READ_ENHANCE = SOUND_MIXER_READ_ENHANCE;
+ unsigned IOCTL_SOUND_MIXER_READ_IGAIN = SOUND_MIXER_READ_IGAIN;
+ unsigned IOCTL_SOUND_MIXER_READ_IMIX = SOUND_MIXER_READ_IMIX;
+ unsigned IOCTL_SOUND_MIXER_READ_LINE = SOUND_MIXER_READ_LINE;
+ unsigned IOCTL_SOUND_MIXER_READ_LINE1 = SOUND_MIXER_READ_LINE1;
+ unsigned IOCTL_SOUND_MIXER_READ_LINE2 = SOUND_MIXER_READ_LINE2;
+ unsigned IOCTL_SOUND_MIXER_READ_LINE3 = SOUND_MIXER_READ_LINE3;
+ unsigned IOCTL_SOUND_MIXER_READ_LOUD = SOUND_MIXER_READ_LOUD;
+ unsigned IOCTL_SOUND_MIXER_READ_MIC = SOUND_MIXER_READ_MIC;
+ unsigned IOCTL_SOUND_MIXER_READ_MUTE = SOUND_MIXER_READ_MUTE;
+ unsigned IOCTL_SOUND_MIXER_READ_OGAIN = SOUND_MIXER_READ_OGAIN;
+ unsigned IOCTL_SOUND_MIXER_READ_PCM = SOUND_MIXER_READ_PCM;
+ unsigned IOCTL_SOUND_MIXER_READ_RECLEV = SOUND_MIXER_READ_RECLEV;
+ unsigned IOCTL_SOUND_MIXER_READ_RECMASK = SOUND_MIXER_READ_RECMASK;
+ unsigned IOCTL_SOUND_MIXER_READ_RECSRC = SOUND_MIXER_READ_RECSRC;
+ unsigned IOCTL_SOUND_MIXER_READ_SPEAKER = SOUND_MIXER_READ_SPEAKER;
+ unsigned IOCTL_SOUND_MIXER_READ_STEREODEVS = SOUND_MIXER_READ_STEREODEVS;
+ unsigned IOCTL_SOUND_MIXER_READ_SYNTH = SOUND_MIXER_READ_SYNTH;
+ unsigned IOCTL_SOUND_MIXER_READ_TREBLE = SOUND_MIXER_READ_TREBLE;
+ unsigned IOCTL_SOUND_MIXER_READ_VOLUME = SOUND_MIXER_READ_VOLUME;
+ unsigned IOCTL_SOUND_MIXER_WRITE_ALTPCM = SOUND_MIXER_WRITE_ALTPCM;
+ unsigned IOCTL_SOUND_MIXER_WRITE_BASS = SOUND_MIXER_WRITE_BASS;
+ unsigned IOCTL_SOUND_MIXER_WRITE_CD = SOUND_MIXER_WRITE_CD;
+ unsigned IOCTL_SOUND_MIXER_WRITE_ENHANCE = SOUND_MIXER_WRITE_ENHANCE;
+ unsigned IOCTL_SOUND_MIXER_WRITE_IGAIN = SOUND_MIXER_WRITE_IGAIN;
+ unsigned IOCTL_SOUND_MIXER_WRITE_IMIX = SOUND_MIXER_WRITE_IMIX;
+ unsigned IOCTL_SOUND_MIXER_WRITE_LINE = SOUND_MIXER_WRITE_LINE;
+ unsigned IOCTL_SOUND_MIXER_WRITE_LINE1 = SOUND_MIXER_WRITE_LINE1;
+ unsigned IOCTL_SOUND_MIXER_WRITE_LINE2 = SOUND_MIXER_WRITE_LINE2;
+ unsigned IOCTL_SOUND_MIXER_WRITE_LINE3 = SOUND_MIXER_WRITE_LINE3;
+ unsigned IOCTL_SOUND_MIXER_WRITE_LOUD = SOUND_MIXER_WRITE_LOUD;
+ unsigned IOCTL_SOUND_MIXER_WRITE_MIC = SOUND_MIXER_WRITE_MIC;
+ unsigned IOCTL_SOUND_MIXER_WRITE_MUTE = SOUND_MIXER_WRITE_MUTE;
+ unsigned IOCTL_SOUND_MIXER_WRITE_OGAIN = SOUND_MIXER_WRITE_OGAIN;
+ unsigned IOCTL_SOUND_MIXER_WRITE_PCM = SOUND_MIXER_WRITE_PCM;
+ unsigned IOCTL_SOUND_MIXER_WRITE_RECLEV = SOUND_MIXER_WRITE_RECLEV;
+ unsigned IOCTL_SOUND_MIXER_WRITE_RECSRC = SOUND_MIXER_WRITE_RECSRC;
+ unsigned IOCTL_SOUND_MIXER_WRITE_SPEAKER = SOUND_MIXER_WRITE_SPEAKER;
+ unsigned IOCTL_SOUND_MIXER_WRITE_SYNTH = SOUND_MIXER_WRITE_SYNTH;
+ unsigned IOCTL_SOUND_MIXER_WRITE_TREBLE = SOUND_MIXER_WRITE_TREBLE;
+ unsigned IOCTL_SOUND_MIXER_WRITE_VOLUME = SOUND_MIXER_WRITE_VOLUME;
+ unsigned IOCTL_VT_ACTIVATE = VT_ACTIVATE;
+ unsigned IOCTL_VT_GETMODE = VT_GETMODE;
+ unsigned IOCTL_VT_OPENQRY = VT_OPENQRY;
+ unsigned IOCTL_VT_RELDISP = VT_RELDISP;
+ unsigned IOCTL_VT_SETMODE = VT_SETMODE;
+ unsigned IOCTL_VT_WAITACTIVE = VT_WAITACTIVE;
+#endif // SANITIZER_LINUX
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+ unsigned IOCTL_CYGETDEFTHRESH = CYGETDEFTHRESH;
+ unsigned IOCTL_CYGETDEFTIMEOUT = CYGETDEFTIMEOUT;
+ unsigned IOCTL_CYGETMON = CYGETMON;
+ unsigned IOCTL_CYGETTHRESH = CYGETTHRESH;
+ unsigned IOCTL_CYGETTIMEOUT = CYGETTIMEOUT;
+ unsigned IOCTL_CYSETDEFTHRESH = CYSETDEFTHRESH;
+ unsigned IOCTL_CYSETDEFTIMEOUT = CYSETDEFTIMEOUT;
+ unsigned IOCTL_CYSETTHRESH = CYSETTHRESH;
+ unsigned IOCTL_CYSETTIMEOUT = CYSETTIMEOUT;
+ unsigned IOCTL_EQL_EMANCIPATE = EQL_EMANCIPATE;
+ unsigned IOCTL_EQL_ENSLAVE = EQL_ENSLAVE;
+ unsigned IOCTL_EQL_GETMASTRCFG = EQL_GETMASTRCFG;
+ unsigned IOCTL_EQL_GETSLAVECFG = EQL_GETSLAVECFG;
+ unsigned IOCTL_EQL_SETMASTRCFG = EQL_SETMASTRCFG;
+ unsigned IOCTL_EQL_SETSLAVECFG = EQL_SETSLAVECFG;
+#if EV_VERSION > (0x010000)
+ unsigned IOCTL_EVIOCGKEYCODE_V2 = EVIOCGKEYCODE_V2;
+ unsigned IOCTL_EVIOCGPROP = EVIOCGPROP(0);
+ unsigned IOCTL_EVIOCSKEYCODE_V2 = EVIOCSKEYCODE_V2;
+#else
+ unsigned IOCTL_EVIOCGKEYCODE_V2 = IOCTL_NOT_PRESENT;
+ unsigned IOCTL_EVIOCGPROP = IOCTL_NOT_PRESENT;
+ unsigned IOCTL_EVIOCSKEYCODE_V2 = IOCTL_NOT_PRESENT;
+#endif
+ unsigned IOCTL_FS_IOC_GETFLAGS = FS_IOC_GETFLAGS;
+ unsigned IOCTL_FS_IOC_GETVERSION = FS_IOC_GETVERSION;
+ unsigned IOCTL_FS_IOC_SETFLAGS = FS_IOC_SETFLAGS;
+ unsigned IOCTL_FS_IOC_SETVERSION = FS_IOC_SETVERSION;
+ unsigned IOCTL_GIO_CMAP = GIO_CMAP;
+ unsigned IOCTL_GIO_FONT = GIO_FONT;
+ unsigned IOCTL_GIO_UNIMAP = GIO_UNIMAP;
+ unsigned IOCTL_GIO_UNISCRNMAP = GIO_UNISCRNMAP;
+ unsigned IOCTL_KDADDIO = KDADDIO;
+ unsigned IOCTL_KDDELIO = KDDELIO;
+ unsigned IOCTL_KDGETKEYCODE = KDGETKEYCODE;
+ unsigned IOCTL_KDGKBDIACR = KDGKBDIACR;
+ unsigned IOCTL_KDGKBENT = KDGKBENT;
+ unsigned IOCTL_KDGKBLED = KDGKBLED;
+ unsigned IOCTL_KDGKBMETA = KDGKBMETA;
+ unsigned IOCTL_KDGKBSENT = KDGKBSENT;
+ unsigned IOCTL_KDMAPDISP = KDMAPDISP;
+ unsigned IOCTL_KDSETKEYCODE = KDSETKEYCODE;
+ unsigned IOCTL_KDSIGACCEPT = KDSIGACCEPT;
+ unsigned IOCTL_KDSKBDIACR = KDSKBDIACR;
+ unsigned IOCTL_KDSKBENT = KDSKBENT;
+ unsigned IOCTL_KDSKBLED = KDSKBLED;
+ unsigned IOCTL_KDSKBMETA = KDSKBMETA;
+ unsigned IOCTL_KDSKBSENT = KDSKBSENT;
+ unsigned IOCTL_KDUNMAPDISP = KDUNMAPDISP;
+ unsigned IOCTL_LPABORT = LPABORT;
+ unsigned IOCTL_LPABORTOPEN = LPABORTOPEN;
+ unsigned IOCTL_LPCAREFUL = LPCAREFUL;
+ unsigned IOCTL_LPCHAR = LPCHAR;
+ unsigned IOCTL_LPGETIRQ = LPGETIRQ;
+ unsigned IOCTL_LPGETSTATUS = LPGETSTATUS;
+ unsigned IOCTL_LPRESET = LPRESET;
+ unsigned IOCTL_LPSETIRQ = LPSETIRQ;
+ unsigned IOCTL_LPTIME = LPTIME;
+ unsigned IOCTL_LPWAIT = LPWAIT;
+ unsigned IOCTL_MTIOCGETCONFIG = MTIOCGETCONFIG;
+ unsigned IOCTL_MTIOCSETCONFIG = MTIOCSETCONFIG;
+ unsigned IOCTL_PIO_CMAP = PIO_CMAP;
+ unsigned IOCTL_PIO_FONT = PIO_FONT;
+ unsigned IOCTL_PIO_UNIMAP = PIO_UNIMAP;
+ unsigned IOCTL_PIO_UNIMAPCLR = PIO_UNIMAPCLR;
+ unsigned IOCTL_PIO_UNISCRNMAP = PIO_UNISCRNMAP;
+ unsigned IOCTL_SCSI_IOCTL_GET_IDLUN = SCSI_IOCTL_GET_IDLUN;
+ unsigned IOCTL_SCSI_IOCTL_PROBE_HOST = SCSI_IOCTL_PROBE_HOST;
+ unsigned IOCTL_SCSI_IOCTL_TAGGED_DISABLE = SCSI_IOCTL_TAGGED_DISABLE;
+ unsigned IOCTL_SCSI_IOCTL_TAGGED_ENABLE = SCSI_IOCTL_TAGGED_ENABLE;
+ unsigned IOCTL_SIOCAIPXITFCRT = SIOCAIPXITFCRT;
+ unsigned IOCTL_SIOCAIPXPRISLT = SIOCAIPXPRISLT;
+ unsigned IOCTL_SIOCAX25ADDUID = SIOCAX25ADDUID;
+ unsigned IOCTL_SIOCAX25DELUID = SIOCAX25DELUID;
+ unsigned IOCTL_SIOCAX25GETPARMS = SIOCAX25GETPARMS;
+ unsigned IOCTL_SIOCAX25GETUID = SIOCAX25GETUID;
+ unsigned IOCTL_SIOCAX25NOUID = SIOCAX25NOUID;
+ unsigned IOCTL_SIOCAX25SETPARMS = SIOCAX25SETPARMS;
+ unsigned IOCTL_SIOCDEVPLIP = SIOCDEVPLIP;
+ unsigned IOCTL_SIOCIPXCFGDATA = SIOCIPXCFGDATA;
+ unsigned IOCTL_SIOCNRDECOBS = SIOCNRDECOBS;
+ unsigned IOCTL_SIOCNRGETPARMS = SIOCNRGETPARMS;
+ unsigned IOCTL_SIOCNRRTCTL = SIOCNRRTCTL;
+ unsigned IOCTL_SIOCNRSETPARMS = SIOCNRSETPARMS;
+ unsigned IOCTL_TIOCGSERIAL = TIOCGSERIAL;
+ unsigned IOCTL_TIOCSERGETMULTI = TIOCSERGETMULTI;
+ unsigned IOCTL_TIOCSERSETMULTI = TIOCSERSETMULTI;
+ unsigned IOCTL_TIOCSSERIAL = TIOCSSERIAL;
+#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+ unsigned IOCTL_GIO_SCRNMAP = GIO_SCRNMAP;
+ unsigned IOCTL_KDDISABIO = KDDISABIO;
+ unsigned IOCTL_KDENABIO = KDENABIO;
+ unsigned IOCTL_KDGETLED = KDGETLED;
+ unsigned IOCTL_KDGETMODE = KDGETMODE;
+ unsigned IOCTL_KDGKBMODE = KDGKBMODE;
+ unsigned IOCTL_KDGKBTYPE = KDGKBTYPE;
+ unsigned IOCTL_KDMKTONE = KDMKTONE;
+ unsigned IOCTL_KDSETLED = KDSETLED;
+ unsigned IOCTL_KDSETMODE = KDSETMODE;
+ unsigned IOCTL_KDSKBMODE = KDSKBMODE;
+ unsigned IOCTL_KIOCSOUND = KIOCSOUND;
+ unsigned IOCTL_PIO_SCRNMAP = PIO_SCRNMAP;
+ unsigned IOCTL_SNDCTL_DSP_GETISPACE = SNDCTL_DSP_GETISPACE;
+ unsigned IOCTL_SNDCTL_DSP_GETOSPACE = SNDCTL_DSP_GETOSPACE;
+#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
+
+ const int si_SEGV_MAPERR = SEGV_MAPERR;
+ const int si_SEGV_ACCERR = SEGV_ACCERR;
+} // namespace __sanitizer
+
+using namespace __sanitizer;
+
+COMPILER_CHECK(sizeof(__sanitizer_pthread_attr_t) >= sizeof(pthread_attr_t));
+
+COMPILER_CHECK(sizeof(socklen_t) == sizeof(unsigned));
+CHECK_TYPE_SIZE(pthread_key_t);
+
+#if SANITIZER_LINUX
+// FIXME: We define these on Linux and Mac, but only check them on Linux.
+COMPILER_CHECK(IOC_NRBITS == _IOC_NRBITS);
+COMPILER_CHECK(IOC_TYPEBITS == _IOC_TYPEBITS);
+COMPILER_CHECK(IOC_SIZEBITS == _IOC_SIZEBITS);
+COMPILER_CHECK(IOC_DIRBITS == _IOC_DIRBITS);
+COMPILER_CHECK(IOC_NRMASK == _IOC_NRMASK);
+COMPILER_CHECK(IOC_TYPEMASK == _IOC_TYPEMASK);
+COMPILER_CHECK(IOC_SIZEMASK == _IOC_SIZEMASK);
+COMPILER_CHECK(IOC_DIRMASK == _IOC_DIRMASK);
+COMPILER_CHECK(IOC_NRSHIFT == _IOC_NRSHIFT);
+COMPILER_CHECK(IOC_TYPESHIFT == _IOC_TYPESHIFT);
+COMPILER_CHECK(IOC_SIZESHIFT == _IOC_SIZESHIFT);
+COMPILER_CHECK(IOC_DIRSHIFT == _IOC_DIRSHIFT);
+COMPILER_CHECK(IOC_NONE == _IOC_NONE);
+COMPILER_CHECK(IOC_WRITE == _IOC_WRITE);
+COMPILER_CHECK(IOC_READ == _IOC_READ);
+COMPILER_CHECK(EVIOC_ABS_MAX == ABS_MAX);
+COMPILER_CHECK(EVIOC_EV_MAX == EV_MAX);
+COMPILER_CHECK(IOC_SIZE(0x12345678) == _IOC_SIZE(0x12345678));
+COMPILER_CHECK(IOC_DIR(0x12345678) == _IOC_DIR(0x12345678));
+COMPILER_CHECK(IOC_NR(0x12345678) == _IOC_NR(0x12345678));
+COMPILER_CHECK(IOC_TYPE(0x12345678) == _IOC_TYPE(0x12345678));
+#endif // SANITIZER_LINUX
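+// Worked example (assuming the common asm-generic encoding verified above:
+// 8 nr bits, 8 type bits, 14 size bits, 2 dir bits): for request 0x12345678,
+//   IOC_NR(0x12345678)   == 0x78   (bits 0-7)
+//   IOC_TYPE(0x12345678) == 0x56   (bits 8-15)
+//   IOC_SIZE(0x12345678) == 0x1234 (bits 16-29)
+//   IOC_DIR(0x12345678)  == 0x0    (bits 30-31)
+// which is exactly what the IOC_* reimplementations must reproduce.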
+
+#if SANITIZER_LINUX || SANITIZER_FREEBSD
+// There are more undocumented fields in dl_phdr_info that we are not interested
+// in.
+COMPILER_CHECK(sizeof(__sanitizer_dl_phdr_info) <= sizeof(dl_phdr_info));
+CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_addr);
+CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_name);
+CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_phdr);
+CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_phnum);
+#endif // SANITIZER_LINUX || SANITIZER_FREEBSD
+
+#if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
+CHECK_TYPE_SIZE(glob_t);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_pathc);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_pathv);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_offs);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_flags);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_closedir);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_readdir);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_opendir);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_lstat);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_stat);
+#endif
+
+CHECK_TYPE_SIZE(addrinfo);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_flags);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_family);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_socktype);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_protocol);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_addrlen);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_canonname);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_addr);
+
+CHECK_TYPE_SIZE(hostent);
+CHECK_SIZE_AND_OFFSET(hostent, h_name);
+CHECK_SIZE_AND_OFFSET(hostent, h_aliases);
+CHECK_SIZE_AND_OFFSET(hostent, h_addrtype);
+CHECK_SIZE_AND_OFFSET(hostent, h_length);
+CHECK_SIZE_AND_OFFSET(hostent, h_addr_list);
+
+CHECK_TYPE_SIZE(iovec);
+CHECK_SIZE_AND_OFFSET(iovec, iov_base);
+CHECK_SIZE_AND_OFFSET(iovec, iov_len);
+
+CHECK_TYPE_SIZE(msghdr);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_name);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_namelen);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_iov);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_iovlen);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_control);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_controllen);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_flags);
+
+CHECK_TYPE_SIZE(cmsghdr);
+CHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_len);
+CHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_level);
+CHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_type);
+
+#ifndef __GLIBC_PREREQ
+#define __GLIBC_PREREQ(x, y) 0
+#endif
+
+#if SANITIZER_LINUX && (__ANDROID_API__ >= 21 || __GLIBC_PREREQ (2, 14))
+CHECK_TYPE_SIZE(mmsghdr);
+CHECK_SIZE_AND_OFFSET(mmsghdr, msg_hdr);
+CHECK_SIZE_AND_OFFSET(mmsghdr, msg_len);
+#endif
+
+COMPILER_CHECK(sizeof(__sanitizer_dirent) <= sizeof(dirent));
+CHECK_SIZE_AND_OFFSET(dirent, d_ino);
+#if SANITIZER_MAC
+CHECK_SIZE_AND_OFFSET(dirent, d_seekoff);
+#elif SANITIZER_FREEBSD
+// There is no 'd_off' field on FreeBSD.
+#else
+CHECK_SIZE_AND_OFFSET(dirent, d_off);
+#endif
+CHECK_SIZE_AND_OFFSET(dirent, d_reclen);
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+COMPILER_CHECK(sizeof(__sanitizer_dirent64) <= sizeof(dirent64));
+CHECK_SIZE_AND_OFFSET(dirent64, d_ino);
+CHECK_SIZE_AND_OFFSET(dirent64, d_off);
+CHECK_SIZE_AND_OFFSET(dirent64, d_reclen);
+#endif
+
+CHECK_TYPE_SIZE(ifconf);
+CHECK_SIZE_AND_OFFSET(ifconf, ifc_len);
+CHECK_SIZE_AND_OFFSET(ifconf, ifc_ifcu);
+
+CHECK_TYPE_SIZE(pollfd);
+CHECK_SIZE_AND_OFFSET(pollfd, fd);
+CHECK_SIZE_AND_OFFSET(pollfd, events);
+CHECK_SIZE_AND_OFFSET(pollfd, revents);
+
+CHECK_TYPE_SIZE(nfds_t);
+
+CHECK_TYPE_SIZE(sigset_t);
+
+COMPILER_CHECK(sizeof(__sanitizer_sigaction) == sizeof(struct sigaction));
+// Can't write checks for sa_handler and sa_sigaction because they are
+// preprocessor macros.
+CHECK_STRUCT_SIZE_AND_OFFSET(sigaction, sa_mask);
+#if !defined(__s390x__) || __GLIBC_PREREQ (2, 20)
+// On s390x glibc 2.19 and earlier sa_flags was unsigned long, and sa_resv
+// didn't exist.
+CHECK_STRUCT_SIZE_AND_OFFSET(sigaction, sa_flags);
+#endif
+#if SANITIZER_LINUX && (!SANITIZER_ANDROID || !SANITIZER_MIPS32)
+CHECK_STRUCT_SIZE_AND_OFFSET(sigaction, sa_restorer);
+#endif
+
+#if SANITIZER_LINUX
+CHECK_TYPE_SIZE(__sysctl_args);
+CHECK_SIZE_AND_OFFSET(__sysctl_args, name);
+CHECK_SIZE_AND_OFFSET(__sysctl_args, nlen);
+CHECK_SIZE_AND_OFFSET(__sysctl_args, oldval);
+CHECK_SIZE_AND_OFFSET(__sysctl_args, oldlenp);
+CHECK_SIZE_AND_OFFSET(__sysctl_args, newval);
+CHECK_SIZE_AND_OFFSET(__sysctl_args, newlen);
+
+CHECK_TYPE_SIZE(__kernel_uid_t);
+CHECK_TYPE_SIZE(__kernel_gid_t);
+
+#if SANITIZER_USES_UID16_SYSCALLS
+CHECK_TYPE_SIZE(__kernel_old_uid_t);
+CHECK_TYPE_SIZE(__kernel_old_gid_t);
+#endif
+
+CHECK_TYPE_SIZE(__kernel_off_t);
+CHECK_TYPE_SIZE(__kernel_loff_t);
+CHECK_TYPE_SIZE(__kernel_fd_set);
+#endif
+
+#if !SANITIZER_ANDROID
+CHECK_TYPE_SIZE(wordexp_t);
+CHECK_SIZE_AND_OFFSET(wordexp_t, we_wordc);
+CHECK_SIZE_AND_OFFSET(wordexp_t, we_wordv);
+CHECK_SIZE_AND_OFFSET(wordexp_t, we_offs);
+#endif
+
+CHECK_TYPE_SIZE(tm);
+CHECK_SIZE_AND_OFFSET(tm, tm_sec);
+CHECK_SIZE_AND_OFFSET(tm, tm_min);
+CHECK_SIZE_AND_OFFSET(tm, tm_hour);
+CHECK_SIZE_AND_OFFSET(tm, tm_mday);
+CHECK_SIZE_AND_OFFSET(tm, tm_mon);
+CHECK_SIZE_AND_OFFSET(tm, tm_year);
+CHECK_SIZE_AND_OFFSET(tm, tm_wday);
+CHECK_SIZE_AND_OFFSET(tm, tm_yday);
+CHECK_SIZE_AND_OFFSET(tm, tm_isdst);
+CHECK_SIZE_AND_OFFSET(tm, tm_gmtoff);
+CHECK_SIZE_AND_OFFSET(tm, tm_zone);
+
+#if SANITIZER_LINUX
+CHECK_TYPE_SIZE(mntent);
+CHECK_SIZE_AND_OFFSET(mntent, mnt_fsname);
+CHECK_SIZE_AND_OFFSET(mntent, mnt_dir);
+CHECK_SIZE_AND_OFFSET(mntent, mnt_type);
+CHECK_SIZE_AND_OFFSET(mntent, mnt_opts);
+CHECK_SIZE_AND_OFFSET(mntent, mnt_freq);
+CHECK_SIZE_AND_OFFSET(mntent, mnt_passno);
+#endif
+
+CHECK_TYPE_SIZE(ether_addr);
+
+#if (SANITIZER_LINUX || SANITIZER_FREEBSD) && !SANITIZER_ANDROID
+CHECK_TYPE_SIZE(ipc_perm);
+# if SANITIZER_FREEBSD
+CHECK_SIZE_AND_OFFSET(ipc_perm, key);
+CHECK_SIZE_AND_OFFSET(ipc_perm, seq);
+# else
+CHECK_SIZE_AND_OFFSET(ipc_perm, __key);
+CHECK_SIZE_AND_OFFSET(ipc_perm, __seq);
+# endif
+CHECK_SIZE_AND_OFFSET(ipc_perm, uid);
+CHECK_SIZE_AND_OFFSET(ipc_perm, gid);
+CHECK_SIZE_AND_OFFSET(ipc_perm, cuid);
+CHECK_SIZE_AND_OFFSET(ipc_perm, cgid);
+#if !defined(__aarch64__) || !SANITIZER_LINUX || __GLIBC_PREREQ (2, 21)
+/* On aarch64, glibc 2.20 and earlier provided an incorrect mode field. */
+CHECK_SIZE_AND_OFFSET(ipc_perm, mode);
+#endif
+
+CHECK_TYPE_SIZE(shmid_ds);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_perm);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_segsz);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_atime);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_dtime);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_ctime);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_cpid);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_lpid);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_nattch);
+#endif
+
+CHECK_TYPE_SIZE(clock_t);
+
+#if SANITIZER_LINUX
+CHECK_TYPE_SIZE(clockid_t);
+#endif
+
+#if !SANITIZER_ANDROID
+CHECK_TYPE_SIZE(ifaddrs);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_next);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_name);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_addr);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_netmask);
+#if SANITIZER_LINUX || SANITIZER_FREEBSD
+// Compare against the union, because we can't reach into the union in a
+// compliant way.
+#ifdef ifa_dstaddr
+#undef ifa_dstaddr
+#endif
+# if SANITIZER_FREEBSD
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_dstaddr);
+# else
+COMPILER_CHECK(sizeof(((__sanitizer_ifaddrs *)nullptr)->ifa_dstaddr) ==
+ sizeof(((ifaddrs *)nullptr)->ifa_ifu));
+COMPILER_CHECK(offsetof(__sanitizer_ifaddrs, ifa_dstaddr) ==
+ offsetof(ifaddrs, ifa_ifu));
+# endif // SANITIZER_FREEBSD
+#else
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_dstaddr);
+#endif // SANITIZER_LINUX
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_data);
+#endif
+
+#if SANITIZER_LINUX
+COMPILER_CHECK(sizeof(__sanitizer_struct_mallinfo) == sizeof(struct mallinfo));
+#endif
+
+#if !SANITIZER_ANDROID
+CHECK_TYPE_SIZE(timeb);
+CHECK_SIZE_AND_OFFSET(timeb, time);
+CHECK_SIZE_AND_OFFSET(timeb, millitm);
+CHECK_SIZE_AND_OFFSET(timeb, timezone);
+CHECK_SIZE_AND_OFFSET(timeb, dstflag);
+#endif
+
+CHECK_TYPE_SIZE(passwd);
+CHECK_SIZE_AND_OFFSET(passwd, pw_name);
+CHECK_SIZE_AND_OFFSET(passwd, pw_passwd);
+CHECK_SIZE_AND_OFFSET(passwd, pw_uid);
+CHECK_SIZE_AND_OFFSET(passwd, pw_gid);
+CHECK_SIZE_AND_OFFSET(passwd, pw_dir);
+CHECK_SIZE_AND_OFFSET(passwd, pw_shell);
+
+#if !SANITIZER_ANDROID
+CHECK_SIZE_AND_OFFSET(passwd, pw_gecos);
+#endif
+
+#if SANITIZER_MAC
+CHECK_SIZE_AND_OFFSET(passwd, pw_change);
+CHECK_SIZE_AND_OFFSET(passwd, pw_expire);
+CHECK_SIZE_AND_OFFSET(passwd, pw_class);
+#endif
+
+CHECK_TYPE_SIZE(group);
+CHECK_SIZE_AND_OFFSET(group, gr_name);
+CHECK_SIZE_AND_OFFSET(group, gr_passwd);
+CHECK_SIZE_AND_OFFSET(group, gr_gid);
+CHECK_SIZE_AND_OFFSET(group, gr_mem);
+
+#if HAVE_RPC_XDR_H
+CHECK_TYPE_SIZE(XDR);
+CHECK_SIZE_AND_OFFSET(XDR, x_op);
+CHECK_SIZE_AND_OFFSET(XDR, x_ops);
+CHECK_SIZE_AND_OFFSET(XDR, x_public);
+CHECK_SIZE_AND_OFFSET(XDR, x_private);
+CHECK_SIZE_AND_OFFSET(XDR, x_base);
+CHECK_SIZE_AND_OFFSET(XDR, x_handy);
+COMPILER_CHECK(__sanitizer_XDR_ENCODE == XDR_ENCODE);
+COMPILER_CHECK(__sanitizer_XDR_DECODE == XDR_DECODE);
+COMPILER_CHECK(__sanitizer_XDR_FREE == XDR_FREE);
+#endif
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+COMPILER_CHECK(sizeof(__sanitizer_FILE) <= sizeof(FILE));
+CHECK_SIZE_AND_OFFSET(FILE, _flags);
+CHECK_SIZE_AND_OFFSET(FILE, _IO_read_ptr);
+CHECK_SIZE_AND_OFFSET(FILE, _IO_read_end);
+CHECK_SIZE_AND_OFFSET(FILE, _IO_read_base);
+CHECK_SIZE_AND_OFFSET(FILE, _IO_write_ptr);
+CHECK_SIZE_AND_OFFSET(FILE, _IO_write_end);
+CHECK_SIZE_AND_OFFSET(FILE, _IO_write_base);
+CHECK_SIZE_AND_OFFSET(FILE, _IO_buf_base);
+CHECK_SIZE_AND_OFFSET(FILE, _IO_buf_end);
+CHECK_SIZE_AND_OFFSET(FILE, _IO_save_base);
+CHECK_SIZE_AND_OFFSET(FILE, _IO_backup_base);
+CHECK_SIZE_AND_OFFSET(FILE, _IO_save_end);
+CHECK_SIZE_AND_OFFSET(FILE, _markers);
+CHECK_SIZE_AND_OFFSET(FILE, _chain);
+CHECK_SIZE_AND_OFFSET(FILE, _fileno);
+#endif
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+COMPILER_CHECK(sizeof(__sanitizer__obstack_chunk) <= sizeof(_obstack_chunk));
+CHECK_SIZE_AND_OFFSET(_obstack_chunk, limit);
+CHECK_SIZE_AND_OFFSET(_obstack_chunk, prev);
+CHECK_TYPE_SIZE(obstack);
+CHECK_SIZE_AND_OFFSET(obstack, chunk_size);
+CHECK_SIZE_AND_OFFSET(obstack, chunk);
+CHECK_SIZE_AND_OFFSET(obstack, object_base);
+CHECK_SIZE_AND_OFFSET(obstack, next_free);
+
+CHECK_TYPE_SIZE(cookie_io_functions_t);
+CHECK_SIZE_AND_OFFSET(cookie_io_functions_t, read);
+CHECK_SIZE_AND_OFFSET(cookie_io_functions_t, write);
+CHECK_SIZE_AND_OFFSET(cookie_io_functions_t, seek);
+CHECK_SIZE_AND_OFFSET(cookie_io_functions_t, close);
+#endif
+
+#if SANITIZER_LINUX || SANITIZER_FREEBSD
+CHECK_TYPE_SIZE(sem_t);
+#endif
+
+#if SANITIZER_LINUX && defined(__arm__)
+COMPILER_CHECK(ARM_VFPREGS_SIZE == ARM_VFPREGS_SIZE_ASAN);
+#endif
+
+#endif // SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_MAC
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_platform_limits_posix.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_platform_limits_posix.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_platform_limits_posix.h (revision 351984)
@@ -0,0 +1,1454 @@
+//===-- sanitizer_platform_limits_posix.h ---------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of Sanitizer common code.
+//
+// Sizes and layouts of platform-specific POSIX data structures.
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_PLATFORM_LIMITS_POSIX_H
+#define SANITIZER_PLATFORM_LIMITS_POSIX_H
+
+#if SANITIZER_LINUX || SANITIZER_MAC
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_platform.h"
+
+# define GET_LINK_MAP_BY_DLOPEN_HANDLE(handle) ((link_map*)(handle))
+
+#ifndef __GLIBC_PREREQ
+#define __GLIBC_PREREQ(x, y) 0
+#endif
+
+namespace __sanitizer {
+ extern unsigned struct_utsname_sz;
+ extern unsigned struct_stat_sz;
+#if !SANITIZER_IOS
+ extern unsigned struct_stat64_sz;
+#endif
+ extern unsigned struct_rusage_sz;
+ extern unsigned siginfo_t_sz;
+ extern unsigned struct_itimerval_sz;
+ extern unsigned pthread_t_sz;
+ extern unsigned pthread_mutex_t_sz;
+ extern unsigned pthread_cond_t_sz;
+ extern unsigned pid_t_sz;
+ extern unsigned timeval_sz;
+ extern unsigned uid_t_sz;
+ extern unsigned gid_t_sz;
+ extern unsigned mbstate_t_sz;
+ extern unsigned struct_timezone_sz;
+ extern unsigned struct_tms_sz;
+ extern unsigned struct_itimerspec_sz;
+ extern unsigned struct_sigevent_sz;
+ extern unsigned struct_sched_param_sz;
+ extern unsigned struct_statfs64_sz;
+ extern unsigned struct_regex_sz;
+ extern unsigned struct_regmatch_sz;
+
+#if !SANITIZER_ANDROID
+ extern unsigned struct_fstab_sz;
+ extern unsigned struct_statfs_sz;
+ extern unsigned struct_sockaddr_sz;
+ extern unsigned ucontext_t_sz;
+#endif // !SANITIZER_ANDROID
+
+#if SANITIZER_LINUX
+
+#if defined(__x86_64__)
+ const unsigned struct_kernel_stat_sz = 144;
+ const unsigned struct_kernel_stat64_sz = 0;
+#elif defined(__i386__)
+ const unsigned struct_kernel_stat_sz = 64;
+ const unsigned struct_kernel_stat64_sz = 96;
+#elif defined(__arm__)
+ const unsigned struct_kernel_stat_sz = 64;
+ const unsigned struct_kernel_stat64_sz = 104;
+#elif defined(__aarch64__)
+ const unsigned struct_kernel_stat_sz = 128;
+ const unsigned struct_kernel_stat64_sz = 104;
+#elif defined(__powerpc__) && !defined(__powerpc64__)
+ const unsigned struct_kernel_stat_sz = 72;
+ const unsigned struct_kernel_stat64_sz = 104;
+#elif defined(__powerpc64__)
+ const unsigned struct_kernel_stat_sz = 144;
+ const unsigned struct_kernel_stat64_sz = 104;
+#elif defined(__mips__)
+ const unsigned struct_kernel_stat_sz =
+ SANITIZER_ANDROID ? FIRST_32_SECOND_64(104, 128) :
+ FIRST_32_SECOND_64(160, 216);
+ const unsigned struct_kernel_stat64_sz = 104;
+#elif defined(__s390__) && !defined(__s390x__)
+ const unsigned struct_kernel_stat_sz = 64;
+ const unsigned struct_kernel_stat64_sz = 104;
+#elif defined(__s390x__)
+ const unsigned struct_kernel_stat_sz = 144;
+ const unsigned struct_kernel_stat64_sz = 0;
+#elif defined(__sparc__) && defined(__arch64__)
+ const unsigned struct___old_kernel_stat_sz = 0;
+ const unsigned struct_kernel_stat_sz = 104;
+ const unsigned struct_kernel_stat64_sz = 144;
+#elif defined(__sparc__) && !defined(__arch64__)
+ const unsigned struct___old_kernel_stat_sz = 0;
+ const unsigned struct_kernel_stat_sz = 64;
+ const unsigned struct_kernel_stat64_sz = 104;
+#endif
+ struct __sanitizer_perf_event_attr {
+ unsigned type;
+ unsigned size;
+ // More fields that vary with the kernel version.
+ };
+
+ extern unsigned struct_epoll_event_sz;
+ extern unsigned struct_sysinfo_sz;
+ extern unsigned __user_cap_header_struct_sz;
+ extern unsigned __user_cap_data_struct_sz;
+ extern unsigned struct_new_utsname_sz;
+ extern unsigned struct_old_utsname_sz;
+ extern unsigned struct_oldold_utsname_sz;
+
+ const unsigned struct_kexec_segment_sz = 4 * sizeof(unsigned long);
+#endif // SANITIZER_LINUX
+
+#if SANITIZER_LINUX
+
+#if defined(__powerpc64__) || defined(__s390__)
+ const unsigned struct___old_kernel_stat_sz = 0;
+#elif !defined(__sparc__)
+ const unsigned struct___old_kernel_stat_sz = 32;
+#endif
+
+ extern unsigned struct_rlimit_sz;
+ extern unsigned struct_utimbuf_sz;
+ extern unsigned struct_timespec_sz;
+
+ struct __sanitizer_iocb {
+ u64 aio_data;
+    u32 aio_key_or_aio_reserved1; // The order of these two fields depends
+    u32 aio_reserved1_or_aio_key; // on endianness; luckily, we don't need them.
+ u16 aio_lio_opcode;
+ s16 aio_reqprio;
+ u32 aio_fildes;
+ u64 aio_buf;
+ u64 aio_nbytes;
+ s64 aio_offset;
+ u64 aio_reserved2;
+ u64 aio_reserved3;
+ };
+
+ struct __sanitizer_io_event {
+ u64 data;
+ u64 obj;
+ u64 res;
+ u64 res2;
+ };
+
+ const unsigned iocb_cmd_pread = 0;
+ const unsigned iocb_cmd_pwrite = 1;
+ const unsigned iocb_cmd_preadv = 7;
+ const unsigned iocb_cmd_pwritev = 8;
+
+ struct __sanitizer___sysctl_args {
+ int *name;
+ int nlen;
+ void *oldval;
+ uptr *oldlenp;
+ void *newval;
+ uptr newlen;
+ unsigned long ___unused[4];
+ };
+
+ const unsigned old_sigset_t_sz = sizeof(unsigned long);
+
+ struct __sanitizer_sem_t {
+#if SANITIZER_ANDROID && defined(_LP64)
+ int data[4];
+#elif SANITIZER_ANDROID && !defined(_LP64)
+ int data;
+#elif SANITIZER_LINUX
+ uptr data[4];
+#endif
+ };
+#endif // SANITIZER_LINUX
+
+#if SANITIZER_ANDROID
+ struct __sanitizer_struct_mallinfo {
+ uptr v[10];
+ };
+#endif
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+ struct __sanitizer_struct_mallinfo {
+ int v[10];
+ };
+
+ extern unsigned struct_ustat_sz;
+ extern unsigned struct_rlimit64_sz;
+ extern unsigned struct_statvfs64_sz;
+
+ struct __sanitizer_ipc_perm {
+ int __key;
+ int uid;
+ int gid;
+ int cuid;
+ int cgid;
+#ifdef __powerpc__
+ unsigned mode;
+ unsigned __seq;
+ u64 __unused1;
+ u64 __unused2;
+#elif defined(__sparc__)
+#if defined(__arch64__)
+ unsigned mode;
+ unsigned short __pad1;
+#else
+ unsigned short __pad1;
+ unsigned short mode;
+ unsigned short __pad2;
+#endif
+ unsigned short __seq;
+ unsigned long long __unused1;
+ unsigned long long __unused2;
+#elif defined(__mips__) || defined(__aarch64__) || defined(__s390x__)
+ unsigned int mode;
+ unsigned short __seq;
+ unsigned short __pad1;
+ unsigned long __unused1;
+ unsigned long __unused2;
+#else
+ unsigned short mode;
+ unsigned short __pad1;
+ unsigned short __seq;
+ unsigned short __pad2;
+#if defined(__x86_64__) && !defined(_LP64)
+ u64 __unused1;
+ u64 __unused2;
+#else
+ unsigned long __unused1;
+ unsigned long __unused2;
+#endif
+#endif
+ };
+
+ struct __sanitizer_shmid_ds {
+ __sanitizer_ipc_perm shm_perm;
+ #if defined(__sparc__)
+ #if !defined(__arch64__)
+ u32 __pad1;
+ #endif
+ long shm_atime;
+ #if !defined(__arch64__)
+ u32 __pad2;
+ #endif
+ long shm_dtime;
+ #if !defined(__arch64__)
+ u32 __pad3;
+ #endif
+ long shm_ctime;
+ uptr shm_segsz;
+ int shm_cpid;
+ int shm_lpid;
+ unsigned long shm_nattch;
+ unsigned long __glibc_reserved1;
+ unsigned long __glibc_reserved2;
+ #else
+ #ifndef __powerpc__
+ uptr shm_segsz;
+ #elif !defined(__powerpc64__)
+ uptr __unused0;
+ #endif
+ #if defined(__x86_64__) && !defined(_LP64)
+ u64 shm_atime;
+ u64 shm_dtime;
+ u64 shm_ctime;
+ #else
+ uptr shm_atime;
+ #if !defined(_LP64) && !defined(__mips__)
+ uptr __unused1;
+ #endif
+ uptr shm_dtime;
+ #if !defined(_LP64) && !defined(__mips__)
+ uptr __unused2;
+ #endif
+ uptr shm_ctime;
+ #if !defined(_LP64) && !defined(__mips__)
+ uptr __unused3;
+ #endif
+ #endif
+ #ifdef __powerpc__
+ uptr shm_segsz;
+ #endif
+ int shm_cpid;
+ int shm_lpid;
+ #if defined(__x86_64__) && !defined(_LP64)
+ u64 shm_nattch;
+ u64 __unused4;
+ u64 __unused5;
+ #else
+ uptr shm_nattch;
+ uptr __unused4;
+ uptr __unused5;
+ #endif
+#endif
+ };
+#endif
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+ extern unsigned struct_msqid_ds_sz;
+ extern unsigned struct_mq_attr_sz;
+ extern unsigned struct_timex_sz;
+ extern unsigned struct_statvfs_sz;
+#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
+
+ struct __sanitizer_iovec {
+ void *iov_base;
+ uptr iov_len;
+ };
+
+#if !SANITIZER_ANDROID
+ struct __sanitizer_ifaddrs {
+ struct __sanitizer_ifaddrs *ifa_next;
+ char *ifa_name;
+ unsigned int ifa_flags;
+ void *ifa_addr; // (struct sockaddr *)
+ void *ifa_netmask; // (struct sockaddr *)
+ // This is a union on Linux.
+# ifdef ifa_dstaddr
+# undef ifa_dstaddr
+# endif
+ void *ifa_dstaddr; // (struct sockaddr *)
+ void *ifa_data;
+ };
+#endif // !SANITIZER_ANDROID
+
+#if SANITIZER_MAC
+ typedef unsigned long __sanitizer_pthread_key_t;
+#else
+ typedef unsigned __sanitizer_pthread_key_t;
+#endif
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+
+ struct __sanitizer_XDR {
+ int x_op;
+ void *x_ops;
+ uptr x_public;
+ uptr x_private;
+ uptr x_base;
+ unsigned x_handy;
+ };
+
+ const int __sanitizer_XDR_ENCODE = 0;
+ const int __sanitizer_XDR_DECODE = 1;
+ const int __sanitizer_XDR_FREE = 2;
+#endif
+
+ struct __sanitizer_passwd {
+ char *pw_name;
+ char *pw_passwd;
+ int pw_uid;
+ int pw_gid;
+#if SANITIZER_MAC
+ long pw_change;
+ char *pw_class;
+#endif
+#if !(SANITIZER_ANDROID && (SANITIZER_WORDSIZE == 32))
+ char *pw_gecos;
+#endif
+ char *pw_dir;
+ char *pw_shell;
+#if SANITIZER_MAC
+ long pw_expire;
+#endif
+ };
+
+ struct __sanitizer_group {
+ char *gr_name;
+ char *gr_passwd;
+ int gr_gid;
+ char **gr_mem;
+ };
+
+#if defined(__x86_64__) && !defined(_LP64)
+ typedef long long __sanitizer_time_t;
+#else
+ typedef long __sanitizer_time_t;
+#endif
+
+ typedef long __sanitizer_suseconds_t;
+
+ struct __sanitizer_timeval {
+ __sanitizer_time_t tv_sec;
+ __sanitizer_suseconds_t tv_usec;
+ };
+
+ struct __sanitizer_itimerval {
+ struct __sanitizer_timeval it_interval;
+ struct __sanitizer_timeval it_value;
+ };
+
+ struct __sanitizer_timeb {
+ __sanitizer_time_t time;
+ unsigned short millitm;
+ short timezone;
+ short dstflag;
+ };
+
+ struct __sanitizer_ether_addr {
+ u8 octet[6];
+ };
+
+ struct __sanitizer_tm {
+ int tm_sec;
+ int tm_min;
+ int tm_hour;
+ int tm_mday;
+ int tm_mon;
+ int tm_year;
+ int tm_wday;
+ int tm_yday;
+ int tm_isdst;
+ long int tm_gmtoff;
+ const char *tm_zone;
+ };
+
+#if SANITIZER_LINUX
+ struct __sanitizer_mntent {
+ char *mnt_fsname;
+ char *mnt_dir;
+ char *mnt_type;
+ char *mnt_opts;
+ int mnt_freq;
+ int mnt_passno;
+ };
+
+ struct __sanitizer_file_handle {
+ unsigned int handle_bytes;
+ int handle_type;
+ unsigned char f_handle[1]; // variable sized
+ };
+#endif
+
+#if SANITIZER_MAC
+ struct __sanitizer_msghdr {
+ void *msg_name;
+ unsigned msg_namelen;
+ struct __sanitizer_iovec *msg_iov;
+ unsigned msg_iovlen;
+ void *msg_control;
+ unsigned msg_controllen;
+ int msg_flags;
+ };
+ struct __sanitizer_cmsghdr {
+ unsigned cmsg_len;
+ int cmsg_level;
+ int cmsg_type;
+ };
+#else
+ struct __sanitizer_msghdr {
+ void *msg_name;
+ unsigned msg_namelen;
+ struct __sanitizer_iovec *msg_iov;
+ uptr msg_iovlen;
+ void *msg_control;
+ uptr msg_controllen;
+ int msg_flags;
+ };
+ struct __sanitizer_cmsghdr {
+ uptr cmsg_len;
+ int cmsg_level;
+ int cmsg_type;
+ };
+#endif
+
+#if SANITIZER_LINUX
+ struct __sanitizer_mmsghdr {
+ __sanitizer_msghdr msg_hdr;
+ unsigned int msg_len;
+ };
+#endif
+
+#if SANITIZER_MAC
+ struct __sanitizer_dirent {
+ unsigned long long d_ino;
+ unsigned long long d_seekoff;
+ unsigned short d_reclen;
+ // more fields that we don't care about
+ };
+#elif SANITIZER_ANDROID || defined(__x86_64__)
+ struct __sanitizer_dirent {
+ unsigned long long d_ino;
+ unsigned long long d_off;
+ unsigned short d_reclen;
+ // more fields that we don't care about
+ };
+#else
+ struct __sanitizer_dirent {
+ uptr d_ino;
+ uptr d_off;
+ unsigned short d_reclen;
+ // more fields that we don't care about
+ };
+#endif
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+ struct __sanitizer_dirent64 {
+ unsigned long long d_ino;
+ unsigned long long d_off;
+ unsigned short d_reclen;
+ // more fields that we don't care about
+ };
+#endif
+
+#if defined(__x86_64__) && !defined(_LP64)
+ typedef long long __sanitizer_clock_t;
+#else
+ typedef long __sanitizer_clock_t;
+#endif
+
+#if SANITIZER_LINUX
+ typedef int __sanitizer_clockid_t;
+#endif
+
+#if SANITIZER_LINUX
+#if defined(_LP64) || defined(__x86_64__) || defined(__powerpc__)\
+ || defined(__mips__)
+ typedef unsigned __sanitizer___kernel_uid_t;
+ typedef unsigned __sanitizer___kernel_gid_t;
+#else
+ typedef unsigned short __sanitizer___kernel_uid_t;
+ typedef unsigned short __sanitizer___kernel_gid_t;
+#endif
+#if defined(__x86_64__) && !defined(_LP64)
+ typedef long long __sanitizer___kernel_off_t;
+#else
+ typedef long __sanitizer___kernel_off_t;
+#endif
+
+#if defined(__powerpc__) || defined(__mips__)
+ typedef unsigned int __sanitizer___kernel_old_uid_t;
+ typedef unsigned int __sanitizer___kernel_old_gid_t;
+#else
+ typedef unsigned short __sanitizer___kernel_old_uid_t;
+ typedef unsigned short __sanitizer___kernel_old_gid_t;
+#endif
+
+ typedef long long __sanitizer___kernel_loff_t;
+ typedef struct {
+ unsigned long fds_bits[1024 / (8 * sizeof(long))];
+ } __sanitizer___kernel_fd_set;
+#endif
+
+  // The layout of pthread_attr_t varies by platform; we only need an upper
+  // bound on its size, which is verified with a compile-time assert in the
+  // .cc file.
+ const int pthread_attr_t_max_sz = 128;
+ union __sanitizer_pthread_attr_t {
+ char size[pthread_attr_t_max_sz]; // NOLINT
+ void *align;
+ };
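+  // Illustrative note (not upstream text): each platform's .cc file verifies
+  // the upper bound above with a compile-time assert of the form
+  //   COMPILER_CHECK(sizeof(__sanitizer_pthread_attr_t) >=
+  //                  sizeof(pthread_attr_t));
+  // (see, e.g., the Solaris file later in this diff), so a platform whose
+  // pthread_attr_t outgrows 128 bytes fails to build instead of misbehaving.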
+
+#if SANITIZER_ANDROID
+# if SANITIZER_MIPS
+ typedef unsigned long __sanitizer_sigset_t[16/sizeof(unsigned long)];
+# else
+ typedef unsigned long __sanitizer_sigset_t;
+# endif
+#elif SANITIZER_MAC
+ typedef unsigned __sanitizer_sigset_t;
+#elif SANITIZER_LINUX
+ struct __sanitizer_sigset_t {
+    // The size is taken from sizeof(sigset_t) on Linux.
+ uptr val[128 / sizeof(uptr)];
+ };
+#endif
+
+ struct __sanitizer_siginfo {
+    // The size is taken from sizeof(siginfo_t) on Linux.
+ u64 opaque[128 / sizeof(u64)];
+ };
+
+ using __sanitizer_sighandler_ptr = void (*)(int sig);
+ using __sanitizer_sigactionhandler_ptr =
+ void (*)(int sig, __sanitizer_siginfo *siginfo, void *uctx);
+
+ // Linux system headers define the 'sa_handler' and 'sa_sigaction' macros.
+#if SANITIZER_ANDROID && (SANITIZER_WORDSIZE == 64)
+ struct __sanitizer_sigaction {
+ unsigned sa_flags;
+ union {
+ __sanitizer_sigactionhandler_ptr sigaction;
+ __sanitizer_sighandler_ptr handler;
+ };
+ __sanitizer_sigset_t sa_mask;
+ void (*sa_restorer)();
+ };
+#elif SANITIZER_ANDROID && SANITIZER_MIPS32 // check this before WORDSIZE == 32
+ struct __sanitizer_sigaction {
+ unsigned sa_flags;
+ union {
+ __sanitizer_sigactionhandler_ptr sigaction;
+ __sanitizer_sighandler_ptr handler;
+ };
+ __sanitizer_sigset_t sa_mask;
+ };
+#elif SANITIZER_ANDROID && (SANITIZER_WORDSIZE == 32)
+ struct __sanitizer_sigaction {
+ union {
+ __sanitizer_sigactionhandler_ptr sigaction;
+ __sanitizer_sighandler_ptr handler;
+ };
+ __sanitizer_sigset_t sa_mask;
+ uptr sa_flags;
+ void (*sa_restorer)();
+ };
+#else // !SANITIZER_ANDROID
+ struct __sanitizer_sigaction {
+#if defined(__mips__) && !SANITIZER_FREEBSD
+ unsigned int sa_flags;
+#endif
+ union {
+ __sanitizer_sigactionhandler_ptr sigaction;
+ __sanitizer_sighandler_ptr handler;
+ };
+#if SANITIZER_FREEBSD
+ int sa_flags;
+ __sanitizer_sigset_t sa_mask;
+#else
+#if defined(__s390x__)
+ int sa_resv;
+#else
+ __sanitizer_sigset_t sa_mask;
+#endif
+#ifndef __mips__
+#if defined(__sparc__)
+#if __GLIBC_PREREQ (2, 20)
+ // On sparc glibc 2.19 and earlier sa_flags was unsigned long.
+#if defined(__arch64__)
+ // To maintain ABI compatibility on sparc64 when switching to an int,
+ // __glibc_reserved0 was added.
+ int __glibc_reserved0;
+#endif
+ int sa_flags;
+#else
+ unsigned long sa_flags;
+#endif
+#else
+ int sa_flags;
+#endif
+#endif
+#endif
+#if SANITIZER_LINUX
+ void (*sa_restorer)();
+#endif
+#if defined(__mips__) && (SANITIZER_WORDSIZE == 32)
+ int sa_resv[1];
+#endif
+#if defined(__s390x__)
+ __sanitizer_sigset_t sa_mask;
+#endif
+ };
+#endif // !SANITIZER_ANDROID
+
+#if defined(__mips__)
+ struct __sanitizer_kernel_sigset_t {
+ uptr sig[2];
+ };
+#else
+ struct __sanitizer_kernel_sigset_t {
+ u8 sig[8];
+ };
+#endif
+
+ // Linux system headers define the 'sa_handler' and 'sa_sigaction' macros.
+#if SANITIZER_MIPS
+ struct __sanitizer_kernel_sigaction_t {
+ unsigned int sa_flags;
+ union {
+ void (*handler)(int signo);
+ void (*sigaction)(int signo, __sanitizer_siginfo *info, void *ctx);
+ };
+ __sanitizer_kernel_sigset_t sa_mask;
+ void (*sa_restorer)(void);
+ };
+#else
+ struct __sanitizer_kernel_sigaction_t {
+ union {
+ void (*handler)(int signo);
+ void (*sigaction)(int signo, __sanitizer_siginfo *info, void *ctx);
+ };
+ unsigned long sa_flags;
+ void (*sa_restorer)(void);
+ __sanitizer_kernel_sigset_t sa_mask;
+ };
+#endif
+
+ extern const uptr sig_ign;
+ extern const uptr sig_dfl;
+ extern const uptr sig_err;
+ extern const uptr sa_siginfo;
+
+#if SANITIZER_LINUX
+ extern int e_tabsz;
+#endif
+
+ extern int af_inet;
+ extern int af_inet6;
+ uptr __sanitizer_in_addr_sz(int af);
+
+#if SANITIZER_LINUX
+ struct __sanitizer_dl_phdr_info {
+ uptr dlpi_addr;
+ const char *dlpi_name;
+ const void *dlpi_phdr;
+ short dlpi_phnum;
+ };
+
+ extern unsigned struct_ElfW_Phdr_sz;
+#endif
+
+ struct __sanitizer_addrinfo {
+ int ai_flags;
+ int ai_family;
+ int ai_socktype;
+ int ai_protocol;
+#if SANITIZER_ANDROID || SANITIZER_MAC
+ unsigned ai_addrlen;
+ char *ai_canonname;
+ void *ai_addr;
+#else // LINUX
+ unsigned ai_addrlen;
+ void *ai_addr;
+ char *ai_canonname;
+#endif
+ struct __sanitizer_addrinfo *ai_next;
+ };
+
+ struct __sanitizer_hostent {
+ char *h_name;
+ char **h_aliases;
+ int h_addrtype;
+ int h_length;
+ char **h_addr_list;
+ };
+
+ struct __sanitizer_pollfd {
+ int fd;
+ short events;
+ short revents;
+ };
+
+#if SANITIZER_ANDROID || SANITIZER_MAC
+ typedef unsigned __sanitizer_nfds_t;
+#else
+ typedef unsigned long __sanitizer_nfds_t;
+#endif
+
+#if !SANITIZER_ANDROID
+# if SANITIZER_LINUX
+ struct __sanitizer_glob_t {
+ uptr gl_pathc;
+ char **gl_pathv;
+ uptr gl_offs;
+ int gl_flags;
+
+ void (*gl_closedir)(void *dirp);
+ void *(*gl_readdir)(void *dirp);
+ void *(*gl_opendir)(const char *);
+ int (*gl_lstat)(const char *, void *);
+ int (*gl_stat)(const char *, void *);
+ };
+# endif // SANITIZER_LINUX
+
+# if SANITIZER_LINUX
+ extern int glob_nomatch;
+ extern int glob_altdirfunc;
+# endif
+#endif // !SANITIZER_ANDROID
+
+ extern unsigned path_max;
+
+ struct __sanitizer_wordexp_t {
+ uptr we_wordc;
+ char **we_wordv;
+ uptr we_offs;
+ };
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+ struct __sanitizer_FILE {
+ int _flags;
+ char *_IO_read_ptr;
+ char *_IO_read_end;
+ char *_IO_read_base;
+ char *_IO_write_base;
+ char *_IO_write_ptr;
+ char *_IO_write_end;
+ char *_IO_buf_base;
+ char *_IO_buf_end;
+ char *_IO_save_base;
+ char *_IO_backup_base;
+ char *_IO_save_end;
+ void *_markers;
+ __sanitizer_FILE *_chain;
+ int _fileno;
+ };
+# define SANITIZER_HAS_STRUCT_FILE 1
+#else
+ typedef void __sanitizer_FILE;
+# define SANITIZER_HAS_STRUCT_FILE 0
+#endif
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID && \
+ (defined(__i386) || defined(__x86_64) || defined(__mips64) || \
+ defined(__powerpc64__) || defined(__aarch64__) || defined(__arm__) || \
+ defined(__s390__))
+ extern unsigned struct_user_regs_struct_sz;
+ extern unsigned struct_user_fpregs_struct_sz;
+ extern unsigned struct_user_fpxregs_struct_sz;
+ extern unsigned struct_user_vfpregs_struct_sz;
+
+ extern int ptrace_peektext;
+ extern int ptrace_peekdata;
+ extern int ptrace_peekuser;
+ extern int ptrace_getregs;
+ extern int ptrace_setregs;
+ extern int ptrace_getfpregs;
+ extern int ptrace_setfpregs;
+ extern int ptrace_getfpxregs;
+ extern int ptrace_setfpxregs;
+ extern int ptrace_getvfpregs;
+ extern int ptrace_setvfpregs;
+ extern int ptrace_getsiginfo;
+ extern int ptrace_setsiginfo;
+ extern int ptrace_getregset;
+ extern int ptrace_setregset;
+ extern int ptrace_geteventmsg;
+#endif
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+ extern unsigned struct_shminfo_sz;
+ extern unsigned struct_shm_info_sz;
+ extern int shmctl_ipc_stat;
+ extern int shmctl_ipc_info;
+ extern int shmctl_shm_info;
+ extern int shmctl_shm_stat;
+#endif
+
+#if !SANITIZER_MAC && !SANITIZER_FREEBSD
+ extern unsigned struct_utmp_sz;
+#endif
+#if !SANITIZER_ANDROID
+ extern unsigned struct_utmpx_sz;
+#endif
+
+ extern int map_fixed;
+
+ // ioctl arguments
+ struct __sanitizer_ifconf {
+ int ifc_len;
+ union {
+ void *ifcu_req;
+ } ifc_ifcu;
+#if SANITIZER_MAC
+ } __attribute__((packed));
+#else
+ };
+#endif
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+struct __sanitizer__obstack_chunk {
+ char *limit;
+ struct __sanitizer__obstack_chunk *prev;
+};
+
+struct __sanitizer_obstack {
+ long chunk_size;
+ struct __sanitizer__obstack_chunk *chunk;
+ char *object_base;
+ char *next_free;
+ uptr more_fields[7];
+};
+
+typedef uptr (*__sanitizer_cookie_io_read)(void *cookie, char *buf, uptr size);
+typedef uptr (*__sanitizer_cookie_io_write)(void *cookie, const char *buf,
+ uptr size);
+typedef int (*__sanitizer_cookie_io_seek)(void *cookie, u64 *offset,
+ int whence);
+typedef int (*__sanitizer_cookie_io_close)(void *cookie);
+
+struct __sanitizer_cookie_io_functions_t {
+ __sanitizer_cookie_io_read read;
+ __sanitizer_cookie_io_write write;
+ __sanitizer_cookie_io_seek seek;
+ __sanitizer_cookie_io_close close;
+};
+#endif
+
+#define IOC_NRBITS 8
+#define IOC_TYPEBITS 8
+#if defined(__powerpc__) || defined(__powerpc64__) || defined(__mips__) || \
+ defined(__sparc__)
+#define IOC_SIZEBITS 13
+#define IOC_DIRBITS 3
+#define IOC_NONE 1U
+#define IOC_WRITE 4U
+#define IOC_READ 2U
+#else
+#define IOC_SIZEBITS 14
+#define IOC_DIRBITS 2
+#define IOC_NONE 0U
+#define IOC_WRITE 1U
+#define IOC_READ 2U
+#endif
+#define IOC_NRMASK ((1 << IOC_NRBITS) - 1)
+#define IOC_TYPEMASK ((1 << IOC_TYPEBITS) - 1)
+#define IOC_SIZEMASK ((1 << IOC_SIZEBITS) - 1)
+#if defined(IOC_DIRMASK)
+#undef IOC_DIRMASK
+#endif
+#define IOC_DIRMASK ((1 << IOC_DIRBITS) - 1)
+#define IOC_NRSHIFT 0
+#define IOC_TYPESHIFT (IOC_NRSHIFT + IOC_NRBITS)
+#define IOC_SIZESHIFT (IOC_TYPESHIFT + IOC_TYPEBITS)
+#define IOC_DIRSHIFT (IOC_SIZESHIFT + IOC_SIZEBITS)
+#define EVIOC_EV_MAX 0x1f
+#define EVIOC_ABS_MAX 0x3f
+
+#define IOC_DIR(nr) (((nr) >> IOC_DIRSHIFT) & IOC_DIRMASK)
+#define IOC_TYPE(nr) (((nr) >> IOC_TYPESHIFT) & IOC_TYPEMASK)
+#define IOC_NR(nr) (((nr) >> IOC_NRSHIFT) & IOC_NRMASK)
+
+#if defined(__sparc__)
+// On sparc, the 14-bit SIZE field overlaps the least significant bit of
+// DIR, so either IOC_READ or IOC_WRITE must be set in order to get a
+// non-zero SIZE.
+#define IOC_SIZE(nr) \
+ ((((((nr) >> 29) & 0x7) & (4U | 2U)) == 0) ? 0 : (((nr) >> 16) & 0x3fff))
+#else
+#define IOC_SIZE(nr) (((nr) >> IOC_SIZESHIFT) & IOC_SIZEMASK)
+#endif
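+
+// Illustrative sketch (not part of the upstream header, and assuming the
+// generic non-sparc layout above): a request number is four packed bit
+// fields, so the accessor macros recover each field by shifting and
+// masking. The constant below is hypothetical, not a real ioctl, and the
+// block is kept under #if 0 so it never affects compilation.
+#if 0
+constexpr unsigned kExampleReq = (IOC_READ << IOC_DIRSHIFT) |
+                                 (8u << IOC_SIZESHIFT) |
+                                 ('T' << IOC_TYPESHIFT) |
+                                 (0x12u << IOC_NRSHIFT);
+COMPILER_CHECK(IOC_DIR(kExampleReq) == IOC_READ); // direction: read
+COMPILER_CHECK(IOC_TYPE(kExampleReq) == 'T');     // driver type byte
+COMPILER_CHECK(IOC_NR(kExampleReq) == 0x12);      // command number
+COMPILER_CHECK(IOC_SIZE(kExampleReq) == 8);       // argument size in bytes
+#endif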
+
+ extern unsigned struct_ifreq_sz;
+ extern unsigned struct_termios_sz;
+ extern unsigned struct_winsize_sz;
+
+#if SANITIZER_LINUX
+ extern unsigned struct_arpreq_sz;
+ extern unsigned struct_cdrom_msf_sz;
+ extern unsigned struct_cdrom_multisession_sz;
+ extern unsigned struct_cdrom_read_audio_sz;
+ extern unsigned struct_cdrom_subchnl_sz;
+ extern unsigned struct_cdrom_ti_sz;
+ extern unsigned struct_cdrom_tocentry_sz;
+ extern unsigned struct_cdrom_tochdr_sz;
+ extern unsigned struct_cdrom_volctrl_sz;
+ extern unsigned struct_ff_effect_sz;
+ extern unsigned struct_floppy_drive_params_sz;
+ extern unsigned struct_floppy_drive_struct_sz;
+ extern unsigned struct_floppy_fdc_state_sz;
+ extern unsigned struct_floppy_max_errors_sz;
+ extern unsigned struct_floppy_raw_cmd_sz;
+ extern unsigned struct_floppy_struct_sz;
+ extern unsigned struct_floppy_write_errors_sz;
+ extern unsigned struct_format_descr_sz;
+ extern unsigned struct_hd_driveid_sz;
+ extern unsigned struct_hd_geometry_sz;
+ extern unsigned struct_input_absinfo_sz;
+ extern unsigned struct_input_id_sz;
+ extern unsigned struct_mtpos_sz;
+ extern unsigned struct_termio_sz;
+ extern unsigned struct_vt_consize_sz;
+ extern unsigned struct_vt_sizes_sz;
+ extern unsigned struct_vt_stat_sz;
+#endif // SANITIZER_LINUX
+
+#if SANITIZER_LINUX
+ extern unsigned struct_copr_buffer_sz;
+ extern unsigned struct_copr_debug_buf_sz;
+ extern unsigned struct_copr_msg_sz;
+ extern unsigned struct_midi_info_sz;
+ extern unsigned struct_mtget_sz;
+ extern unsigned struct_mtop_sz;
+ extern unsigned struct_rtentry_sz;
+ extern unsigned struct_sbi_instrument_sz;
+ extern unsigned struct_seq_event_rec_sz;
+ extern unsigned struct_synth_info_sz;
+ extern unsigned struct_vt_mode_sz;
+#endif // SANITIZER_LINUX
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+ extern unsigned struct_ax25_parms_struct_sz;
+ extern unsigned struct_cyclades_monitor_sz;
+ extern unsigned struct_input_keymap_entry_sz;
+ extern unsigned struct_ipx_config_data_sz;
+ extern unsigned struct_kbdiacrs_sz;
+ extern unsigned struct_kbentry_sz;
+ extern unsigned struct_kbkeycode_sz;
+ extern unsigned struct_kbsentry_sz;
+ extern unsigned struct_mtconfiginfo_sz;
+ extern unsigned struct_nr_parms_struct_sz;
+ extern unsigned struct_scc_modem_sz;
+ extern unsigned struct_scc_stat_sz;
+ extern unsigned struct_serial_multiport_struct_sz;
+ extern unsigned struct_serial_struct_sz;
+ extern unsigned struct_sockaddr_ax25_sz;
+ extern unsigned struct_unimapdesc_sz;
+ extern unsigned struct_unimapinit_sz;
+#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
+
+ extern const unsigned long __sanitizer_bufsiz;
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+ extern unsigned struct_audio_buf_info_sz;
+ extern unsigned struct_ppp_stats_sz;
+#endif // SANITIZER_LINUX && !SANITIZER_ANDROID
+
+#if !SANITIZER_ANDROID && !SANITIZER_MAC
+ extern unsigned struct_sioc_sg_req_sz;
+ extern unsigned struct_sioc_vif_req_sz;
+#endif
+
+ // ioctl request identifiers
+
+  // A special value marking ioctls that are not present on the target
+  // platform, used when this cannot be determined without including system
+  // headers.
+ extern const unsigned IOCTL_NOT_PRESENT;
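+  // Illustrative note (an assumption, not upstream text): code walking an
+  // ioctl table can simply skip entries carrying this marker, e.g.
+  //   if (desc->req == IOCTL_NOT_PRESENT) continue;  // hypothetical loop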
+
+ extern unsigned IOCTL_FIOASYNC;
+ extern unsigned IOCTL_FIOCLEX;
+ extern unsigned IOCTL_FIOGETOWN;
+ extern unsigned IOCTL_FIONBIO;
+ extern unsigned IOCTL_FIONCLEX;
+ extern unsigned IOCTL_FIOSETOWN;
+ extern unsigned IOCTL_SIOCADDMULTI;
+ extern unsigned IOCTL_SIOCATMARK;
+ extern unsigned IOCTL_SIOCDELMULTI;
+ extern unsigned IOCTL_SIOCGIFADDR;
+ extern unsigned IOCTL_SIOCGIFBRDADDR;
+ extern unsigned IOCTL_SIOCGIFCONF;
+ extern unsigned IOCTL_SIOCGIFDSTADDR;
+ extern unsigned IOCTL_SIOCGIFFLAGS;
+ extern unsigned IOCTL_SIOCGIFMETRIC;
+ extern unsigned IOCTL_SIOCGIFMTU;
+ extern unsigned IOCTL_SIOCGIFNETMASK;
+ extern unsigned IOCTL_SIOCGPGRP;
+ extern unsigned IOCTL_SIOCSIFADDR;
+ extern unsigned IOCTL_SIOCSIFBRDADDR;
+ extern unsigned IOCTL_SIOCSIFDSTADDR;
+ extern unsigned IOCTL_SIOCSIFFLAGS;
+ extern unsigned IOCTL_SIOCSIFMETRIC;
+ extern unsigned IOCTL_SIOCSIFMTU;
+ extern unsigned IOCTL_SIOCSIFNETMASK;
+ extern unsigned IOCTL_SIOCSPGRP;
+ extern unsigned IOCTL_TIOCCONS;
+ extern unsigned IOCTL_TIOCEXCL;
+ extern unsigned IOCTL_TIOCGETD;
+ extern unsigned IOCTL_TIOCGPGRP;
+ extern unsigned IOCTL_TIOCGWINSZ;
+ extern unsigned IOCTL_TIOCMBIC;
+ extern unsigned IOCTL_TIOCMBIS;
+ extern unsigned IOCTL_TIOCMGET;
+ extern unsigned IOCTL_TIOCMSET;
+ extern unsigned IOCTL_TIOCNOTTY;
+ extern unsigned IOCTL_TIOCNXCL;
+ extern unsigned IOCTL_TIOCOUTQ;
+ extern unsigned IOCTL_TIOCPKT;
+ extern unsigned IOCTL_TIOCSCTTY;
+ extern unsigned IOCTL_TIOCSETD;
+ extern unsigned IOCTL_TIOCSPGRP;
+ extern unsigned IOCTL_TIOCSTI;
+ extern unsigned IOCTL_TIOCSWINSZ;
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+ extern unsigned IOCTL_SIOCGETSGCNT;
+ extern unsigned IOCTL_SIOCGETVIFCNT;
+#endif
+#if SANITIZER_LINUX
+ extern unsigned IOCTL_EVIOCGABS;
+ extern unsigned IOCTL_EVIOCGBIT;
+ extern unsigned IOCTL_EVIOCGEFFECTS;
+ extern unsigned IOCTL_EVIOCGID;
+ extern unsigned IOCTL_EVIOCGKEY;
+ extern unsigned IOCTL_EVIOCGKEYCODE;
+ extern unsigned IOCTL_EVIOCGLED;
+ extern unsigned IOCTL_EVIOCGNAME;
+ extern unsigned IOCTL_EVIOCGPHYS;
+ extern unsigned IOCTL_EVIOCGRAB;
+ extern unsigned IOCTL_EVIOCGREP;
+ extern unsigned IOCTL_EVIOCGSND;
+ extern unsigned IOCTL_EVIOCGSW;
+ extern unsigned IOCTL_EVIOCGUNIQ;
+ extern unsigned IOCTL_EVIOCGVERSION;
+ extern unsigned IOCTL_EVIOCRMFF;
+ extern unsigned IOCTL_EVIOCSABS;
+ extern unsigned IOCTL_EVIOCSFF;
+ extern unsigned IOCTL_EVIOCSKEYCODE;
+ extern unsigned IOCTL_EVIOCSREP;
+ extern unsigned IOCTL_BLKFLSBUF;
+ extern unsigned IOCTL_BLKGETSIZE;
+ extern unsigned IOCTL_BLKRAGET;
+ extern unsigned IOCTL_BLKRASET;
+ extern unsigned IOCTL_BLKROGET;
+ extern unsigned IOCTL_BLKROSET;
+ extern unsigned IOCTL_BLKRRPART;
+ extern unsigned IOCTL_CDROMAUDIOBUFSIZ;
+ extern unsigned IOCTL_CDROMEJECT;
+ extern unsigned IOCTL_CDROMEJECT_SW;
+ extern unsigned IOCTL_CDROMMULTISESSION;
+ extern unsigned IOCTL_CDROMPAUSE;
+ extern unsigned IOCTL_CDROMPLAYMSF;
+ extern unsigned IOCTL_CDROMPLAYTRKIND;
+ extern unsigned IOCTL_CDROMREADAUDIO;
+ extern unsigned IOCTL_CDROMREADCOOKED;
+ extern unsigned IOCTL_CDROMREADMODE1;
+ extern unsigned IOCTL_CDROMREADMODE2;
+ extern unsigned IOCTL_CDROMREADRAW;
+ extern unsigned IOCTL_CDROMREADTOCENTRY;
+ extern unsigned IOCTL_CDROMREADTOCHDR;
+ extern unsigned IOCTL_CDROMRESET;
+ extern unsigned IOCTL_CDROMRESUME;
+ extern unsigned IOCTL_CDROMSEEK;
+ extern unsigned IOCTL_CDROMSTART;
+ extern unsigned IOCTL_CDROMSTOP;
+ extern unsigned IOCTL_CDROMSUBCHNL;
+ extern unsigned IOCTL_CDROMVOLCTRL;
+ extern unsigned IOCTL_CDROMVOLREAD;
+ extern unsigned IOCTL_CDROM_GET_UPC;
+ extern unsigned IOCTL_FDCLRPRM;
+ extern unsigned IOCTL_FDDEFPRM;
+ extern unsigned IOCTL_FDFLUSH;
+ extern unsigned IOCTL_FDFMTBEG;
+ extern unsigned IOCTL_FDFMTEND;
+ extern unsigned IOCTL_FDFMTTRK;
+ extern unsigned IOCTL_FDGETDRVPRM;
+ extern unsigned IOCTL_FDGETDRVSTAT;
+ extern unsigned IOCTL_FDGETDRVTYP;
+ extern unsigned IOCTL_FDGETFDCSTAT;
+ extern unsigned IOCTL_FDGETMAXERRS;
+ extern unsigned IOCTL_FDGETPRM;
+ extern unsigned IOCTL_FDMSGOFF;
+ extern unsigned IOCTL_FDMSGON;
+ extern unsigned IOCTL_FDPOLLDRVSTAT;
+ extern unsigned IOCTL_FDRAWCMD;
+ extern unsigned IOCTL_FDRESET;
+ extern unsigned IOCTL_FDSETDRVPRM;
+ extern unsigned IOCTL_FDSETEMSGTRESH;
+ extern unsigned IOCTL_FDSETMAXERRS;
+ extern unsigned IOCTL_FDSETPRM;
+ extern unsigned IOCTL_FDTWADDLE;
+ extern unsigned IOCTL_FDWERRORCLR;
+ extern unsigned IOCTL_FDWERRORGET;
+ extern unsigned IOCTL_HDIO_DRIVE_CMD;
+ extern unsigned IOCTL_HDIO_GETGEO;
+ extern unsigned IOCTL_HDIO_GET_32BIT;
+ extern unsigned IOCTL_HDIO_GET_DMA;
+ extern unsigned IOCTL_HDIO_GET_IDENTITY;
+ extern unsigned IOCTL_HDIO_GET_KEEPSETTINGS;
+ extern unsigned IOCTL_HDIO_GET_MULTCOUNT;
+ extern unsigned IOCTL_HDIO_GET_NOWERR;
+ extern unsigned IOCTL_HDIO_GET_UNMASKINTR;
+ extern unsigned IOCTL_HDIO_SET_32BIT;
+ extern unsigned IOCTL_HDIO_SET_DMA;
+ extern unsigned IOCTL_HDIO_SET_KEEPSETTINGS;
+ extern unsigned IOCTL_HDIO_SET_MULTCOUNT;
+ extern unsigned IOCTL_HDIO_SET_NOWERR;
+ extern unsigned IOCTL_HDIO_SET_UNMASKINTR;
+ extern unsigned IOCTL_MTIOCPOS;
+ extern unsigned IOCTL_PPPIOCGASYNCMAP;
+ extern unsigned IOCTL_PPPIOCGDEBUG;
+ extern unsigned IOCTL_PPPIOCGFLAGS;
+ extern unsigned IOCTL_PPPIOCGUNIT;
+ extern unsigned IOCTL_PPPIOCGXASYNCMAP;
+ extern unsigned IOCTL_PPPIOCSASYNCMAP;
+ extern unsigned IOCTL_PPPIOCSDEBUG;
+ extern unsigned IOCTL_PPPIOCSFLAGS;
+ extern unsigned IOCTL_PPPIOCSMAXCID;
+ extern unsigned IOCTL_PPPIOCSMRU;
+ extern unsigned IOCTL_PPPIOCSXASYNCMAP;
+ extern unsigned IOCTL_SIOCDARP;
+ extern unsigned IOCTL_SIOCDRARP;
+ extern unsigned IOCTL_SIOCGARP;
+ extern unsigned IOCTL_SIOCGIFENCAP;
+ extern unsigned IOCTL_SIOCGIFHWADDR;
+ extern unsigned IOCTL_SIOCGIFMAP;
+ extern unsigned IOCTL_SIOCGIFMEM;
+ extern unsigned IOCTL_SIOCGIFNAME;
+ extern unsigned IOCTL_SIOCGIFSLAVE;
+ extern unsigned IOCTL_SIOCGRARP;
+ extern unsigned IOCTL_SIOCGSTAMP;
+ extern unsigned IOCTL_SIOCSARP;
+ extern unsigned IOCTL_SIOCSIFENCAP;
+ extern unsigned IOCTL_SIOCSIFHWADDR;
+ extern unsigned IOCTL_SIOCSIFLINK;
+ extern unsigned IOCTL_SIOCSIFMAP;
+ extern unsigned IOCTL_SIOCSIFMEM;
+ extern unsigned IOCTL_SIOCSIFSLAVE;
+ extern unsigned IOCTL_SIOCSRARP;
+ extern unsigned IOCTL_SNDCTL_COPR_HALT;
+ extern unsigned IOCTL_SNDCTL_COPR_LOAD;
+ extern unsigned IOCTL_SNDCTL_COPR_RCODE;
+ extern unsigned IOCTL_SNDCTL_COPR_RCVMSG;
+ extern unsigned IOCTL_SNDCTL_COPR_RDATA;
+ extern unsigned IOCTL_SNDCTL_COPR_RESET;
+ extern unsigned IOCTL_SNDCTL_COPR_RUN;
+ extern unsigned IOCTL_SNDCTL_COPR_SENDMSG;
+ extern unsigned IOCTL_SNDCTL_COPR_WCODE;
+ extern unsigned IOCTL_SNDCTL_COPR_WDATA;
+ extern unsigned IOCTL_TCFLSH;
+ extern unsigned IOCTL_TCGETA;
+ extern unsigned IOCTL_TCGETS;
+ extern unsigned IOCTL_TCSBRK;
+ extern unsigned IOCTL_TCSBRKP;
+ extern unsigned IOCTL_TCSETA;
+ extern unsigned IOCTL_TCSETAF;
+ extern unsigned IOCTL_TCSETAW;
+ extern unsigned IOCTL_TCSETS;
+ extern unsigned IOCTL_TCSETSF;
+ extern unsigned IOCTL_TCSETSW;
+ extern unsigned IOCTL_TCXONC;
+ extern unsigned IOCTL_TIOCGLCKTRMIOS;
+ extern unsigned IOCTL_TIOCGSOFTCAR;
+ extern unsigned IOCTL_TIOCINQ;
+ extern unsigned IOCTL_TIOCLINUX;
+ extern unsigned IOCTL_TIOCSERCONFIG;
+ extern unsigned IOCTL_TIOCSERGETLSR;
+ extern unsigned IOCTL_TIOCSERGWILD;
+ extern unsigned IOCTL_TIOCSERSWILD;
+ extern unsigned IOCTL_TIOCSLCKTRMIOS;
+ extern unsigned IOCTL_TIOCSSOFTCAR;
+ extern unsigned IOCTL_VT_DISALLOCATE;
+ extern unsigned IOCTL_VT_GETSTATE;
+ extern unsigned IOCTL_VT_RESIZE;
+ extern unsigned IOCTL_VT_RESIZEX;
+ extern unsigned IOCTL_VT_SENDSIG;
+ extern unsigned IOCTL_MTIOCGET;
+ extern unsigned IOCTL_MTIOCTOP;
+ extern unsigned IOCTL_SIOCADDRT;
+ extern unsigned IOCTL_SIOCDELRT;
+ extern unsigned IOCTL_SNDCTL_DSP_GETBLKSIZE;
+ extern unsigned IOCTL_SNDCTL_DSP_GETFMTS;
+ extern unsigned IOCTL_SNDCTL_DSP_NONBLOCK;
+ extern unsigned IOCTL_SNDCTL_DSP_POST;
+ extern unsigned IOCTL_SNDCTL_DSP_RESET;
+ extern unsigned IOCTL_SNDCTL_DSP_SETFMT;
+ extern unsigned IOCTL_SNDCTL_DSP_SETFRAGMENT;
+ extern unsigned IOCTL_SNDCTL_DSP_SPEED;
+ extern unsigned IOCTL_SNDCTL_DSP_STEREO;
+ extern unsigned IOCTL_SNDCTL_DSP_SUBDIVIDE;
+ extern unsigned IOCTL_SNDCTL_DSP_SYNC;
+ extern unsigned IOCTL_SNDCTL_FM_4OP_ENABLE;
+ extern unsigned IOCTL_SNDCTL_FM_LOAD_INSTR;
+ extern unsigned IOCTL_SNDCTL_MIDI_INFO;
+ extern unsigned IOCTL_SNDCTL_MIDI_PRETIME;
+ extern unsigned IOCTL_SNDCTL_SEQ_CTRLRATE;
+ extern unsigned IOCTL_SNDCTL_SEQ_GETINCOUNT;
+ extern unsigned IOCTL_SNDCTL_SEQ_GETOUTCOUNT;
+ extern unsigned IOCTL_SNDCTL_SEQ_NRMIDIS;
+ extern unsigned IOCTL_SNDCTL_SEQ_NRSYNTHS;
+ extern unsigned IOCTL_SNDCTL_SEQ_OUTOFBAND;
+ extern unsigned IOCTL_SNDCTL_SEQ_PANIC;
+ extern unsigned IOCTL_SNDCTL_SEQ_PERCMODE;
+ extern unsigned IOCTL_SNDCTL_SEQ_RESET;
+ extern unsigned IOCTL_SNDCTL_SEQ_RESETSAMPLES;
+ extern unsigned IOCTL_SNDCTL_SEQ_SYNC;
+ extern unsigned IOCTL_SNDCTL_SEQ_TESTMIDI;
+ extern unsigned IOCTL_SNDCTL_SEQ_THRESHOLD;
+ extern unsigned IOCTL_SNDCTL_SYNTH_INFO;
+ extern unsigned IOCTL_SNDCTL_SYNTH_MEMAVL;
+ extern unsigned IOCTL_SNDCTL_TMR_CONTINUE;
+ extern unsigned IOCTL_SNDCTL_TMR_METRONOME;
+ extern unsigned IOCTL_SNDCTL_TMR_SELECT;
+ extern unsigned IOCTL_SNDCTL_TMR_SOURCE;
+ extern unsigned IOCTL_SNDCTL_TMR_START;
+ extern unsigned IOCTL_SNDCTL_TMR_STOP;
+ extern unsigned IOCTL_SNDCTL_TMR_TEMPO;
+ extern unsigned IOCTL_SNDCTL_TMR_TIMEBASE;
+ extern unsigned IOCTL_SOUND_MIXER_READ_ALTPCM;
+ extern unsigned IOCTL_SOUND_MIXER_READ_BASS;
+ extern unsigned IOCTL_SOUND_MIXER_READ_CAPS;
+ extern unsigned IOCTL_SOUND_MIXER_READ_CD;
+ extern unsigned IOCTL_SOUND_MIXER_READ_DEVMASK;
+ extern unsigned IOCTL_SOUND_MIXER_READ_ENHANCE;
+ extern unsigned IOCTL_SOUND_MIXER_READ_IGAIN;
+ extern unsigned IOCTL_SOUND_MIXER_READ_IMIX;
+ extern unsigned IOCTL_SOUND_MIXER_READ_LINE1;
+ extern unsigned IOCTL_SOUND_MIXER_READ_LINE2;
+ extern unsigned IOCTL_SOUND_MIXER_READ_LINE3;
+ extern unsigned IOCTL_SOUND_MIXER_READ_LINE;
+ extern unsigned IOCTL_SOUND_MIXER_READ_LOUD;
+ extern unsigned IOCTL_SOUND_MIXER_READ_MIC;
+ extern unsigned IOCTL_SOUND_MIXER_READ_MUTE;
+ extern unsigned IOCTL_SOUND_MIXER_READ_OGAIN;
+ extern unsigned IOCTL_SOUND_MIXER_READ_PCM;
+ extern unsigned IOCTL_SOUND_MIXER_READ_RECLEV;
+ extern unsigned IOCTL_SOUND_MIXER_READ_RECMASK;
+ extern unsigned IOCTL_SOUND_MIXER_READ_RECSRC;
+ extern unsigned IOCTL_SOUND_MIXER_READ_SPEAKER;
+ extern unsigned IOCTL_SOUND_MIXER_READ_STEREODEVS;
+ extern unsigned IOCTL_SOUND_MIXER_READ_SYNTH;
+ extern unsigned IOCTL_SOUND_MIXER_READ_TREBLE;
+ extern unsigned IOCTL_SOUND_MIXER_READ_VOLUME;
+ extern unsigned IOCTL_SOUND_MIXER_WRITE_ALTPCM;
+ extern unsigned IOCTL_SOUND_MIXER_WRITE_BASS;
+ extern unsigned IOCTL_SOUND_MIXER_WRITE_CD;
+ extern unsigned IOCTL_SOUND_MIXER_WRITE_ENHANCE;
+ extern unsigned IOCTL_SOUND_MIXER_WRITE_IGAIN;
+ extern unsigned IOCTL_SOUND_MIXER_WRITE_IMIX;
+ extern unsigned IOCTL_SOUND_MIXER_WRITE_LINE1;
+ extern unsigned IOCTL_SOUND_MIXER_WRITE_LINE2;
+ extern unsigned IOCTL_SOUND_MIXER_WRITE_LINE3;
+ extern unsigned IOCTL_SOUND_MIXER_WRITE_LINE;
+ extern unsigned IOCTL_SOUND_MIXER_WRITE_LOUD;
+ extern unsigned IOCTL_SOUND_MIXER_WRITE_MIC;
+ extern unsigned IOCTL_SOUND_MIXER_WRITE_MUTE;
+ extern unsigned IOCTL_SOUND_MIXER_WRITE_OGAIN;
+ extern unsigned IOCTL_SOUND_MIXER_WRITE_PCM;
+ extern unsigned IOCTL_SOUND_MIXER_WRITE_RECLEV;
+ extern unsigned IOCTL_SOUND_MIXER_WRITE_RECSRC;
+ extern unsigned IOCTL_SOUND_MIXER_WRITE_SPEAKER;
+ extern unsigned IOCTL_SOUND_MIXER_WRITE_SYNTH;
+ extern unsigned IOCTL_SOUND_MIXER_WRITE_TREBLE;
+ extern unsigned IOCTL_SOUND_MIXER_WRITE_VOLUME;
+ extern unsigned IOCTL_SOUND_PCM_READ_BITS;
+ extern unsigned IOCTL_SOUND_PCM_READ_CHANNELS;
+ extern unsigned IOCTL_SOUND_PCM_READ_FILTER;
+ extern unsigned IOCTL_SOUND_PCM_READ_RATE;
+ extern unsigned IOCTL_SOUND_PCM_WRITE_CHANNELS;
+ extern unsigned IOCTL_SOUND_PCM_WRITE_FILTER;
+ extern unsigned IOCTL_VT_ACTIVATE;
+ extern unsigned IOCTL_VT_GETMODE;
+ extern unsigned IOCTL_VT_OPENQRY;
+ extern unsigned IOCTL_VT_RELDISP;
+ extern unsigned IOCTL_VT_SETMODE;
+ extern unsigned IOCTL_VT_WAITACTIVE;
+#endif // SANITIZER_LINUX
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+ extern unsigned IOCTL_CYGETDEFTHRESH;
+ extern unsigned IOCTL_CYGETDEFTIMEOUT;
+ extern unsigned IOCTL_CYGETMON;
+ extern unsigned IOCTL_CYGETTHRESH;
+ extern unsigned IOCTL_CYGETTIMEOUT;
+ extern unsigned IOCTL_CYSETDEFTHRESH;
+ extern unsigned IOCTL_CYSETDEFTIMEOUT;
+ extern unsigned IOCTL_CYSETTHRESH;
+ extern unsigned IOCTL_CYSETTIMEOUT;
+ extern unsigned IOCTL_EQL_EMANCIPATE;
+ extern unsigned IOCTL_EQL_ENSLAVE;
+ extern unsigned IOCTL_EQL_GETMASTRCFG;
+ extern unsigned IOCTL_EQL_GETSLAVECFG;
+ extern unsigned IOCTL_EQL_SETMASTRCFG;
+ extern unsigned IOCTL_EQL_SETSLAVECFG;
+ extern unsigned IOCTL_EVIOCGKEYCODE_V2;
+ extern unsigned IOCTL_EVIOCGPROP;
+ extern unsigned IOCTL_EVIOCSKEYCODE_V2;
+ extern unsigned IOCTL_FS_IOC_GETFLAGS;
+ extern unsigned IOCTL_FS_IOC_GETVERSION;
+ extern unsigned IOCTL_FS_IOC_SETFLAGS;
+ extern unsigned IOCTL_FS_IOC_SETVERSION;
+ extern unsigned IOCTL_GIO_CMAP;
+ extern unsigned IOCTL_GIO_FONT;
+ extern unsigned IOCTL_GIO_UNIMAP;
+ extern unsigned IOCTL_GIO_UNISCRNMAP;
+ extern unsigned IOCTL_KDADDIO;
+ extern unsigned IOCTL_KDDELIO;
+ extern unsigned IOCTL_KDGETKEYCODE;
+ extern unsigned IOCTL_KDGKBDIACR;
+ extern unsigned IOCTL_KDGKBENT;
+ extern unsigned IOCTL_KDGKBLED;
+ extern unsigned IOCTL_KDGKBMETA;
+ extern unsigned IOCTL_KDGKBSENT;
+ extern unsigned IOCTL_KDMAPDISP;
+ extern unsigned IOCTL_KDSETKEYCODE;
+ extern unsigned IOCTL_KDSIGACCEPT;
+ extern unsigned IOCTL_KDSKBDIACR;
+ extern unsigned IOCTL_KDSKBENT;
+ extern unsigned IOCTL_KDSKBLED;
+ extern unsigned IOCTL_KDSKBMETA;
+ extern unsigned IOCTL_KDSKBSENT;
+ extern unsigned IOCTL_KDUNMAPDISP;
+ extern unsigned IOCTL_LPABORT;
+ extern unsigned IOCTL_LPABORTOPEN;
+ extern unsigned IOCTL_LPCAREFUL;
+ extern unsigned IOCTL_LPCHAR;
+ extern unsigned IOCTL_LPGETIRQ;
+ extern unsigned IOCTL_LPGETSTATUS;
+ extern unsigned IOCTL_LPRESET;
+ extern unsigned IOCTL_LPSETIRQ;
+ extern unsigned IOCTL_LPTIME;
+ extern unsigned IOCTL_LPWAIT;
+ extern unsigned IOCTL_MTIOCGETCONFIG;
+ extern unsigned IOCTL_MTIOCSETCONFIG;
+ extern unsigned IOCTL_PIO_CMAP;
+ extern unsigned IOCTL_PIO_FONT;
+ extern unsigned IOCTL_PIO_UNIMAP;
+ extern unsigned IOCTL_PIO_UNIMAPCLR;
+ extern unsigned IOCTL_PIO_UNISCRNMAP;
+ extern unsigned IOCTL_SCSI_IOCTL_GET_IDLUN;
+ extern unsigned IOCTL_SCSI_IOCTL_PROBE_HOST;
+ extern unsigned IOCTL_SCSI_IOCTL_TAGGED_DISABLE;
+ extern unsigned IOCTL_SCSI_IOCTL_TAGGED_ENABLE;
+ extern unsigned IOCTL_SIOCAIPXITFCRT;
+ extern unsigned IOCTL_SIOCAIPXPRISLT;
+ extern unsigned IOCTL_SIOCAX25ADDUID;
+ extern unsigned IOCTL_SIOCAX25DELUID;
+ extern unsigned IOCTL_SIOCAX25GETPARMS;
+ extern unsigned IOCTL_SIOCAX25GETUID;
+ extern unsigned IOCTL_SIOCAX25NOUID;
+ extern unsigned IOCTL_SIOCAX25SETPARMS;
+ extern unsigned IOCTL_SIOCDEVPLIP;
+ extern unsigned IOCTL_SIOCIPXCFGDATA;
+ extern unsigned IOCTL_SIOCNRDECOBS;
+ extern unsigned IOCTL_SIOCNRGETPARMS;
+ extern unsigned IOCTL_SIOCNRRTCTL;
+ extern unsigned IOCTL_SIOCNRSETPARMS;
+ extern unsigned IOCTL_SNDCTL_DSP_GETISPACE;
+ extern unsigned IOCTL_SNDCTL_DSP_GETOSPACE;
+ extern unsigned IOCTL_TIOCGSERIAL;
+ extern unsigned IOCTL_TIOCSERGETMULTI;
+ extern unsigned IOCTL_TIOCSERSETMULTI;
+ extern unsigned IOCTL_TIOCSSERIAL;
+ extern unsigned IOCTL_GIO_SCRNMAP;
+ extern unsigned IOCTL_KDDISABIO;
+ extern unsigned IOCTL_KDENABIO;
+ extern unsigned IOCTL_KDGETLED;
+ extern unsigned IOCTL_KDGETMODE;
+ extern unsigned IOCTL_KDGKBMODE;
+ extern unsigned IOCTL_KDGKBTYPE;
+ extern unsigned IOCTL_KDMKTONE;
+ extern unsigned IOCTL_KDSETLED;
+ extern unsigned IOCTL_KDSETMODE;
+ extern unsigned IOCTL_KDSKBMODE;
+ extern unsigned IOCTL_KIOCSOUND;
+ extern unsigned IOCTL_PIO_SCRNMAP;
+#endif
+
+ extern const int si_SEGV_MAPERR;
+ extern const int si_SEGV_ACCERR;
+} // namespace __sanitizer
+
+#define CHECK_TYPE_SIZE(TYPE) \
+ COMPILER_CHECK(sizeof(__sanitizer_##TYPE) == sizeof(TYPE))
+
+#define CHECK_SIZE_AND_OFFSET(CLASS, MEMBER) \
+ COMPILER_CHECK(sizeof(((__sanitizer_##CLASS *) NULL)->MEMBER) == \
+ sizeof(((CLASS *) NULL)->MEMBER)); \
+ COMPILER_CHECK(offsetof(__sanitizer_##CLASS, MEMBER) == \
+ offsetof(CLASS, MEMBER))
+
+// For sigaction, which is both a function and a struct, and thus requires
+// an explicit "struct" in sizeof() expressions.
+#define CHECK_STRUCT_SIZE_AND_OFFSET(CLASS, MEMBER) \
+ COMPILER_CHECK(sizeof(((struct __sanitizer_##CLASS *) NULL)->MEMBER) == \
+ sizeof(((struct CLASS *) NULL)->MEMBER)); \
+ COMPILER_CHECK(offsetof(struct __sanitizer_##CLASS, MEMBER) == \
+ offsetof(struct CLASS, MEMBER))
+
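+// Illustrative sketch (not part of the upstream header): how the macros
+// above catch layout drift for a hypothetical system type T mirrored here
+// as __sanitizer_T. Both checks fail at compile time if the shadow struct
+// stops matching the system headers. Kept under #if 0 so it never affects
+// compilation.
+#if 0
+struct T { int a; long b; };
+struct __sanitizer_T { int a; long b; };
+CHECK_TYPE_SIZE(T);           // sizeof(__sanitizer_T) == sizeof(T)
+CHECK_SIZE_AND_OFFSET(T, b);  // member size and offsetof(..., b) must agree
+#endif
+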
+#define SIGACTION_SYMNAME sigaction
+
+#endif // SANITIZER_LINUX || SANITIZER_MAC
+
+#endif
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_platform_limits_solaris.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_platform_limits_solaris.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_platform_limits_solaris.cc (revision 351984)
@@ -0,0 +1,365 @@
+//===-- sanitizer_platform_limits_solaris.cc ------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of Sanitizer common code.
+//
+// Sizes and layouts of platform-specific Solaris data structures.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+
+#if SANITIZER_SOLARIS
+#include <arpa/inet.h>
+#include <dirent.h>
+#include <glob.h>
+#include <grp.h>
+#include <ifaddrs.h>
+#include <limits.h>
+#include <link.h>
+#include <net/if.h>
+#include <net/route.h>
+#include <netdb.h>
+#include <netinet/ip_mroute.h>
+#include <poll.h>
+#include <pthread.h>
+#include <pwd.h>
+#include <rpc/xdr.h>
+#include <semaphore.h>
+#include <signal.h>
+#include <stddef.h>
+#include <sys/ethernet.h>
+#include <sys/filio.h>
+#include <sys/ipc.h>
+#include <sys/mman.h>
+#include <sys/mount.h>
+#include <sys/mtio.h>
+#include <sys/ptyvar.h>
+#include <sys/resource.h>
+#include <sys/shm.h>
+#include <sys/socket.h>
+#include <sys/sockio.h>
+#include <sys/stat.h>
+#include <sys/statfs.h>
+#include <sys/statvfs.h>
+#include <sys/time.h>
+#include <sys/timeb.h>
+#include <sys/times.h>
+#include <sys/types.h>
+#include <sys/utsname.h>
+#include <termios.h>
+#include <time.h>
+#include <utmp.h>
+#include <utmpx.h>
+#include <wchar.h>
+#include <wordexp.h>
+
+// Include these after system headers to avoid name clashes and ambiguities.
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_platform_limits_solaris.h"
+
+namespace __sanitizer {
+ unsigned struct_utsname_sz = sizeof(struct utsname);
+ unsigned struct_stat_sz = sizeof(struct stat);
+ unsigned struct_stat64_sz = sizeof(struct stat64);
+ unsigned struct_rusage_sz = sizeof(struct rusage);
+ unsigned struct_tm_sz = sizeof(struct tm);
+ unsigned struct_passwd_sz = sizeof(struct passwd);
+ unsigned struct_group_sz = sizeof(struct group);
+ unsigned siginfo_t_sz = sizeof(siginfo_t);
+ unsigned struct_sigaction_sz = sizeof(struct sigaction);
+ unsigned struct_itimerval_sz = sizeof(struct itimerval);
+ unsigned pthread_t_sz = sizeof(pthread_t);
+ unsigned pthread_mutex_t_sz = sizeof(pthread_mutex_t);
+ unsigned pthread_cond_t_sz = sizeof(pthread_cond_t);
+ unsigned pid_t_sz = sizeof(pid_t);
+ unsigned timeval_sz = sizeof(timeval);
+ unsigned uid_t_sz = sizeof(uid_t);
+ unsigned gid_t_sz = sizeof(gid_t);
+ unsigned mbstate_t_sz = sizeof(mbstate_t);
+ unsigned sigset_t_sz = sizeof(sigset_t);
+ unsigned struct_timezone_sz = sizeof(struct timezone);
+ unsigned struct_tms_sz = sizeof(struct tms);
+ unsigned struct_sigevent_sz = sizeof(struct sigevent);
+ unsigned struct_sched_param_sz = sizeof(struct sched_param);
+ unsigned struct_statfs_sz = sizeof(struct statfs);
+ unsigned struct_sockaddr_sz = sizeof(struct sockaddr);
+ unsigned ucontext_t_sz = sizeof(ucontext_t);
+ unsigned struct_timespec_sz = sizeof(struct timespec);
+#if SANITIZER_SOLARIS32
+ unsigned struct_statvfs64_sz = sizeof(struct statvfs64);
+#endif
+ unsigned struct_statvfs_sz = sizeof(struct statvfs);
+
+ const uptr sig_ign = (uptr)SIG_IGN;
+ const uptr sig_dfl = (uptr)SIG_DFL;
+ const uptr sig_err = (uptr)SIG_ERR;
+ const uptr sa_siginfo = (uptr)SA_SIGINFO;
+
+ int shmctl_ipc_stat = (int)IPC_STAT;
+
+ unsigned struct_utmp_sz = sizeof(struct utmp);
+ unsigned struct_utmpx_sz = sizeof(struct utmpx);
+
+ int map_fixed = MAP_FIXED;
+
+ int af_inet = (int)AF_INET;
+ int af_inet6 = (int)AF_INET6;
+
+ uptr __sanitizer_in_addr_sz(int af) {
+ if (af == AF_INET)
+ return sizeof(struct in_addr);
+ else if (af == AF_INET6)
+ return sizeof(struct in6_addr);
+ else
+ return 0;
+ }
+
+ unsigned struct_ElfW_Phdr_sz = sizeof(ElfW(Phdr));
+
+ int glob_nomatch = GLOB_NOMATCH;
+
+ unsigned path_max = PATH_MAX;
+
+ // ioctl arguments
+ unsigned struct_ifreq_sz = sizeof(struct ifreq);
+ unsigned struct_termios_sz = sizeof(struct termios);
+ unsigned struct_winsize_sz = sizeof(struct winsize);
+
+ unsigned struct_sioc_sg_req_sz = sizeof(struct sioc_sg_req);
+ unsigned struct_sioc_vif_req_sz = sizeof(struct sioc_vif_req);
+
+ const unsigned IOCTL_NOT_PRESENT = 0;
+
+ unsigned IOCTL_FIOASYNC = FIOASYNC;
+ unsigned IOCTL_FIOCLEX = FIOCLEX;
+ unsigned IOCTL_FIOGETOWN = FIOGETOWN;
+ unsigned IOCTL_FIONBIO = FIONBIO;
+ unsigned IOCTL_FIONCLEX = FIONCLEX;
+ unsigned IOCTL_FIOSETOWN = FIOSETOWN;
+ unsigned IOCTL_SIOCADDMULTI = SIOCADDMULTI;
+ unsigned IOCTL_SIOCATMARK = SIOCATMARK;
+ unsigned IOCTL_SIOCDELMULTI = SIOCDELMULTI;
+ unsigned IOCTL_SIOCGIFADDR = SIOCGIFADDR;
+ unsigned IOCTL_SIOCGIFBRDADDR = SIOCGIFBRDADDR;
+ unsigned IOCTL_SIOCGIFCONF = SIOCGIFCONF;
+ unsigned IOCTL_SIOCGIFDSTADDR = SIOCGIFDSTADDR;
+ unsigned IOCTL_SIOCGIFFLAGS = SIOCGIFFLAGS;
+ unsigned IOCTL_SIOCGIFMETRIC = SIOCGIFMETRIC;
+ unsigned IOCTL_SIOCGIFMTU = SIOCGIFMTU;
+ unsigned IOCTL_SIOCGIFNETMASK = SIOCGIFNETMASK;
+ unsigned IOCTL_SIOCGPGRP = SIOCGPGRP;
+ unsigned IOCTL_SIOCSIFADDR = SIOCSIFADDR;
+ unsigned IOCTL_SIOCSIFBRDADDR = SIOCSIFBRDADDR;
+ unsigned IOCTL_SIOCSIFDSTADDR = SIOCSIFDSTADDR;
+ unsigned IOCTL_SIOCSIFFLAGS = SIOCSIFFLAGS;
+ unsigned IOCTL_SIOCSIFMETRIC = SIOCSIFMETRIC;
+ unsigned IOCTL_SIOCSIFMTU = SIOCSIFMTU;
+ unsigned IOCTL_SIOCSIFNETMASK = SIOCSIFNETMASK;
+ unsigned IOCTL_SIOCSPGRP = SIOCSPGRP;
+ unsigned IOCTL_TIOCEXCL = TIOCEXCL;
+ unsigned IOCTL_TIOCGETD = TIOCGETD;
+ unsigned IOCTL_TIOCGPGRP = TIOCGPGRP;
+ unsigned IOCTL_TIOCGWINSZ = TIOCGWINSZ;
+ unsigned IOCTL_TIOCMBIC = TIOCMBIC;
+ unsigned IOCTL_TIOCMBIS = TIOCMBIS;
+ unsigned IOCTL_TIOCMGET = TIOCMGET;
+ unsigned IOCTL_TIOCMSET = TIOCMSET;
+ unsigned IOCTL_TIOCNOTTY = TIOCNOTTY;
+ unsigned IOCTL_TIOCNXCL = TIOCNXCL;
+ unsigned IOCTL_TIOCOUTQ = TIOCOUTQ;
+ unsigned IOCTL_TIOCPKT = TIOCPKT;
+ unsigned IOCTL_TIOCSCTTY = TIOCSCTTY;
+ unsigned IOCTL_TIOCSETD = TIOCSETD;
+ unsigned IOCTL_TIOCSPGRP = TIOCSPGRP;
+ unsigned IOCTL_TIOCSTI = TIOCSTI;
+ unsigned IOCTL_TIOCSWINSZ = TIOCSWINSZ;
+
+ unsigned IOCTL_MTIOCGET = MTIOCGET;
+ unsigned IOCTL_MTIOCTOP = MTIOCTOP;
+
+ const int si_SEGV_MAPERR = SEGV_MAPERR;
+ const int si_SEGV_ACCERR = SEGV_ACCERR;
+} // namespace __sanitizer
+
+using namespace __sanitizer;
+
+COMPILER_CHECK(sizeof(__sanitizer_pthread_attr_t) >= sizeof(pthread_attr_t));
+
+COMPILER_CHECK(sizeof(socklen_t) == sizeof(unsigned));
+CHECK_TYPE_SIZE(pthread_key_t);
+
+// There are more undocumented fields in dl_phdr_info that we are not interested
+// in.
+COMPILER_CHECK(sizeof(__sanitizer_dl_phdr_info) <= sizeof(dl_phdr_info));
+CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_addr);
+CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_name);
+CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_phdr);
+CHECK_SIZE_AND_OFFSET(dl_phdr_info, dlpi_phnum);
+
+CHECK_TYPE_SIZE(glob_t);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_pathc);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_pathv);
+CHECK_SIZE_AND_OFFSET(glob_t, gl_offs);
+
+CHECK_TYPE_SIZE(addrinfo);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_flags);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_family);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_socktype);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_protocol);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_addrlen);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_canonname);
+CHECK_SIZE_AND_OFFSET(addrinfo, ai_addr);
+
+CHECK_TYPE_SIZE(hostent);
+CHECK_SIZE_AND_OFFSET(hostent, h_name);
+CHECK_SIZE_AND_OFFSET(hostent, h_aliases);
+CHECK_SIZE_AND_OFFSET(hostent, h_addrtype);
+CHECK_SIZE_AND_OFFSET(hostent, h_length);
+CHECK_SIZE_AND_OFFSET(hostent, h_addr_list);
+
+CHECK_TYPE_SIZE(iovec);
+CHECK_SIZE_AND_OFFSET(iovec, iov_base);
+CHECK_SIZE_AND_OFFSET(iovec, iov_len);
+
+CHECK_TYPE_SIZE(msghdr);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_name);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_namelen);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_iov);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_iovlen);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_control);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_controllen);
+CHECK_SIZE_AND_OFFSET(msghdr, msg_flags);
+
+CHECK_TYPE_SIZE(cmsghdr);
+CHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_len);
+CHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_level);
+CHECK_SIZE_AND_OFFSET(cmsghdr, cmsg_type);
+
+COMPILER_CHECK(sizeof(__sanitizer_dirent) <= sizeof(dirent));
+CHECK_SIZE_AND_OFFSET(dirent, d_ino);
+CHECK_SIZE_AND_OFFSET(dirent, d_off);
+CHECK_SIZE_AND_OFFSET(dirent, d_reclen);
+
+#if SANITIZER_SOLARIS32
+COMPILER_CHECK(sizeof(__sanitizer_dirent64) <= sizeof(dirent64));
+CHECK_SIZE_AND_OFFSET(dirent64, d_ino);
+CHECK_SIZE_AND_OFFSET(dirent64, d_off);
+CHECK_SIZE_AND_OFFSET(dirent64, d_reclen);
+#endif
+
+CHECK_TYPE_SIZE(ifconf);
+CHECK_SIZE_AND_OFFSET(ifconf, ifc_len);
+CHECK_SIZE_AND_OFFSET(ifconf, ifc_ifcu);
+
+CHECK_TYPE_SIZE(pollfd);
+CHECK_SIZE_AND_OFFSET(pollfd, fd);
+CHECK_SIZE_AND_OFFSET(pollfd, events);
+CHECK_SIZE_AND_OFFSET(pollfd, revents);
+
+CHECK_TYPE_SIZE(nfds_t);
+
+CHECK_TYPE_SIZE(sigset_t);
+
+COMPILER_CHECK(sizeof(__sanitizer_sigaction) == sizeof(struct sigaction));
+// Can't write checks for sa_handler and sa_sigaction because they are
+// preprocessor macros.
+CHECK_STRUCT_SIZE_AND_OFFSET(sigaction, sa_mask);
+CHECK_STRUCT_SIZE_AND_OFFSET(sigaction, sa_flags);
+
+CHECK_TYPE_SIZE(wordexp_t);
+CHECK_SIZE_AND_OFFSET(wordexp_t, we_wordc);
+CHECK_SIZE_AND_OFFSET(wordexp_t, we_wordv);
+CHECK_SIZE_AND_OFFSET(wordexp_t, we_offs);
+
+CHECK_TYPE_SIZE(tm);
+CHECK_SIZE_AND_OFFSET(tm, tm_sec);
+CHECK_SIZE_AND_OFFSET(tm, tm_min);
+CHECK_SIZE_AND_OFFSET(tm, tm_hour);
+CHECK_SIZE_AND_OFFSET(tm, tm_mday);
+CHECK_SIZE_AND_OFFSET(tm, tm_mon);
+CHECK_SIZE_AND_OFFSET(tm, tm_year);
+CHECK_SIZE_AND_OFFSET(tm, tm_wday);
+CHECK_SIZE_AND_OFFSET(tm, tm_yday);
+CHECK_SIZE_AND_OFFSET(tm, tm_isdst);
+
+CHECK_TYPE_SIZE(ether_addr);
+
+CHECK_TYPE_SIZE(ipc_perm);
+CHECK_SIZE_AND_OFFSET(ipc_perm, key);
+CHECK_SIZE_AND_OFFSET(ipc_perm, seq);
+CHECK_SIZE_AND_OFFSET(ipc_perm, uid);
+CHECK_SIZE_AND_OFFSET(ipc_perm, gid);
+CHECK_SIZE_AND_OFFSET(ipc_perm, cuid);
+CHECK_SIZE_AND_OFFSET(ipc_perm, cgid);
+CHECK_SIZE_AND_OFFSET(ipc_perm, mode);
+
+CHECK_TYPE_SIZE(shmid_ds);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_perm);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_segsz);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_atime);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_dtime);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_ctime);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_cpid);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_lpid);
+CHECK_SIZE_AND_OFFSET(shmid_ds, shm_nattch);
+
+CHECK_TYPE_SIZE(clock_t);
+
+CHECK_TYPE_SIZE(ifaddrs);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_next);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_name);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_addr);
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_netmask);
+// Compare against the union, because we can't reach into the union in a
+// compliant way.
+#ifdef ifa_dstaddr
+#undef ifa_dstaddr
+#endif
+COMPILER_CHECK(sizeof(((__sanitizer_ifaddrs *)nullptr)->ifa_dstaddr) ==
+ sizeof(((ifaddrs *)nullptr)->ifa_ifu));
+COMPILER_CHECK(offsetof(__sanitizer_ifaddrs, ifa_dstaddr) ==
+ offsetof(ifaddrs, ifa_ifu));
+CHECK_SIZE_AND_OFFSET(ifaddrs, ifa_data);
+
+CHECK_TYPE_SIZE(timeb);
+CHECK_SIZE_AND_OFFSET(timeb, time);
+CHECK_SIZE_AND_OFFSET(timeb, millitm);
+CHECK_SIZE_AND_OFFSET(timeb, timezone);
+CHECK_SIZE_AND_OFFSET(timeb, dstflag);
+
+CHECK_TYPE_SIZE(passwd);
+CHECK_SIZE_AND_OFFSET(passwd, pw_name);
+CHECK_SIZE_AND_OFFSET(passwd, pw_passwd);
+CHECK_SIZE_AND_OFFSET(passwd, pw_uid);
+CHECK_SIZE_AND_OFFSET(passwd, pw_gid);
+CHECK_SIZE_AND_OFFSET(passwd, pw_dir);
+CHECK_SIZE_AND_OFFSET(passwd, pw_shell);
+CHECK_SIZE_AND_OFFSET(passwd, pw_gecos);
+
+CHECK_TYPE_SIZE(group);
+CHECK_SIZE_AND_OFFSET(group, gr_name);
+CHECK_SIZE_AND_OFFSET(group, gr_passwd);
+CHECK_SIZE_AND_OFFSET(group, gr_gid);
+CHECK_SIZE_AND_OFFSET(group, gr_mem);
+
+CHECK_TYPE_SIZE(XDR);
+CHECK_SIZE_AND_OFFSET(XDR, x_op);
+CHECK_SIZE_AND_OFFSET(XDR, x_ops);
+CHECK_SIZE_AND_OFFSET(XDR, x_public);
+CHECK_SIZE_AND_OFFSET(XDR, x_private);
+CHECK_SIZE_AND_OFFSET(XDR, x_base);
+CHECK_SIZE_AND_OFFSET(XDR, x_handy);
+COMPILER_CHECK(__sanitizer_XDR_ENCODE == XDR_ENCODE);
+COMPILER_CHECK(__sanitizer_XDR_DECODE == XDR_DECODE);
+COMPILER_CHECK(__sanitizer_XDR_FREE == XDR_FREE);
+
+CHECK_TYPE_SIZE(sem_t);
+
+#endif // SANITIZER_SOLARIS
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_platform_limits_solaris.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_platform_limits_solaris.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_platform_limits_solaris.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_platform_limits_solaris.h (revision 351984)
@@ -0,0 +1,495 @@
+//===-- sanitizer_platform_limits_solaris.h -------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of Sanitizer common code.
+//
+// Sizes and layouts of platform-specific Solaris data structures.
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_PLATFORM_LIMITS_SOLARIS_H
+#define SANITIZER_PLATFORM_LIMITS_SOLARIS_H
+
+#if SANITIZER_SOLARIS
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_platform.h"
+
+namespace __sanitizer {
+extern unsigned struct_utsname_sz;
+extern unsigned struct_stat_sz;
+extern unsigned struct_stat64_sz;
+extern unsigned struct_rusage_sz;
+extern unsigned siginfo_t_sz;
+extern unsigned struct_itimerval_sz;
+extern unsigned pthread_t_sz;
+extern unsigned pthread_mutex_t_sz;
+extern unsigned pthread_cond_t_sz;
+extern unsigned pid_t_sz;
+extern unsigned timeval_sz;
+extern unsigned uid_t_sz;
+extern unsigned gid_t_sz;
+extern unsigned mbstate_t_sz;
+extern unsigned struct_timezone_sz;
+extern unsigned struct_tms_sz;
+extern unsigned struct_itimerspec_sz;
+extern unsigned struct_sigevent_sz;
+extern unsigned struct_sched_param_sz;
+extern unsigned struct_statfs64_sz;
+extern unsigned struct_statfs_sz;
+extern unsigned struct_sockaddr_sz;
+extern unsigned ucontext_t_sz;
+
+extern unsigned struct_timespec_sz;
+extern unsigned struct_rlimit_sz;
+extern unsigned struct_utimbuf_sz;
+
+struct __sanitizer_sem_t {
+ //u64 data[6];
+ u32 sem_count;
+ u16 sem_type;
+ u16 sem_magic;
+ u64 sem_pad1[3];
+ u64 sem_pad2[2];
+};
+
+struct __sanitizer_ipc_perm {
+ unsigned int uid; // uid_t
+ unsigned int gid; // gid_t
+ unsigned int cuid; // uid_t
+ unsigned int cgid; // gid_t
+ unsigned int mode; // mode_t
+ unsigned int seq; // uint_t
+ int key; // key_t
+#if !defined(_LP64)
+ int pad[4];
+#endif
+ };
+
+struct __sanitizer_shmid_ds {
+ __sanitizer_ipc_perm shm_perm;
+ unsigned long shm_segsz; // size_t
+ unsigned long shm_flags; // uintptr_t
+ unsigned short shm_lkcnt; // ushort_t
+ int shm_lpid; // pid_t
+ int shm_cpid; // pid_t
+ unsigned long shm_nattch; // shmatt_t
+ unsigned long shm_cnattch; // ulong_t
+#if defined(_LP64)
+ long shm_atime; // time_t
+ long shm_dtime;
+ long shm_ctime;
+ void *shm_amp;
+ u64 shm_gransize; // uint64_t
+ u64 shm_allocated; // uint64_t
+ u64 shm_pad4[1]; // int64_t
+#else
+ long shm_atime; // time_t
+ int shm_pad1; // int32_t
+ long shm_dtime; // time_t
+ int shm_pad2; // int32_t
+ long shm_ctime; // time_t
+ void *shm_amp;
+ u64 shm_gransize; // uint64_t
+ u64 shm_allocated; // uint64_t
+#endif
+};
+
+extern unsigned struct_statvfs_sz;
+#if SANITIZER_SOLARIS32
+extern unsigned struct_statvfs64_sz;
+#endif
+
+struct __sanitizer_iovec {
+ void *iov_base;
+ uptr iov_len;
+};
+
+struct __sanitizer_ifaddrs {
+ struct __sanitizer_ifaddrs *ifa_next;
+ char *ifa_name;
+ u64 ifa_flags; // uint64_t
+ void *ifa_addr; // (struct sockaddr *)
+ void *ifa_netmask; // (struct sockaddr *)
+ // This is a union on Linux.
+# ifdef ifa_dstaddr
+# undef ifa_dstaddr
+# endif
+ void *ifa_dstaddr; // (struct sockaddr *)
+ void *ifa_data;
+};
+
+typedef unsigned __sanitizer_pthread_key_t;
+
+struct __sanitizer_XDR {
+ int x_op;
+ void *x_ops;
+ uptr x_public;
+ uptr x_private;
+ uptr x_base;
+ unsigned x_handy;
+};
+
+const int __sanitizer_XDR_ENCODE = 0;
+const int __sanitizer_XDR_DECODE = 1;
+const int __sanitizer_XDR_FREE = 2;
+
+struct __sanitizer_passwd {
+ char *pw_name;
+ char *pw_passwd;
+ unsigned int pw_uid; // uid_t
+ unsigned int pw_gid; // gid_t
+ char *pw_age;
+ char *pw_comment;
+ char *pw_gecos;
+ char *pw_dir;
+ char *pw_shell;
+};
+
+struct __sanitizer_group {
+ char *gr_name;
+ char *gr_passwd;
+ int gr_gid;
+ char **gr_mem;
+};
+
+typedef long __sanitizer_time_t;
+
+typedef long __sanitizer_suseconds_t;
+
+struct __sanitizer_timeval {
+ __sanitizer_time_t tv_sec;
+ __sanitizer_suseconds_t tv_usec;
+};
+
+struct __sanitizer_itimerval {
+ struct __sanitizer_timeval it_interval;
+ struct __sanitizer_timeval it_value;
+};
+
+struct __sanitizer_timeb {
+ __sanitizer_time_t time;
+ unsigned short millitm;
+ short timezone;
+ short dstflag;
+};
+
+struct __sanitizer_ether_addr {
+ u8 octet[6];
+};
+
+struct __sanitizer_tm {
+ int tm_sec;
+ int tm_min;
+ int tm_hour;
+ int tm_mday;
+ int tm_mon;
+ int tm_year;
+ int tm_wday;
+ int tm_yday;
+ int tm_isdst;
+};
+
+struct __sanitizer_msghdr {
+ void *msg_name;
+ unsigned msg_namelen;
+ struct __sanitizer_iovec *msg_iov;
+ unsigned msg_iovlen;
+ void *msg_control;
+ unsigned msg_controllen;
+ int msg_flags;
+};
+struct __sanitizer_cmsghdr {
+ unsigned cmsg_len;
+ int cmsg_level;
+ int cmsg_type;
+};
+
+#if SANITIZER_SOLARIS && (defined(_LP64) || _FILE_OFFSET_BITS == 64)
+struct __sanitizer_dirent {
+ unsigned long long d_ino;
+ long long d_off;
+ unsigned short d_reclen;
+ // more fields that we don't care about
+};
+#else
+struct __sanitizer_dirent {
+ unsigned long d_ino;
+ long d_off;
+ unsigned short d_reclen;
+ // more fields that we don't care about
+};
+#endif
+
+struct __sanitizer_dirent64 {
+ unsigned long long d_ino;
+ unsigned long long d_off;
+ unsigned short d_reclen;
+ // more fields that we don't care about
+};
+
+typedef long __sanitizer_clock_t;
+typedef int __sanitizer_clockid_t;
+
+// The layout of this type is platform-dependent; we only need an upper
+// bound on its size. The bound is verified with a compiler assert in the .cc.
+const int pthread_attr_t_max_sz = 128;
+union __sanitizer_pthread_attr_t {
+ char size[pthread_attr_t_max_sz]; // NOLINT
+ void *align;
+};
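
// Illustrative sketch (not from the patch): the "sized blob + alignment
// member" idiom used for __sanitizer_pthread_attr_t above, reduced to a
// standalone snippet. kAttrMaxSize mirrors pthread_attr_t_max_sz and is
// only an assumed upper bound; the .cc file performs the equivalent of
// this static_assert with COMPILER_CHECK.
#include <pthread.h>

const int kAttrMaxSize = 128;  // assumed upper bound on sizeof(pthread_attr_t)
union opaque_pthread_attr_t {
  char size[kAttrMaxSize];     // opaque storage for the real object
  void *align;                 // forces pointer alignment for the union
};
static_assert(sizeof(opaque_pthread_attr_t) >= sizeof(pthread_attr_t),
              "the assumed upper bound must hold on this platform");
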
+
+struct __sanitizer_sigset_t {
+ // uint32_t * 4
+ unsigned int __bits[4];
+};
+
+struct __sanitizer_siginfo {
+  // The size is determined by looking at sizeof(siginfo_t) on Linux.
+ u64 opaque[128 / sizeof(u64)];
+};
+
+using __sanitizer_sighandler_ptr = void (*)(int sig);
+using __sanitizer_sigactionhandler_ptr =
+ void (*)(int sig, __sanitizer_siginfo *siginfo, void *uctx);
+
+struct __sanitizer_sigaction {
+ int sa_flags;
+ union {
+ __sanitizer_sigactionhandler_ptr sigaction;
+ __sanitizer_sighandler_ptr handler;
+ };
+ __sanitizer_sigset_t sa_mask;
+#if !defined(_LP64)
+ int sa_resv[2];
+#endif
+};
+
+struct __sanitizer_kernel_sigset_t {
+ u8 sig[8];
+};
+
+struct __sanitizer_kernel_sigaction_t {
+ union {
+ void (*handler)(int signo);
+ void (*sigaction)(int signo, __sanitizer_siginfo *info, void *ctx);
+ };
+ unsigned long sa_flags;
+ void (*sa_restorer)(void);
+ __sanitizer_kernel_sigset_t sa_mask;
+};
+
+extern const uptr sig_ign;
+extern const uptr sig_dfl;
+extern const uptr sig_err;
+extern const uptr sa_siginfo;
+
+extern int af_inet;
+extern int af_inet6;
+uptr __sanitizer_in_addr_sz(int af);
+
+struct __sanitizer_dl_phdr_info {
+ uptr dlpi_addr;
+ const char *dlpi_name;
+ const void *dlpi_phdr;
+ short dlpi_phnum;
+};
+
+extern unsigned struct_ElfW_Phdr_sz;
+
+struct __sanitizer_addrinfo {
+ int ai_flags;
+ int ai_family;
+ int ai_socktype;
+ int ai_protocol;
+#if defined(__sparcv9)
+ int _ai_pad;
+#endif
+ unsigned ai_addrlen;
+ char *ai_canonname;
+ void *ai_addr;
+ struct __sanitizer_addrinfo *ai_next;
+};
+
+struct __sanitizer_hostent {
+ char *h_name;
+ char **h_aliases;
+ int h_addrtype;
+ int h_length;
+ char **h_addr_list;
+};
+
+struct __sanitizer_pollfd {
+ int fd;
+ short events;
+ short revents;
+};
+
+typedef unsigned long __sanitizer_nfds_t;
+
+struct __sanitizer_glob_t {
+ uptr gl_pathc;
+ char **gl_pathv;
+ uptr gl_offs;
+ char **gl_pathp;
+ int gl_pathn;
+};
+
+extern int glob_nomatch;
+extern int glob_altdirfunc;
+
+extern unsigned path_max;
+
+struct __sanitizer_wordexp_t {
+ uptr we_wordc;
+ char **we_wordv;
+ uptr we_offs;
+ char **we_wordp;
+ int we_wordn;
+};
+
+typedef void __sanitizer_FILE;
+#define SANITIZER_HAS_STRUCT_FILE 0
+
+// This simplifies generic code.
+#define struct_shminfo_sz -1
+#define struct_shm_info_sz -1
+#define shmctl_shm_stat -1
+#define shmctl_ipc_info -1
+#define shmctl_shm_info -1
+
+extern int shmctl_ipc_stat;
+
+extern unsigned struct_utmp_sz;
+extern unsigned struct_utmpx_sz;
+
+extern int map_fixed;
+
+// ioctl arguments
+struct __sanitizer_ifconf {
+ int ifc_len;
+ union {
+ void *ifcu_req;
+ } ifc_ifcu;
+};
+
+// <sys/ioccom.h>
+#define IOC_NRBITS 8
+#define IOC_TYPEBITS 8
+#define IOC_SIZEBITS 12
+#define IOC_DIRBITS 4
+#undef IOC_NONE
+#define IOC_NONE 2U // IOC_VOID
+#define IOC_READ 4U // IOC_OUT
+#define IOC_WRITE 8U // IOC_IN
+
+#define IOC_NRMASK ((1 << IOC_NRBITS) - 1)
+#define IOC_TYPEMASK ((1 << IOC_TYPEBITS) - 1)
+#define IOC_SIZEMASK ((1 << IOC_SIZEBITS) - 1)
+#define IOC_DIRMASK ((1 << IOC_DIRBITS) - 1)
+#define IOC_NRSHIFT 0
+#define IOC_TYPESHIFT (IOC_NRSHIFT + IOC_NRBITS)
+#define IOC_SIZESHIFT (IOC_TYPESHIFT + IOC_TYPEBITS)
+#define IOC_DIRSHIFT (IOC_SIZESHIFT + IOC_SIZEBITS)
+
+#define IOC_DIR(nr) (((nr) >> IOC_DIRSHIFT) & IOC_DIRMASK)
+#define IOC_TYPE(nr) (((nr) >> IOC_TYPESHIFT) & IOC_TYPEMASK)
+#define IOC_NR(nr) (((nr) >> IOC_NRSHIFT) & IOC_NRMASK)
+
+#if defined(__sparc__)
+// On SPARC, the 14-bit SIZE field overlaps with the least significant
+// bit of DIR, so either IOC_READ or IOC_WRITE must be set in order to
+// get a non-zero SIZE.
+#define IOC_SIZE(nr) \
+ ((((((nr) >> 29) & 0x7) & (4U | 2U)) == 0) ? 0 : (((nr) >> 16) & 0x3fff))
+#else
+#define IOC_SIZE(nr) (((nr) >> IOC_SIZESHIFT) & IOC_SIZEMASK)
+#endif
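
// Illustrative sketch (not from the patch): unpacking an ioctl request
// word with the non-SPARC field layout defined above (nr:8, type:8,
// size:12, dir:4, from least to most significant bits). The request value
// is fabricated purely for this example.
#include <cstdio>

int main() {
  unsigned nr = (2u << 28) | (4u << 16) | ((unsigned)'t' << 8) | 104u;
  unsigned dir  = (nr >> 28) & 0xFu;    // IOC_DIR: direction bits
  unsigned size = (nr >> 16) & 0xFFFu;  // IOC_SIZE: argument size in bytes
  unsigned type = (nr >> 8) & 0xFFu;    // IOC_TYPE: driver group character
  unsigned cmd  = nr & 0xFFu;           // IOC_NR: command within the group
  std::printf("dir=%u size=%u type='%c' nr=%u\n", dir, size, (char)type, cmd);
  return 0;
}
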
+
+extern unsigned struct_ifreq_sz;
+extern unsigned struct_termios_sz;
+extern unsigned struct_winsize_sz;
+
+extern unsigned struct_sioc_sg_req_sz;
+extern unsigned struct_sioc_vif_req_sz;
+
+// ioctl request identifiers
+
+// A special value to mark ioctls that are not present on the target platform,
+// when this cannot be determined without including any system headers.
+extern const unsigned IOCTL_NOT_PRESENT;
+
+extern unsigned IOCTL_FIOASYNC;
+extern unsigned IOCTL_FIOCLEX;
+extern unsigned IOCTL_FIOGETOWN;
+extern unsigned IOCTL_FIONBIO;
+extern unsigned IOCTL_FIONCLEX;
+extern unsigned IOCTL_FIOSETOWN;
+extern unsigned IOCTL_SIOCADDMULTI;
+extern unsigned IOCTL_SIOCATMARK;
+extern unsigned IOCTL_SIOCDELMULTI;
+extern unsigned IOCTL_SIOCGIFADDR;
+extern unsigned IOCTL_SIOCGIFBRDADDR;
+extern unsigned IOCTL_SIOCGIFCONF;
+extern unsigned IOCTL_SIOCGIFDSTADDR;
+extern unsigned IOCTL_SIOCGIFFLAGS;
+extern unsigned IOCTL_SIOCGIFMETRIC;
+extern unsigned IOCTL_SIOCGIFMTU;
+extern unsigned IOCTL_SIOCGIFNETMASK;
+extern unsigned IOCTL_SIOCGPGRP;
+extern unsigned IOCTL_SIOCSIFADDR;
+extern unsigned IOCTL_SIOCSIFBRDADDR;
+extern unsigned IOCTL_SIOCSIFDSTADDR;
+extern unsigned IOCTL_SIOCSIFFLAGS;
+extern unsigned IOCTL_SIOCSIFMETRIC;
+extern unsigned IOCTL_SIOCSIFMTU;
+extern unsigned IOCTL_SIOCSIFNETMASK;
+extern unsigned IOCTL_SIOCSPGRP;
+extern unsigned IOCTL_TIOCEXCL;
+extern unsigned IOCTL_TIOCGETD;
+extern unsigned IOCTL_TIOCGPGRP;
+extern unsigned IOCTL_TIOCGWINSZ;
+extern unsigned IOCTL_TIOCMBIC;
+extern unsigned IOCTL_TIOCMBIS;
+extern unsigned IOCTL_TIOCMGET;
+extern unsigned IOCTL_TIOCMSET;
+extern unsigned IOCTL_TIOCNOTTY;
+extern unsigned IOCTL_TIOCNXCL;
+extern unsigned IOCTL_TIOCOUTQ;
+extern unsigned IOCTL_TIOCPKT;
+extern unsigned IOCTL_TIOCSCTTY;
+extern unsigned IOCTL_TIOCSETD;
+extern unsigned IOCTL_TIOCSPGRP;
+extern unsigned IOCTL_TIOCSTI;
+extern unsigned IOCTL_TIOCSWINSZ;
+extern unsigned IOCTL_MTIOCGET;
+extern unsigned IOCTL_MTIOCTOP;
+
+extern const int si_SEGV_MAPERR;
+extern const int si_SEGV_ACCERR;
+} // namespace __sanitizer
+
+#define CHECK_TYPE_SIZE(TYPE) \
+ COMPILER_CHECK(sizeof(__sanitizer_##TYPE) == sizeof(TYPE))
+
+#define CHECK_SIZE_AND_OFFSET(CLASS, MEMBER) \
+ COMPILER_CHECK(sizeof(((__sanitizer_##CLASS *) NULL)->MEMBER) == \
+ sizeof(((CLASS *) NULL)->MEMBER)); \
+ COMPILER_CHECK(offsetof(__sanitizer_##CLASS, MEMBER) == \
+ offsetof(CLASS, MEMBER))
+
+// For sigaction, which is a function and struct at the same time,
+// and thus requires explicit "struct" in sizeof() expression.
+#define CHECK_STRUCT_SIZE_AND_OFFSET(CLASS, MEMBER) \
+ COMPILER_CHECK(sizeof(((struct __sanitizer_##CLASS *) NULL)->MEMBER) == \
+ sizeof(((struct CLASS *) NULL)->MEMBER)); \
+ COMPILER_CHECK(offsetof(struct __sanitizer_##CLASS, MEMBER) == \
+ offsetof(struct CLASS, MEMBER))
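
// Illustrative sketch (not from the patch): what the two checking macros
// above boil down to, written out with static_assert for a toy pair of
// types. The sanitizer-side mirror only has to agree with the system type
// in size and in the offsets of the members the runtime actually touches.
#include <cstddef>

struct system_pollfd { int fd; short events; short revents; };  // stand-in for a libc type
struct mirror_pollfd { int fd; short events; short revents; };  // sanitizer-side shadow

// CHECK_TYPE_SIZE equivalent:
static_assert(sizeof(mirror_pollfd) == sizeof(system_pollfd), "size mismatch");
// CHECK_SIZE_AND_OFFSET equivalent for one member:
static_assert(sizeof(((mirror_pollfd *)nullptr)->revents) ==
                  sizeof(((system_pollfd *)nullptr)->revents),
              "member size mismatch");
static_assert(offsetof(mirror_pollfd, revents) == offsetof(system_pollfd, revents),
              "member offset mismatch");
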
+
+#endif // SANITIZER_SOLARIS
+
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_platform_limits_solaris.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_posix.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_posix.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_posix.cc (revision 351984)
@@ -0,0 +1,388 @@
+//===-- sanitizer_posix.cc ------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries and implements POSIX-specific functions from
+// sanitizer_posix.h.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+
+#if SANITIZER_POSIX
+
+#include "sanitizer_common.h"
+#include "sanitizer_file.h"
+#include "sanitizer_flags.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_posix.h"
+#include "sanitizer_procmaps.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <sys/mman.h>
+
+#if SANITIZER_FREEBSD
+// The MAP_NORESERVE define has been removed in FreeBSD 11.x, and even before
+// that, it was never implemented. So just define it to zero.
+#undef MAP_NORESERVE
+#define MAP_NORESERVE 0
+#endif
+
+namespace __sanitizer {
+
+// ------------- sanitizer_common.h
+uptr GetMmapGranularity() {
+ return GetPageSize();
+}
+
+void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
+ size = RoundUpTo(size, GetPageSizeCached());
+ uptr res = MmapNamed(nullptr, size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON, mem_type);
+ int reserrno;
+ if (UNLIKELY(internal_iserror(res, &reserrno)))
+ ReportMmapFailureAndDie(size, mem_type, "allocate", reserrno, raw_report);
+ IncreaseTotalMmap(size);
+ return (void *)res;
+}
+
+void UnmapOrDie(void *addr, uptr size) {
+ if (!addr || !size) return;
+ uptr res = internal_munmap(addr, size);
+ if (UNLIKELY(internal_iserror(res))) {
+ Report("ERROR: %s failed to deallocate 0x%zx (%zd) bytes at address %p\n",
+ SanitizerToolName, size, size, addr);
+ CHECK("unable to unmap" && 0);
+ }
+ DecreaseTotalMmap(size);
+}
+
+void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
+ size = RoundUpTo(size, GetPageSizeCached());
+ uptr res = MmapNamed(nullptr, size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON, mem_type);
+ int reserrno;
+ if (UNLIKELY(internal_iserror(res, &reserrno))) {
+ if (reserrno == ENOMEM)
+ return nullptr;
+ ReportMmapFailureAndDie(size, mem_type, "allocate", reserrno);
+ }
+ IncreaseTotalMmap(size);
+ return (void *)res;
+}
+
+// We want to map a chunk of address space aligned to 'alignment'.
+// We do it by mapping a bit more and then unmapping redundant pieces.
+// We probably can do it with fewer syscalls in some OS-dependent way.
+void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
+ const char *mem_type) {
+ CHECK(IsPowerOfTwo(size));
+ CHECK(IsPowerOfTwo(alignment));
+ uptr map_size = size + alignment;
+ uptr map_res = (uptr)MmapOrDieOnFatalError(map_size, mem_type);
+ if (UNLIKELY(!map_res))
+ return nullptr;
+ uptr map_end = map_res + map_size;
+ uptr res = map_res;
+ if (!IsAligned(res, alignment)) {
+ res = (map_res + alignment - 1) & ~(alignment - 1);
+ UnmapOrDie((void*)map_res, res - map_res);
+ }
+ uptr end = res + size;
+ if (end != map_end)
+ UnmapOrDie((void*)end, map_end - end);
+ return (void*)res;
+}
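
// Illustrative sketch (not from the patch): the over-map-and-trim scheme
// of MmapAlignedOrDieOnFatalError, restated against raw POSIX mmap so it
// can be read in isolation. Assumes size and alignment are powers of two,
// multiples of the page size, and that MAP_ANONYMOUS is available.
#include <cstdint>
#include <sys/mman.h>

void *mmap_aligned(size_t size, size_t alignment) {
  size_t map_size = size + alignment;  // over-allocate by one alignment unit
  void *raw = mmap(nullptr, map_size, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (raw == MAP_FAILED) return nullptr;
  uintptr_t start = (uintptr_t)raw;
  uintptr_t res = (start + alignment - 1) & ~(alignment - 1);  // round up
  if (res != start) munmap(raw, res - start);              // trim leading slack
  uintptr_t end = res + size;
  uintptr_t map_end = start + map_size;
  if (end != map_end) munmap((void *)end, map_end - end);  // trim the tail
  return (void *)res;  // [res, res + size) is aligned and mapped
}
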
+
+void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
+ size = RoundUpTo(size, GetPageSizeCached());
+ uptr p = MmapNamed(nullptr, size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON | MAP_NORESERVE, mem_type);
+ int reserrno;
+ if (UNLIKELY(internal_iserror(p, &reserrno)))
+ ReportMmapFailureAndDie(size, mem_type, "allocate noreserve", reserrno);
+ IncreaseTotalMmap(size);
+ return (void *)p;
+}
+
+static void *MmapFixedImpl(uptr fixed_addr, uptr size, bool tolerate_enomem,
+ const char *name) {
+ size = RoundUpTo(size, GetPageSizeCached());
+ fixed_addr = RoundDownTo(fixed_addr, GetPageSizeCached());
+ uptr p = MmapNamed((void *)fixed_addr, size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON | MAP_FIXED, name);
+ int reserrno;
+ if (UNLIKELY(internal_iserror(p, &reserrno))) {
+ if (tolerate_enomem && reserrno == ENOMEM)
+ return nullptr;
+ char mem_type[40];
+ internal_snprintf(mem_type, sizeof(mem_type), "memory at address 0x%zx",
+ fixed_addr);
+ ReportMmapFailureAndDie(size, mem_type, "allocate", reserrno);
+ }
+ IncreaseTotalMmap(size);
+ return (void *)p;
+}
+
+void *MmapFixedOrDie(uptr fixed_addr, uptr size, const char *name) {
+ return MmapFixedImpl(fixed_addr, size, false /*tolerate_enomem*/, name);
+}
+
+void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size, const char *name) {
+ return MmapFixedImpl(fixed_addr, size, true /*tolerate_enomem*/, name);
+}
+
+bool MprotectNoAccess(uptr addr, uptr size) {
+ return 0 == internal_mprotect((void*)addr, size, PROT_NONE);
+}
+
+bool MprotectReadOnly(uptr addr, uptr size) {
+ return 0 == internal_mprotect((void *)addr, size, PROT_READ);
+}
+
+#if !SANITIZER_MAC
+void MprotectMallocZones(void *addr, int prot) {}
+#endif
+
+fd_t OpenFile(const char *filename, FileAccessMode mode, error_t *errno_p) {
+ if (ShouldMockFailureToOpen(filename))
+ return kInvalidFd;
+ int flags;
+ switch (mode) {
+ case RdOnly: flags = O_RDONLY; break;
+ case WrOnly: flags = O_WRONLY | O_CREAT | O_TRUNC; break;
+ case RdWr: flags = O_RDWR | O_CREAT; break;
+ }
+ fd_t res = internal_open(filename, flags, 0660);
+ if (internal_iserror(res, errno_p))
+ return kInvalidFd;
+ return ReserveStandardFds(res);
+}
+
+void CloseFile(fd_t fd) {
+ internal_close(fd);
+}
+
+bool ReadFromFile(fd_t fd, void *buff, uptr buff_size, uptr *bytes_read,
+ error_t *error_p) {
+ uptr res = internal_read(fd, buff, buff_size);
+ if (internal_iserror(res, error_p))
+ return false;
+ if (bytes_read)
+ *bytes_read = res;
+ return true;
+}
+
+bool WriteToFile(fd_t fd, const void *buff, uptr buff_size, uptr *bytes_written,
+ error_t *error_p) {
+ uptr res = internal_write(fd, buff, buff_size);
+ if (internal_iserror(res, error_p))
+ return false;
+ if (bytes_written)
+ *bytes_written = res;
+ return true;
+}
+
+void *MapFileToMemory(const char *file_name, uptr *buff_size) {
+ fd_t fd = OpenFile(file_name, RdOnly);
+ CHECK(fd != kInvalidFd);
+ uptr fsize = internal_filesize(fd);
+ CHECK_NE(fsize, (uptr)-1);
+ CHECK_GT(fsize, 0);
+ *buff_size = RoundUpTo(fsize, GetPageSizeCached());
+ uptr map = internal_mmap(nullptr, *buff_size, PROT_READ, MAP_PRIVATE, fd, 0);
+ return internal_iserror(map) ? nullptr : (void *)map;
+}
+
+void *MapWritableFileToMemory(void *addr, uptr size, fd_t fd, OFF_T offset) {
+ uptr flags = MAP_SHARED;
+ if (addr) flags |= MAP_FIXED;
+ uptr p = internal_mmap(addr, size, PROT_READ | PROT_WRITE, flags, fd, offset);
+ int mmap_errno = 0;
+ if (internal_iserror(p, &mmap_errno)) {
+ Printf("could not map writable file (%d, %lld, %zu): %zd, errno: %d\n",
+ fd, (long long)offset, size, p, mmap_errno);
+ return nullptr;
+ }
+ return (void *)p;
+}
+
+static inline bool IntervalsAreSeparate(uptr start1, uptr end1,
+ uptr start2, uptr end2) {
+ CHECK(start1 <= end1);
+ CHECK(start2 <= end2);
+ return (end1 < start2) || (end2 < start1);
+}
+
+// FIXME: this is thread-unsafe, but should not cause problems most of the time.
+// When the shadow is mapped only a single thread usually exists (plus maybe
+// several worker threads on Mac, which aren't expected to map big chunks of
+// memory).
+bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
+ MemoryMappingLayout proc_maps(/*cache_enabled*/true);
+ if (proc_maps.Error())
+ return true; // and hope for the best
+ MemoryMappedSegment segment;
+ while (proc_maps.Next(&segment)) {
+ if (segment.start == segment.end) continue; // Empty range.
+ CHECK_NE(0, segment.end);
+ if (!IntervalsAreSeparate(segment.start, segment.end - 1, range_start,
+ range_end))
+ return false;
+ }
+ return true;
+}
+
+void DumpProcessMap() {
+ MemoryMappingLayout proc_maps(/*cache_enabled*/true);
+ const sptr kBufSize = 4095;
+ char *filename = (char*)MmapOrDie(kBufSize, __func__);
+ MemoryMappedSegment segment(filename, kBufSize);
+ Report("Process memory map follows:\n");
+ while (proc_maps.Next(&segment)) {
+ Printf("\t%p-%p\t%s\n", (void *)segment.start, (void *)segment.end,
+ segment.filename);
+ }
+ Report("End of process memory map.\n");
+ UnmapOrDie(filename, kBufSize);
+}
+
+const char *GetPwd() {
+ return GetEnv("PWD");
+}
+
+bool IsPathSeparator(const char c) {
+ return c == '/';
+}
+
+bool IsAbsolutePath(const char *path) {
+ return path != nullptr && IsPathSeparator(path[0]);
+}
+
+void ReportFile::Write(const char *buffer, uptr length) {
+ SpinMutexLock l(mu);
+ ReopenIfNecessary();
+ internal_write(fd, buffer, length);
+}
+
+bool GetCodeRangeForFile(const char *module, uptr *start, uptr *end) {
+ MemoryMappingLayout proc_maps(/*cache_enabled*/false);
+ InternalScopedString buff(kMaxPathLength);
+ MemoryMappedSegment segment(buff.data(), kMaxPathLength);
+ while (proc_maps.Next(&segment)) {
+ if (segment.IsExecutable() &&
+ internal_strcmp(module, segment.filename) == 0) {
+ *start = segment.start;
+ *end = segment.end;
+ return true;
+ }
+ }
+ return false;
+}
+
+uptr SignalContext::GetAddress() const {
+ auto si = static_cast<const siginfo_t *>(siginfo);
+ return (uptr)si->si_addr;
+}
+
+bool SignalContext::IsMemoryAccess() const {
+ auto si = static_cast<const siginfo_t *>(siginfo);
+ return si->si_signo == SIGSEGV;
+}
+
+int SignalContext::GetType() const {
+ return static_cast<const siginfo_t *>(siginfo)->si_signo;
+}
+
+const char *SignalContext::Describe() const {
+ switch (GetType()) {
+ case SIGFPE:
+ return "FPE";
+ case SIGILL:
+ return "ILL";
+ case SIGABRT:
+ return "ABRT";
+ case SIGSEGV:
+ return "SEGV";
+ case SIGBUS:
+ return "BUS";
+ }
+ return "UNKNOWN SIGNAL";
+}
+
+fd_t ReserveStandardFds(fd_t fd) {
+ CHECK_GE(fd, 0);
+ if (fd > 2)
+ return fd;
+ bool used[3];
+ internal_memset(used, 0, sizeof(used));
+ while (fd <= 2) {
+ used[fd] = true;
+ fd = internal_dup(fd);
+ }
+ for (int i = 0; i <= 2; ++i)
+ if (used[i])
+ internal_close(i);
+ return fd;
+}
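
// Illustrative sketch (not from the patch): the same fd-relocation trick
// with plain POSIX calls. dup() always returns the lowest free descriptor,
// so at most three hops move fd above 2; the temporaries are then closed,
// leaving the stdin/stdout/stderr slots free again for the application.
#include <unistd.h>

int reserve_standard_fds(int fd) {
  if (fd < 0 || fd > 2) return fd;
  bool used[3] = {false, false, false};
  while (fd >= 0 && fd <= 2) {
    used[fd] = true;  // remember which low slots we occupied
    fd = dup(fd);     // lowest unused descriptor, eventually > 2
  }
  for (int i = 0; i <= 2; ++i)
    if (used[i]) close(i);
  return fd;
}
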
+
+bool ShouldMockFailureToOpen(const char *path) {
+ return common_flags()->test_only_emulate_no_memorymap &&
+ internal_strncmp(path, "/proc/", 6) == 0;
+}
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID && !SANITIZER_GO
+int GetNamedMappingFd(const char *name, uptr size, int *flags) {
+ if (!common_flags()->decorate_proc_maps || !name)
+ return -1;
+ char shmname[200];
+ CHECK(internal_strlen(name) < sizeof(shmname) - 10);
+ internal_snprintf(shmname, sizeof(shmname), "/dev/shm/%zu [%s]",
+ internal_getpid(), name);
+ int fd = ReserveStandardFds(
+ internal_open(shmname, O_RDWR | O_CREAT | O_TRUNC | O_CLOEXEC, S_IRWXU));
+ CHECK_GE(fd, 0);
+ int res = internal_ftruncate(fd, size);
+ CHECK_EQ(0, res);
+ res = internal_unlink(shmname);
+ CHECK_EQ(0, res);
+ *flags &= ~(MAP_ANON | MAP_ANONYMOUS);
+ return fd;
+}
+#else
+int GetNamedMappingFd(const char *name, uptr size, int *flags) {
+ return -1;
+}
+#endif
+
+#if SANITIZER_ANDROID
+#define PR_SET_VMA 0x53564d41
+#define PR_SET_VMA_ANON_NAME 0
+void DecorateMapping(uptr addr, uptr size, const char *name) {
+ if (!common_flags()->decorate_proc_maps || !name)
+ return;
+ internal_prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, addr, size, (uptr)name);
+}
+#else
+void DecorateMapping(uptr addr, uptr size, const char *name) {
+}
+#endif
+
+uptr MmapNamed(void *addr, uptr length, int prot, int flags, const char *name) {
+ int fd = GetNamedMappingFd(name, length, &flags);
+ uptr res = internal_mmap(addr, length, prot, flags, fd, 0);
+ if (!internal_iserror(res))
+ DecorateMapping(res, length, name);
+ return res;
+}
+
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_POSIX
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_posix.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_posix.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_posix.h (revision 351984)
@@ -0,0 +1,125 @@
+//===-- sanitizer_posix.h -------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries and declares some useful POSIX-specific functions.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_POSIX_H
+#define SANITIZER_POSIX_H
+
+// ----------- ATTENTION -------------
+// This header should NOT include any other headers from sanitizer runtime.
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_platform_limits_freebsd.h"
+#include "sanitizer_platform_limits_netbsd.h"
+#include "sanitizer_platform_limits_openbsd.h"
+#include "sanitizer_platform_limits_posix.h"
+#include "sanitizer_platform_limits_solaris.h"
+
+#if !SANITIZER_POSIX
+// Make it hard to accidentally use any of functions declared in this file:
+#error This file should only be included on POSIX
+#endif
+
+namespace __sanitizer {
+
+// I/O
+// Don't use these directly; use __sanitizer::OpenFile() instead.
+uptr internal_open(const char *filename, int flags);
+uptr internal_open(const char *filename, int flags, u32 mode);
+uptr internal_close(fd_t fd);
+
+uptr internal_read(fd_t fd, void *buf, uptr count);
+uptr internal_write(fd_t fd, const void *buf, uptr count);
+
+// Memory
+uptr internal_mmap(void *addr, uptr length, int prot, int flags,
+ int fd, OFF_T offset);
+uptr internal_munmap(void *addr, uptr length);
+int internal_mprotect(void *addr, uptr length, int prot);
+
+// OS
+uptr internal_filesize(fd_t fd); // -1 on error.
+uptr internal_stat(const char *path, void *buf);
+uptr internal_lstat(const char *path, void *buf);
+uptr internal_fstat(fd_t fd, void *buf);
+uptr internal_dup(int oldfd);
+uptr internal_dup2(int oldfd, int newfd);
+uptr internal_readlink(const char *path, char *buf, uptr bufsize);
+uptr internal_unlink(const char *path);
+uptr internal_rename(const char *oldpath, const char *newpath);
+uptr internal_lseek(fd_t fd, OFF_T offset, int whence);
+
+#if SANITIZER_NETBSD
+uptr internal_ptrace(int request, int pid, void *addr, int data);
+#else
+uptr internal_ptrace(int request, int pid, void *addr, void *data);
+#endif
+uptr internal_waitpid(int pid, int *status, int options);
+
+int internal_fork();
+int internal_forkpty(int *amaster);
+
+int internal_sysctl(const int *name, unsigned int namelen, void *oldp,
+ uptr *oldlenp, const void *newp, uptr newlen);
+int internal_sysctlbyname(const char *sname, void *oldp, uptr *oldlenp,
+ const void *newp, uptr newlen);
+
+// These functions call appropriate pthread_ functions directly, bypassing
+// the interceptor. They are weak and may not be present in some tools.
+SANITIZER_WEAK_ATTRIBUTE
+int real_pthread_create(void *th, void *attr, void *(*callback)(void *),
+ void *param);
+SANITIZER_WEAK_ATTRIBUTE
+int real_pthread_join(void *th, void **ret);
+
+#define DEFINE_REAL_PTHREAD_FUNCTIONS \
+ namespace __sanitizer { \
+ int real_pthread_create(void *th, void *attr, void *(*callback)(void *), \
+ void *param) { \
+ return REAL(pthread_create)(th, attr, callback, param); \
+ } \
+ int real_pthread_join(void *th, void **ret) { \
+    return REAL(pthread_join)(th, ret); \
+ } \
+ } // namespace __sanitizer
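
// Illustrative sketch (not from the patch): the shape of the interceptor
// bypass, with the interception machinery faked out so the snippet stands
// alone. In the runtime, REAL(pthread_create) names a pointer to the
// genuine libc entry point captured when the interceptor was installed; a
// plain function pointer plays that role here.
#include <pthread.h>

static int (*real_create)(pthread_t *, const pthread_attr_t *,
                          void *(*)(void *), void *) = pthread_create;

int bypass_pthread_create(pthread_t *th, const pthread_attr_t *attr,
                          void *(*fn)(void *), void *arg) {
  return real_create(th, attr, fn, arg);  // skips any interposed wrapper
}
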
+
+int my_pthread_attr_getstack(void *attr, void **addr, uptr *size);
+
+// A routine named real_sigaction() must be implemented by each sanitizer in
+// order for internal_sigaction() to bypass interceptors.
+int internal_sigaction(int signum, const void *act, void *oldact);
+void internal_sigfillset(__sanitizer_sigset_t *set);
+void internal_sigemptyset(__sanitizer_sigset_t *set);
+bool internal_sigismember(__sanitizer_sigset_t *set, int signum);
+
+uptr internal_execve(const char *filename, char *const argv[],
+ char *const envp[]);
+
+bool IsStateDetached(int state);
+
+// Move the fd out of {0, 1, 2} range.
+fd_t ReserveStandardFds(fd_t fd);
+
+bool ShouldMockFailureToOpen(const char *path);
+
+// Create a non-file mapping with a given /proc/self/maps name.
+uptr MmapNamed(void *addr, uptr length, int prot, int flags, const char *name);
+
+// Platforms should implement at most one of these.
+// 1. Provide a pre-decorated file descriptor to use instead of an anonymous
+// mapping.
+int GetNamedMappingFd(const char *name, uptr size, int *flags);
+// 2. Add name to an existing anonymous mapping. The caller must keep *name
+// alive at least as long as the mapping exists.
+void DecorateMapping(uptr addr, uptr size, const char *name);
+
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_POSIX_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_posix.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_posix_libcdep.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_posix_libcdep.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_posix_libcdep.cc (revision 351984)
@@ -0,0 +1,486 @@
+//===-- sanitizer_posix_libcdep.cc ----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries and implements libc-dependent POSIX-specific functions
+// from sanitizer_libc.h.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+
+#if SANITIZER_POSIX
+
+#include "sanitizer_common.h"
+#include "sanitizer_flags.h"
+#include "sanitizer_platform_limits_netbsd.h"
+#include "sanitizer_platform_limits_openbsd.h"
+#include "sanitizer_platform_limits_posix.h"
+#include "sanitizer_platform_limits_solaris.h"
+#include "sanitizer_posix.h"
+#include "sanitizer_procmaps.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <pthread.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <sys/mman.h>
+#include <sys/resource.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#if SANITIZER_FREEBSD
+// The MAP_NORESERVE define has been removed in FreeBSD 11.x, and even before
+// that, it was never implemented. So just define it to zero.
+#undef MAP_NORESERVE
+#define MAP_NORESERVE 0
+#endif
+
+typedef void (*sa_sigaction_t)(int, siginfo_t *, void *);
+
+namespace __sanitizer {
+
+u32 GetUid() {
+ return getuid();
+}
+
+uptr GetThreadSelf() {
+ return (uptr)pthread_self();
+}
+
+void ReleaseMemoryPagesToOS(uptr beg, uptr end) {
+ uptr page_size = GetPageSizeCached();
+ uptr beg_aligned = RoundUpTo(beg, page_size);
+ uptr end_aligned = RoundDownTo(end, page_size);
+ if (beg_aligned < end_aligned)
+ // In the default Solaris compilation environment, madvise() is declared
+ // to take a caddr_t arg; casting it to void * results in an invalid
+ // conversion error, so use char * instead.
+ madvise((char *)beg_aligned, end_aligned - beg_aligned,
+ SANITIZER_MADVISE_DONTNEED);
+}
+
+bool NoHugePagesInRegion(uptr addr, uptr size) {
+#ifdef MADV_NOHUGEPAGE // May not be defined on old systems.
+ return madvise((char *)addr, size, MADV_NOHUGEPAGE) == 0;
+#else
+ return true;
+#endif // MADV_NOHUGEPAGE
+}
+
+bool DontDumpShadowMemory(uptr addr, uptr length) {
+#if defined(MADV_DONTDUMP)
+ return madvise((char *)addr, length, MADV_DONTDUMP) == 0;
+#elif defined(MADV_NOCORE)
+ return madvise((char *)addr, length, MADV_NOCORE) == 0;
+#else
+ return true;
+#endif // MADV_DONTDUMP
+}
+
+static rlim_t getlim(int res) {
+ rlimit rlim;
+ CHECK_EQ(0, getrlimit(res, &rlim));
+ return rlim.rlim_cur;
+}
+
+static void setlim(int res, rlim_t lim) {
+ struct rlimit rlim;
+ if (getrlimit(res, const_cast<struct rlimit *>(&rlim))) {
+ Report("ERROR: %s getrlimit() failed %d\n", SanitizerToolName, errno);
+ Die();
+ }
+ rlim.rlim_cur = lim;
+ if (setrlimit(res, const_cast<struct rlimit *>(&rlim))) {
+ Report("ERROR: %s setrlimit() failed %d\n", SanitizerToolName, errno);
+ Die();
+ }
+}
+
+void DisableCoreDumperIfNecessary() {
+ if (common_flags()->disable_coredump) {
+ setlim(RLIMIT_CORE, 0);
+ }
+}
+
+bool StackSizeIsUnlimited() {
+ rlim_t stack_size = getlim(RLIMIT_STACK);
+ return (stack_size == RLIM_INFINITY);
+}
+
+void SetStackSizeLimitInBytes(uptr limit) {
+ setlim(RLIMIT_STACK, (rlim_t)limit);
+ CHECK(!StackSizeIsUnlimited());
+}
+
+bool AddressSpaceIsUnlimited() {
+ rlim_t as_size = getlim(RLIMIT_AS);
+ return (as_size == RLIM_INFINITY);
+}
+
+void SetAddressSpaceUnlimited() {
+ setlim(RLIMIT_AS, RLIM_INFINITY);
+ CHECK(AddressSpaceIsUnlimited());
+}
+
+void SleepForSeconds(int seconds) {
+ sleep(seconds);
+}
+
+void SleepForMillis(int millis) {
+ usleep(millis * 1000);
+}
+
+void Abort() {
+#if !SANITIZER_GO
+ // If we are handling SIGABRT, unhandle it first.
+ // TODO(vitalybuka): Check if handler belongs to sanitizer.
+ if (GetHandleSignalMode(SIGABRT) != kHandleSignalNo) {
+ struct sigaction sigact;
+ internal_memset(&sigact, 0, sizeof(sigact));
+ sigact.sa_sigaction = (sa_sigaction_t)SIG_DFL;
+ internal_sigaction(SIGABRT, &sigact, nullptr);
+ }
+#endif
+
+ abort();
+}
+
+int Atexit(void (*function)(void)) {
+#if !SANITIZER_GO
+ return atexit(function);
+#else
+ return 0;
+#endif
+}
+
+bool SupportsColoredOutput(fd_t fd) {
+ return isatty(fd) != 0;
+}
+
+#if !SANITIZER_GO
+// TODO(glider): different tools may require different altstack sizes.
+static const uptr kAltStackSize = SIGSTKSZ * 4; // SIGSTKSZ is not enough.
+
+void SetAlternateSignalStack() {
+ stack_t altstack, oldstack;
+ CHECK_EQ(0, sigaltstack(nullptr, &oldstack));
+ // If the alternate stack is already in place, do nothing.
+ // Android always sets an alternate stack, but it's too small for us.
+ if (!SANITIZER_ANDROID && !(oldstack.ss_flags & SS_DISABLE)) return;
+  // TODO(glider): the mapped stack should have the MAP_STACK flag in the
+  // future. man 2 sigaltstack does not require it at the moment (its example
+  // uses malloc()).
+ void* base = MmapOrDie(kAltStackSize, __func__);
+ altstack.ss_sp = (char*) base;
+ altstack.ss_flags = 0;
+ altstack.ss_size = kAltStackSize;
+ CHECK_EQ(0, sigaltstack(&altstack, nullptr));
+}
+
+void UnsetAlternateSignalStack() {
+ stack_t altstack, oldstack;
+ altstack.ss_sp = nullptr;
+ altstack.ss_flags = SS_DISABLE;
+ altstack.ss_size = kAltStackSize; // Some sane value required on Darwin.
+ CHECK_EQ(0, sigaltstack(&altstack, &oldstack));
+ UnmapOrDie(oldstack.ss_sp, oldstack.ss_size);
+}
+
+static void MaybeInstallSigaction(int signum,
+ SignalHandlerType handler) {
+ if (GetHandleSignalMode(signum) == kHandleSignalNo) return;
+
+ struct sigaction sigact;
+ internal_memset(&sigact, 0, sizeof(sigact));
+ sigact.sa_sigaction = (sa_sigaction_t)handler;
+ // Do not block the signal from being received in that signal's handler.
+ // Clients are responsible for handling this correctly.
+ sigact.sa_flags = SA_SIGINFO | SA_NODEFER;
+ if (common_flags()->use_sigaltstack) sigact.sa_flags |= SA_ONSTACK;
+ CHECK_EQ(0, internal_sigaction(signum, &sigact, nullptr));
+ VReport(1, "Installed the sigaction for signal %d\n", signum);
+}
+
+void InstallDeadlySignalHandlers(SignalHandlerType handler) {
+ // Set the alternate signal stack for the main thread.
+  // This will cause SetAlternateSignalStack to be called twice, but the stack
+  // will actually be set only once.
+ if (common_flags()->use_sigaltstack) SetAlternateSignalStack();
+ MaybeInstallSigaction(SIGSEGV, handler);
+ MaybeInstallSigaction(SIGBUS, handler);
+ MaybeInstallSigaction(SIGABRT, handler);
+ MaybeInstallSigaction(SIGFPE, handler);
+ MaybeInstallSigaction(SIGILL, handler);
+ MaybeInstallSigaction(SIGTRAP, handler);
+}
+
+bool SignalContext::IsStackOverflow() const {
+ // Access at a reasonable offset above SP, or slightly below it (to account
+ // for x86_64 or PowerPC redzone, ARM push of multiple registers, etc) is
+ // probably a stack overflow.
+#ifdef __s390__
+ // On s390, the fault address in siginfo points to start of the page, not
+ // to the precise word that was accessed. Mask off the low bits of sp to
+ // take it into account.
+ bool IsStackAccess = addr >= (sp & ~0xFFF) && addr < sp + 0xFFFF;
+#else
+ // Let's accept up to a page size away from top of stack. Things like stack
+ // probing can trigger accesses with such large offsets.
+ bool IsStackAccess = addr + GetPageSizeCached() > sp && addr < sp + 0xFFFF;
+#endif
+
+#if __powerpc__
+ // Large stack frames can be allocated with e.g.
+ // lis r0,-10000
+ // stdux r1,r1,r0 # store sp to [sp-10000] and update sp by -10000
+ // If the store faults then sp will not have been updated, so test above
+ // will not work, because the fault address will be more than just "slightly"
+ // below sp.
+ if (!IsStackAccess && IsAccessibleMemoryRange(pc, 4)) {
+ u32 inst = *(unsigned *)pc;
+ u32 ra = (inst >> 16) & 0x1F;
+ u32 opcd = inst >> 26;
+ u32 xo = (inst >> 1) & 0x3FF;
+ // Check for store-with-update to sp. The instructions we accept are:
+ // stbu rs,d(ra) stbux rs,ra,rb
+ // sthu rs,d(ra) sthux rs,ra,rb
+ // stwu rs,d(ra) stwux rs,ra,rb
+ // stdu rs,ds(ra) stdux rs,ra,rb
+ // where ra is r1 (the stack pointer).
+ if (ra == 1 &&
+ (opcd == 39 || opcd == 45 || opcd == 37 || opcd == 62 ||
+ (opcd == 31 && (xo == 247 || xo == 439 || xo == 183 || xo == 181))))
+ IsStackAccess = true;
+ }
+#endif // __powerpc__
+
+  // We also check si_code to filter out SEGV caused by something other than
+  // hitting the guard page or unmapped memory, such as, for example, an
+  // unaligned memory access.
+ auto si = static_cast<const siginfo_t *>(siginfo);
+ return IsStackAccess &&
+ (si->si_code == si_SEGV_MAPERR || si->si_code == si_SEGV_ACCERR);
+}
+
+#endif // SANITIZER_GO
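
// Illustrative sketch (not from the patch): the PowerPC bit-slicing used
// by IsStackOverflow, applied to one instruction word. 0x7C21416A should
// decode as "stdux r1,r1,r8" (primary opcode 31, extended opcode 181,
// ra = r1), i.e. a stack-frame allocation the heuristic must accept.
#include <cstdio>

int main() {
  unsigned inst = 0x7C21416Au;
  unsigned ra   = (inst >> 16) & 0x1F;  // register being updated
  unsigned opcd = inst >> 26;           // primary opcode
  unsigned xo   = (inst >> 1) & 0x3FF;  // extended opcode
  bool frame_alloc =
      ra == 1 && (opcd == 37 || opcd == 39 || opcd == 45 || opcd == 62 ||
                  (opcd == 31 &&
                   (xo == 181 || xo == 183 || xo == 247 || xo == 439)));
  std::printf("opcd=%u xo=%u ra=%u stack store: %s\n", opcd, xo, ra,
              frame_alloc ? "yes" : "no");
  return 0;
}
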
+
+bool IsAccessibleMemoryRange(uptr beg, uptr size) {
+ uptr page_size = GetPageSizeCached();
+ // Checking too large memory ranges is slow.
+ CHECK_LT(size, page_size * 10);
+ int sock_pair[2];
+ if (pipe(sock_pair))
+ return false;
+ uptr bytes_written =
+ internal_write(sock_pair[1], reinterpret_cast<void *>(beg), size);
+ int write_errno;
+ bool result;
+ if (internal_iserror(bytes_written, &write_errno)) {
+ CHECK_EQ(EFAULT, write_errno);
+ result = false;
+ } else {
+ result = (bytes_written == size);
+ }
+ internal_close(sock_pair[0]);
+ internal_close(sock_pair[1]);
+ return result;
+}
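
// Illustrative sketch (not from the patch): the pipe-write probe in
// miniature. write() on a pipe has to read the source buffer, so an
// unmapped address surfaces as a failed write (EFAULT) instead of a fault
// in this process. Assumes n is small relative to the pipe buffer.
#include <unistd.h>

bool is_readable(const void *p, size_t n) {
  int fds[2];
  if (pipe(fds)) return false;
  ssize_t w = write(fds[1], p, n);  // fails with EFAULT if [p, p+n) is bad
  bool ok = (w == (ssize_t)n);
  close(fds[0]);
  close(fds[1]);
  return ok;
}
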
+
+void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {
+  // Some kinds of sandboxes may forbid filesystem access, so we won't be able
+  // to read the file mappings from /proc/self/maps. Luckily, the process won't
+  // be able to load additional libraries either, so it's fine to use the
+  // cached mappings.
+ MemoryMappingLayout::CacheMemoryMappings();
+}
+
+bool MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name) {
+ size = RoundUpTo(size, GetPageSizeCached());
+ fixed_addr = RoundDownTo(fixed_addr, GetPageSizeCached());
+ uptr p = MmapNamed((void *)fixed_addr, size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE | MAP_ANON, name);
+ int reserrno;
+ if (internal_iserror(p, &reserrno)) {
+ Report("ERROR: %s failed to "
+ "allocate 0x%zx (%zd) bytes at address %zx (errno: %d)\n",
+ SanitizerToolName, size, size, fixed_addr, reserrno);
+ return false;
+ }
+ IncreaseTotalMmap(size);
+ return true;
+}
+
+uptr ReservedAddressRange::Init(uptr size, const char *name, uptr fixed_addr) {
+ base_ = fixed_addr ? MmapFixedNoAccess(fixed_addr, size, name)
+ : MmapNoAccess(size);
+ size_ = size;
+ name_ = name;
+ (void)os_handle_; // unsupported
+ return reinterpret_cast<uptr>(base_);
+}
+
+// Uses fixed_addr for now.
+// Will use offset instead once we've implemented this function for real.
+uptr ReservedAddressRange::Map(uptr fixed_addr, uptr size, const char *name) {
+ return reinterpret_cast<uptr>(
+ MmapFixedOrDieOnFatalError(fixed_addr, size, name));
+}
+
+uptr ReservedAddressRange::MapOrDie(uptr fixed_addr, uptr size,
+ const char *name) {
+ return reinterpret_cast<uptr>(MmapFixedOrDie(fixed_addr, size, name));
+}
+
+void ReservedAddressRange::Unmap(uptr addr, uptr size) {
+ CHECK_LE(size, size_);
+ if (addr == reinterpret_cast<uptr>(base_))
+ // If we unmap the whole range, just null out the base.
+ base_ = (size == size_) ? nullptr : reinterpret_cast<void*>(addr + size);
+ else
+ CHECK_EQ(addr + size, reinterpret_cast<uptr>(base_) + size_);
+ size_ -= size;
+ UnmapOrDie(reinterpret_cast<void*>(addr), size);
+}
+
+void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {
+ return (void *)MmapNamed((void *)fixed_addr, size, PROT_NONE,
+ MAP_PRIVATE | MAP_FIXED | MAP_NORESERVE | MAP_ANON,
+ name);
+}
+
+void *MmapNoAccess(uptr size) {
+ unsigned flags = MAP_PRIVATE | MAP_ANON | MAP_NORESERVE;
+ return (void *)internal_mmap(nullptr, size, PROT_NONE, flags, -1, 0);
+}
+
+// This function is defined elsewhere if we intercepted pthread_attr_getstack.
+extern "C" {
+SANITIZER_WEAK_ATTRIBUTE int
+real_pthread_attr_getstack(void *attr, void **addr, size_t *size);
+} // extern "C"
+
+int my_pthread_attr_getstack(void *attr, void **addr, uptr *size) {
+#if !SANITIZER_GO && !SANITIZER_MAC
+ if (&real_pthread_attr_getstack)
+ return real_pthread_attr_getstack((pthread_attr_t *)attr, addr,
+ (size_t *)size);
+#endif
+ return pthread_attr_getstack((pthread_attr_t *)attr, addr, (size_t *)size);
+}
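
// Illustrative sketch (not from the patch): the weak-symbol dispatch used
// by my_pthread_attr_getstack, reduced to its core. With GCC/Clang on ELF
// targets, the address of an undefined weak function compares equal to
// null, so the fallback runs unless some other object file supplies a
// strong definition of maybe_hook (a hypothetical name).
#include <cstdio>

extern "C" __attribute__((weak)) int maybe_hook(int x);

int dispatch(int x) {
  if (&maybe_hook)        // non-null only if a definition was linked in
    return maybe_hook(x);
  return x;               // fallback path
}

int main() { std::printf("%d\n", dispatch(7)); return 0; }
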
+
+#if !SANITIZER_GO
+void AdjustStackSize(void *attr_) {
+ pthread_attr_t *attr = (pthread_attr_t *)attr_;
+ uptr stackaddr = 0;
+ uptr stacksize = 0;
+ my_pthread_attr_getstack(attr, (void**)&stackaddr, &stacksize);
+  // glibc returns (0 - stacksize) as the stack address when stacksize is set
+  // but stackaddr is not.
+ bool stack_set = (stackaddr != 0) && (stackaddr + stacksize != 0);
+  // We place a lot of tool data into TLS; account for that.
+ const uptr minstacksize = GetTlsSize() + 128*1024;
+ if (stacksize < minstacksize) {
+ if (!stack_set) {
+ if (stacksize != 0) {
+ VPrintf(1, "Sanitizer: increasing stacksize %zu->%zu\n", stacksize,
+ minstacksize);
+ pthread_attr_setstacksize(attr, minstacksize);
+ }
+ } else {
+ Printf("Sanitizer: pre-allocated stack size is insufficient: "
+ "%zu < %zu\n", stacksize, minstacksize);
+ Printf("Sanitizer: pthread_create is likely to fail.\n");
+ }
+ }
+}
+#endif // !SANITIZER_GO
+
+pid_t StartSubprocess(const char *program, const char *const argv[],
+ fd_t stdin_fd, fd_t stdout_fd, fd_t stderr_fd) {
+ auto file_closer = at_scope_exit([&] {
+ if (stdin_fd != kInvalidFd) {
+ internal_close(stdin_fd);
+ }
+ if (stdout_fd != kInvalidFd) {
+ internal_close(stdout_fd);
+ }
+ if (stderr_fd != kInvalidFd) {
+ internal_close(stderr_fd);
+ }
+ });
+
+ int pid = internal_fork();
+
+ if (pid < 0) {
+ int rverrno;
+ if (internal_iserror(pid, &rverrno)) {
+ Report("WARNING: failed to fork (errno %d)\n", rverrno);
+ }
+ return pid;
+ }
+
+ if (pid == 0) {
+ // Child subprocess
+ if (stdin_fd != kInvalidFd) {
+ internal_close(STDIN_FILENO);
+ internal_dup2(stdin_fd, STDIN_FILENO);
+ internal_close(stdin_fd);
+ }
+ if (stdout_fd != kInvalidFd) {
+ internal_close(STDOUT_FILENO);
+ internal_dup2(stdout_fd, STDOUT_FILENO);
+ internal_close(stdout_fd);
+ }
+ if (stderr_fd != kInvalidFd) {
+ internal_close(STDERR_FILENO);
+ internal_dup2(stderr_fd, STDERR_FILENO);
+ internal_close(stderr_fd);
+ }
+
+ for (int fd = sysconf(_SC_OPEN_MAX); fd > 2; fd--) internal_close(fd);
+
+ execv(program, const_cast<char **>(&argv[0]));
+ internal__exit(1);
+ }
+
+ return pid;
+}
+
+bool IsProcessRunning(pid_t pid) {
+ int process_status;
+ uptr waitpid_status = internal_waitpid(pid, &process_status, WNOHANG);
+ int local_errno;
+ if (internal_iserror(waitpid_status, &local_errno)) {
+ VReport(1, "Waiting on the process failed (errno %d).\n", local_errno);
+ return false;
+ }
+ return waitpid_status == 0;
+}
+
+int WaitForProcess(pid_t pid) {
+ int process_status;
+ uptr waitpid_status = internal_waitpid(pid, &process_status, 0);
+ int local_errno;
+ if (internal_iserror(waitpid_status, &local_errno)) {
+ VReport(1, "Waiting on the process failed (errno %d).\n", local_errno);
+ return -1;
+ }
+ return process_status;
+}
+
+bool IsStateDetached(int state) {
+ return state == PTHREAD_CREATE_DETACHED;
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_POSIX
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_printf.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_printf.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_printf.cc (revision 351984)
@@ -0,0 +1,358 @@
+//===-- sanitizer_printf.cc -----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer.
+//
+// Internal printf function, used inside run-time libraries.
+// We can't use libc printf because we intercept some of the functions used
+// inside it.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common.h"
+#include "sanitizer_flags.h"
+#include "sanitizer_libc.h"
+
+#include <stdio.h>
+#include <stdarg.h>
+
+#if SANITIZER_WINDOWS && defined(_MSC_VER) && _MSC_VER < 1800 && \
+ !defined(va_copy)
+# define va_copy(dst, src) ((dst) = (src))
+#endif
+
+namespace __sanitizer {
+
+static int AppendChar(char **buff, const char *buff_end, char c) {
+ if (*buff < buff_end) {
+ **buff = c;
+ (*buff)++;
+ }
+ return 1;
+}
+
+// Appends a number in a given base to the buffer. If its length is less than
+// |minimal_num_length|, it is padded with leading zeroes or spaces, depending
+// on the value of |pad_with_zero|.
+static int AppendNumber(char **buff, const char *buff_end, u64 absolute_value,
+ u8 base, u8 minimal_num_length, bool pad_with_zero,
+ bool negative, bool uppercase) {
+ uptr const kMaxLen = 30;
+ RAW_CHECK(base == 10 || base == 16);
+ RAW_CHECK(base == 10 || !negative);
+ RAW_CHECK(absolute_value || !negative);
+ RAW_CHECK(minimal_num_length < kMaxLen);
+ int result = 0;
+ if (negative && minimal_num_length)
+ --minimal_num_length;
+ if (negative && pad_with_zero)
+ result += AppendChar(buff, buff_end, '-');
+ uptr num_buffer[kMaxLen];
+ int pos = 0;
+ do {
+ RAW_CHECK_MSG((uptr)pos < kMaxLen, "AppendNumber buffer overflow");
+ num_buffer[pos++] = absolute_value % base;
+ absolute_value /= base;
+ } while (absolute_value > 0);
+ if (pos < minimal_num_length) {
+ // Make sure compiler doesn't insert call to memset here.
+ internal_memset(&num_buffer[pos], 0,
+ sizeof(num_buffer[0]) * (minimal_num_length - pos));
+ pos = minimal_num_length;
+ }
+ RAW_CHECK(pos > 0);
+ pos--;
+ for (; pos >= 0 && num_buffer[pos] == 0; pos--) {
+ char c = (pad_with_zero || pos == 0) ? '0' : ' ';
+ result += AppendChar(buff, buff_end, c);
+ }
+ if (negative && !pad_with_zero) result += AppendChar(buff, buff_end, '-');
+ for (; pos >= 0; pos--) {
+ char digit = static_cast<char>(num_buffer[pos]);
+ digit = (digit < 10) ? '0' + digit : (uppercase ? 'A' : 'a') + digit - 10;
+ result += AppendChar(buff, buff_end, digit);
+ }
+ return result;
+}
+
+static int AppendUnsigned(char **buff, const char *buff_end, u64 num, u8 base,
+ u8 minimal_num_length, bool pad_with_zero,
+ bool uppercase) {
+ return AppendNumber(buff, buff_end, num, base, minimal_num_length,
+ pad_with_zero, false /* negative */, uppercase);
+}
+
+static int AppendSignedDecimal(char **buff, const char *buff_end, s64 num,
+ u8 minimal_num_length, bool pad_with_zero) {
+ bool negative = (num < 0);
+ return AppendNumber(buff, buff_end, (u64)(negative ? -num : num), 10,
+ minimal_num_length, pad_with_zero, negative,
+ false /* uppercase */);
+}
+
+
+// Use the fact that explicitly requesting 0 width (%0s) results in UB and
+// interpret width == 0 as "no width requested":
+// width == 0 - no width requested
+// width < 0 - left-justify s and pad it to -width chars, if necessary
+// width > 0 - right-justify s, not implemented yet
+static int AppendString(char **buff, const char *buff_end, int width,
+ int max_chars, const char *s) {
+ if (!s)
+ s = "<null>";
+ int result = 0;
+ for (; *s; s++) {
+ if (max_chars >= 0 && result >= max_chars)
+ break;
+ result += AppendChar(buff, buff_end, *s);
+ }
+  // Only left-justified strings are supported.
+ while (width < -result)
+ result += AppendChar(buff, buff_end, ' ');
+ return result;
+}
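
// Illustrative sketch (not from the patch): the width convention above in
// standard printf terms. "%-8s" is the libc analogue of calling
// AppendString with width = -8, and ".*" precision caps the copied length
// the same way the max_chars parameter does.
#include <cstdio>

int main() {
  std::printf("[%-8s]\n", "ok");           // left-justified, padded: [ok      ]
  std::printf("[%.*s]\n", 3, "truncate");  // precision caps the copy: [tru]
  return 0;
}
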
+
+static int AppendPointer(char **buff, const char *buff_end, u64 ptr_value) {
+ int result = 0;
+ result += AppendString(buff, buff_end, 0, -1, "0x");
+ result += AppendUnsigned(buff, buff_end, ptr_value, 16,
+ SANITIZER_POINTER_FORMAT_LENGTH,
+ true /* pad_with_zero */, false /* uppercase */);
+ return result;
+}
+
+int VSNPrintf(char *buff, int buff_length,
+ const char *format, va_list args) {
+ static const char *kPrintfFormatsHelp =
+ "Supported Printf formats: %([0-9]*)?(z|ll)?{d,u,x,X}; %p; "
+ "%[-]([0-9]*)?(\\.\\*)?s; %c\n";
+ RAW_CHECK(format);
+ RAW_CHECK(buff_length > 0);
+ const char *buff_end = &buff[buff_length - 1];
+ const char *cur = format;
+ int result = 0;
+ for (; *cur; cur++) {
+ if (*cur != '%') {
+ result += AppendChar(&buff, buff_end, *cur);
+ continue;
+ }
+ cur++;
+ bool left_justified = *cur == '-';
+ if (left_justified)
+ cur++;
+ bool have_width = (*cur >= '0' && *cur <= '9');
+ bool pad_with_zero = (*cur == '0');
+ int width = 0;
+ if (have_width) {
+ while (*cur >= '0' && *cur <= '9') {
+ width = width * 10 + *cur++ - '0';
+ }
+ }
+ bool have_precision = (cur[0] == '.' && cur[1] == '*');
+ int precision = -1;
+ if (have_precision) {
+ cur += 2;
+ precision = va_arg(args, int);
+ }
+ bool have_z = (*cur == 'z');
+ cur += have_z;
+ bool have_ll = !have_z && (cur[0] == 'l' && cur[1] == 'l');
+ cur += have_ll * 2;
+ s64 dval;
+ u64 uval;
+ const bool have_length = have_z || have_ll;
+ const bool have_flags = have_width || have_length;
+ // At the moment only %s supports precision and left-justification.
+ CHECK(!((precision >= 0 || left_justified) && *cur != 's'));
+ switch (*cur) {
+ case 'd': {
+ dval = have_ll ? va_arg(args, s64)
+ : have_z ? va_arg(args, sptr)
+ : va_arg(args, int);
+ result += AppendSignedDecimal(&buff, buff_end, dval, width,
+ pad_with_zero);
+ break;
+ }
+ case 'u':
+ case 'x':
+ case 'X': {
+ uval = have_ll ? va_arg(args, u64)
+ : have_z ? va_arg(args, uptr)
+ : va_arg(args, unsigned);
+ bool uppercase = (*cur == 'X');
+ result += AppendUnsigned(&buff, buff_end, uval, (*cur == 'u') ? 10 : 16,
+ width, pad_with_zero, uppercase);
+ break;
+ }
+ case 'p': {
+ RAW_CHECK_MSG(!have_flags, kPrintfFormatsHelp);
+ result += AppendPointer(&buff, buff_end, va_arg(args, uptr));
+ break;
+ }
+ case 's': {
+ RAW_CHECK_MSG(!have_length, kPrintfFormatsHelp);
+ // Only left-justified width is supported.
+ CHECK(!have_width || left_justified);
+ result += AppendString(&buff, buff_end, left_justified ? -width : width,
+ precision, va_arg(args, char*));
+ break;
+ }
+ case 'c': {
+ RAW_CHECK_MSG(!have_flags, kPrintfFormatsHelp);
+ result += AppendChar(&buff, buff_end, va_arg(args, int));
+ break;
+ }
+ case '%' : {
+ RAW_CHECK_MSG(!have_flags, kPrintfFormatsHelp);
+ result += AppendChar(&buff, buff_end, '%');
+ break;
+ }
+ default: {
+ RAW_CHECK_MSG(false, kPrintfFormatsHelp);
+ }
+ }
+ }
+ RAW_CHECK(buff <= buff_end);
+ AppendChar(&buff, buff_end + 1, '\0');
+ return result;
+}
+
+static void (*PrintfAndReportCallback)(const char *);
+void SetPrintfAndReportCallback(void (*callback)(const char *)) {
+ PrintfAndReportCallback = callback;
+}
+
+// Can be overridden in the frontend.
+#if SANITIZER_GO && defined(TSAN_EXTERNAL_HOOKS)
+// Implementation must be defined in frontend.
+extern "C" void OnPrint(const char *str);
+#else
+SANITIZER_INTERFACE_WEAK_DEF(void, OnPrint, const char *str) {
+ (void)str;
+}
+#endif
+
+static void CallPrintfAndReportCallback(const char *str) {
+ OnPrint(str);
+ if (PrintfAndReportCallback)
+ PrintfAndReportCallback(str);
+}
+
+static void NOINLINE SharedPrintfCodeNoBuffer(bool append_pid,
+ char *local_buffer,
+ int buffer_size,
+ const char *format,
+ va_list args) {
+ va_list args2;
+ va_copy(args2, args);
+ const int kLen = 16 * 1024;
+ int needed_length;
+ char *buffer = local_buffer;
+ // First try to print a message using a local buffer, and then fall back to
+ // an mmap-ed buffer.
+ for (int use_mmap = 0; use_mmap < 2; use_mmap++) {
+ if (use_mmap) {
+ va_end(args);
+ va_copy(args, args2);
+ buffer = (char*)MmapOrDie(kLen, "Report");
+ buffer_size = kLen;
+ }
+ needed_length = 0;
+ // Check that data fits into the current buffer.
+# define CHECK_NEEDED_LENGTH \
+ if (needed_length >= buffer_size) { \
+ if (!use_mmap) continue; \
+ RAW_CHECK_MSG(needed_length < kLen, \
+ "Buffer in Report is too short!\n"); \
+ }
+ // Fuchsia's logging infrastructure always keeps track of the logging
+ // process, thread, and timestamp, so never prepend such information.
+ if (!SANITIZER_FUCHSIA && append_pid) {
+ int pid = internal_getpid();
+ const char *exe_name = GetProcessName();
+ if (common_flags()->log_exe_name && exe_name) {
+ needed_length += internal_snprintf(buffer, buffer_size,
+ "==%s", exe_name);
+ CHECK_NEEDED_LENGTH
+ }
+ needed_length += internal_snprintf(
+ buffer + needed_length, buffer_size - needed_length, "==%d==", pid);
+ CHECK_NEEDED_LENGTH
+ }
+ needed_length += VSNPrintf(buffer + needed_length,
+ buffer_size - needed_length, format, args);
+ CHECK_NEEDED_LENGTH
+ // If the message fit into the buffer, print it and exit.
+ break;
+# undef CHECK_NEEDED_LENGTH
+ }
+ RawWrite(buffer);
+
+ // Remove color sequences from the message.
+ RemoveANSIEscapeSequencesFromString(buffer);
+ CallPrintfAndReportCallback(buffer);
+ LogMessageOnPrintf(buffer);
+
+ // If we had mapped any memory, clean up.
+ if (buffer != local_buffer)
+ UnmapOrDie((void *)buffer, buffer_size);
+ va_end(args2);
+}
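
The control flow above — format into a small stack buffer first, and redo the work into a large mmap-ed buffer only if the message did not fit — can be sketched in portable C++ as follows; print_message is a hypothetical name and malloc stands in for MmapOrDie:

#include <cstdarg>
#include <cstdio>
#include <cstdlib>

static void print_message(const char *format, ...) {
  char local[64]; // small first-try buffer, like |local_buffer| above
  va_list args, args2;
  va_start(args, format);
  va_copy(args2, args); // keep a second copy for the retry pass
  int needed = std::vsnprintf(local, sizeof(local), format, args);
  va_end(args);
  if (needed >= 0 && needed < (int)sizeof(local)) {
    std::fputs(local, stdout); // fit on the first pass
  } else if (needed >= 0) {
    char *big = (char *)std::malloc(needed + 1); // fallback buffer
    if (big) {
      std::vsnprintf(big, needed + 1, format, args2);
      std::fputs(big, stdout);
      std::free(big);
    }
  }
  va_end(args2);
}

int main() {
  print_message("a message longer than sixty-four characters: %s\n",
                "it takes the fallback path");
}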
+
+static void NOINLINE SharedPrintfCode(bool append_pid, const char *format,
+ va_list args) {
+ // |local_buffer| is small enough not to overflow the stack and/or violate
+ // the stack limit enforced by TSan (-Wframe-larger-than=512). On the other
+ // hand, the bigger the buffer is, the more the chance the error report will
+ // fit into it.
+ char local_buffer[400];
+ SharedPrintfCodeNoBuffer(append_pid, local_buffer, ARRAY_SIZE(local_buffer),
+ format, args);
+}
+
+FORMAT(1, 2)
+void Printf(const char *format, ...) {
+ va_list args;
+ va_start(args, format);
+ SharedPrintfCode(false, format, args);
+ va_end(args);
+}
+
+// Like Printf, but prints the current PID before the output string.
+FORMAT(1, 2)
+void Report(const char *format, ...) {
+ va_list args;
+ va_start(args, format);
+ SharedPrintfCode(true, format, args);
+ va_end(args);
+}
+
+// Writes at most "length" characters to "buffer" (including the trailing
+// '\0'). Returns the number of characters that should have been written
+// (not counting the trailing '\0'); thus, the string was truncated
+// iff the return value is not less than "length".
+FORMAT(3, 4)
+int internal_snprintf(char *buffer, uptr length, const char *format, ...) {
+ va_list args;
+ va_start(args, format);
+ int needed_length = VSNPrintf(buffer, length, format, args);
+ va_end(args);
+ return needed_length;
+}
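
Callers detect truncation by comparing the return value against the buffer size; a short sketch (pid and msg are hypothetical variables):

char buf[128];
int needed = internal_snprintf(buf, sizeof(buf), "==%d== %s", pid, msg);
if (needed >= (int)sizeof(buf)) {
  // Truncated: the full message would have needed needed + 1 bytes.
}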
+
+FORMAT(2, 3)
+void InternalScopedString::append(const char *format, ...) {
+ CHECK_LT(length_, size());
+ va_list args;
+ va_start(args, format);
+ VSNPrintf(data() + length_, size() - length_, format, args);
+ va_end(args);
+ length_ += internal_strlen(data() + length_);
+ CHECK_LT(length_, size());
+}
+
+} // namespace __sanitizer
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_procmaps.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_procmaps.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_procmaps.h (revision 351984)
@@ -0,0 +1,99 @@
+//===-- sanitizer_procmaps.h ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer.
+//
+// Information about the process mappings.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_PROCMAPS_H
+#define SANITIZER_PROCMAPS_H
+
+#include "sanitizer_platform.h"
+
+#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD || \
+ SANITIZER_OPENBSD || SANITIZER_MAC || SANITIZER_SOLARIS
+
+#include "sanitizer_common.h"
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_linux.h"
+#include "sanitizer_mac.h"
+#include "sanitizer_mutex.h"
+
+namespace __sanitizer {
+
+// Memory protection masks.
+static const uptr kProtectionRead = 1;
+static const uptr kProtectionWrite = 2;
+static const uptr kProtectionExecute = 4;
+static const uptr kProtectionShared = 8;
+
+struct MemoryMappedSegmentData;
+
+class MemoryMappedSegment {
+ public:
+ MemoryMappedSegment(char *buff = nullptr, uptr size = 0)
+ : filename(buff), filename_size(size), data_(nullptr) {}
+ ~MemoryMappedSegment() {}
+
+ bool IsReadable() const { return protection & kProtectionRead; }
+ bool IsWritable() const { return protection & kProtectionWrite; }
+ bool IsExecutable() const { return protection & kProtectionExecute; }
+ bool IsShared() const { return protection & kProtectionShared; }
+
+ void AddAddressRanges(LoadedModule *module);
+
+ uptr start;
+ uptr end;
+ uptr offset;
+ char *filename; // owned by caller
+ uptr filename_size;
+ uptr protection;
+ ModuleArch arch;
+ u8 uuid[kModuleUUIDSize];
+
+ private:
+ friend class MemoryMappingLayout;
+
+ // This field is assigned and owned by MemoryMappingLayout if needed
+ MemoryMappedSegmentData *data_;
+};
+
+class MemoryMappingLayout {
+ public:
+ explicit MemoryMappingLayout(bool cache_enabled);
+ ~MemoryMappingLayout();
+ bool Next(MemoryMappedSegment *segment);
+ bool Error() const;
+ void Reset();
+ // In some cases, e.g. when running under a sandbox on Linux, ASan is unable
+ // to obtain the memory mappings. It should fall back to pre-cached data
+ // instead of aborting.
+ static void CacheMemoryMappings();
+
+ // Adds all mapped objects into a vector.
+ void DumpListOfModules(InternalMmapVectorNoCtor<LoadedModule> *modules);
+
+ private:
+ void LoadFromCache();
+
+ MemoryMappingLayoutData data_;
+};
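
A hedged usage sketch of this interface, as runtime-internal code (InternalScopedString, kMaxPathLength and Printf are the sanitizer helpers used elsewhere in this diff):

void PrintExecutableMappings() {
  MemoryMappingLayout layout(/*cache_enabled=*/false);
  InternalScopedString filename(kMaxPathLength);
  MemoryMappedSegment segment(filename.data(), filename.size());
  while (layout.Next(&segment)) {
    if (segment.IsExecutable())
      Printf("%zx-%zx %s\n", segment.start, segment.end, segment.filename);
  }
}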
+
+// Returns code range for the specified module.
+bool GetCodeRangeForFile(const char *module, uptr *start, uptr *end);
+
+bool IsDecimal(char c);
+uptr ParseDecimal(const char **p);
+bool IsHex(char c);
+uptr ParseHex(const char **p);
+
+} // namespace __sanitizer
+
+#endif
+#endif // SANITIZER_PROCMAPS_H
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_procmaps_bsd.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_procmaps_bsd.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_procmaps_bsd.cc (revision 351984)
@@ -0,0 +1,139 @@
+//===-- sanitizer_procmaps_bsd.cc -----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Information about the process mappings
+// (FreeBSD, OpenBSD and NetBSD-specific parts).
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+#if SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_OPENBSD
+#include "sanitizer_common.h"
+#if SANITIZER_FREEBSD
+#include "sanitizer_freebsd.h"
+#endif
+#include "sanitizer_procmaps.h"
+
+// clang-format off
+#include <sys/types.h>
+#include <sys/sysctl.h>
+// clang-format on
+#include <unistd.h>
+#if SANITIZER_FREEBSD
+#include <sys/user.h>
+#endif
+
+#include <limits.h>
+#if SANITIZER_OPENBSD
+#define KVME_PROT_READ KVE_PROT_READ
+#define KVME_PROT_WRITE KVE_PROT_WRITE
+#define KVME_PROT_EXEC KVE_PROT_EXEC
+#endif
+
+// Fix 'kinfo_vmentry' definition on FreeBSD prior to v9.2 in 32-bit mode.
+#if SANITIZER_FREEBSD && (SANITIZER_WORDSIZE == 32)
+#include <osreldate.h>
+#if __FreeBSD_version <= 902001 // v9.2
+#define kinfo_vmentry xkinfo_vmentry
+#endif
+#endif
+
+namespace __sanitizer {
+
+void ReadProcMaps(ProcSelfMapsBuff *proc_maps) {
+ const int Mib[] = {
+#if SANITIZER_FREEBSD
+ CTL_KERN,
+ KERN_PROC,
+ KERN_PROC_VMMAP,
+ getpid()
+#elif SANITIZER_OPENBSD
+ CTL_KERN,
+ KERN_PROC_VMMAP,
+ getpid()
+#elif SANITIZER_NETBSD
+ CTL_VM,
+ VM_PROC,
+ VM_PROC_MAP,
+ getpid(),
+ sizeof(struct kinfo_vmentry)
+#else
+#error "not supported"
+#endif
+ };
+
+ uptr Size = 0;
+ int Err = internal_sysctl(Mib, ARRAY_SIZE(Mib), NULL, &Size, NULL, 0);
+ CHECK_EQ(Err, 0);
+ CHECK_GT(Size, 0);
+
+#if !SANITIZER_OPENBSD
+ size_t MmapedSize = Size * 4 / 3;
+ void *VmMap = MmapOrDie(MmapedSize, "ReadProcMaps()");
+ Size = MmapedSize;
+ Err = internal_sysctl(Mib, ARRAY_SIZE(Mib), VmMap, &Size, NULL, 0);
+ CHECK_EQ(Err, 0);
+ proc_maps->data = (char *)VmMap;
+#else
+ size_t PageSize = GetPageSize();
+ size_t MmapedSize = Size;
+ MmapedSize = ((MmapedSize - 1) / PageSize + 1) * PageSize;
+ char *Mem = (char *)MmapOrDie(MmapedSize, "ReadProcMaps()");
+ Size = 2 * Size + 10 * sizeof(struct kinfo_vmentry);
+ if (Size > 0x10000)
+ Size = 0x10000;
+ Size = (Size / sizeof(struct kinfo_vmentry)) * sizeof(struct kinfo_vmentry);
+ Err = internal_sysctl(Mib, ARRAY_SIZE(Mib), Mem, &Size, NULL, 0);
+ CHECK_EQ(Err, 0);
+ MmapedSize = Size;
+ proc_maps->data = Mem;
+#endif
+
+ proc_maps->mmaped_size = MmapedSize;
+ proc_maps->len = Size;
+}
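
The function relies on the classic two-step sysctl pattern: query the required size with a null buffer, over-allocate (the map can grow between the calls), then fetch the data. A standalone FreeBSD-only sketch:

#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/user.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>

int main() {
  int mib[4] = {CTL_KERN, KERN_PROC, KERN_PROC_VMMAP, getpid()};
  size_t size = 0;
  // First call: NULL buffer, so sysctl reports the needed size in 'size'.
  if (sysctl(mib, 4, NULL, &size, NULL, 0) != 0) return 1;
  size = size * 4 / 3; // slack, as in ReadProcMaps() above
  void *buf = malloc(size);
  if (!buf) return 1;
  // Second call: fetch the entries; 'size' is updated to the bytes written.
  if (sysctl(mib, 4, buf, &size, NULL, 0) != 0) return 1;
  printf("vmmap blob: %zu bytes\n", size);
  free(buf);
  return 0;
}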
+
+bool MemoryMappingLayout::Next(MemoryMappedSegment *segment) {
+ CHECK(!Error()); // cannot fail
+ char *last = data_.proc_self_maps.data + data_.proc_self_maps.len;
+ if (data_.current >= last)
+ return false;
+ const struct kinfo_vmentry *VmEntry =
+ (const struct kinfo_vmentry *)data_.current;
+
+ segment->start = (uptr)VmEntry->kve_start;
+ segment->end = (uptr)VmEntry->kve_end;
+ segment->offset = (uptr)VmEntry->kve_offset;
+
+ segment->protection = 0;
+ if ((VmEntry->kve_protection & KVME_PROT_READ) != 0)
+ segment->protection |= kProtectionRead;
+ if ((VmEntry->kve_protection & KVME_PROT_WRITE) != 0)
+ segment->protection |= kProtectionWrite;
+ if ((VmEntry->kve_protection & KVME_PROT_EXEC) != 0)
+ segment->protection |= kProtectionExecute;
+
+#if !SANITIZER_OPENBSD
+ if (segment->filename != NULL && segment->filename_size > 0) {
+ internal_snprintf(segment->filename,
+ Min(segment->filename_size, (uptr)PATH_MAX), "%s",
+ VmEntry->kve_path);
+ }
+#endif
+
+#if SANITIZER_FREEBSD
+ data_.current += VmEntry->kve_structsize;
+#else
+ data_.current += sizeof(*VmEntry);
+#endif
+
+ return true;
+}
+
+} // namespace __sanitizer
+
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_procmaps_bsd.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_procmaps_common.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_procmaps_common.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_procmaps_common.cc (revision 351984)
@@ -0,0 +1,174 @@
+//===-- sanitizer_procmaps_common.cc --------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Information about the process mappings (common parts).
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+
+#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
+ SANITIZER_OPENBSD || SANITIZER_SOLARIS
+
+#include "sanitizer_common.h"
+#include "sanitizer_placement_new.h"
+#include "sanitizer_procmaps.h"
+
+namespace __sanitizer {
+
+static ProcSelfMapsBuff cached_proc_self_maps;
+static StaticSpinMutex cache_lock;
+
+static int TranslateDigit(char c) {
+ if (c >= '0' && c <= '9')
+ return c - '0';
+ if (c >= 'a' && c <= 'f')
+ return c - 'a' + 10;
+ if (c >= 'A' && c <= 'F')
+ return c - 'A' + 10;
+ return -1;
+}
+
+// Parse a number and promote 'p' up to the first non-digit character.
+static uptr ParseNumber(const char **p, int base) {
+ uptr n = 0;
+ int d;
+ CHECK(base >= 2 && base <= 16);
+ while ((d = TranslateDigit(**p)) >= 0 && d < base) {
+ n = n * base + d;
+ (*p)++;
+ }
+ return n;
+}
+
+bool IsDecimal(char c) {
+ int d = TranslateDigit(c);
+ return d >= 0 && d < 10;
+}
+
+uptr ParseDecimal(const char **p) {
+ return ParseNumber(p, 10);
+}
+
+bool IsHex(char c) {
+ int d = TranslateDigit(c);
+ return d >= 0 && d < 16;
+}
+
+uptr ParseHex(const char **p) {
+ return ParseNumber(p, 16);
+}
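
These helpers advance the cursor past the digits they consume, which is what lets the procmaps parsers below walk a line field by field; a short in-runtime sketch with hypothetical values:

const char *p = "7f3a-7f4b rw";
uptr start = ParseHex(&p); // start == 0x7f3a, p now points at '-'
p++;                       // skip the '-'
uptr end = ParseHex(&p);   // end == 0x7f4b, p now points at ' '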
+
+void MemoryMappedSegment::AddAddressRanges(LoadedModule *module) {
+ // data_ should be unused on this platform
+ CHECK(!data_);
+ module->addAddressRange(start, end, IsExecutable(), IsWritable());
+}
+
+MemoryMappingLayout::MemoryMappingLayout(bool cache_enabled) {
+ // FIXME: in the future we may want to cache the mappings on demand only.
+ if (cache_enabled)
+ CacheMemoryMappings();
+
+ // Read maps after the cache update to capture the maps/unmaps happening in
+ // the process of updating.
+ ReadProcMaps(&data_.proc_self_maps);
+ if (cache_enabled && data_.proc_self_maps.mmaped_size == 0)
+ LoadFromCache();
+
+ Reset();
+}
+
+bool MemoryMappingLayout::Error() const {
+ return data_.current == nullptr;
+}
+
+MemoryMappingLayout::~MemoryMappingLayout() {
+ // Only unmap the buffer if it is different from the cached one. Otherwise
+ // it will be unmapped when the cache is refreshed.
+ if (data_.proc_self_maps.data != cached_proc_self_maps.data)
+ UnmapOrDie(data_.proc_self_maps.data, data_.proc_self_maps.mmaped_size);
+}
+
+void MemoryMappingLayout::Reset() {
+ data_.current = data_.proc_self_maps.data;
+}
+
+// static
+void MemoryMappingLayout::CacheMemoryMappings() {
+ ProcSelfMapsBuff new_proc_self_maps;
+ ReadProcMaps(&new_proc_self_maps);
+ // Don't invalidate the cache if the mappings are unavailable.
+ if (new_proc_self_maps.mmaped_size == 0)
+ return;
+ SpinMutexLock l(&cache_lock);
+ if (cached_proc_self_maps.mmaped_size)
+ UnmapOrDie(cached_proc_self_maps.data, cached_proc_self_maps.mmaped_size);
+ cached_proc_self_maps = new_proc_self_maps;
+}
+
+void MemoryMappingLayout::LoadFromCache() {
+ SpinMutexLock l(&cache_lock);
+ if (cached_proc_self_maps.data)
+ data_.proc_self_maps = cached_proc_self_maps;
+}
+
+void MemoryMappingLayout::DumpListOfModules(
+ InternalMmapVectorNoCtor<LoadedModule> *modules) {
+ Reset();
+ InternalScopedString module_name(kMaxPathLength);
+ MemoryMappedSegment segment(module_name.data(), module_name.size());
+ for (uptr i = 0; Next(&segment); i++) {
+ const char *cur_name = segment.filename;
+ if (cur_name[0] == '\0')
+ continue;
+ // Don't subtract 'cur_beg' from the first entry:
+ // * If a binary is compiled w/o -pie, then the first entry in
+ // process maps is likely the binary itself (all dynamic libs
+ // are mapped higher in address space). For such a binary,
+ // instruction offset in binary coincides with the actual
+ // instruction address in virtual memory (as code section
+ // is mapped to a fixed memory range).
+ // * If a binary is compiled with -pie, all the modules are
+ // mapped high in the address space (in particular, higher than the
+ // shadow memory of the tool), so the module can't be the
+ // first entry.
+ uptr base_address = (i ? segment.start : 0) - segment.offset;
+ LoadedModule cur_module;
+ cur_module.set(cur_name, base_address);
+ segment.AddAddressRanges(&cur_module);
+ modules->push_back(cur_module);
+ }
+}
+
+void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) {
+ char *smaps = nullptr;
+ uptr smaps_cap = 0;
+ uptr smaps_len = 0;
+ if (!ReadFileToBuffer("/proc/self/smaps", &smaps, &smaps_cap, &smaps_len))
+ return;
+ uptr start = 0;
+ bool file = false;
+ const char *pos = smaps;
+ while (pos < smaps + smaps_len) {
+ if (IsHex(pos[0])) {
+ start = ParseHex(&pos);
+ for (; *pos != '/' && *pos > '\n'; pos++) {}
+ file = *pos == '/';
+ } else if (internal_strncmp(pos, "Rss:", 4) == 0) {
+ while (!IsDecimal(*pos)) pos++;
+ uptr rss = ParseDecimal(&pos) * 1024;
+ cb(start, rss, file, stats, stats_size);
+ }
+ while (*pos++ != '\n') {}
+ }
+ UnmapOrDie(smaps, smaps_cap);
+}
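
For reference, the parser above keys on two line shapes from /proc/self/smaps (illustrative excerpt; values are made up): a hex range line, which sets start and whether the mapping is file-backed, and an Rss: line, which fires the callback with the value scaled from kB to bytes.

559e4bfcd000-559e4bfcf000 r--p 00000000 103:02 1835298 /usr/bin/cat
Size: 8 kB
Rss: 8 kB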
+
+} // namespace __sanitizer
+
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_procmaps_common.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_procmaps_linux.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_procmaps_linux.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_procmaps_linux.cc (revision 351984)
@@ -0,0 +1,81 @@
+//===-- sanitizer_procmaps_linux.cc ---------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Information about the process mappings (Linux-specific parts).
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+#if SANITIZER_LINUX
+#include "sanitizer_common.h"
+#include "sanitizer_procmaps.h"
+
+namespace __sanitizer {
+
+void ReadProcMaps(ProcSelfMapsBuff *proc_maps) {
+ if (!ReadFileToBuffer("/proc/self/maps", &proc_maps->data,
+ &proc_maps->mmaped_size, &proc_maps->len)) {
+ proc_maps->data = nullptr;
+ proc_maps->mmaped_size = 0;
+ proc_maps->len = 0;
+ }
+}
+
+static bool IsOneOf(char c, char c1, char c2) {
+ return c == c1 || c == c2;
+}
+
+bool MemoryMappingLayout::Next(MemoryMappedSegment *segment) {
+ if (Error()) return false; // simulate empty maps
+ char *last = data_.proc_self_maps.data + data_.proc_self_maps.len;
+ if (data_.current >= last) return false;
+ char *next_line =
+ (char *)internal_memchr(data_.current, '\n', last - data_.current);
+ if (next_line == 0)
+ next_line = last;
+ // Example: 08048000-08056000 r-xp 00000000 03:0c 64593 /foo/bar
+ segment->start = ParseHex(&data_.current);
+ CHECK_EQ(*data_.current++, '-');
+ segment->end = ParseHex(&data_.current);
+ CHECK_EQ(*data_.current++, ' ');
+ CHECK(IsOneOf(*data_.current, '-', 'r'));
+ segment->protection = 0;
+ if (*data_.current++ == 'r') segment->protection |= kProtectionRead;
+ CHECK(IsOneOf(*data_.current, '-', 'w'));
+ if (*data_.current++ == 'w') segment->protection |= kProtectionWrite;
+ CHECK(IsOneOf(*data_.current, '-', 'x'));
+ if (*data_.current++ == 'x') segment->protection |= kProtectionExecute;
+ CHECK(IsOneOf(*data_.current, 's', 'p'));
+ if (*data_.current++ == 's') segment->protection |= kProtectionShared;
+ CHECK_EQ(*data_.current++, ' ');
+ segment->offset = ParseHex(&data_.current);
+ CHECK_EQ(*data_.current++, ' ');
+ ParseHex(&data_.current);
+ CHECK_EQ(*data_.current++, ':');
+ ParseHex(&data_.current);
+ CHECK_EQ(*data_.current++, ' ');
+ while (IsDecimal(*data_.current)) data_.current++;
+ // Qemu may lack the trailing space.
+ // https://github.com/google/sanitizers/issues/160
+ // CHECK_EQ(*data_.current++, ' ');
+ // Skip spaces.
+ while (data_.current < next_line && *data_.current == ' ') data_.current++;
+ // Fill in the filename.
+ if (segment->filename) {
+ uptr len =
+ Min((uptr)(next_line - data_.current), segment->filename_size - 1);
+ internal_strncpy(segment->filename, data_.current, len);
+ segment->filename[len] = 0;
+ }
+
+ data_.current = next_line + 1;
+ return true;
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_LINUX
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_procmaps_linux.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_procmaps_mac.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_procmaps_mac.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_procmaps_mac.cc (revision 351984)
@@ -0,0 +1,378 @@
+//===-- sanitizer_procmaps_mac.cc -----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Information about the process mappings (Mac-specific parts).
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+#if SANITIZER_MAC
+#include "sanitizer_common.h"
+#include "sanitizer_placement_new.h"
+#include "sanitizer_procmaps.h"
+
+#include <mach-o/dyld.h>
+#include <mach-o/loader.h>
+#include <mach/mach.h>
+
+// These are not available in older macOS SDKs.
+#ifndef CPU_SUBTYPE_X86_64_H
+#define CPU_SUBTYPE_X86_64_H ((cpu_subtype_t)8) /* Haswell */
+#endif
+#ifndef CPU_SUBTYPE_ARM_V7S
+#define CPU_SUBTYPE_ARM_V7S ((cpu_subtype_t)11) /* Swift */
+#endif
+#ifndef CPU_SUBTYPE_ARM_V7K
+#define CPU_SUBTYPE_ARM_V7K ((cpu_subtype_t)12)
+#endif
+#ifndef CPU_TYPE_ARM64
+#define CPU_TYPE_ARM64 (CPU_TYPE_ARM | CPU_ARCH_ABI64)
+#endif
+
+namespace __sanitizer {
+
+// Contains information used to iterate through sections.
+struct MemoryMappedSegmentData {
+ char name[kMaxSegName];
+ uptr nsects;
+ const char *current_load_cmd_addr;
+ u32 lc_type;
+ uptr base_virt_addr;
+ uptr addr_mask;
+};
+
+template <typename Section>
+static void NextSectionLoad(LoadedModule *module, MemoryMappedSegmentData *data,
+ bool isWritable) {
+ const Section *sc = (const Section *)data->current_load_cmd_addr;
+ data->current_load_cmd_addr += sizeof(Section);
+
+ uptr sec_start = (sc->addr & data->addr_mask) + data->base_virt_addr;
+ uptr sec_end = sec_start + sc->size;
+ module->addAddressRange(sec_start, sec_end, /*executable=*/false, isWritable,
+ sc->sectname);
+}
+
+void MemoryMappedSegment::AddAddressRanges(LoadedModule *module) {
+ // Don't iterate over sections when the caller hasn't set up the
+ // data pointer, when there are no sections, or when the segment
+ // is executable. Avoid iterating over executable sections because
+ // it will confuse libignore, and because the extra granularity
+ // of information is not needed by any sanitizers.
+ if (!data_ || !data_->nsects || IsExecutable()) {
+ module->addAddressRange(start, end, IsExecutable(), IsWritable(),
+ data_ ? data_->name : nullptr);
+ return;
+ }
+
+ do {
+ if (data_->lc_type == LC_SEGMENT) {
+ NextSectionLoad<struct section>(module, data_, IsWritable());
+#ifdef MH_MAGIC_64
+ } else if (data_->lc_type == LC_SEGMENT_64) {
+ NextSectionLoad<struct section_64>(module, data_, IsWritable());
+#endif
+ }
+ } while (--data_->nsects);
+}
+
+MemoryMappingLayout::MemoryMappingLayout(bool cache_enabled) {
+ Reset();
+}
+
+MemoryMappingLayout::~MemoryMappingLayout() {
+}
+
+bool MemoryMappingLayout::Error() const {
+ return false;
+}
+
+// More information about Mach-O headers can be found in mach-o/loader.h
+// Each Mach-O image has a header (mach_header or mach_header_64) starting with
+// a magic number, and a list of linker load commands directly following the
+// header.
+// A load command is at least two 32-bit words: the command type and the
+// command size in bytes. We're interested only in segment load commands
+// (LC_SEGMENT and LC_SEGMENT_64), which tell that a part of the file is mapped
+// into the task's address space.
+// The |vmaddr|, |vmsize| and |fileoff| fields of segment_command or
+// segment_command_64 correspond to the memory address, memory size and the
+// file offset of the current memory segment.
+// Because these fields are taken from the images as is, one needs to add
+// _dyld_get_image_vmaddr_slide() to get the actual addresses at runtime.
+
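A standalone macOS sketch of the traversal just described: for each loaded image, step through the load commands and count the segment loads (64-bit images only; error handling elided):

#include <mach-o/dyld.h>
#include <mach-o/loader.h>
#include <cstdint>
#include <cstdio>

int main() {
  for (uint32_t i = 0; i < _dyld_image_count(); i++) {
    const struct mach_header_64 *hdr =
        (const struct mach_header_64 *)_dyld_get_image_header(i);
    if (!hdr || hdr->magic != MH_MAGIC_64) continue;
    // Load commands start immediately after the header.
    const char *lc = (const char *)hdr + sizeof(*hdr);
    uint32_t nsegs = 0;
    for (uint32_t c = 0; c < hdr->ncmds; c++) {
      const struct load_command *cmd = (const struct load_command *)lc;
      if (cmd->cmd == LC_SEGMENT_64) nsegs++;
      lc += cmd->cmdsize; // cmdsize is how commands are chained
    }
    std::printf("%s: %u segments\n", _dyld_get_image_name(i), nsegs);
  }
}
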
+void MemoryMappingLayout::Reset() {
+ // Count down from the top.
+ // TODO(glider): as per man 3 dyld, iterating over the headers with
+ // _dyld_image_count is thread-unsafe. We need to register callbacks for
+ // adding and removing images which will invalidate the MemoryMappingLayout
+ // state.
+ data_.current_image = _dyld_image_count();
+ data_.current_load_cmd_count = -1;
+ data_.current_load_cmd_addr = 0;
+ data_.current_magic = 0;
+ data_.current_filetype = 0;
+ data_.current_arch = kModuleArchUnknown;
+ internal_memset(data_.current_uuid, 0, kModuleUUIDSize);
+}
+
+// The dyld load address should be unchanged throughout process execution,
+// and it is expensive to compute once many libraries have been loaded,
+// so cache it here and do not reset.
+static mach_header *dyld_hdr = 0;
+static const char kDyldPath[] = "/usr/lib/dyld";
+static const int kDyldImageIdx = -1;
+
+// static
+void MemoryMappingLayout::CacheMemoryMappings() {
+ // No-op on Mac for now.
+}
+
+void MemoryMappingLayout::LoadFromCache() {
+ // No-op on Mac for now.
+}
+
+// _dyld_get_image_header() and related APIs don't report dyld itself.
+// We work around this by manually recursing through the memory map
+// until we hit a Mach header matching dyld instead. These recursive
+// calls are expensive, but the first memory map generation occurs
+// early in the process, when dyld is one of the only images loaded,
+// so it will be hit after only a few iterations.
+static mach_header *get_dyld_image_header() {
+ unsigned depth = 1;
+ vm_size_t size = 0;
+ vm_address_t address = 0;
+ kern_return_t err = KERN_SUCCESS;
+ mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
+
+ while (true) {
+ struct vm_region_submap_info_64 info;
+ err = vm_region_recurse_64(mach_task_self(), &address, &size, &depth,
+ (vm_region_info_t)&info, &count);
+ if (err != KERN_SUCCESS) return nullptr;
+
+ if (size >= sizeof(mach_header) && info.protection & kProtectionRead) {
+ mach_header *hdr = (mach_header *)address;
+ if ((hdr->magic == MH_MAGIC || hdr->magic == MH_MAGIC_64) &&
+ hdr->filetype == MH_DYLINKER) {
+ return hdr;
+ }
+ }
+ address += size;
+ }
+}
+
+const mach_header *get_dyld_hdr() {
+ if (!dyld_hdr) dyld_hdr = get_dyld_image_header();
+
+ return dyld_hdr;
+}
+
+// Next and NextSegmentLoad were inspired by base/sysinfo.cc in
+// Google Perftools, https://github.com/gperftools/gperftools.
+
+// NextSegmentLoad scans the current image for the next segment load command
+// and returns the start and end addresses and file offset of the corresponding
+// segment.
+// Note that the segment addresses are not necessarily sorted.
+template <u32 kLCSegment, typename SegmentCommand>
+static bool NextSegmentLoad(MemoryMappedSegment *segment,
+                            MemoryMappedSegmentData *seg_data,
+                            MemoryMappingLayoutData &layout_data) {
+ const char *lc = layout_data.current_load_cmd_addr;
+ layout_data.current_load_cmd_addr += ((const load_command *)lc)->cmdsize;
+ if (((const load_command *)lc)->cmd == kLCSegment) {
+ const SegmentCommand* sc = (const SegmentCommand *)lc;
+ uptr base_virt_addr, addr_mask;
+ if (layout_data.current_image == kDyldImageIdx) {
+ base_virt_addr = (uptr)get_dyld_hdr();
+ // vmaddr is masked with 0xfffff because on macOS versions < 10.12,
+ // it contains an absolute address rather than an offset for dyld.
+ // To make matters even more complicated, this absolute address
+ // isn't actually the absolute segment address, but the offset portion
+ // of the address is accurate when combined with the dyld base address,
+ // and the mask will give just this offset.
+ addr_mask = 0xfffff;
+ } else {
+ base_virt_addr =
+ (uptr)_dyld_get_image_vmaddr_slide(layout_data.current_image);
+ addr_mask = ~0;
+ }
+
+ segment->start = (sc->vmaddr & addr_mask) + base_virt_addr;
+ segment->end = segment->start + sc->vmsize;
+ // Most callers don't need section information, so only fill this struct
+ // when required.
+ if (seg_data) {
+ seg_data->nsects = sc->nsects;
+ seg_data->current_load_cmd_addr =
+ (const char *)lc + sizeof(SegmentCommand);
+ seg_data->lc_type = kLCSegment;
+ seg_data->base_virt_addr = base_virt_addr;
+ seg_data->addr_mask = addr_mask;
+ internal_strncpy(seg_data->name, sc->segname,
+ ARRAY_SIZE(seg_data->name));
+ }
+
+ // Return the initial protection.
+ segment->protection = sc->initprot;
+ segment->offset = (layout_data.current_filetype ==
+ /*MH_EXECUTE*/ 0x2)
+ ? sc->vmaddr
+ : sc->fileoff;
+ if (segment->filename) {
+ const char *src = (layout_data.current_image == kDyldImageIdx)
+ ? kDyldPath
+ : _dyld_get_image_name(layout_data.current_image);
+ internal_strncpy(segment->filename, src, segment->filename_size);
+ }
+ segment->arch = layout_data.current_arch;
+ internal_memcpy(segment->uuid, layout_data.current_uuid, kModuleUUIDSize);
+ return true;
+ }
+ return false;
+}
+
+ModuleArch ModuleArchFromCpuType(cpu_type_t cputype, cpu_subtype_t cpusubtype) {
+ cpusubtype = cpusubtype & ~CPU_SUBTYPE_MASK;
+ switch (cputype) {
+ case CPU_TYPE_I386:
+ return kModuleArchI386;
+ case CPU_TYPE_X86_64:
+ if (cpusubtype == CPU_SUBTYPE_X86_64_ALL) return kModuleArchX86_64;
+ if (cpusubtype == CPU_SUBTYPE_X86_64_H) return kModuleArchX86_64H;
+ CHECK(0 && "Invalid subtype of x86_64");
+ return kModuleArchUnknown;
+ case CPU_TYPE_ARM:
+ if (cpusubtype == CPU_SUBTYPE_ARM_V6) return kModuleArchARMV6;
+ if (cpusubtype == CPU_SUBTYPE_ARM_V7) return kModuleArchARMV7;
+ if (cpusubtype == CPU_SUBTYPE_ARM_V7S) return kModuleArchARMV7S;
+ if (cpusubtype == CPU_SUBTYPE_ARM_V7K) return kModuleArchARMV7K;
+ CHECK(0 && "Invalid subtype of ARM");
+ return kModuleArchUnknown;
+ case CPU_TYPE_ARM64:
+ return kModuleArchARM64;
+ default:
+ CHECK(0 && "Invalid CPU type");
+ return kModuleArchUnknown;
+ }
+}
+
+static const load_command *NextCommand(const load_command *lc) {
+ return (const load_command *)((const char *)lc + lc->cmdsize);
+}
+
+static void FindUUID(const load_command *first_lc, u8 *uuid_output) {
+ for (const load_command *lc = first_lc; lc->cmd != 0; lc = NextCommand(lc)) {
+ if (lc->cmd != LC_UUID) continue;
+
+ const uuid_command *uuid_lc = (const uuid_command *)lc;
+ const uint8_t *uuid = &uuid_lc->uuid[0];
+ internal_memcpy(uuid_output, uuid, kModuleUUIDSize);
+ return;
+ }
+}
+
+static bool IsModuleInstrumented(const load_command *first_lc) {
+ for (const load_command *lc = first_lc; lc->cmd != 0; lc = NextCommand(lc)) {
+ if (lc->cmd != LC_LOAD_DYLIB) continue;
+
+ const dylib_command *dylib_lc = (const dylib_command *)lc;
+ uint32_t dylib_name_offset = dylib_lc->dylib.name.offset;
+ const char *dylib_name = ((const char *)dylib_lc) + dylib_name_offset;
+ dylib_name = StripModuleName(dylib_name);
+ if (dylib_name != 0 && (internal_strstr(dylib_name, "libclang_rt."))) {
+ return true;
+ }
+ }
+ return false;
+}
+
+bool MemoryMappingLayout::Next(MemoryMappedSegment *segment) {
+ for (; data_.current_image >= kDyldImageIdx; data_.current_image--) {
+ const mach_header *hdr = (data_.current_image == kDyldImageIdx)
+ ? get_dyld_hdr()
+ : _dyld_get_image_header(data_.current_image);
+ if (!hdr) continue;
+ if (data_.current_load_cmd_count < 0) {
+ // Set up for this image.
+ data_.current_load_cmd_count = hdr->ncmds;
+ data_.current_magic = hdr->magic;
+ data_.current_filetype = hdr->filetype;
+ data_.current_arch = ModuleArchFromCpuType(hdr->cputype, hdr->cpusubtype);
+ switch (data_.current_magic) {
+#ifdef MH_MAGIC_64
+ case MH_MAGIC_64: {
+ data_.current_load_cmd_addr =
+ (const char *)hdr + sizeof(mach_header_64);
+ break;
+ }
+#endif
+ case MH_MAGIC: {
+ data_.current_load_cmd_addr = (const char *)hdr + sizeof(mach_header);
+ break;
+ }
+ default: {
+ continue;
+ }
+ }
+ FindUUID((const load_command *)data_.current_load_cmd_addr,
+ data_.current_uuid);
+ data_.current_instrumented = IsModuleInstrumented(
+ (const load_command *)data_.current_load_cmd_addr);
+ }
+
+ for (; data_.current_load_cmd_count >= 0; data_.current_load_cmd_count--) {
+ switch (data_.current_magic) {
+ // data_.current_magic may be only one of MH_MAGIC, MH_MAGIC_64.
+#ifdef MH_MAGIC_64
+ case MH_MAGIC_64: {
+ if (NextSegmentLoad<LC_SEGMENT_64, struct segment_command_64>(
+ segment, segment->data_, data_))
+ return true;
+ break;
+ }
+#endif
+ case MH_MAGIC: {
+ if (NextSegmentLoad<LC_SEGMENT, struct segment_command>(
+ segment, segment->data_, data_))
+ return true;
+ break;
+ }
+ }
+ }
+ // If we get here, no more load commands in this image talk about
+ // segments. Go on to the next image.
+ }
+ return false;
+}
+
+void MemoryMappingLayout::DumpListOfModules(
+ InternalMmapVectorNoCtor<LoadedModule> *modules) {
+ Reset();
+ InternalScopedString module_name(kMaxPathLength);
+ MemoryMappedSegment segment(module_name.data(), kMaxPathLength);
+ MemoryMappedSegmentData data;
+ segment.data_ = &data;
+ while (Next(&segment)) {
+ if (segment.filename[0] == '\0') continue;
+ LoadedModule *cur_module = nullptr;
+ if (!modules->empty() &&
+ 0 == internal_strcmp(segment.filename, modules->back().full_name())) {
+ cur_module = &modules->back();
+ } else {
+ modules->push_back(LoadedModule());
+ cur_module = &modules->back();
+ cur_module->set(segment.filename, segment.start, segment.arch,
+ segment.uuid, data_.current_instrumented);
+ }
+ segment.AddAddressRanges(cur_module);
+ }
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_MAC
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_procmaps_mac.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_procmaps_solaris.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_procmaps_solaris.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_procmaps_solaris.cc (revision 351984)
@@ -0,0 +1,67 @@
+//===-- sanitizer_procmaps_solaris.cc -------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Information about the process mappings (Solaris-specific parts).
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+#if SANITIZER_SOLARIS
+#include "sanitizer_common.h"
+#include "sanitizer_procmaps.h"
+
+// Before Solaris 11.4, <procfs.h> doesn't work in a largefile environment.
+#undef _FILE_OFFSET_BITS
+#include <procfs.h>
+#include <limits.h>
+
+namespace __sanitizer {
+
+void ReadProcMaps(ProcSelfMapsBuff *proc_maps) {
+ if (!ReadFileToBuffer("/proc/self/xmap", &proc_maps->data,
+ &proc_maps->mmaped_size, &proc_maps->len)) {
+ proc_maps->data = nullptr;
+ proc_maps->mmaped_size = 0;
+ proc_maps->len = 0;
+ }
+}
+
+bool MemoryMappingLayout::Next(MemoryMappedSegment *segment) {
+ if (Error()) return false; // simulate empty maps
+ char *last = data_.proc_self_maps.data + data_.proc_self_maps.len;
+ if (data_.current >= last) return false;
+
+ prxmap_t *xmapentry = (prxmap_t*)data_.current;
+
+ segment->start = (uptr)xmapentry->pr_vaddr;
+ segment->end = (uptr)(xmapentry->pr_vaddr + xmapentry->pr_size);
+ segment->offset = (uptr)xmapentry->pr_offset;
+
+ segment->protection = 0;
+ if ((xmapentry->pr_mflags & MA_READ) != 0)
+ segment->protection |= kProtectionRead;
+ if ((xmapentry->pr_mflags & MA_WRITE) != 0)
+ segment->protection |= kProtectionWrite;
+ if ((xmapentry->pr_mflags & MA_EXEC) != 0)
+ segment->protection |= kProtectionExecute;
+
+ if (segment->filename != NULL && segment->filename_size > 0) {
+ char proc_path[PATH_MAX + 1];
+
+ internal_snprintf(proc_path, sizeof(proc_path), "/proc/self/path/%s",
+ xmapentry->pr_mapname);
+ internal_readlink(proc_path, segment->filename, segment->filename_size);
+ }
+
+ data_.current += sizeof(prxmap_t);
+
+ return true;
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_SOLARIS
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_procmaps_solaris.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_quarantine.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_quarantine.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_quarantine.h (revision 351984)
@@ -0,0 +1,317 @@
+//===-- sanitizer_quarantine.h ----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Memory quarantine for AddressSanitizer and potentially other tools.
+// Quarantine caches some specified amount of memory in per-thread caches,
+// then evicts to global FIFO queue. When the queue reaches specified threshold,
+// oldest memory is recycled.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_QUARANTINE_H
+#define SANITIZER_QUARANTINE_H
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_mutex.h"
+#include "sanitizer_list.h"
+
+namespace __sanitizer {
+
+template<typename Node> class QuarantineCache;
+
+struct QuarantineBatch {
+ static const uptr kSize = 1021;
+ QuarantineBatch *next;
+ uptr size;
+ uptr count;
+ void *batch[kSize];
+
+ void init(void *ptr, uptr size) {
+ count = 1;
+ batch[0] = ptr;
+ this->size = size + sizeof(QuarantineBatch); // Account for the batch size.
+ }
+
+ // The total size of quarantined nodes recorded in this batch.
+ uptr quarantined_size() const {
+ return size - sizeof(QuarantineBatch);
+ }
+
+ void push_back(void *ptr, uptr size) {
+ CHECK_LT(count, kSize);
+ batch[count++] = ptr;
+ this->size += size;
+ }
+
+ bool can_merge(const QuarantineBatch* const from) const {
+ return count + from->count <= kSize;
+ }
+
+ void merge(QuarantineBatch* const from) {
+ CHECK_LE(count + from->count, kSize);
+ CHECK_GE(size, sizeof(QuarantineBatch));
+
+ for (uptr i = 0; i < from->count; ++i)
+ batch[count + i] = from->batch[i];
+ count += from->count;
+ size += from->quarantined_size();
+
+ from->count = 0;
+ from->size = sizeof(QuarantineBatch);
+ }
+};
+
+COMPILER_CHECK(sizeof(QuarantineBatch) <= (1 << 13)); // 8 KiB.
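
To make the size accounting concrete: size always includes sizeof(QuarantineBatch) itself, so quarantined_size() yields only the user bytes. A sketch, with ptr1..ptr3 as hypothetical chunk pointers:

QuarantineBatch a, b;
a.init(ptr1, 100);     // a.size == 100 + sizeof(QuarantineBatch)
a.push_back(ptr2, 50); // a.size == 150 + sizeof(QuarantineBatch)
b.init(ptr3, 10);
if (a.can_merge(&b))
  a.merge(&b);         // a.quarantined_size() == 160; b is left empty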
+
+// The callback interface is:
+// void Callback::Recycle(Node *ptr);
+// void *cb.Allocate(uptr size);
+// void cb.Deallocate(void *ptr);
+template<typename Callback, typename Node>
+class Quarantine {
+ public:
+ typedef QuarantineCache<Callback> Cache;
+
+ explicit Quarantine(LinkerInitialized)
+ : cache_(LINKER_INITIALIZED) {
+ }
+
+ void Init(uptr size, uptr cache_size) {
+ // The thread-local quarantine size can be zero only when the global
+ // quarantine size is zero (this allows us to perform just one atomic read
+ // per Put() call).
+ CHECK((size == 0 && cache_size == 0) || cache_size != 0);
+
+ atomic_store_relaxed(&max_size_, size);
+ atomic_store_relaxed(&min_size_, size / 10 * 9); // 90% of max size.
+ atomic_store_relaxed(&max_cache_size_, cache_size);
+
+ cache_mutex_.Init();
+ recycle_mutex_.Init();
+ }
+
+ uptr GetSize() const { return atomic_load_relaxed(&max_size_); }
+ uptr GetCacheSize() const {
+ return atomic_load_relaxed(&max_cache_size_);
+ }
+
+ void Put(Cache *c, Callback cb, Node *ptr, uptr size) {
+ uptr cache_size = GetCacheSize();
+ if (cache_size) {
+ c->Enqueue(cb, ptr, size);
+ } else {
+ // GetCacheSize() == 0 only when GetSize() == 0 (see Init).
+ cb.Recycle(ptr);
+ }
+ // Check the cache size anyway to accommodate runtime cache_size changes.
+ if (c->Size() > cache_size)
+ Drain(c, cb);
+ }
+
+ void NOINLINE Drain(Cache *c, Callback cb) {
+ {
+ SpinMutexLock l(&cache_mutex_);
+ cache_.Transfer(c);
+ }
+ if (cache_.Size() > GetSize() && recycle_mutex_.TryLock())
+ Recycle(atomic_load_relaxed(&min_size_), cb);
+ }
+
+ void NOINLINE DrainAndRecycle(Cache *c, Callback cb) {
+ {
+ SpinMutexLock l(&cache_mutex_);
+ cache_.Transfer(c);
+ }
+ recycle_mutex_.Lock();
+ Recycle(0, cb);
+ }
+
+ void PrintStats() const {
+ // It assumes that the world is stopped, just as the allocator's PrintStats.
+ Printf("Quarantine limits: global: %zdMb; thread local: %zdKb\n",
+ GetSize() >> 20, GetCacheSize() >> 10);
+ cache_.PrintStats();
+ }
+
+ private:
+ // Read-only data.
+ char pad0_[kCacheLineSize];
+ atomic_uintptr_t max_size_;
+ atomic_uintptr_t min_size_;
+ atomic_uintptr_t max_cache_size_;
+ char pad1_[kCacheLineSize];
+ StaticSpinMutex cache_mutex_;
+ StaticSpinMutex recycle_mutex_;
+ Cache cache_;
+ char pad2_[kCacheLineSize];
+
+ void NOINLINE Recycle(uptr min_size, Callback cb) {
+ Cache tmp;
+ {
+ SpinMutexLock l(&cache_mutex_);
+ // Go over the batches and merge partially filled ones to
+ // save some memory; otherwise the batches themselves (since the memory
+ // they use is counted against the quarantine limit) can outweigh the
+ // user's actual quarantined chunks, which diminishes the purpose of the
+ // quarantine.
+ uptr cache_size = cache_.Size();
+ uptr overhead_size = cache_.OverheadSize();
+ CHECK_GE(cache_size, overhead_size);
+ // Do the merge only when overhead exceeds this predefined limit (might
+ // require some tuning). It saves us a merge attempt when the quarantine
+ // batch list is unlikely to contain batches suitable for merging.
+ const uptr kOverheadThresholdPercents = 100;
+ if (cache_size > overhead_size &&
+ overhead_size * (100 + kOverheadThresholdPercents) >
+ cache_size * kOverheadThresholdPercents) {
+ cache_.MergeBatches(&tmp);
+ }
+ // Extract enough chunks from the quarantine to get below the max
+ // quarantine size and leave some leeway for the newly quarantined chunks.
+ while (cache_.Size() > min_size) {
+ tmp.EnqueueBatch(cache_.DequeueBatch());
+ }
+ }
+ recycle_mutex_.Unlock();
+ DoRecycle(&tmp, cb);
+ }
+
+ void NOINLINE DoRecycle(Cache *c, Callback cb) {
+ while (QuarantineBatch *b = c->DequeueBatch()) {
+ const uptr kPrefetch = 16;
+ CHECK(kPrefetch <= ARRAY_SIZE(b->batch));
+ for (uptr i = 0; i < kPrefetch; i++)
+ PREFETCH(b->batch[i]);
+ for (uptr i = 0, count = b->count; i < count; i++) {
+ if (i + kPrefetch < count)
+ PREFETCH(b->batch[i + kPrefetch]);
+ cb.Recycle((Node*)b->batch[i]);
+ }
+ cb.Deallocate(b);
+ }
+ }
+};
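
A minimal Callback satisfying the interface documented above, with malloc/free standing in for the tool's allocator (a sketch, not ASan's actual callback):

#include <cstdlib>

struct DemoCallback {
  void Recycle(void *ptr) { std::free(ptr); } // reclaim a quarantined chunk
  void *Allocate(uptr size) { return std::malloc(size); } // batch storage
  void Deallocate(void *ptr) { std::free(ptr); }          // batch teardown
};

The typical flow is then: a per-thread Quarantine<DemoCallback, void>::Cache is filled via Put(), and Drain() transfers it into the global FIFO, recycling the oldest chunks once the global limit is exceeded.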
+
+// Per-thread cache of memory blocks.
+template<typename Callback>
+class QuarantineCache {
+ public:
+ explicit QuarantineCache(LinkerInitialized) {
+ }
+
+ QuarantineCache()
+ : size_() {
+ list_.clear();
+ }
+
+ // Total memory used, including internal accounting.
+ uptr Size() const {
+ return atomic_load_relaxed(&size_);
+ }
+
+ // Memory used for internal accounting.
+ uptr OverheadSize() const {
+ return list_.size() * sizeof(QuarantineBatch);
+ }
+
+ void Enqueue(Callback cb, void *ptr, uptr size) {
+ if (list_.empty() || list_.back()->count == QuarantineBatch::kSize) {
+ QuarantineBatch *b = (QuarantineBatch *)cb.Allocate(sizeof(*b));
+ CHECK(b);
+ b->init(ptr, size);
+ EnqueueBatch(b);
+ } else {
+ list_.back()->push_back(ptr, size);
+ SizeAdd(size);
+ }
+ }
+
+ void Transfer(QuarantineCache *from_cache) {
+ list_.append_back(&from_cache->list_);
+ SizeAdd(from_cache->Size());
+
+ atomic_store_relaxed(&from_cache->size_, 0);
+ }
+
+ void EnqueueBatch(QuarantineBatch *b) {
+ list_.push_back(b);
+ SizeAdd(b->size);
+ }
+
+ QuarantineBatch *DequeueBatch() {
+ if (list_.empty())
+ return nullptr;
+ QuarantineBatch *b = list_.front();
+ list_.pop_front();
+ SizeSub(b->size);
+ return b;
+ }
+
+ void MergeBatches(QuarantineCache *to_deallocate) {
+ uptr extracted_size = 0;
+ QuarantineBatch *current = list_.front();
+ while (current && current->next) {
+ if (current->can_merge(current->next)) {
+ QuarantineBatch *extracted = current->next;
+ // Move all the chunks into the current batch.
+ current->merge(extracted);
+ CHECK_EQ(extracted->count, 0);
+ CHECK_EQ(extracted->size, sizeof(QuarantineBatch));
+ // Remove the next batch from the list and account for its size.
+ list_.extract(current, extracted);
+ extracted_size += extracted->size;
+ // Add it to deallocation list.
+ to_deallocate->EnqueueBatch(extracted);
+ } else {
+ current = current->next;
+ }
+ }
+ SizeSub(extracted_size);
+ }
+
+ void PrintStats() const {
+ uptr batch_count = 0;
+ uptr total_overhead_bytes = 0;
+ uptr total_bytes = 0;
+ uptr total_quarantine_chunks = 0;
+ for (List::ConstIterator it = list_.begin(); it != list_.end(); ++it) {
+ batch_count++;
+ total_bytes += (*it).size;
+ total_overhead_bytes += (*it).size - (*it).quarantined_size();
+ total_quarantine_chunks += (*it).count;
+ }
+ uptr quarantine_chunks_capacity = batch_count * QuarantineBatch::kSize;
+ int chunks_usage_percent = quarantine_chunks_capacity == 0 ?
+ 0 : total_quarantine_chunks * 100 / quarantine_chunks_capacity;
+ uptr total_quarantined_bytes = total_bytes - total_overhead_bytes;
+ int memory_overhead_percent = total_quarantined_bytes == 0 ?
+ 0 : total_overhead_bytes * 100 / total_quarantined_bytes;
+ Printf("Global quarantine stats: batches: %zd; bytes: %zd (user: %zd); "
+ "chunks: %zd (capacity: %zd); %d%% chunks used; %d%% memory overhead"
+ "\n",
+ batch_count, total_bytes, total_quarantined_bytes,
+ total_quarantine_chunks, quarantine_chunks_capacity,
+ chunks_usage_percent, memory_overhead_percent);
+ }
+
+ private:
+ typedef IntrusiveList<QuarantineBatch> List;
+
+ List list_;
+ atomic_uintptr_t size_;
+
+ void SizeAdd(uptr add) {
+ atomic_store_relaxed(&size_, Size() + add);
+ }
+ void SizeSub(uptr sub) {
+ atomic_store_relaxed(&size_, Size() - sub);
+ }
+};
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_QUARANTINE_H
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_report_decorator.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_report_decorator.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_report_decorator.h (revision 351984)
@@ -0,0 +1,48 @@
+//===-- sanitizer_report_decorator.h ----------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Tags to decorate the sanitizer reports.
+// Currently supported tags:
+// * None.
+// * ANSI color sequences.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_REPORT_DECORATOR_H
+#define SANITIZER_REPORT_DECORATOR_H
+
+#include "sanitizer_common.h"
+
+namespace __sanitizer {
+class SanitizerCommonDecorator {
+ // FIXME: This is not portable. It assumes the special strings are printed to
+ // stdout, which is not the case on Windows (see SetConsoleTextAttribute()).
+ public:
+ SanitizerCommonDecorator() : ansi_(ColorizeReports()) {}
+ const char *Bold() const { return ansi_ ? "\033[1m" : ""; }
+ const char *Default() const { return ansi_ ? "\033[1m\033[0m" : ""; }
+ const char *Warning() const { return Red(); }
+ const char *Error() const { return Red(); }
+ const char *MemoryByte() const { return Magenta(); }
+
+ protected:
+ const char *Black() const { return ansi_ ? "\033[1m\033[30m" : ""; }
+ const char *Red() const { return ansi_ ? "\033[1m\033[31m" : ""; }
+ const char *Green() const { return ansi_ ? "\033[1m\033[32m" : ""; }
+ const char *Yellow() const { return ansi_ ? "\033[1m\033[33m" : ""; }
+ const char *Blue() const { return ansi_ ? "\033[1m\033[34m" : ""; }
+ const char *Magenta() const { return ansi_ ? "\033[1m\033[35m" : ""; }
+ const char *Cyan() const { return ansi_ ? "\033[1m\033[36m" : ""; }
+ const char *White() const { return ansi_ ? "\033[1m\033[37m" : ""; }
+ private:
+ bool ansi_;
+};
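
Typical use brackets a message with a color-on/color-off pair; both strings are empty when ColorizeReports() returns false, so call sites never need to branch. A sketch using the Printf defined earlier in this diff:

SanitizerCommonDecorator d;
Printf("%sERROR: bad access%s\n", d.Error(), d.Default());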
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_REPORT_DECORATOR_H
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_ring_buffer.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_ring_buffer.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_ring_buffer.h (revision 351984)
@@ -0,0 +1,161 @@
+//===-- sanitizer_ring_buffer.h ---------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Simple ring buffer.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_RING_BUFFER_H
+#define SANITIZER_RING_BUFFER_H
+
+#include "sanitizer_common.h"
+
+namespace __sanitizer {
+// RingBuffer<T>: fixed-size ring buffer optimized for speed of push().
+// T should be a POD type and sizeof(T) should be divisible by sizeof(void*).
+// At creation, all elements are zero.
+template<class T>
+class RingBuffer {
+ public:
+ COMPILER_CHECK(sizeof(T) % sizeof(void *) == 0);
+ static RingBuffer *New(uptr Size) {
+ void *Ptr = MmapOrDie(SizeInBytes(Size), "RingBuffer");
+ RingBuffer *RB = reinterpret_cast<RingBuffer*>(Ptr);
+ uptr End = reinterpret_cast<uptr>(Ptr) + SizeInBytes(Size);
+ RB->last_ = RB->next_ = reinterpret_cast<T*>(End - sizeof(T));
+ return RB;
+ }
+ void Delete() {
+ UnmapOrDie(this, SizeInBytes(size()));
+ }
+ uptr size() const {
+ return last_ + 1 -
+ reinterpret_cast<T *>(reinterpret_cast<uptr>(this) +
+ 2 * sizeof(T *));
+ }
+
+ static uptr SizeInBytes(uptr Size) {
+ return Size * sizeof(T) + 2 * sizeof(T*);
+ }
+
+ uptr SizeInBytes() { return SizeInBytes(size()); }
+
+ void push(T t) {
+ *next_ = t;
+ next_--;
+ // The condition below works only if sizeof(T) is divisible by sizeof(T*).
+ if (next_ <= reinterpret_cast<T*>(&next_))
+ next_ = last_;
+ }
+
+ T operator[](uptr Idx) const {
+ CHECK_LT(Idx, size());
+ sptr IdxNext = Idx + 1;
+ if (IdxNext > last_ - next_)
+ IdxNext -= size();
+ return next_[IdxNext];
+ }
+
+ private:
+ RingBuffer() {}
+ ~RingBuffer() {}
+ RingBuffer(const RingBuffer&) = delete;
+
+ // Data layout:
+ // LNDDDDDDDD
+ // D: data elements.
+ // L: last_, always points to the last data element.
+ // N: next_, initially equal to last_, is decremented on every push and
+ // wraps around when it becomes less than or equal to its own address.
+ T *last_;
+ T *next_;
+ T data_[1]; // flexible array.
+};
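
Behavior sketch (runtime-internal code, assuming MmapOrDie is available): the buffer retains the last size() pushed values, and operator[](0) returns the most recent one.

RingBuffer<uptr> *rb = RingBuffer<uptr>::New(4);
for (uptr i = 1; i <= 6; i++) rb->push(i);
// The buffer now holds {6, 5, 4, 3}; (*rb)[0] == 6 is the newest element.
rb->Delete();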
+
+// A ring buffer with externally provided storage that encodes its state in 8
+// bytes. Has significant constraints on size and alignment of storage.
+// See a comment in hwasan/hwasan_thread_list.h for the motivation behind this.
+#if SANITIZER_WORDSIZE == 64
+template <class T>
+class CompactRingBuffer {
+ // Top byte of long_ stores the buffer size in pages.
+ // Lower bytes store the address of the next buffer element.
+ static constexpr int kPageSizeBits = 12;
+ static constexpr int kSizeShift = 56;
+ static constexpr uptr kNextMask = (1ULL << kSizeShift) - 1;
+
+ uptr GetStorageSize() const { return (long_ >> kSizeShift) << kPageSizeBits; }
+
+ void Init(void *storage, uptr size) {
+ CHECK_EQ(sizeof(CompactRingBuffer<T>), sizeof(void *));
+ CHECK(IsPowerOfTwo(size));
+ CHECK_GE(size, 1 << kPageSizeBits);
+ CHECK_LE(size, 128 << kPageSizeBits);
+ CHECK_EQ(size % 4096, 0);
+ CHECK_EQ(size % sizeof(T), 0);
+ CHECK_EQ((uptr)storage % (size * 2), 0);
+ long_ = (uptr)storage | ((size >> kPageSizeBits) << kSizeShift);
+ }
+
+ void SetNext(const T *next) {
+ long_ = (long_ & ~kNextMask) | (uptr)next;
+ }
+
+ public:
+ CompactRingBuffer(void *storage, uptr size) {
+ Init(storage, size);
+ }
+
+ // A copy constructor of sorts.
+ CompactRingBuffer(const CompactRingBuffer &other, void *storage) {
+ uptr size = other.GetStorageSize();
+ internal_memcpy(storage, other.StartOfStorage(), size);
+ Init(storage, size);
+ uptr Idx = other.Next() - (const T *)other.StartOfStorage();
+ SetNext((const T *)storage + Idx);
+ }
+
+ T *Next() const { return (T *)(long_ & kNextMask); }
+
+ void *StartOfStorage() const {
+ return (void *)((uptr)Next() & ~(GetStorageSize() - 1));
+ }
+
+ void *EndOfStorage() const {
+ return (void *)((uptr)StartOfStorage() + GetStorageSize());
+ }
+
+ uptr size() const { return GetStorageSize() / sizeof(T); }
+
+ void push(T t) {
+ T *next = Next();
+ *next = t;
+ next++;
+ next = (T *)((uptr)next & ~GetStorageSize());
+ SetNext(next);
+ }
+
+ const T &operator[](uptr Idx) const {
+ CHECK_LT(Idx, size());
+ const T *Begin = (const T *)StartOfStorage();
+ sptr StorageIdx = Next() - Begin;
+ StorageIdx -= (sptr)(Idx + 1);
+ if (StorageIdx < 0)
+ StorageIdx += size();
+ return Begin[StorageIdx];
+ }
+
+ public:
+ ~CompactRingBuffer() {}
+ CompactRingBuffer(const CompactRingBuffer &) = delete;
+
+ uptr long_;
+};
+#endif
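
A standalone illustration of the encoding Init() establishes: a single 64-bit word carries the next-element pointer in the low 56 bits and the storage size in pages in the top byte (the address below is hypothetical, and the real class additionally requires storage aligned to twice its size):

#include <cstdint>
#include <cstdio>

int main() {
  const int kSizeShift = 56, kPageSizeBits = 12;
  const uint64_t kNextMask = (1ULL << kSizeShift) - 1;
  uint64_t storage = 0x7f0000400000ULL; // hypothetical buffer address
  uint64_t size = 16 << kPageSizeBits;  // 16 pages == 64 KiB
  uint64_t packed = storage | ((size >> kPageSizeBits) << kSizeShift);
  std::printf("next=%#llx pages=%llu\n",
              (unsigned long long)(packed & kNextMask),
              (unsigned long long)(packed >> kSizeShift));
}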
+} // namespace __sanitizer
+
+#endif // SANITIZER_RING_BUFFER_H
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_rtems.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_rtems.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_rtems.cc (revision 351984)
@@ -0,0 +1,279 @@
+//===-- sanitizer_rtems.cc ------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between various sanitizers' runtime libraries and
+// implements RTEMS-specific functions.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_rtems.h"
+#if SANITIZER_RTEMS
+
+#define posix_memalign __real_posix_memalign
+#define free __real_free
+#define memset __real_memset
+
+#include "sanitizer_file.h"
+#include "sanitizer_symbolizer.h"
+#include <errno.h>
+#include <fcntl.h>
+#include <pthread.h>
+#include <sched.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <unistd.h>
+
+// There is no mmap on RTEMS. Use memalign, etc.
+#define __mmap_alloc_aligned posix_memalign
+#define __mmap_free free
+#define __mmap_memset memset
+
+namespace __sanitizer {
+
+#include "sanitizer_syscall_generic.inc"
+
+void NORETURN internal__exit(int exitcode) {
+ _exit(exitcode);
+}
+
+uptr internal_sched_yield() {
+ return sched_yield();
+}
+
+uptr internal_getpid() {
+ return getpid();
+}
+
+bool FileExists(const char *filename) {
+ struct stat st;
+ if (stat(filename, &st))
+ return false;
+ // Sanity check: filename is a regular file.
+ return S_ISREG(st.st_mode);
+}
+
+uptr GetThreadSelf() { return static_cast<uptr>(pthread_self()); }
+
+tid_t GetTid() { return GetThreadSelf(); }
+
+void Abort() { abort(); }
+
+int Atexit(void (*function)(void)) { return atexit(function); }
+
+void SleepForSeconds(int seconds) { sleep(seconds); }
+
+void SleepForMillis(int millis) { usleep(millis * 1000); }
+
+bool SupportsColoredOutput(fd_t fd) { return false; }
+
+void GetThreadStackTopAndBottom(bool at_initialization,
+ uptr *stack_top, uptr *stack_bottom) {
+ pthread_attr_t attr;
+ pthread_attr_init(&attr);
+ CHECK_EQ(pthread_getattr_np(pthread_self(), &attr), 0);
+ void *base = nullptr;
+ size_t size = 0;
+ CHECK_EQ(pthread_attr_getstack(&attr, &base, &size), 0);
+ CHECK_EQ(pthread_attr_destroy(&attr), 0);
+
+ *stack_bottom = reinterpret_cast<uptr>(base);
+ *stack_top = *stack_bottom + size;
+}
+
+void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
+ uptr *tls_addr, uptr *tls_size) {
+ uptr stack_top, stack_bottom;
+ GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom);
+ *stk_addr = stack_bottom;
+ *stk_size = stack_top - stack_bottom;
+ *tls_addr = *tls_size = 0;
+}
+
+void InitializePlatformEarly() {}
+void MaybeReexec() {}
+void CheckASLR() {}
+void CheckMPROTECT() {}
+void DisableCoreDumperIfNecessary() {}
+void InstallDeadlySignalHandlers(SignalHandlerType handler) {}
+void SetAlternateSignalStack() {}
+void UnsetAlternateSignalStack() {}
+void InitTlsSize() {}
+
+void PrintModuleMap() {}
+
+void SignalContext::DumpAllRegisters(void *context) {}
+const char *DescribeSignalOrException(int signo) { UNIMPLEMENTED(); }
+
+enum MutexState { MtxUnlocked = 0, MtxLocked = 1, MtxSleeping = 2 };
+
+BlockingMutex::BlockingMutex() {
+ internal_memset(this, 0, sizeof(*this));
+}
+
+void BlockingMutex::Lock() {
+ CHECK_EQ(owner_, 0);
+ atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
+ if (atomic_exchange(m, MtxLocked, memory_order_acquire) == MtxUnlocked)
+ return;
+ while (atomic_exchange(m, MtxSleeping, memory_order_acquire) != MtxUnlocked) {
+ internal_sched_yield();
+ }
+}
+
+void BlockingMutex::Unlock() {
+ atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
+ u32 v = atomic_exchange(m, MtxUnlocked, memory_order_release);
+ CHECK_NE(v, MtxUnlocked);
+}
+
+void BlockingMutex::CheckLocked() {
+ atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
+ CHECK_NE(MtxUnlocked, atomic_load(m, memory_order_relaxed));
+}
+
+uptr GetPageSize() { return getpagesize(); }
+
+uptr GetMmapGranularity() { return GetPageSize(); }
+
+uptr GetMaxVirtualAddress() {
+ return (1ULL << 32) - 1; // 0xffffffff
+}
+
+void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
+ void* ptr = 0;
+ int res = __mmap_alloc_aligned(&ptr, GetPageSize(), size);
+ if (UNLIKELY(res))
+ ReportMmapFailureAndDie(size, mem_type, "allocate", res, raw_report);
+ __mmap_memset(ptr, 0, size);
+ IncreaseTotalMmap(size);
+ return ptr;
+}
+
+void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
+ void* ptr = 0;
+ int res = __mmap_alloc_aligned(&ptr, GetPageSize(), size);
+ if (UNLIKELY(res)) {
+ if (res == ENOMEM)
+ return nullptr;
+ ReportMmapFailureAndDie(size, mem_type, "allocate", res, false);
+ }
+ __mmap_memset(ptr, 0, size);
+ IncreaseTotalMmap(size);
+ return ptr;
+}
+
+void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
+ const char *mem_type) {
+ CHECK(IsPowerOfTwo(size));
+ CHECK(IsPowerOfTwo(alignment));
+ void* ptr = 0;
+ int res = __mmap_alloc_aligned(&ptr, alignment, size);
+ if (res)
+ ReportMmapFailureAndDie(size, mem_type, "align allocate", res, false);
+ __mmap_memset(ptr, 0, size);
+ IncreaseTotalMmap(size);
+ return ptr;
+}
+
+void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
+ return MmapOrDie(size, mem_type, false);
+}
+
+void UnmapOrDie(void *addr, uptr size) {
+ if (!addr || !size) return;
+ __mmap_free(addr);
+ DecreaseTotalMmap(size);
+}
+
+fd_t OpenFile(const char *filename, FileAccessMode mode, error_t *errno_p) {
+ int flags;
+ switch (mode) {
+ case RdOnly: flags = O_RDONLY; break;
+ case WrOnly: flags = O_WRONLY | O_CREAT | O_TRUNC; break;
+ case RdWr: flags = O_RDWR | O_CREAT; break;
+ }
+ fd_t res = open(filename, flags, 0660);
+ if (internal_iserror(res, errno_p))
+ return kInvalidFd;
+ return res;
+}
+
+void CloseFile(fd_t fd) {
+ close(fd);
+}
+
+bool ReadFromFile(fd_t fd, void *buff, uptr buff_size, uptr *bytes_read,
+ error_t *error_p) {
+ uptr res = read(fd, buff, buff_size);
+ if (internal_iserror(res, error_p))
+ return false;
+ if (bytes_read)
+ *bytes_read = res;
+ return true;
+}
+
+bool WriteToFile(fd_t fd, const void *buff, uptr buff_size, uptr *bytes_written,
+ error_t *error_p) {
+ uptr res = write(fd, buff, buff_size);
+ if (internal_iserror(res, error_p))
+ return false;
+ if (bytes_written)
+ *bytes_written = res;
+ return true;
+}
+
+void ReleaseMemoryPagesToOS(uptr beg, uptr end) {}
+void DumpProcessMap() {}
+
+// There is no page protection, so everything is "accessible."
+bool IsAccessibleMemoryRange(uptr beg, uptr size) {
+ return true;
+}
+
+char **GetArgv() { return nullptr; }
+char **GetEnviron() { return nullptr; }
+
+const char *GetEnv(const char *name) {
+ return getenv(name);
+}
+
+uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) {
+ internal_strncpy(buf, "StubBinaryName", buf_len);
+ return internal_strlen(buf);
+}
+
+uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len) {
+ internal_strncpy(buf, "StubProcessName", buf_len);
+ return internal_strlen(buf);
+}
+
+bool IsPathSeparator(const char c) {
+ return c == '/';
+}
+
+bool IsAbsolutePath(const char *path) {
+ return path != nullptr && IsPathSeparator(path[0]);
+}
+
+void ReportFile::Write(const char *buffer, uptr length) {
+ SpinMutexLock l(mu);
+ static const char *kWriteError =
+ "ReportFile::Write() can't output requested buffer!\n";
+ ReopenIfNecessary();
+ if (length != write(fd, buffer, length)) {
+ write(fd, kWriteError, internal_strlen(kWriteError));
+ Die();
+ }
+}
+
+uptr MainThreadStackBase, MainThreadStackSize;
+uptr MainThreadTlsBase, MainThreadTlsSize;
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_RTEMS
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_rtems.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
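
Since RTEMS has no mmap, the port above maps the Mmap* entry points onto
posix_memalign plus memset. A minimal, framework-free sketch of that
substitution (MmapLikeAlloc/MmapLikeFree are hypothetical names; the real code
reports failures via ReportMmapFailureAndDie instead of aborting):

#include <stdlib.h>
#include <string.h>
#include <unistd.h>

// Allocate page-aligned, zeroed memory, mimicking anonymous mmap.
void *MmapLikeAlloc(size_t size) {
  void *ptr = nullptr;
  long page = sysconf(_SC_PAGESIZE);
  if (posix_memalign(&ptr, (size_t)page, size) != 0)
    abort();                 // Stand-in for ReportMmapFailureAndDie.
  memset(ptr, 0, size);      // mmap'd pages arrive zeroed; match that.
  return ptr;
}

void MmapLikeFree(void *ptr) { free(ptr); }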
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_rtems.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_rtems.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_rtems.h (revision 351984)
@@ -0,0 +1,20 @@
+//===-- sanitizer_rtems.h ---------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between various sanitizers' runtime libraries and
+// provides definitions for RTEMS-specific functions.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_RTEMS_H
+#define SANITIZER_RTEMS_H
+
+#include "sanitizer_platform.h"
+#if SANITIZER_RTEMS
+#include "sanitizer_common.h"
+
+#endif // SANITIZER_RTEMS
+#endif // SANITIZER_RTEMS_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_rtems.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_signal_interceptors.inc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_signal_interceptors.inc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_signal_interceptors.inc (revision 351984)
@@ -0,0 +1,86 @@
+//===-- sanitizer_signal_interceptors.inc -----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Signal interceptors for sanitizers.
+//
+//===----------------------------------------------------------------------===//
+
+#include "interception/interception.h"
+#include "sanitizer_common.h"
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_platform_interceptors.h"
+
+using namespace __sanitizer;
+
+#if SANITIZER_NETBSD
+#define sigaction_symname __sigaction14
+#else
+#define sigaction_symname sigaction
+#endif
+
+#ifndef SIGNAL_INTERCEPTOR_SIGNAL_IMPL
+#define SIGNAL_INTERCEPTOR_SIGNAL_IMPL(func, signum, handler) \
+ { return REAL(func)(signum, handler); }
+#endif
+
+#ifndef SIGNAL_INTERCEPTOR_SIGACTION_IMPL
+#define SIGNAL_INTERCEPTOR_SIGACTION_IMPL(signum, act, oldact) \
+ { return REAL(sigaction_symname)(signum, act, oldact); }
+#endif
+
+#if SANITIZER_INTERCEPT_BSD_SIGNAL
+INTERCEPTOR(uptr, bsd_signal, int signum, uptr handler) {
+ if (GetHandleSignalMode(signum) == kHandleSignalExclusive) return 0;
+ SIGNAL_INTERCEPTOR_SIGNAL_IMPL(bsd_signal, signum, handler);
+}
+#define INIT_BSD_SIGNAL COMMON_INTERCEPT_FUNCTION(bsd_signal)
+#else // SANITIZER_INTERCEPT_BSD_SIGNAL
+#define INIT_BSD_SIGNAL
+#endif // SANITIZER_INTERCEPT_BSD_SIGNAL
+
+#if SANITIZER_INTERCEPT_SIGNAL_AND_SIGACTION
+INTERCEPTOR(uptr, signal, int signum, uptr handler) {
+ if (GetHandleSignalMode(signum) == kHandleSignalExclusive)
+ return (uptr) nullptr;
+ SIGNAL_INTERCEPTOR_SIGNAL_IMPL(signal, signum, handler);
+}
+#define INIT_SIGNAL COMMON_INTERCEPT_FUNCTION(signal)
+
+INTERCEPTOR(int, sigaction_symname, int signum,
+ const __sanitizer_sigaction *act, __sanitizer_sigaction *oldact) {
+ if (GetHandleSignalMode(signum) == kHandleSignalExclusive) return 0;
+ SIGNAL_INTERCEPTOR_SIGACTION_IMPL(signum, act, oldact);
+}
+#define INIT_SIGACTION COMMON_INTERCEPT_FUNCTION(sigaction_symname)
+
+namespace __sanitizer {
+int real_sigaction(int signum, const void *act, void *oldact) {
+ return REAL(sigaction_symname)(signum, (const __sanitizer_sigaction *)act,
+ (__sanitizer_sigaction *)oldact);
+}
+} // namespace __sanitizer
+#else // SANITIZER_INTERCEPT_SIGNAL_AND_SIGACTION
+#define INIT_SIGNAL
+#define INIT_SIGACTION
+// REAL(sigaction) still needs to be defined for other systems.
+namespace __sanitizer {
+struct __sanitizer_sigaction;
+}
+DEFINE_REAL(int, sigaction, int signum, const __sanitizer_sigaction *act,
+ __sanitizer_sigaction *oldact)
+#endif // SANITIZER_INTERCEPT_SIGNAL_AND_SIGACTION
+
+static void InitializeSignalInterceptors() {
+ static bool was_called_once;
+ CHECK(!was_called_once);
+ was_called_once = true;
+
+ INIT_BSD_SIGNAL;
+ INIT_SIGNAL;
+ INIT_SIGACTION;
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_signal_interceptors.inc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
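
The interceptors above all follow one pattern: if the runtime claims the signal
exclusively, swallow the call and report success; otherwise forward to the real
libc entry point. A framework-free sketch of that gate (GetMode and
InterceptedSigaction are stand-ins for GetHandleSignalMode and the interception
machinery, which are flag-driven in the real runtime):

#include <signal.h>

enum HandleMode { kHandleIgnore, kHandleShared, kHandleExclusive };

// Stand-in for GetHandleSignalMode(); the real table is configured by flags.
HandleMode GetMode(int signum) {
  return signum == SIGSEGV ? kHandleExclusive : kHandleShared;
}

int InterceptedSigaction(int signum, const struct sigaction *act,
                         struct sigaction *oldact) {
  // Pretend success without installing anything: the sanitizer keeps its
  // own crash handler for this signal.
  if (GetMode(signum) == kHandleExclusive)
    return 0;
  return sigaction(signum, act, oldact);  // Forward to the real libc call.
}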
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_solaris.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_solaris.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_solaris.cc (revision 351984)
@@ -0,0 +1,230 @@
+//===-- sanitizer_solaris.cc ----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between various sanitizers' runtime libraries and
+// implements Solaris-specific functions.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+#if SANITIZER_SOLARIS
+
+#include <stdio.h>
+
+#include "sanitizer_common.h"
+#include "sanitizer_flags.h"
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_placement_new.h"
+#include "sanitizer_platform_limits_posix.h"
+#include "sanitizer_procmaps.h"
+
+#include <fcntl.h>
+#include <pthread.h>
+#include <sched.h>
+#include <thread.h>
+#include <synch.h>
+#include <signal.h>
+#include <sys/mman.h>
+#include <sys/resource.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <dirent.h>
+#include <unistd.h>
+#include <errno.h>
+#include <stdlib.h>
+
+namespace __sanitizer {
+
+//#include "sanitizer_syscall_generic.inc"
+
+#define _REAL(func) _ ## func
+#define DECLARE__REAL(ret_type, func, ...) \
+ extern "C" ret_type _REAL(func)(__VA_ARGS__)
+#define DECLARE__REAL_AND_INTERNAL(ret_type, func, ...) \
+ DECLARE__REAL(ret_type, func, __VA_ARGS__); \
+ ret_type internal_ ## func(__VA_ARGS__)
+
+#if !defined(_LP64) && _FILE_OFFSET_BITS == 64
+#define _REAL64(func) _ ## func ## 64
+#else
+#define _REAL64(func) _REAL(func)
+#endif
+#define DECLARE__REAL64(ret_type, func, ...) \
+ extern "C" ret_type _REAL64(func)(__VA_ARGS__)
+#define DECLARE__REAL_AND_INTERNAL64(ret_type, func, ...) \
+ DECLARE__REAL64(ret_type, func, __VA_ARGS__); \
+ ret_type internal_ ## func(__VA_ARGS__)
+
+// ---------------------- sanitizer_libc.h
+DECLARE__REAL_AND_INTERNAL64(uptr, mmap, void *addr, uptr /*size_t*/ length,
+ int prot, int flags, int fd, OFF_T offset) {
+ return (uptr)_REAL64(mmap)(addr, length, prot, flags, fd, offset);
+}
+
+DECLARE__REAL_AND_INTERNAL(uptr, munmap, void *addr, uptr length) {
+ return _REAL(munmap)(addr, length);
+}
+
+DECLARE__REAL_AND_INTERNAL(int, mprotect, void *addr, uptr length, int prot) {
+ return _REAL(mprotect)(addr, length, prot);
+}
+
+DECLARE__REAL_AND_INTERNAL(uptr, close, fd_t fd) {
+ return _REAL(close)(fd);
+}
+
+extern "C" int _REAL64(open)(const char *, int, ...);
+
+uptr internal_open(const char *filename, int flags) {
+ return _REAL64(open)(filename, flags);
+}
+
+uptr internal_open(const char *filename, int flags, u32 mode) {
+ return _REAL64(open)(filename, flags, mode);
+}
+
+DECLARE__REAL_AND_INTERNAL(uptr, read, fd_t fd, void *buf, uptr count) {
+ return _REAL(read)(fd, buf, count);
+}
+
+DECLARE__REAL_AND_INTERNAL(uptr, write, fd_t fd, const void *buf, uptr count) {
+ return _REAL(write)(fd, buf, count);
+}
+
+// FIXME: There's only _ftruncate64 beginning with Solaris 11.
+DECLARE__REAL_AND_INTERNAL(uptr, ftruncate, fd_t fd, uptr size) {
+ return ftruncate(fd, size);
+}
+
+DECLARE__REAL_AND_INTERNAL64(uptr, stat, const char *path, void *buf) {
+ return _REAL64(stat)(path, (struct stat *)buf);
+}
+
+DECLARE__REAL_AND_INTERNAL64(uptr, lstat, const char *path, void *buf) {
+ return _REAL64(lstat)(path, (struct stat *)buf);
+}
+
+DECLARE__REAL_AND_INTERNAL64(uptr, fstat, fd_t fd, void *buf) {
+ return _REAL64(fstat)(fd, (struct stat *)buf);
+}
+
+uptr internal_filesize(fd_t fd) {
+ struct stat st;
+ if (internal_fstat(fd, &st))
+ return -1;
+ return (uptr)st.st_size;
+}
+
+DECLARE__REAL_AND_INTERNAL(uptr, dup, int oldfd) {
+ return _REAL(dup)(oldfd);
+}
+
+DECLARE__REAL_AND_INTERNAL(uptr, dup2, int oldfd, int newfd) {
+ return _REAL(dup2)(oldfd, newfd);
+}
+
+DECLARE__REAL_AND_INTERNAL(uptr, readlink, const char *path, char *buf,
+ uptr bufsize) {
+ return _REAL(readlink)(path, buf, bufsize);
+}
+
+DECLARE__REAL_AND_INTERNAL(uptr, unlink, const char *path) {
+ return _REAL(unlink)(path);
+}
+
+DECLARE__REAL_AND_INTERNAL(uptr, rename, const char *oldpath,
+ const char *newpath) {
+ return _REAL(rename)(oldpath, newpath);
+}
+
+DECLARE__REAL_AND_INTERNAL(uptr, sched_yield, void) {
+ return sched_yield();
+}
+
+DECLARE__REAL_AND_INTERNAL(void, _exit, int exitcode) {
+ _exit(exitcode);
+}
+
+DECLARE__REAL_AND_INTERNAL(uptr, execve, const char *filename,
+ char *const argv[], char *const envp[]) {
+ return _REAL(execve)(filename, argv, envp);
+}
+
+DECLARE__REAL_AND_INTERNAL(uptr, waitpid, int pid, int *status, int options) {
+ return _REAL(waitpid)(pid, status, options);
+}
+
+DECLARE__REAL_AND_INTERNAL(uptr, getpid, void) {
+ return _REAL(getpid)();
+}
+
+// FIXME: This might be wrong: _getdents doesn't take a struct linux_dirent *.
+DECLARE__REAL_AND_INTERNAL64(uptr, getdents, fd_t fd, struct linux_dirent *dirp,
+ unsigned int count) {
+ return _REAL64(getdents)(fd, dirp, count);
+}
+
+DECLARE__REAL_AND_INTERNAL64(uptr, lseek, fd_t fd, OFF_T offset, int whence) {
+ return _REAL64(lseek)(fd, offset, whence);
+}
+
+// FIXME: This might be wrong: _sigfillset doesn't take a
+// __sanitizer_sigset_t *.
+DECLARE__REAL_AND_INTERNAL(void, sigfillset, __sanitizer_sigset_t *set) {
+ _REAL(sigfillset)(set);
+}
+
+// FIXME: This might be wrong: _sigprocmask doesn't take __sanitizer_sigset_t *.
+DECLARE__REAL_AND_INTERNAL(uptr, sigprocmask, int how,
+ __sanitizer_sigset_t *set,
+ __sanitizer_sigset_t *oldset) {
+ return _REAL(sigprocmask)(how, set, oldset);
+}
+
+DECLARE__REAL_AND_INTERNAL(int, fork, void) {
+ // TODO(glider): this may call user's pthread_atfork() handlers which is bad.
+ return _REAL(fork)();
+}
+
+u64 NanoTime() {
+ return gethrtime();
+}
+
+uptr internal_clock_gettime(__sanitizer_clockid_t clk_id, void *tp) {
+ // FIXME: No internal variant.
+ return clock_gettime(clk_id, (timespec *)tp);
+}
+
+// ----------------- sanitizer_common.h
+BlockingMutex::BlockingMutex() {
+ CHECK(sizeof(mutex_t) <= sizeof(opaque_storage_));
+ internal_memset(this, 0, sizeof(*this));
+ CHECK_EQ(mutex_init((mutex_t *)&opaque_storage_, USYNC_THREAD, NULL), 0);
+}
+
+void BlockingMutex::Lock() {
+ CHECK(sizeof(mutex_t) <= sizeof(opaque_storage_));
+ CHECK_NE(owner_, (uptr)thr_self());
+ CHECK_EQ(mutex_lock((mutex_t *)&opaque_storage_), 0);
+ CHECK(!owner_);
+ owner_ = (uptr)thr_self();
+}
+
+void BlockingMutex::Unlock() {
+ CHECK(owner_ == (uptr)thr_self());
+ owner_ = 0;
+ CHECK_EQ(mutex_unlock((mutex_t *)&opaque_storage_), 0);
+}
+
+void BlockingMutex::CheckLocked() {
+ CHECK_EQ((uptr)thr_self(), owner_);
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_SOLARIS
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_solaris.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
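
The Solaris port leans on a macro that both declares the `_`-prefixed
libc-private symbol and opens the definition of the matching internal_ wrapper.
A reduced sketch of the same trick, using _write as the example; on Solaris the
underscore names are real libc entry points, elsewhere this compiles but will
not link:

#include <sys/types.h>

#define _REAL(func) _##func
#define DECLARE__REAL_AND_INTERNAL(ret_type, func, ...) \
  extern "C" ret_type _REAL(func)(__VA_ARGS__);         \
  ret_type internal_##func(__VA_ARGS__)

// Expands to: a declaration of the private _write(), followed by the header
// of internal_write(), whose body forwards to it.
DECLARE__REAL_AND_INTERNAL(ssize_t, write, int fd, const void *buf,
                           size_t count) {
  return _REAL(write)(fd, buf, count);
}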
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_stackdepot.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_stackdepot.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_stackdepot.cc (revision 351984)
@@ -0,0 +1,149 @@
+//===-- sanitizer_stackdepot.cc -------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_stackdepot.h"
+
+#include "sanitizer_common.h"
+#include "sanitizer_hash.h"
+#include "sanitizer_stackdepotbase.h"
+
+namespace __sanitizer {
+
+struct StackDepotNode {
+ StackDepotNode *link;
+ u32 id;
+ atomic_uint32_t hash_and_use_count; // hash_bits : 12; use_count : 20;
+ u32 size;
+ u32 tag;
+ uptr stack[1]; // [size]
+
+ static const u32 kTabSizeLog = SANITIZER_ANDROID ? 16 : 20;
+ // Lower kTabSizeLog bits are equal for all items in one bucket.
+ // We use these bits to store the per-stack use counter.
+ static const u32 kUseCountBits = kTabSizeLog;
+ static const u32 kMaxUseCount = 1 << kUseCountBits;
+ static const u32 kUseCountMask = (1 << kUseCountBits) - 1;
+ static const u32 kHashMask = ~kUseCountMask;
+
+ typedef StackTrace args_type;
+ bool eq(u32 hash, const args_type &args) const {
+ u32 hash_bits =
+ atomic_load(&hash_and_use_count, memory_order_relaxed) & kHashMask;
+ if ((hash & kHashMask) != hash_bits || args.size != size || args.tag != tag)
+ return false;
+ uptr i = 0;
+ for (; i < size; i++) {
+ if (stack[i] != args.trace[i]) return false;
+ }
+ return true;
+ }
+ static uptr storage_size(const args_type &args) {
+ return sizeof(StackDepotNode) + (args.size - 1) * sizeof(uptr);
+ }
+ static u32 hash(const args_type &args) {
+ MurMur2HashBuilder H(args.size * sizeof(uptr));
+ for (uptr i = 0; i < args.size; i++) H.add(args.trace[i]);
+ return H.get();
+ }
+ static bool is_valid(const args_type &args) {
+ return args.size > 0 && args.trace;
+ }
+ void store(const args_type &args, u32 hash) {
+ atomic_store(&hash_and_use_count, hash & kHashMask, memory_order_relaxed);
+ size = args.size;
+ tag = args.tag;
+ internal_memcpy(stack, args.trace, size * sizeof(uptr));
+ }
+ args_type load() const {
+ return args_type(&stack[0], size, tag);
+ }
+ StackDepotHandle get_handle() { return StackDepotHandle(this); }
+
+ typedef StackDepotHandle handle_type;
+};
+
+COMPILER_CHECK(StackDepotNode::kMaxUseCount == (u32)kStackDepotMaxUseCount);
+
+u32 StackDepotHandle::id() { return node_->id; }
+int StackDepotHandle::use_count() {
+ return atomic_load(&node_->hash_and_use_count, memory_order_relaxed) &
+ StackDepotNode::kUseCountMask;
+}
+void StackDepotHandle::inc_use_count_unsafe() {
+ u32 prev =
+ atomic_fetch_add(&node_->hash_and_use_count, 1, memory_order_relaxed) &
+ StackDepotNode::kUseCountMask;
+ CHECK_LT(prev + 1, StackDepotNode::kMaxUseCount);
+}
+
+// FIXME(dvyukov): this single reserved bit is used in TSan.
+typedef StackDepotBase<StackDepotNode, 1, StackDepotNode::kTabSizeLog>
+ StackDepot;
+static StackDepot theDepot;
+
+StackDepotStats *StackDepotGetStats() {
+ return theDepot.GetStats();
+}
+
+u32 StackDepotPut(StackTrace stack) {
+ StackDepotHandle h = theDepot.Put(stack);
+ return h.valid() ? h.id() : 0;
+}
+
+StackDepotHandle StackDepotPut_WithHandle(StackTrace stack) {
+ return theDepot.Put(stack);
+}
+
+StackTrace StackDepotGet(u32 id) {
+ return theDepot.Get(id);
+}
+
+void StackDepotLockAll() {
+ theDepot.LockAll();
+}
+
+void StackDepotUnlockAll() {
+ theDepot.UnlockAll();
+}
+
+bool StackDepotReverseMap::IdDescPair::IdComparator(
+ const StackDepotReverseMap::IdDescPair &a,
+ const StackDepotReverseMap::IdDescPair &b) {
+ return a.id < b.id;
+}
+
+StackDepotReverseMap::StackDepotReverseMap() {
+ map_.reserve(StackDepotGetStats()->n_uniq_ids + 100);
+ for (int idx = 0; idx < StackDepot::kTabSize; idx++) {
+ atomic_uintptr_t *p = &theDepot.tab[idx];
+ uptr v = atomic_load(p, memory_order_consume);
+ StackDepotNode *s = (StackDepotNode*)(v & ~1);
+ for (; s; s = s->link) {
+ IdDescPair pair = {s->id, s};
+ map_.push_back(pair);
+ }
+ }
+ Sort(map_.data(), map_.size(), &IdDescPair::IdComparator);
+}
+
+StackTrace StackDepotReverseMap::Get(u32 id) {
+ if (!map_.size())
+ return StackTrace();
+ IdDescPair pair = {id, nullptr};
+ uptr idx =
+ InternalLowerBound(map_, 0, map_.size(), pair, IdDescPair::IdComparator);
+ if (idx >= map_.size() || map_[idx].id != id)
+ return StackTrace();
+ return map_[idx].desc->load();
+}
+
+} // namespace __sanitizer
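
StackDepotNode squeezes a truncated hash and a use counter into one atomic u32:
the low kTabSizeLog bits of the hash are the bucket index and therefore equal
for every node in a bucket, so those bits hold the counter instead. A small
sketch of the split (plain uint32_t, no atomics; Pack is a hypothetical name):

#include <cassert>
#include <cstdint>

constexpr std::uint32_t kUseCountBits = 20;  // kTabSizeLog on non-Android.
constexpr std::uint32_t kUseCountMask = (1u << kUseCountBits) - 1;
constexpr std::uint32_t kHashMask = ~kUseCountMask;

std::uint32_t Pack(std::uint32_t hash, std::uint32_t use_count) {
  return (hash & kHashMask) | (use_count & kUseCountMask);
}

int main() {
  std::uint32_t v = Pack(0xABCDE123u, 7);
  assert((v & kHashMask) == (0xABCDE123u & kHashMask));  // Top 12 hash bits.
  assert((v & kUseCountMask) == 7);                      // Low 20-bit counter.
  return 0;
}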
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_stackdepot.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_stackdepot.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_stackdepot.h (revision 351984)
@@ -0,0 +1,71 @@
+//===-- sanitizer_stackdepot.h ----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries.
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_STACKDEPOT_H
+#define SANITIZER_STACKDEPOT_H
+
+#include "sanitizer_common.h"
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_stacktrace.h"
+
+namespace __sanitizer {
+
+// StackDepot efficiently stores huge amounts of stack traces.
+struct StackDepotNode;
+struct StackDepotHandle {
+ StackDepotNode *node_;
+ StackDepotHandle() : node_(nullptr) {}
+ explicit StackDepotHandle(StackDepotNode *node) : node_(node) {}
+ bool valid() { return node_; }
+ u32 id();
+ int use_count();
+ void inc_use_count_unsafe();
+};
+
+const int kStackDepotMaxUseCount = 1U << (SANITIZER_ANDROID ? 16 : 20);
+
+StackDepotStats *StackDepotGetStats();
+u32 StackDepotPut(StackTrace stack);
+StackDepotHandle StackDepotPut_WithHandle(StackTrace stack);
+// Retrieves a stored stack trace by the id.
+StackTrace StackDepotGet(u32 id);
+
+void StackDepotLockAll();
+void StackDepotUnlockAll();
+
+// Instantiating this class creates a snapshot of StackDepot which can be
+// efficiently queried with StackDepotGet(). You can use it concurrently with
+// StackDepot, but the snapshot is only guaranteed to contain those stack traces
+// which were stored before it was instantiated.
+class StackDepotReverseMap {
+ public:
+ StackDepotReverseMap();
+ StackTrace Get(u32 id);
+
+ private:
+ struct IdDescPair {
+ u32 id;
+ StackDepotNode *desc;
+
+ static bool IdComparator(const IdDescPair &a, const IdDescPair &b);
+ };
+
+ InternalMmapVector<IdDescPair> map_;
+
+ // Disallow evil constructors.
+ StackDepotReverseMap(const StackDepotReverseMap&);
+ void operator=(const StackDepotReverseMap&);
+};
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_STACKDEPOT_H
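
Typical use of this interface is a put/get round trip: store the PCs once, keep
only the 32-bit id, and fetch the trace back when a report needs it. A usage
sketch, assuming it is compiled inside the sanitizer runtime tree
(RememberTrace/RecallTrace are hypothetical wrappers):

#include "sanitizer_stackdepot.h"

using namespace __sanitizer;

// Store a trace once and keep only the 32-bit id; the depot dedups
// identical traces, so repeated Put calls return the same id.
u32 RememberTrace(const uptr *pcs, u32 n) {
  return StackDepotPut(StackTrace(pcs, n));  // Returns 0 if not stored.
}

StackTrace RecallTrace(u32 id) {
  return StackDepotGet(id);  // Empty trace for id == 0 or unknown ids.
}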
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_stackdepotbase.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_stackdepotbase.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_stackdepotbase.h (revision 351984)
@@ -0,0 +1,177 @@
+//===-- sanitizer_stackdepotbase.h ------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementation of a mapping from arbitrary values to unique 32-bit
+// identifiers.
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_STACKDEPOTBASE_H
+#define SANITIZER_STACKDEPOTBASE_H
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_mutex.h"
+#include "sanitizer_atomic.h"
+#include "sanitizer_persistent_allocator.h"
+
+namespace __sanitizer {
+
+template <class Node, int kReservedBits, int kTabSizeLog>
+class StackDepotBase {
+ public:
+ typedef typename Node::args_type args_type;
+ typedef typename Node::handle_type handle_type;
+ // Maps a stack trace to a unique id.
+ handle_type Put(args_type args, bool *inserted = nullptr);
+ // Retrieves a stored stack trace by the id.
+ args_type Get(u32 id);
+
+ StackDepotStats *GetStats() { return &stats; }
+
+ void LockAll();
+ void UnlockAll();
+
+ private:
+ static Node *find(Node *s, args_type args, u32 hash);
+ static Node *lock(atomic_uintptr_t *p);
+ static void unlock(atomic_uintptr_t *p, Node *s);
+
+ static const int kTabSize = 1 << kTabSizeLog; // Hash table size.
+ static const int kPartBits = 8;
+ static const int kPartShift = sizeof(u32) * 8 - kPartBits - kReservedBits;
+ static const int kPartCount =
+ 1 << kPartBits; // Number of subparts in the table.
+ static const int kPartSize = kTabSize / kPartCount;
+ static const int kMaxId = 1 << kPartShift;
+
+ atomic_uintptr_t tab[kTabSize]; // Hash table of Node's.
+ atomic_uint32_t seq[kPartCount]; // Unique id generators.
+
+ StackDepotStats stats;
+
+ friend class StackDepotReverseMap;
+};
+
+template <class Node, int kReservedBits, int kTabSizeLog>
+Node *StackDepotBase<Node, kReservedBits, kTabSizeLog>::find(Node *s,
+ args_type args,
+ u32 hash) {
+ // Searches the linked list s for the stack; returns the matching node or null.
+ for (; s; s = s->link) {
+ if (s->eq(hash, args)) {
+ return s;
+ }
+ }
+ return nullptr;
+}
+
+template <class Node, int kReservedBits, int kTabSizeLog>
+Node *StackDepotBase<Node, kReservedBits, kTabSizeLog>::lock(
+ atomic_uintptr_t *p) {
+ // Uses the pointer lsb as mutex.
+ for (int i = 0;; i++) {
+ uptr cmp = atomic_load(p, memory_order_relaxed);
+ if ((cmp & 1) == 0 &&
+ atomic_compare_exchange_weak(p, &cmp, cmp | 1, memory_order_acquire))
+ return (Node *)cmp;
+ if (i < 10)
+ proc_yield(10);
+ else
+ internal_sched_yield();
+ }
+}
+
+template <class Node, int kReservedBits, int kTabSizeLog>
+void StackDepotBase<Node, kReservedBits, kTabSizeLog>::unlock(
+ atomic_uintptr_t *p, Node *s) {
+ DCHECK_EQ((uptr)s & 1, 0);
+ atomic_store(p, (uptr)s, memory_order_release);
+}
+
+template <class Node, int kReservedBits, int kTabSizeLog>
+typename StackDepotBase<Node, kReservedBits, kTabSizeLog>::handle_type
+StackDepotBase<Node, kReservedBits, kTabSizeLog>::Put(args_type args,
+ bool *inserted) {
+ if (inserted) *inserted = false;
+ if (!Node::is_valid(args)) return handle_type();
+ uptr h = Node::hash(args);
+ atomic_uintptr_t *p = &tab[h % kTabSize];
+ uptr v = atomic_load(p, memory_order_consume);
+ Node *s = (Node *)(v & ~1);
+ // First, try to find the existing stack.
+ Node *node = find(s, args, h);
+ if (node) return node->get_handle();
+ // If failed, lock, retry and insert new.
+ Node *s2 = lock(p);
+ if (s2 != s) {
+ node = find(s2, args, h);
+ if (node) {
+ unlock(p, s2);
+ return node->get_handle();
+ }
+ }
+ uptr part = (h % kTabSize) / kPartSize;
+ u32 id = atomic_fetch_add(&seq[part], 1, memory_order_relaxed) + 1;
+ stats.n_uniq_ids++;
+ CHECK_LT(id, kMaxId);
+ id |= part << kPartShift;
+ CHECK_NE(id, 0);
+ CHECK_EQ(id & (((u32)-1) >> kReservedBits), id);
+ uptr memsz = Node::storage_size(args);
+ s = (Node *)PersistentAlloc(memsz);
+ stats.allocated += memsz;
+ s->id = id;
+ s->store(args, h);
+ s->link = s2;
+ unlock(p, s);
+ if (inserted) *inserted = true;
+ return s->get_handle();
+}
+
+template <class Node, int kReservedBits, int kTabSizeLog>
+typename StackDepotBase<Node, kReservedBits, kTabSizeLog>::args_type
+StackDepotBase<Node, kReservedBits, kTabSizeLog>::Get(u32 id) {
+ if (id == 0) {
+ return args_type();
+ }
+ CHECK_EQ(id & (((u32)-1) >> kReservedBits), id);
+ // High kPartBits contain part id, so we need to scan at most kPartSize lists.
+ uptr part = id >> kPartShift;
+ for (int i = 0; i != kPartSize; i++) {
+ uptr idx = part * kPartSize + i;
+ CHECK_LT(idx, kTabSize);
+ atomic_uintptr_t *p = &tab[idx];
+ uptr v = atomic_load(p, memory_order_consume);
+ Node *s = (Node *)(v & ~1);
+ for (; s; s = s->link) {
+ if (s->id == id) {
+ return s->load();
+ }
+ }
+ }
+ return args_type();
+}
+
+template <class Node, int kReservedBits, int kTabSizeLog>
+void StackDepotBase<Node, kReservedBits, kTabSizeLog>::LockAll() {
+ for (int i = 0; i < kTabSize; ++i) {
+ lock(&tab[i]);
+ }
+}
+
+template <class Node, int kReservedBits, int kTabSizeLog>
+void StackDepotBase<Node, kReservedBits, kTabSizeLog>::UnlockAll() {
+ for (int i = 0; i < kTabSize; ++i) {
+ atomic_uintptr_t *p = &tab[i];
+ uptr s = atomic_load(p, memory_order_relaxed);
+ unlock(p, (Node *)(s & ~1UL));
+ }
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_STACKDEPOTBASE_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_stackdepotbase.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
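
StackDepotBase avoids a separate mutex per bucket by using bit 0 of each
bucket's head pointer as a spinlock; nodes are at least 2-byte aligned, so the
bit is otherwise always zero. The same idea in portable C++11, a sketch with
std::atomic in place of the runtime's own atomics:

#include <atomic>
#include <cstdint>

struct Node { Node *link; };

Node *LockBucket(std::atomic<std::uintptr_t> *p) {
  for (;;) {
    std::uintptr_t cmp = p->load(std::memory_order_relaxed);
    // Try to set bit 0; success means we own the bucket.
    if ((cmp & 1) == 0 &&
        p->compare_exchange_weak(cmp, cmp | 1, std::memory_order_acquire))
      return reinterpret_cast<Node *>(cmp);  // Head, with the lock bit clear.
  }
}

void UnlockBucket(std::atomic<std::uintptr_t> *p, Node *head) {
  // Storing the (even) pointer clears the lock bit and publishes the list.
  p->store(reinterpret_cast<std::uintptr_t>(head), std::memory_order_release);
}

The real lock() additionally backs off with proc_yield/internal_sched_yield
while spinning, which this sketch omits.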
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_stacktrace.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_stacktrace.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_stacktrace.cc (revision 351984)
@@ -0,0 +1,133 @@
+//===-- sanitizer_stacktrace.cc -------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common.h"
+#include "sanitizer_flags.h"
+#include "sanitizer_stacktrace.h"
+
+namespace __sanitizer {
+
+uptr StackTrace::GetNextInstructionPc(uptr pc) {
+#if defined(__sparc__) || defined(__mips__)
+ return pc + 8;
+#elif defined(__powerpc__) || defined(__arm__) || defined(__aarch64__)
+ return pc + 4;
+#else
+ return pc + 1;
+#endif
+}
+
+uptr StackTrace::GetCurrentPc() {
+ return GET_CALLER_PC();
+}
+
+void BufferedStackTrace::Init(const uptr *pcs, uptr cnt, uptr extra_top_pc) {
+ size = cnt + !!extra_top_pc;
+ CHECK_LE(size, kStackTraceMax);
+ internal_memcpy(trace_buffer, pcs, cnt * sizeof(trace_buffer[0]));
+ if (extra_top_pc)
+ trace_buffer[cnt] = extra_top_pc;
+ top_frame_bp = 0;
+}
+
+// The Sparc implementation is in its own file.
+#if !defined(__sparc__)
+
+// In GCC on ARM, bp points to the saved lr rather than fp, so we should check
+// whether the next cell on the stack is a saved frame pointer.
+// GetCanonicFrame returns a pointer to the saved frame pointer in either case.
+static inline uhwptr *GetCanonicFrame(uptr bp,
+ uptr stack_top,
+ uptr stack_bottom) {
+ CHECK_GT(stack_top, stack_bottom);
+#ifdef __arm__
+ if (!IsValidFrame(bp, stack_top, stack_bottom)) return 0;
+ uhwptr *bp_prev = (uhwptr *)bp;
+ if (IsValidFrame((uptr)bp_prev[0], stack_top, stack_bottom)) return bp_prev;
+ // The next frame pointer does not look right. This could be a GCC frame;
+ // step back by one word and try again.
+ if (IsValidFrame((uptr)bp_prev[-1], stack_top, stack_bottom))
+ return bp_prev - 1;
+ // Nope, this does not look right either. This means the frame after next does
+ // not have a valid frame pointer, but we can still extract the caller PC.
+ // Unfortunately, there is no way to decide between GCC and LLVM frame
+ // layouts. Assume LLVM.
+ return bp_prev;
+#else
+ return (uhwptr*)bp;
+#endif
+}
+
+void BufferedStackTrace::UnwindFast(uptr pc, uptr bp, uptr stack_top,
+ uptr stack_bottom, u32 max_depth) {
+ // TODO(yln): add arg sanity check for stack_top/stack_bottom
+ CHECK_GE(max_depth, 2);
+ const uptr kPageSize = GetPageSizeCached();
+ trace_buffer[0] = pc;
+ size = 1;
+ if (stack_top < 4096) return; // Sanity check for stack top.
+ uhwptr *frame = GetCanonicFrame(bp, stack_top, stack_bottom);
+ // Lowest possible address that makes sense as the next frame pointer.
+ // Goes up as we walk the stack.
+ uptr bottom = stack_bottom;
+ // Avoid an infinite loop when frame == frame[0]: raising bottom past each
+ // visited frame forces every new frame to lie above the previous one.
+ while (IsValidFrame((uptr)frame, stack_top, bottom) &&
+ IsAligned((uptr)frame, sizeof(*frame)) &&
+ size < max_depth) {
+#ifdef __powerpc__
+ // PowerPC ABIs specify that the return address is saved at offset
+ // 16 of the *caller's* stack frame. Thus we must dereference the
+ // back chain to find the caller frame before extracting it.
+ uhwptr *caller_frame = (uhwptr*)frame[0];
+ if (!IsValidFrame((uptr)caller_frame, stack_top, bottom) ||
+ !IsAligned((uptr)caller_frame, sizeof(uhwptr)))
+ break;
+ uhwptr pc1 = caller_frame[2];
+#elif defined(__s390__)
+ uhwptr pc1 = frame[14];
+#else
+ uhwptr pc1 = frame[1];
+#endif
+ // Let's assume that any pointer in the 0th page (i.e. <0x1000 on i386 and
+ // x86_64) is invalid and stop unwinding here. If we're adding support for
+ // a platform where this isn't true, we need to reconsider this check.
+ if (pc1 < kPageSize)
+ break;
+ if (pc1 != pc) {
+ trace_buffer[size++] = (uptr) pc1;
+ }
+ bottom = (uptr)frame;
+ frame = GetCanonicFrame((uptr)frame[0], stack_top, bottom);
+ }
+}
+
+#endif // !defined(__sparc__)
+
+void BufferedStackTrace::PopStackFrames(uptr count) {
+ CHECK_LT(count, size);
+ size -= count;
+ for (uptr i = 0; i < size; ++i) {
+ trace_buffer[i] = trace_buffer[i + count];
+ }
+}
+
+static uptr Distance(uptr a, uptr b) { return a < b ? b - a : a - b; }
+
+uptr BufferedStackTrace::LocatePcInTrace(uptr pc) {
+ uptr best = 0;
+ for (uptr i = 1; i < size; ++i) {
+ if (Distance(trace[i], pc) < Distance(trace[best], pc)) best = i;
+ }
+ return best;
+}
+
+} // namespace __sanitizer
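
UnwindFast above is a frame-pointer walk: each frame begins with the caller's
frame pointer, followed (on most targets) by the return address. A bare-bones
x86-64-flavoured sketch of that loop, with none of the ARM/PowerPC quirks or
canonic-frame fixups, and with inline bounds checks standing in for
IsValidFrame (WalkFrames is a hypothetical name):

#include <cstdint>

// Walk an fp chain: frame[0] = caller fp, frame[1] = return address.
// Stops at max_depth, misaligned frames, or frames outside [bottom, top).
int WalkFrames(std::uintptr_t fp, std::uintptr_t top, std::uintptr_t bottom,
               std::uintptr_t *out, int max_depth) {
  int n = 0;
  while (n < max_depth && fp >= bottom && fp + 2 * sizeof(void *) <= top &&
         fp % sizeof(void *) == 0) {
    auto *frame = reinterpret_cast<std::uintptr_t *>(fp);
    out[n++] = frame[1];  // Return address of this frame.
    if (frame[0] <= fp)   // Frame pointers must strictly increase.
      break;
    fp = frame[0];
  }
  return n;
}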
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_stacktrace.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_stacktrace.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_stacktrace.h (revision 351984)
@@ -0,0 +1,176 @@
+//===-- sanitizer_stacktrace.h ----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_STACKTRACE_H
+#define SANITIZER_STACKTRACE_H
+
+#include "sanitizer_internal_defs.h"
+
+namespace __sanitizer {
+
+struct BufferedStackTrace;
+
+static const u32 kStackTraceMax = 256;
+
+#if SANITIZER_LINUX && defined(__mips__)
+# define SANITIZER_CAN_FAST_UNWIND 0
+#elif SANITIZER_WINDOWS
+# define SANITIZER_CAN_FAST_UNWIND 0
+#elif SANITIZER_OPENBSD
+# define SANITIZER_CAN_FAST_UNWIND 0
+#else
+# define SANITIZER_CAN_FAST_UNWIND 1
+#endif
+
+// Fast unwind is the only option on Mac for now; we will need to
+// revisit this macro when slow unwind works on Mac, see
+// https://github.com/google/sanitizers/issues/137
+#if SANITIZER_MAC || SANITIZER_OPENBSD || SANITIZER_RTEMS
+# define SANITIZER_CAN_SLOW_UNWIND 0
+#else
+# define SANITIZER_CAN_SLOW_UNWIND 1
+#endif
+
+struct StackTrace {
+ const uptr *trace;
+ u32 size;
+ u32 tag;
+
+ static const int TAG_UNKNOWN = 0;
+ static const int TAG_ALLOC = 1;
+ static const int TAG_DEALLOC = 2;
+ static const int TAG_CUSTOM = 100; // Tool specific tags start here.
+
+ StackTrace() : trace(nullptr), size(0), tag(0) {}
+ StackTrace(const uptr *trace, u32 size) : trace(trace), size(size), tag(0) {}
+ StackTrace(const uptr *trace, u32 size, u32 tag)
+ : trace(trace), size(size), tag(tag) {}
+
+ // Prints a symbolized stacktrace, followed by an empty line.
+ void Print() const;
+
+ static bool WillUseFastUnwind(bool request_fast_unwind) {
+ if (!SANITIZER_CAN_FAST_UNWIND)
+ return false;
+ if (!SANITIZER_CAN_SLOW_UNWIND)
+ return true;
+ return request_fast_unwind;
+ }
+
+ static uptr GetCurrentPc();
+ static inline uptr GetPreviousInstructionPc(uptr pc);
+ static uptr GetNextInstructionPc(uptr pc);
+ typedef bool (*SymbolizeCallback)(const void *pc, char *out_buffer,
+ int out_size);
+};
+
+// Performance-critical, must be in the header.
+ALWAYS_INLINE
+uptr StackTrace::GetPreviousInstructionPc(uptr pc) {
+#if defined(__arm__)
+ // T32 (Thumb) branch instructions might be 16 or 32 bit long,
+ // so we return (pc-2) in that case in order to be safe.
+ // For A32 mode we return (pc-4) because all instructions are 32 bit long.
+ return (pc - 3) & (~1);
+#elif defined(__powerpc__) || defined(__powerpc64__) || defined(__aarch64__)
+ // PCs are always 4 byte aligned.
+ return pc - 4;
+#elif defined(__sparc__) || defined(__mips__)
+ return pc - 8;
+#else
+ return pc - 1;
+#endif
+}
+
+// StackTrace that owns the buffer used to store the addresses.
+struct BufferedStackTrace : public StackTrace {
+ uptr trace_buffer[kStackTraceMax];
+ uptr top_frame_bp; // Optional bp of a top frame.
+
+ BufferedStackTrace() : StackTrace(trace_buffer, 0), top_frame_bp(0) {}
+
+ void Init(const uptr *pcs, uptr cnt, uptr extra_top_pc = 0);
+
+ // Get the stack trace with the given pc and bp.
+ // The pc will be in the position 0 of the resulting stack trace.
+ // The bp may refer to the current frame or to the caller's frame.
+ void Unwind(uptr pc, uptr bp, void *context, bool request_fast,
+ u32 max_depth = kStackTraceMax) {
+ top_frame_bp = (max_depth > 0) ? bp : 0;
+ // Small max_depth optimization
+ if (max_depth <= 1) {
+ if (max_depth == 1)
+ trace_buffer[0] = pc;
+ size = max_depth;
+ return;
+ }
+ UnwindImpl(pc, bp, context, request_fast, max_depth);
+ }
+
+ void Unwind(u32 max_depth, uptr pc, uptr bp, void *context, uptr stack_top,
+ uptr stack_bottom, bool request_fast_unwind);
+
+ void Reset() {
+ *static_cast<StackTrace *>(this) = StackTrace(trace_buffer, 0);
+ top_frame_bp = 0;
+ }
+
+ private:
+ // Every runtime defines its own implementation of this method
+ void UnwindImpl(uptr pc, uptr bp, void *context, bool request_fast,
+ u32 max_depth);
+
+ // UnwindFast/Slow have platform-specific implementations
+ void UnwindFast(uptr pc, uptr bp, uptr stack_top, uptr stack_bottom,
+ u32 max_depth);
+ void UnwindSlow(uptr pc, u32 max_depth);
+ void UnwindSlow(uptr pc, void *context, u32 max_depth);
+
+ void PopStackFrames(uptr count);
+ uptr LocatePcInTrace(uptr pc);
+
+ BufferedStackTrace(const BufferedStackTrace &) = delete;
+ void operator=(const BufferedStackTrace &) = delete;
+
+ friend class FastUnwindTest;
+};
+
+// Check if given pointer points into allocated stack area.
+static inline bool IsValidFrame(uptr frame, uptr stack_top, uptr stack_bottom) {
+ return frame > stack_bottom && frame < stack_top - 2 * sizeof (uhwptr);
+}
+
+} // namespace __sanitizer
+
+// Use this macro if you want to print stack trace with the caller
+// of the current function in the top frame.
+#define GET_CALLER_PC_BP \
+ uptr bp = GET_CURRENT_FRAME(); \
+ uptr pc = GET_CALLER_PC();
+
+#define GET_CALLER_PC_BP_SP \
+ GET_CALLER_PC_BP; \
+ uptr local_stack; \
+ uptr sp = (uptr)&local_stack
+
+// Use this macro if you want to print stack trace with the current
+// function in the top frame.
+#define GET_CURRENT_PC_BP \
+ uptr bp = GET_CURRENT_FRAME(); \
+ uptr pc = StackTrace::GetCurrentPc()
+
+#define GET_CURRENT_PC_BP_SP \
+ GET_CURRENT_PC_BP; \
+ uptr local_stack; \
+ uptr sp = (uptr)&local_stack
+
+
+#endif // SANITIZER_STACKTRACE_H
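
Call sites grab pc/bp with the macros at the bottom of this header and hand
them to Unwind. A sketch of the idiom; GET_CURRENT_FRAME and GET_CALLER_PC
live in sanitizer_internal_defs.h, and are replaced here by the GCC/Clang
builtins they typically expand to (an assumption, not a quote of that file):

#include <cstdint>

// Roughly what GET_CALLER_PC_BP expands to, using compiler builtins.
#define GET_CALLER_PC_BP_SKETCH                                    \
  std::uintptr_t bp = (std::uintptr_t)__builtin_frame_address(0);  \
  std::uintptr_t pc = (std::uintptr_t)__builtin_return_address(0)

void ReportHere() {
  GET_CALLER_PC_BP_SKETCH;
  // A real runtime would now call stack->Unwind(pc, bp, ...), so the
  // caller of ReportHere() appears in the top frame of the report.
  (void)bp;
  (void)pc;
}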
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_stacktrace_libcdep.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_stacktrace_libcdep.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_stacktrace_libcdep.cc (revision 351984)
@@ -0,0 +1,158 @@
+//===-- sanitizer_stacktrace_libcdep.cc -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common.h"
+#include "sanitizer_placement_new.h"
+#include "sanitizer_stacktrace.h"
+#include "sanitizer_stacktrace_printer.h"
+#include "sanitizer_symbolizer.h"
+
+namespace __sanitizer {
+
+void StackTrace::Print() const {
+ if (trace == nullptr || size == 0) {
+ Printf(" <empty stack>\n\n");
+ return;
+ }
+ InternalScopedString frame_desc(GetPageSizeCached() * 2);
+ InternalScopedString dedup_token(GetPageSizeCached());
+ int dedup_frames = common_flags()->dedup_token_length;
+ uptr frame_num = 0;
+ for (uptr i = 0; i < size && trace[i]; i++) {
+ // PCs in stack traces are actually the return addresses, that is,
+ // addresses of the next instructions after the call.
+ uptr pc = GetPreviousInstructionPc(trace[i]);
+ SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(pc);
+ CHECK(frames);
+ for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
+ frame_desc.clear();
+ RenderFrame(&frame_desc, common_flags()->stack_trace_format, frame_num++,
+ cur->info, common_flags()->symbolize_vs_style,
+ common_flags()->strip_path_prefix);
+ Printf("%s\n", frame_desc.data());
+ if (dedup_frames-- > 0) {
+ if (dedup_token.length())
+ dedup_token.append("--");
+ if (cur->info.function != nullptr)
+ dedup_token.append(cur->info.function);
+ }
+ }
+ frames->ClearAll();
+ }
+ // Always print a trailing empty line after stack trace.
+ Printf("\n");
+ if (dedup_token.length())
+ Printf("DEDUP_TOKEN: %s\n", dedup_token.data());
+}
+
+void BufferedStackTrace::Unwind(u32 max_depth, uptr pc, uptr bp, void *context,
+ uptr stack_top, uptr stack_bottom,
+ bool request_fast_unwind) {
+ // Ensures all call sites get what they requested.
+ CHECK_EQ(request_fast_unwind, WillUseFastUnwind(request_fast_unwind));
+ top_frame_bp = (max_depth > 0) ? bp : 0;
+ // Avoid doing any work for small max_depth.
+ if (max_depth == 0) {
+ size = 0;
+ return;
+ }
+ if (max_depth == 1) {
+ size = 1;
+ trace_buffer[0] = pc;
+ return;
+ }
+ if (!WillUseFastUnwind(request_fast_unwind)) {
+#if SANITIZER_CAN_SLOW_UNWIND
+ if (context)
+ UnwindSlow(pc, context, max_depth);
+ else
+ UnwindSlow(pc, max_depth);
+#else
+ UNREACHABLE("slow unwind requested but not available");
+#endif
+ } else {
+ UnwindFast(pc, bp, stack_top, stack_bottom, max_depth);
+ }
+}
+
+static int GetModuleAndOffsetForPc(uptr pc, char *module_name,
+ uptr module_name_len, uptr *pc_offset) {
+ const char *found_module_name = nullptr;
+ bool ok = Symbolizer::GetOrInit()->GetModuleNameAndOffsetForPC(
+ pc, &found_module_name, pc_offset);
+
+ if (!ok) return false;
+
+ if (module_name && module_name_len) {
+ internal_strncpy(module_name, found_module_name, module_name_len);
+ module_name[module_name_len - 1] = '\x00';
+ }
+ return true;
+}
+
+} // namespace __sanitizer
+using namespace __sanitizer;
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_symbolize_pc(uptr pc, const char *fmt, char *out_buf,
+ uptr out_buf_size) {
+ if (!out_buf_size) return;
+ pc = StackTrace::GetPreviousInstructionPc(pc);
+ SymbolizedStack *frame = Symbolizer::GetOrInit()->SymbolizePC(pc);
+ if (!frame) {
+ internal_strncpy(out_buf, "<can't symbolize>", out_buf_size);
+ out_buf[out_buf_size - 1] = 0;
+ return;
+ }
+ InternalScopedString frame_desc(GetPageSizeCached());
+ uptr frame_num = 0;
+ // Reserve one byte for the final 0.
+ char *out_end = out_buf + out_buf_size - 1;
+ for (SymbolizedStack *cur = frame; cur && out_buf < out_end;
+ cur = cur->next) {
+ frame_desc.clear();
+ RenderFrame(&frame_desc, fmt, frame_num++, cur->info,
+ common_flags()->symbolize_vs_style,
+ common_flags()->strip_path_prefix);
+ if (!frame_desc.length())
+ continue;
+ // Reserve one byte for the terminating 0.
+ uptr n = out_end - out_buf - 1;
+ internal_strncpy(out_buf, frame_desc.data(), n);
+ out_buf += __sanitizer::Min<uptr>(n, frame_desc.length());
+ *out_buf++ = 0;
+ }
+ CHECK(out_buf <= out_end);
+ *out_buf = 0;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_symbolize_global(uptr data_addr, const char *fmt,
+ char *out_buf, uptr out_buf_size) {
+ if (!out_buf_size) return;
+ out_buf[0] = 0;
+ DataInfo DI;
+ if (!Symbolizer::GetOrInit()->SymbolizeData(data_addr, &DI)) return;
+ InternalScopedString data_desc(GetPageSizeCached());
+ RenderData(&data_desc, fmt, &DI, common_flags()->strip_path_prefix);
+ internal_strncpy(out_buf, data_desc.data(), out_buf_size);
+ out_buf[out_buf_size - 1] = 0;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __sanitizer_get_module_and_offset_for_pc( // NOLINT
+ uptr pc, char *module_name, uptr module_name_len, uptr *pc_offset) {
+ return __sanitizer::GetModuleAndOffsetForPc(pc, module_name, module_name_len,
+ pc_offset);
+}
+} // extern "C"
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_stacktrace_libcdep.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
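
__sanitizer_symbolize_pc above can emit several frames (from inlining) into one
buffer as consecutive NUL-terminated strings, with an extra NUL after the last
one. A consumer-side sketch of walking that layout; the buffer contents here
are made up for the demo:

#include <cstddef>
#include <cstdio>
#include <cstring>

// Walk NUL-separated strings; an empty string (double NUL) ends the list.
void PrintFrames(const char *buf, std::size_t buf_size) {
  const char *p = buf;
  const char *end = buf + buf_size;
  while (p < end && *p != '\0') {
    std::printf("%s\n", p);
    p += std::strlen(p) + 1;  // Skip the string and its terminator.
  }
}

int main() {
  const char demo[] = "inlined_callee\0caller\0";  // Double NUL at the end.
  PrintFrames(demo, sizeof(demo));
  return 0;
}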
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_stacktrace_printer.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_stacktrace_printer.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_stacktrace_printer.cc (revision 351984)
@@ -0,0 +1,263 @@
+//===-- sanitizer_stacktrace_printer.cc -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between sanitizers' run-time libraries.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_stacktrace_printer.h"
+#include "sanitizer_file.h"
+#include "sanitizer_fuchsia.h"
+
+namespace __sanitizer {
+
+// sanitizer_symbolizer_markup.cc implements these differently.
+#if !SANITIZER_SYMBOLIZER_MARKUP
+
+static const char *StripFunctionName(const char *function, const char *prefix) {
+ if (!function) return nullptr;
+ if (!prefix) return function;
+ uptr prefix_len = internal_strlen(prefix);
+ if (0 == internal_strncmp(function, prefix, prefix_len))
+ return function + prefix_len;
+ return function;
+}
+
+static const char *DemangleFunctionName(const char *function) {
+ if (!function) return nullptr;
+
+ // NetBSD uses indirection for old threading functions for historical
+ // reasons. The mangled names are an internal implementation detail and
+ // should not be exposed, even in backtraces.
+#if SANITIZER_NETBSD
+ if (!internal_strcmp(function, "__libc_mutex_init"))
+ return "pthread_mutex_init";
+ if (!internal_strcmp(function, "__libc_mutex_lock"))
+ return "pthread_mutex_lock";
+ if (!internal_strcmp(function, "__libc_mutex_trylock"))
+ return "pthread_mutex_trylock";
+ if (!internal_strcmp(function, "__libc_mutex_unlock"))
+ return "pthread_mutex_unlock";
+ if (!internal_strcmp(function, "__libc_mutex_destroy"))
+ return "pthread_mutex_destroy";
+ if (!internal_strcmp(function, "__libc_mutexattr_init"))
+ return "pthread_mutexattr_init";
+ if (!internal_strcmp(function, "__libc_mutexattr_settype"))
+ return "pthread_mutexattr_settype";
+ if (!internal_strcmp(function, "__libc_mutexattr_destroy"))
+ return "pthread_mutexattr_destroy";
+ if (!internal_strcmp(function, "__libc_cond_init"))
+ return "pthread_cond_init";
+ if (!internal_strcmp(function, "__libc_cond_signal"))
+ return "pthread_cond_signal";
+ if (!internal_strcmp(function, "__libc_cond_broadcast"))
+ return "pthread_cond_broadcast";
+ if (!internal_strcmp(function, "__libc_cond_wait"))
+ return "pthread_cond_wait";
+ if (!internal_strcmp(function, "__libc_cond_timedwait"))
+ return "pthread_cond_timedwait";
+ if (!internal_strcmp(function, "__libc_cond_destroy"))
+ return "pthread_cond_destroy";
+ if (!internal_strcmp(function, "__libc_rwlock_init"))
+ return "pthread_rwlock_init";
+ if (!internal_strcmp(function, "__libc_rwlock_rdlock"))
+ return "pthread_rwlock_rdlock";
+ if (!internal_strcmp(function, "__libc_rwlock_wrlock"))
+ return "pthread_rwlock_wrlock";
+ if (!internal_strcmp(function, "__libc_rwlock_tryrdlock"))
+ return "pthread_rwlock_tryrdlock";
+ if (!internal_strcmp(function, "__libc_rwlock_trywrlock"))
+ return "pthread_rwlock_trywrlock";
+ if (!internal_strcmp(function, "__libc_rwlock_unlock"))
+ return "pthread_rwlock_unlock";
+ if (!internal_strcmp(function, "__libc_rwlock_destroy"))
+ return "pthread_rwlock_destroy";
+ if (!internal_strcmp(function, "__libc_thr_keycreate"))
+ return "pthread_key_create";
+ if (!internal_strcmp(function, "__libc_thr_setspecific"))
+ return "pthread_setspecific";
+ if (!internal_strcmp(function, "__libc_thr_getspecific"))
+ return "pthread_getspecific";
+ if (!internal_strcmp(function, "__libc_thr_keydelete"))
+ return "pthread_key_delete";
+ if (!internal_strcmp(function, "__libc_thr_once"))
+ return "pthread_once";
+ if (!internal_strcmp(function, "__libc_thr_self"))
+ return "pthread_self";
+ if (!internal_strcmp(function, "__libc_thr_exit"))
+ return "pthread_exit";
+ if (!internal_strcmp(function, "__libc_thr_setcancelstate"))
+ return "pthread_setcancelstate";
+ if (!internal_strcmp(function, "__libc_thr_equal"))
+ return "pthread_equal";
+ if (!internal_strcmp(function, "__libc_thr_curcpu"))
+ return "pthread_curcpu_np";
+ if (!internal_strcmp(function, "__libc_thr_sigsetmask"))
+ return "pthread_sigmask";
+#endif
+
+ return function;
+}
+
+static const char kDefaultFormat[] = " #%n %p %F %L";
+
+void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
+ const AddressInfo &info, bool vs_style,
+ const char *strip_path_prefix, const char *strip_func_prefix) {
+ if (0 == internal_strcmp(format, "DEFAULT"))
+ format = kDefaultFormat;
+ for (const char *p = format; *p != '\0'; p++) {
+ if (*p != '%') {
+ buffer->append("%c", *p);
+ continue;
+ }
+ p++;
+ switch (*p) {
+ case '%':
+ buffer->append("%%");
+ break;
+ // Frame number and all fields of AddressInfo structure.
+ case 'n':
+ buffer->append("%zu", frame_no);
+ break;
+ case 'p':
+ buffer->append("0x%zx", info.address);
+ break;
+ case 'm':
+ buffer->append("%s", StripPathPrefix(info.module, strip_path_prefix));
+ break;
+ case 'o':
+ buffer->append("0x%zx", info.module_offset);
+ break;
+ case 'f':
+ buffer->append("%s",
+ DemangleFunctionName(
+ StripFunctionName(info.function, strip_func_prefix)));
+ break;
+ case 'q':
+ buffer->append("0x%zx", info.function_offset != AddressInfo::kUnknown
+ ? info.function_offset
+ : 0x0);
+ break;
+ case 's':
+ buffer->append("%s", StripPathPrefix(info.file, strip_path_prefix));
+ break;
+ case 'l':
+ buffer->append("%d", info.line);
+ break;
+ case 'c':
+ buffer->append("%d", info.column);
+ break;
+ // Smarter special cases.
+ case 'F':
+ // Function name and offset, if file is unknown.
+ if (info.function) {
+ buffer->append("in %s",
+ DemangleFunctionName(
+ StripFunctionName(info.function, strip_func_prefix)));
+ if (!info.file && info.function_offset != AddressInfo::kUnknown)
+ buffer->append("+0x%zx", info.function_offset);
+ }
+ break;
+ case 'S':
+ // File/line information.
+ RenderSourceLocation(buffer, info.file, info.line, info.column, vs_style,
+ strip_path_prefix);
+ break;
+ case 'L':
+ // Source location, or module location.
+ if (info.file) {
+ RenderSourceLocation(buffer, info.file, info.line, info.column,
+ vs_style, strip_path_prefix);
+ } else if (info.module) {
+ RenderModuleLocation(buffer, info.module, info.module_offset,
+ info.module_arch, strip_path_prefix);
+ } else {
+ buffer->append("(<unknown module>)");
+ }
+ break;
+ case 'M':
+ // Module basename and offset, or PC.
+ if (info.address & kExternalPCBit)
+        {} // These PCs are not meaningful.
+ else if (info.module)
+ // Always strip the module name for %M.
+ RenderModuleLocation(buffer, StripModuleName(info.module),
+ info.module_offset, info.module_arch, "");
+ else
+ buffer->append("(%p)", (void *)info.address);
+ break;
+ default:
+ Report("Unsupported specifier in stack frame format: %c (0x%zx)!\n", *p,
+ *p);
+ Die();
+ }
+ }
+}
+
+void RenderData(InternalScopedString *buffer, const char *format,
+ const DataInfo *DI, const char *strip_path_prefix) {
+ for (const char *p = format; *p != '\0'; p++) {
+ if (*p != '%') {
+ buffer->append("%c", *p);
+ continue;
+ }
+ p++;
+ switch (*p) {
+ case '%':
+ buffer->append("%%");
+ break;
+ case 's':
+ buffer->append("%s", StripPathPrefix(DI->file, strip_path_prefix));
+ break;
+ case 'l':
+ buffer->append("%d", DI->line);
+ break;
+ case 'g':
+ buffer->append("%s", DI->name);
+ break;
+ default:
+ Report("Unsupported specifier in stack frame format: %c (0x%zx)!\n", *p,
+ *p);
+ Die();
+ }
+ }
+}
+
+#endif // !SANITIZER_SYMBOLIZER_MARKUP
+
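+// Two concrete renderings, derived from the logic below: with
+// vs_style == false, ("dir/file.cc", 10, 5) is appended as
+// "dir/file.cc:10:5"; with vs_style == true it becomes "dir/file.cc(10,5)".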
+void RenderSourceLocation(InternalScopedString *buffer, const char *file,
+ int line, int column, bool vs_style,
+ const char *strip_path_prefix) {
+ if (vs_style && line > 0) {
+ buffer->append("%s(%d", StripPathPrefix(file, strip_path_prefix), line);
+ if (column > 0)
+ buffer->append(",%d", column);
+ buffer->append(")");
+ return;
+ }
+
+ buffer->append("%s", StripPathPrefix(file, strip_path_prefix));
+ if (line > 0) {
+ buffer->append(":%d", line);
+ if (column > 0)
+ buffer->append(":%d", column);
+ }
+}
+
+void RenderModuleLocation(InternalScopedString *buffer, const char *module,
+ uptr offset, ModuleArch arch,
+ const char *strip_path_prefix) {
+ buffer->append("(%s", StripPathPrefix(module, strip_path_prefix));
+ if (arch != kModuleArchUnknown) {
+ buffer->append(":%s", ModuleArchToString(arch));
+ }
+ buffer->append("+0x%zx)", offset);
+}
+
+} // namespace __sanitizer
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_stacktrace_printer.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_stacktrace_printer.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_stacktrace_printer.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_stacktrace_printer.h (revision 351984)
@@ -0,0 +1,71 @@
+//===-- sanitizer_stacktrace_printer.h --------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between sanitizers' run-time libraries.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_STACKTRACE_PRINTER_H
+#define SANITIZER_STACKTRACE_PRINTER_H
+
+#include "sanitizer_common.h"
+#include "sanitizer_symbolizer.h"
+
+namespace __sanitizer {
+
+// Render the contents of "info" structure, which represents the contents of
+// stack frame "frame_no" and appends it to the "buffer". "format" is a
+// string with placeholders, which is copied to the output with
+// placeholders substituted with the contents of "info". For example,
+// format string
+// " frame %n: function %F at %S"
+// will be turned into
+// " frame 10: function foo::bar() at my/file.cc:10"
+// You may additionally pass "strip_path_prefix" to strip prefixes of paths to
+// source files and modules, and "strip_func_prefix" to strip prefixes of
+// function names.
+// Here's the full list of available placeholders:
+// %% - represents a '%' character;
+// %n - frame number (copy of frame_no);
+// %p - PC in hex format;
+// %m - path to module (binary or shared object);
+// %o - offset in the module in hex format;
+// %f - function name;
+// %q - offset in the function in hex format (*if available*);
+// %s - path to source file;
+// %l - line in the source file;
+// %c - column in the source file;
+// %F - if function is known to be <foo>, prints "in <foo>", possibly
+// followed by the offset in this function, but only if source file
+// is unknown;
+// %S - prints file/line/column information;
+// %L - prints location information: file/line/column, if it is known, or
+// module+offset if it is known, or (<unknown module>) string.
+// %M - prints module basename and offset, if it is known, or PC.
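+//
+// A minimal usage sketch (assuming "info" was already filled in by a
+// symbolizer):
+//   InternalScopedString out(2048);
+//   RenderFrame(&out, "DEFAULT", /*frame_no=*/0, info, /*vs_style=*/false);
+//   Printf("%s\n", out.data());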
+void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
+ const AddressInfo &info, bool vs_style,
+ const char *strip_path_prefix = "",
+ const char *strip_func_prefix = "");
+
+void RenderSourceLocation(InternalScopedString *buffer, const char *file,
+ int line, int column, bool vs_style,
+ const char *strip_path_prefix);
+
+void RenderModuleLocation(InternalScopedString *buffer, const char *module,
+ uptr offset, ModuleArch arch,
+ const char *strip_path_prefix);
+
+// Same as RenderFrame, but for data section (global variables).
+// Accepts %s, %l from above.
+// Also accepts:
+// %g - name of the global variable.
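+// For example, the format "%g at %s:%l" may render as
+// "my_global at my/file.cc:10".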
+void RenderData(InternalScopedString *buffer, const char *format,
+ const DataInfo *DI, const char *strip_path_prefix = "");
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_STACKTRACE_PRINTER_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_stacktrace_printer.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_stacktrace_sparc.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_stacktrace_sparc.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_stacktrace_sparc.cc (revision 351984)
@@ -0,0 +1,85 @@
+//===-- sanitizer_stacktrace_sparc.cc -------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries.
+//
+// Implementation of fast stack unwinding for Sparc.
+//===----------------------------------------------------------------------===//
+
+#if defined(__sparc__)
+
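+// SPARC V9 biases the stack and frame pointers by 2047: %sp and %fp hold the
+// true address of the register save area minus STACK_BIAS, so every pointer
+// read back from the stack must have the bias added before it is used.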
+#if defined(__arch64__) || defined(__sparcv9)
+#define STACK_BIAS 2047
+#else
+#define STACK_BIAS 0
+#endif
+
+#include "sanitizer_common.h"
+#include "sanitizer_stacktrace.h"
+
+namespace __sanitizer {
+
+void BufferedStackTrace::UnwindFast(uptr pc, uptr bp, uptr stack_top,
+ uptr stack_bottom, u32 max_depth) {
+ // TODO(yln): add arg sanity check for stack_top/stack_bottom
+ CHECK_GE(max_depth, 2);
+ const uptr kPageSize = GetPageSizeCached();
+#if defined(__GNUC__)
+ // __builtin_return_address returns the address of the call instruction
+ // on the SPARC and not the return address, so we need to compensate.
+ trace_buffer[0] = GetNextInstructionPc(pc);
+#else
+ trace_buffer[0] = pc;
+#endif
+ size = 1;
+ if (stack_top < 4096) return; // Sanity check for stack top.
+ // Flush register windows to memory
+#if defined(__sparc_v9__) || defined(__sparcv9__) || defined(__sparcv9)
+ asm volatile("flushw" ::: "memory");
+#else
+ asm volatile("ta 3" ::: "memory");
+#endif
+ // On the SPARC, the return address is not in the frame, it is in a
+ // register. There is no way to access it off of the current frame
+ // pointer, but it can be accessed off the previous frame pointer by
+ // reading the value from the register window save area.
+ uptr prev_bp = GET_CURRENT_FRAME();
+ uptr next_bp = prev_bp;
+ unsigned int i = 0;
+ while (next_bp != bp && IsAligned(next_bp, sizeof(uhwptr)) && i++ < 8) {
+ prev_bp = next_bp;
+ next_bp = (uptr)((uhwptr *)next_bp)[14] + STACK_BIAS;
+ }
+ if (next_bp == bp)
+ bp = prev_bp;
+ // Lowest possible address that makes sense as the next frame pointer.
+ // Goes up as we walk the stack.
+ uptr bottom = stack_bottom;
+  // Avoid an infinite loop when a frame points to itself: each successive
+  // frame pointer must lie strictly above the previous one ("bottom" rises
+  // as we walk the stack).
+ while (IsValidFrame(bp, stack_top, bottom) && IsAligned(bp, sizeof(uhwptr)) &&
+ size < max_depth) {
+ uhwptr pc1 = ((uhwptr *)bp)[15];
+ // Let's assume that any pointer in the 0th page is invalid and
+ // stop unwinding here. If we're adding support for a platform
+ // where this isn't true, we need to reconsider this check.
+ if (pc1 < kPageSize)
+ break;
+ if (pc1 != pc) {
+ // %o7 contains the address of the call instruction and not the
+ // return address, so we need to compensate.
+ trace_buffer[size++] = GetNextInstructionPc((uptr)pc1);
+ }
+ bottom = bp;
+ bp = (uptr)((uhwptr *)bp)[14] + STACK_BIAS;
+ }
+}
+
+} // namespace __sanitizer
+
+#endif  // defined(__sparc__)
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_stacktrace_sparc.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_stoptheworld.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_stoptheworld.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_stoptheworld.h (revision 351984)
@@ -0,0 +1,64 @@
+//===-- sanitizer_stoptheworld.h --------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines the StopTheWorld function which suspends the execution of the current
+// process and runs the user-supplied callback in the same address space.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_STOPTHEWORLD_H
+#define SANITIZER_STOPTHEWORLD_H
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_common.h"
+
+namespace __sanitizer {
+
+enum PtraceRegistersStatus {
+ REGISTERS_UNAVAILABLE_FATAL = -1,
+ REGISTERS_UNAVAILABLE = 0,
+ REGISTERS_AVAILABLE = 1
+};
+
+// Holds the list of suspended threads and provides an interface to dump their
+// register contexts.
+class SuspendedThreadsList {
+ public:
+ SuspendedThreadsList() = default;
+
+ // Can't declare pure virtual functions in sanitizer runtimes:
+ // __cxa_pure_virtual might be unavailable. Use UNIMPLEMENTED() instead.
+ virtual PtraceRegistersStatus GetRegistersAndSP(uptr index, uptr *buffer,
+ uptr *sp) const {
+ UNIMPLEMENTED();
+ }
+
+ // The buffer in GetRegistersAndSP should be at least this big.
+ virtual uptr RegisterCount() const { UNIMPLEMENTED(); }
+ virtual uptr ThreadCount() const { UNIMPLEMENTED(); }
+ virtual tid_t GetThreadID(uptr index) const { UNIMPLEMENTED(); }
+
+ private:
+ // Prohibit copy and assign.
+ SuspendedThreadsList(const SuspendedThreadsList&);
+ void operator=(const SuspendedThreadsList&);
+};
+
+typedef void (*StopTheWorldCallback)(
+ const SuspendedThreadsList &suspended_threads_list,
+ void *argument);
+
+// Suspends all threads in the current process and runs the callback on the
+// list of suspended threads. This function resumes the threads before
+// returning. The callback should not call any libc functions. The callback
+// must not call exit() or _exit(); it must return to the caller instead.
+// This function should NOT be called from multiple threads simultaneously.
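+//
+// A minimal usage sketch (the callback and its argument are hypothetical):
+//   static void PrintTids(const SuspendedThreadsList &threads, void *) {
+//     for (uptr i = 0; i < threads.ThreadCount(); i++)
+//       Printf("suspended thread %llu\n", (u64)threads.GetThreadID(i));
+//   }
+//   StopTheWorld(PrintTids, nullptr);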
+void StopTheWorld(StopTheWorldCallback callback, void *argument);
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_STOPTHEWORLD_H
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_stoptheworld_linux_libcdep.cc (revision 351984)
@@ -0,0 +1,572 @@
+//===-- sanitizer_stoptheworld_linux_libcdep.cc ---------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// See sanitizer_stoptheworld.h for details.
+// This implementation was inspired by Markus Gutschke's linuxthreads.cc.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+
+#if SANITIZER_LINUX && (defined(__x86_64__) || defined(__mips__) || \
+ defined(__aarch64__) || defined(__powerpc64__) || \
+ defined(__s390__) || defined(__i386__) || \
+ defined(__arm__))
+
+#include "sanitizer_stoptheworld.h"
+
+#include "sanitizer_platform_limits_posix.h"
+#include "sanitizer_atomic.h"
+
+#include <errno.h>
+#include <sched.h> // for CLONE_* definitions
+#include <stddef.h>
+#include <sys/prctl.h> // for PR_* definitions
+#include <sys/ptrace.h> // for PTRACE_* definitions
+#include <sys/types.h> // for pid_t
+#include <sys/uio.h> // for iovec
+#include <elf.h> // for NT_PRSTATUS
+#if defined(__aarch64__) && !SANITIZER_ANDROID
+// GLIBC 2.20+ sys/user does not include asm/ptrace.h
+# include <asm/ptrace.h>
+#endif
+#include <sys/user.h> // for user_regs_struct
+#if SANITIZER_ANDROID && SANITIZER_MIPS
+# include <asm/reg.h> // for mips SP register in sys/user.h
+#endif
+#include <sys/wait.h> // for signal-related stuff
+
+#ifdef sa_handler
+# undef sa_handler
+#endif
+
+#ifdef sa_sigaction
+# undef sa_sigaction
+#endif
+
+#include "sanitizer_common.h"
+#include "sanitizer_flags.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_linux.h"
+#include "sanitizer_mutex.h"
+#include "sanitizer_placement_new.h"
+
+// Sufficiently old kernel headers don't provide this value, but we can still
+// call prctl with it. If the runtime kernel is new enough, the prctl call will
+// have the desired effect; if the kernel is too old, the call will error and we
+// can ignore said error.
+#ifndef PR_SET_PTRACER
+#define PR_SET_PTRACER 0x59616d61
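+// (0x59616d61 spells "Yama" in ASCII; the request is consumed by the Yama
+// security module, which restricts which processes may ptrace this one.)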
+#endif
+
+// This module works by spawning a Linux task which then attaches to every
+// thread in the caller process with ptrace. This suspends the threads, and
+// PTRACE_GETREGS can then be used to obtain their register state. The callback
+// supplied to StopTheWorld() is run in the tracer task while the threads are
+// suspended.
+// The tracer task must be placed in a different thread group for ptrace to
+// work, so it cannot be spawned as a pthread. Instead, we use the low-level
+// clone() interface (we want to share the address space with the caller
+// process, so we prefer clone() over fork()).
+//
+// We don't use any libc functions, relying instead on direct syscalls. There
+// are two reasons for this:
+// 1. calling a library function while threads are suspended could cause a
+//    deadlock, if one of the threads happens to be holding a libc lock;
+// 2. it's generally not safe to call libc functions from the tracer task,
+//    because clone() does not set up thread-local storage for it. Any
+//    thread-local variables used by libc will be shared between the tracer
+//    task and the thread which spawned it.
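+//
+// In outline, the spawn performed by StopTheWorld() below is:
+//   uptr tracer_pid = internal_clone(
+//       TracerThread, tracer_stack.Bottom(),
+//       CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_UNTRACED,
+//       &tracer_thread_argument, nullptr, nullptr, nullptr);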
+
+namespace __sanitizer {
+
+class SuspendedThreadsListLinux : public SuspendedThreadsList {
+ public:
+ SuspendedThreadsListLinux() { thread_ids_.reserve(1024); }
+
+ tid_t GetThreadID(uptr index) const;
+ uptr ThreadCount() const;
+ bool ContainsTid(tid_t thread_id) const;
+ void Append(tid_t tid);
+
+ PtraceRegistersStatus GetRegistersAndSP(uptr index, uptr *buffer,
+ uptr *sp) const;
+ uptr RegisterCount() const;
+
+ private:
+ InternalMmapVector<tid_t> thread_ids_;
+};
+
+// Structure for passing arguments into the tracer thread.
+struct TracerThreadArgument {
+ StopTheWorldCallback callback;
+ void *callback_argument;
+ // The tracer thread waits on this mutex while the parent finishes its
+ // preparations.
+ BlockingMutex mutex;
+ // Tracer thread signals its completion by setting done.
+ atomic_uintptr_t done;
+ uptr parent_pid;
+};
+
+// This class handles thread suspending/unsuspending in the tracer thread.
+class ThreadSuspender {
+ public:
+ explicit ThreadSuspender(pid_t pid, TracerThreadArgument *arg)
+ : arg(arg)
+ , pid_(pid) {
+ CHECK_GE(pid, 0);
+ }
+ bool SuspendAllThreads();
+ void ResumeAllThreads();
+ void KillAllThreads();
+ SuspendedThreadsListLinux &suspended_threads_list() {
+ return suspended_threads_list_;
+ }
+ TracerThreadArgument *arg;
+ private:
+ SuspendedThreadsListLinux suspended_threads_list_;
+ pid_t pid_;
+ bool SuspendThread(tid_t thread_id);
+};
+
+bool ThreadSuspender::SuspendThread(tid_t tid) {
+ // Are we already attached to this thread?
+ // Currently this check takes linear time, however the number of threads is
+ // usually small.
+ if (suspended_threads_list_.ContainsTid(tid)) return false;
+ int pterrno;
+ if (internal_iserror(internal_ptrace(PTRACE_ATTACH, tid, nullptr, nullptr),
+ &pterrno)) {
+ // Either the thread is dead, or something prevented us from attaching.
+ // Log this event and move on.
+ VReport(1, "Could not attach to thread %zu (errno %d).\n", (uptr)tid,
+ pterrno);
+ return false;
+ } else {
+ VReport(2, "Attached to thread %zu.\n", (uptr)tid);
+    // The thread is not guaranteed to stop before ptrace returns, so we must
+    // wait on it. Note: if the thread receives a signal concurrently,
+    // we can get a notification about the signal before the notification
+    // about the stop. In such a case we need to forward the signal to the
+    // thread; otherwise the signal will be missed (as we do PTRACE_DETACH
+    // with arg=0) and any logic relying on signals will break. After
+    // forwarding we need to keep waiting for the stop, because the thread is
+    // not stopped yet. We do ignore delivery of SIGSTOP, because we want to
+    // make stop-the-world as invisible as possible.
+ for (;;) {
+ int status;
+ uptr waitpid_status;
+ HANDLE_EINTR(waitpid_status, internal_waitpid(tid, &status, __WALL));
+ int wperrno;
+ if (internal_iserror(waitpid_status, &wperrno)) {
+        // Got an ECHILD error. I don't think this situation is possible, but
+        // it doesn't hurt to report it.
+ VReport(1, "Waiting on thread %zu failed, detaching (errno %d).\n",
+ (uptr)tid, wperrno);
+ internal_ptrace(PTRACE_DETACH, tid, nullptr, nullptr);
+ return false;
+ }
+ if (WIFSTOPPED(status) && WSTOPSIG(status) != SIGSTOP) {
+ internal_ptrace(PTRACE_CONT, tid, nullptr,
+ (void*)(uptr)WSTOPSIG(status));
+ continue;
+ }
+ break;
+ }
+ suspended_threads_list_.Append(tid);
+ return true;
+ }
+}
+
+void ThreadSuspender::ResumeAllThreads() {
+ for (uptr i = 0; i < suspended_threads_list_.ThreadCount(); i++) {
+ pid_t tid = suspended_threads_list_.GetThreadID(i);
+ int pterrno;
+ if (!internal_iserror(internal_ptrace(PTRACE_DETACH, tid, nullptr, nullptr),
+ &pterrno)) {
+ VReport(2, "Detached from thread %d.\n", tid);
+ } else {
+ // Either the thread is dead, or we are already detached.
+ // The latter case is possible, for instance, if this function was called
+ // from a signal handler.
+ VReport(1, "Could not detach from thread %d (errno %d).\n", tid, pterrno);
+ }
+ }
+}
+
+void ThreadSuspender::KillAllThreads() {
+ for (uptr i = 0; i < suspended_threads_list_.ThreadCount(); i++)
+ internal_ptrace(PTRACE_KILL, suspended_threads_list_.GetThreadID(i),
+ nullptr, nullptr);
+}
+
+bool ThreadSuspender::SuspendAllThreads() {
+ ThreadLister thread_lister(pid_);
+ bool retry = true;
+ InternalMmapVector<tid_t> threads;
+ threads.reserve(128);
+ for (int i = 0; i < 30 && retry; ++i) {
+ retry = false;
+ switch (thread_lister.ListThreads(&threads)) {
+ case ThreadLister::Error:
+ ResumeAllThreads();
+ return false;
+ case ThreadLister::Incomplete:
+ retry = true;
+ break;
+ case ThreadLister::Ok:
+ break;
+ }
+ for (tid_t tid : threads)
+ if (SuspendThread(tid))
+ retry = true;
+  }
+ return suspended_threads_list_.ThreadCount();
+}
+
+// Pointer to the ThreadSuspender instance for use in signal handler.
+static ThreadSuspender *thread_suspender_instance = nullptr;
+
+// Synchronous signals that should not be blocked.
+static const int kSyncSignals[] = { SIGABRT, SIGILL, SIGFPE, SIGSEGV, SIGBUS,
+ SIGXCPU, SIGXFSZ };
+
+static void TracerThreadDieCallback() {
+ // Generally a call to Die() in the tracer thread should be fatal to the
+ // parent process as well, because they share the address space.
+ // This really only works correctly if all the threads are suspended at this
+ // point. So we correctly handle calls to Die() from within the callback, but
+ // not those that happen before or after the callback. Hopefully there aren't
+ // a lot of opportunities for that to happen...
+ ThreadSuspender *inst = thread_suspender_instance;
+ if (inst && stoptheworld_tracer_pid == internal_getpid()) {
+ inst->KillAllThreads();
+ thread_suspender_instance = nullptr;
+ }
+}
+
+// Signal handler to wake up suspended threads when the tracer thread dies.
+static void TracerThreadSignalHandler(int signum, __sanitizer_siginfo *siginfo,
+ void *uctx) {
+ SignalContext ctx(siginfo, uctx);
+ Printf("Tracer caught signal %d: addr=0x%zx pc=0x%zx sp=0x%zx\n", signum,
+ ctx.addr, ctx.pc, ctx.sp);
+ ThreadSuspender *inst = thread_suspender_instance;
+ if (inst) {
+ if (signum == SIGABRT)
+ inst->KillAllThreads();
+ else
+ inst->ResumeAllThreads();
+ RAW_CHECK(RemoveDieCallback(TracerThreadDieCallback));
+ thread_suspender_instance = nullptr;
+ atomic_store(&inst->arg->done, 1, memory_order_relaxed);
+ }
+ internal__exit((signum == SIGABRT) ? 1 : 2);
+}
+
+// Size of alternative stack for signal handlers in the tracer thread.
+static const int kHandlerStackSize = 8192;
+
+// This function will be run as a cloned task.
+static int TracerThread(void* argument) {
+ TracerThreadArgument *tracer_thread_argument =
+ (TracerThreadArgument *)argument;
+
+ internal_prctl(PR_SET_PDEATHSIG, SIGKILL, 0, 0, 0);
+ // Check if parent is already dead.
+ if (internal_getppid() != tracer_thread_argument->parent_pid)
+ internal__exit(4);
+
+ // Wait for the parent thread to finish preparations.
+ tracer_thread_argument->mutex.Lock();
+ tracer_thread_argument->mutex.Unlock();
+
+ RAW_CHECK(AddDieCallback(TracerThreadDieCallback));
+
+ ThreadSuspender thread_suspender(internal_getppid(), tracer_thread_argument);
+ // Global pointer for the signal handler.
+ thread_suspender_instance = &thread_suspender;
+
+ // Alternate stack for signal handling.
+ InternalMmapVector<char> handler_stack_memory(kHandlerStackSize);
+ stack_t handler_stack;
+ internal_memset(&handler_stack, 0, sizeof(handler_stack));
+ handler_stack.ss_sp = handler_stack_memory.data();
+ handler_stack.ss_size = kHandlerStackSize;
+ internal_sigaltstack(&handler_stack, nullptr);
+
+ // Install our handler for synchronous signals. Other signals should be
+ // blocked by the mask we inherited from the parent thread.
+ for (uptr i = 0; i < ARRAY_SIZE(kSyncSignals); i++) {
+ __sanitizer_sigaction act;
+ internal_memset(&act, 0, sizeof(act));
+ act.sigaction = TracerThreadSignalHandler;
+ act.sa_flags = SA_ONSTACK | SA_SIGINFO;
+ internal_sigaction_norestorer(kSyncSignals[i], &act, 0);
+ }
+
+ int exit_code = 0;
+ if (!thread_suspender.SuspendAllThreads()) {
+ VReport(1, "Failed suspending threads.\n");
+ exit_code = 3;
+ } else {
+ tracer_thread_argument->callback(thread_suspender.suspended_threads_list(),
+ tracer_thread_argument->callback_argument);
+ thread_suspender.ResumeAllThreads();
+ exit_code = 0;
+ }
+ RAW_CHECK(RemoveDieCallback(TracerThreadDieCallback));
+ thread_suspender_instance = nullptr;
+ atomic_store(&tracer_thread_argument->done, 1, memory_order_relaxed);
+ return exit_code;
+}
+
+class ScopedStackSpaceWithGuard {
+ public:
+ explicit ScopedStackSpaceWithGuard(uptr stack_size) {
+ stack_size_ = stack_size;
+ guard_size_ = GetPageSizeCached();
+ // FIXME: Omitting MAP_STACK here works in current kernels but might break
+ // in the future.
+ guard_start_ = (uptr)MmapOrDie(stack_size_ + guard_size_,
+ "ScopedStackWithGuard");
+ CHECK(MprotectNoAccess((uptr)guard_start_, guard_size_));
+ }
+ ~ScopedStackSpaceWithGuard() {
+ UnmapOrDie((void *)guard_start_, stack_size_ + guard_size_);
+ }
+ void *Bottom() const {
+ return (void *)(guard_start_ + stack_size_ + guard_size_);
+ }
+
+ private:
+ uptr stack_size_;
+ uptr guard_size_;
+ uptr guard_start_;
+};
+
+// We have a limitation on the stack frame size, so some stuff had to be moved
+// into globals.
+static __sanitizer_sigset_t blocked_sigset;
+static __sanitizer_sigset_t old_sigset;
+
+class StopTheWorldScope {
+ public:
+ StopTheWorldScope() {
+ // Make this process dumpable. Processes that are not dumpable cannot be
+ // attached to.
+ process_was_dumpable_ = internal_prctl(PR_GET_DUMPABLE, 0, 0, 0, 0);
+ if (!process_was_dumpable_)
+ internal_prctl(PR_SET_DUMPABLE, 1, 0, 0, 0);
+ }
+
+ ~StopTheWorldScope() {
+ // Restore the dumpable flag.
+ if (!process_was_dumpable_)
+ internal_prctl(PR_SET_DUMPABLE, 0, 0, 0, 0);
+ }
+
+ private:
+ int process_was_dumpable_;
+};
+
+// When sanitizer output is being redirected to a file (e.g. by using
+// log_path), the tracer should write to the parent's log instead of trying to
+// open a new file. Alert the logging code to the fact that we have a tracer.
+struct ScopedSetTracerPID {
+ explicit ScopedSetTracerPID(uptr tracer_pid) {
+ stoptheworld_tracer_pid = tracer_pid;
+ stoptheworld_tracer_ppid = internal_getpid();
+ }
+ ~ScopedSetTracerPID() {
+ stoptheworld_tracer_pid = 0;
+ stoptheworld_tracer_ppid = 0;
+ }
+};
+
+void StopTheWorld(StopTheWorldCallback callback, void *argument) {
+ StopTheWorldScope in_stoptheworld;
+ // Prepare the arguments for TracerThread.
+ struct TracerThreadArgument tracer_thread_argument;
+ tracer_thread_argument.callback = callback;
+ tracer_thread_argument.callback_argument = argument;
+ tracer_thread_argument.parent_pid = internal_getpid();
+ atomic_store(&tracer_thread_argument.done, 0, memory_order_relaxed);
+ const uptr kTracerStackSize = 2 * 1024 * 1024;
+ ScopedStackSpaceWithGuard tracer_stack(kTracerStackSize);
+ // Block the execution of TracerThread until after we have set ptrace
+ // permissions.
+ tracer_thread_argument.mutex.Lock();
+  // Signal handling story.
+  // We don't want async signals to be delivered to the tracer thread,
+  // so we block all async signals before creating the thread. An async signal
+  // handler can temporarily modify errno, which is shared with this thread.
+  // We ought to use pthread_sigmask here, because sigprocmask has undefined
+  // behavior in multithreaded programs. However, on Linux sigprocmask is
+  // equivalent to pthread_sigmask with the exception that pthread_sigmask
+  // does not allow blocking some signals used internally by the pthread
+  // implementation. We are fine with blocking them here; we are really not
+  // going to pthread_cancel the thread.
+  // The tracer thread should not raise any synchronous signals. But in case it
+  // does, we set up a special handler for sync signals that properly kills the
+  // parent as well. Note: we don't pass CLONE_SIGHAND to clone, so handlers
+  // in the tracer thread won't interfere with the user program. Double note:
+  // if a user does something along the lines of 'kill -11 pid', that can kill
+  // the process even if the user has set up their own handler for SIGSEGV.
+  // Thing to watch out for: this code should not change the behavior of user
+  // code in any observable way. In particular it should not override user
+  // signal handlers.
+ internal_sigfillset(&blocked_sigset);
+ for (uptr i = 0; i < ARRAY_SIZE(kSyncSignals); i++)
+ internal_sigdelset(&blocked_sigset, kSyncSignals[i]);
+ int rv = internal_sigprocmask(SIG_BLOCK, &blocked_sigset, &old_sigset);
+ CHECK_EQ(rv, 0);
+ uptr tracer_pid = internal_clone(
+ TracerThread, tracer_stack.Bottom(),
+ CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_UNTRACED,
+ &tracer_thread_argument, nullptr /* parent_tidptr */,
+ nullptr /* newtls */, nullptr /* child_tidptr */);
+ internal_sigprocmask(SIG_SETMASK, &old_sigset, 0);
+ int local_errno = 0;
+ if (internal_iserror(tracer_pid, &local_errno)) {
+ VReport(1, "Failed spawning a tracer thread (errno %d).\n", local_errno);
+ tracer_thread_argument.mutex.Unlock();
+ } else {
+ ScopedSetTracerPID scoped_set_tracer_pid(tracer_pid);
+ // On some systems we have to explicitly declare that we want to be traced
+ // by the tracer thread.
+ internal_prctl(PR_SET_PTRACER, tracer_pid, 0, 0, 0);
+ // Allow the tracer thread to start.
+ tracer_thread_argument.mutex.Unlock();
+    // NOTE: errno is shared between this thread and the tracer thread.
+    // internal_waitpid() may call syscall() which can access/spoil errno,
+    // so we can't call it now. Instead we wait for the tracer thread to finish
+    // using the spin loop below. The man page for sched_yield() says "In the
+    // Linux implementation, sched_yield() always succeeds", so let's hope it
+    // does not spoil errno. Note that this spin loop runs only for brief
+    // periods before the tracer thread has suspended us and when it starts
+    // unblocking threads.
+ while (atomic_load(&tracer_thread_argument.done, memory_order_relaxed) == 0)
+ sched_yield();
+ // Now the tracer thread is about to exit and does not touch errno,
+ // wait for it.
+ for (;;) {
+ uptr waitpid_status = internal_waitpid(tracer_pid, nullptr, __WALL);
+ if (!internal_iserror(waitpid_status, &local_errno))
+ break;
+ if (local_errno == EINTR)
+ continue;
+ VReport(1, "Waiting on the tracer thread failed (errno %d).\n",
+ local_errno);
+ break;
+ }
+ }
+}
+
+// Platform-specific methods from SuspendedThreadsList.
+#if SANITIZER_ANDROID && defined(__arm__)
+typedef pt_regs regs_struct;
+#define REG_SP ARM_sp
+
+#elif SANITIZER_LINUX && defined(__arm__)
+typedef user_regs regs_struct;
+#define REG_SP uregs[13]
+
+#elif defined(__i386__) || defined(__x86_64__)
+typedef user_regs_struct regs_struct;
+#if defined(__i386__)
+#define REG_SP esp
+#else
+#define REG_SP rsp
+#endif
+
+#elif defined(__powerpc__) || defined(__powerpc64__)
+typedef pt_regs regs_struct;
+#define REG_SP gpr[PT_R1]
+
+#elif defined(__mips__)
+typedef struct user regs_struct;
+# if SANITIZER_ANDROID
+# define REG_SP regs[EF_R29]
+# else
+# define REG_SP regs[EF_REG29]
+# endif
+
+#elif defined(__aarch64__)
+typedef struct user_pt_regs regs_struct;
+#define REG_SP sp
+#define ARCH_IOVEC_FOR_GETREGSET
+
+#elif defined(__s390__)
+typedef _user_regs_struct regs_struct;
+#define REG_SP gprs[15]
+#define ARCH_IOVEC_FOR_GETREGSET
+
+#else
+#error "Unsupported architecture"
+#endif // SANITIZER_ANDROID && defined(__arm__)
+
+tid_t SuspendedThreadsListLinux::GetThreadID(uptr index) const {
+ CHECK_LT(index, thread_ids_.size());
+ return thread_ids_[index];
+}
+
+uptr SuspendedThreadsListLinux::ThreadCount() const {
+ return thread_ids_.size();
+}
+
+bool SuspendedThreadsListLinux::ContainsTid(tid_t thread_id) const {
+ for (uptr i = 0; i < thread_ids_.size(); i++) {
+ if (thread_ids_[i] == thread_id) return true;
+ }
+ return false;
+}
+
+void SuspendedThreadsListLinux::Append(tid_t tid) {
+ thread_ids_.push_back(tid);
+}
+
+PtraceRegistersStatus SuspendedThreadsListLinux::GetRegistersAndSP(
+ uptr index, uptr *buffer, uptr *sp) const {
+ pid_t tid = GetThreadID(index);
+ regs_struct regs;
+ int pterrno;
+#ifdef ARCH_IOVEC_FOR_GETREGSET
+ struct iovec regset_io;
+ regset_io.iov_base = &regs;
+ regset_io.iov_len = sizeof(regs_struct);
+ bool isErr = internal_iserror(internal_ptrace(PTRACE_GETREGSET, tid,
+ (void*)NT_PRSTATUS, (void*)&regset_io),
+ &pterrno);
+#else
+ bool isErr = internal_iserror(internal_ptrace(PTRACE_GETREGS, tid, nullptr,
+ &regs), &pterrno);
+#endif
+ if (isErr) {
+ VReport(1, "Could not get registers from thread %d (errno %d).\n", tid,
+ pterrno);
+ // ESRCH means that the given thread is not suspended or already dead.
+ // Therefore it's unsafe to inspect its data (e.g. walk through stack) and
+ // we should notify caller about this.
+ return pterrno == ESRCH ? REGISTERS_UNAVAILABLE_FATAL
+ : REGISTERS_UNAVAILABLE;
+ }
+
+ *sp = regs.REG_SP;
+ internal_memcpy(buffer, &regs, sizeof(regs));
+ return REGISTERS_AVAILABLE;
+}
+
+uptr SuspendedThreadsListLinux::RegisterCount() const {
+ return sizeof(regs_struct) / sizeof(uptr);
+}
+} // namespace __sanitizer
+
+#endif // SANITIZER_LINUX && (defined(__x86_64__) || defined(__mips__)
+ // || defined(__aarch64__) || defined(__powerpc64__)
+ // || defined(__s390__) || defined(__i386__) || defined(__arm__)
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_stoptheworld_mac.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_stoptheworld_mac.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_stoptheworld_mac.cc (revision 351984)
@@ -0,0 +1,177 @@
+//===-- sanitizer_stoptheworld_mac.cc -------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// See sanitizer_stoptheworld.h for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+
+#if SANITIZER_MAC && (defined(__x86_64__) || defined(__aarch64__) || \
+ defined(__i386))
+
+#include <mach/mach.h>
+#include <mach/thread_info.h>
+#include <pthread.h>
+
+#include "sanitizer_stoptheworld.h"
+
+namespace __sanitizer {
+typedef struct {
+ tid_t tid;
+ thread_t thread;
+} SuspendedThreadInfo;
+
+class SuspendedThreadsListMac : public SuspendedThreadsList {
+ public:
+ SuspendedThreadsListMac() : threads_(1024) {}
+
+ tid_t GetThreadID(uptr index) const;
+ thread_t GetThread(uptr index) const;
+ uptr ThreadCount() const;
+ bool ContainsThread(thread_t thread) const;
+ void Append(thread_t thread);
+
+ PtraceRegistersStatus GetRegistersAndSP(uptr index, uptr *buffer,
+ uptr *sp) const;
+ uptr RegisterCount() const;
+
+ private:
+ InternalMmapVector<SuspendedThreadInfo> threads_;
+};
+
+struct RunThreadArgs {
+ StopTheWorldCallback callback;
+ void *argument;
+};
+
+void RunThread(void *arg) {
+ struct RunThreadArgs *run_args = (struct RunThreadArgs *)arg;
+ SuspendedThreadsListMac suspended_threads_list;
+
+ thread_array_t threads;
+ mach_msg_type_number_t num_threads;
+ kern_return_t err = task_threads(mach_task_self(), &threads, &num_threads);
+ if (err != KERN_SUCCESS) {
+ VReport(1, "Failed to get threads for task (errno %d).\n", err);
+ return;
+ }
+
+ thread_t thread_self = mach_thread_self();
+ for (unsigned int i = 0; i < num_threads; ++i) {
+ if (threads[i] == thread_self) continue;
+
+ thread_suspend(threads[i]);
+ suspended_threads_list.Append(threads[i]);
+ }
+
+ run_args->callback(suspended_threads_list, run_args->argument);
+
+ uptr num_suspended = suspended_threads_list.ThreadCount();
+ for (unsigned int i = 0; i < num_suspended; ++i) {
+ thread_resume(suspended_threads_list.GetThread(i));
+ }
+}
+
+void StopTheWorld(StopTheWorldCallback callback, void *argument) {
+ struct RunThreadArgs arg = {callback, argument};
+ pthread_t run_thread = (pthread_t)internal_start_thread(RunThread, &arg);
+ internal_join_thread(run_thread);
+}
+
+#if defined(__x86_64__)
+typedef x86_thread_state64_t regs_struct;
+
+#define SP_REG __rsp
+
+#elif defined(__aarch64__)
+typedef arm_thread_state64_t regs_struct;
+
+# if __DARWIN_UNIX03
+# define SP_REG __sp
+# else
+# define SP_REG sp
+# endif
+
+#elif defined(__i386)
+typedef x86_thread_state32_t regs_struct;
+
+#define SP_REG __esp
+
+#else
+#error "Unsupported architecture"
+#endif
+
+tid_t SuspendedThreadsListMac::GetThreadID(uptr index) const {
+ CHECK_LT(index, threads_.size());
+ return threads_[index].tid;
+}
+
+thread_t SuspendedThreadsListMac::GetThread(uptr index) const {
+ CHECK_LT(index, threads_.size());
+ return threads_[index].thread;
+}
+
+uptr SuspendedThreadsListMac::ThreadCount() const {
+ return threads_.size();
+}
+
+bool SuspendedThreadsListMac::ContainsThread(thread_t thread) const {
+ for (uptr i = 0; i < threads_.size(); i++) {
+ if (threads_[i].thread == thread) return true;
+ }
+ return false;
+}
+
+void SuspendedThreadsListMac::Append(thread_t thread) {
+ thread_identifier_info_data_t info;
+ mach_msg_type_number_t info_count = THREAD_IDENTIFIER_INFO_COUNT;
+ kern_return_t err = thread_info(thread, THREAD_IDENTIFIER_INFO,
+ (thread_info_t)&info, &info_count);
+ if (err != KERN_SUCCESS) {
+ VReport(1, "Error - unable to get thread ident for a thread\n");
+ return;
+ }
+ threads_.push_back({info.thread_id, thread});
+}
+
+PtraceRegistersStatus SuspendedThreadsListMac::GetRegistersAndSP(
+ uptr index, uptr *buffer, uptr *sp) const {
+ thread_t thread = GetThread(index);
+ regs_struct regs;
+ int err;
+ mach_msg_type_number_t reg_count = MACHINE_THREAD_STATE_COUNT;
+ err = thread_get_state(thread, MACHINE_THREAD_STATE, (thread_state_t)&regs,
+ &reg_count);
+ if (err != KERN_SUCCESS) {
+ VReport(1, "Error - unable to get registers for a thread\n");
+ // KERN_INVALID_ARGUMENT indicates that either the flavor is invalid,
+ // or the thread does not exist. The other possible error case,
+ // MIG_ARRAY_TOO_LARGE, means that the state is too large, but it's
+ // still safe to proceed.
+ return err == KERN_INVALID_ARGUMENT ? REGISTERS_UNAVAILABLE_FATAL
+ : REGISTERS_UNAVAILABLE;
+ }
+
+ internal_memcpy(buffer, &regs, sizeof(regs));
+ *sp = regs.SP_REG;
+
+ // On x86_64 and aarch64, we must account for the stack redzone, which is 128
+ // bytes.
+ if (SANITIZER_WORDSIZE == 64) *sp -= 128;
+
+ return REGISTERS_AVAILABLE;
+}
+
+uptr SuspendedThreadsListMac::RegisterCount() const {
+ return MACHINE_THREAD_STATE_COUNT;
+}
+} // namespace __sanitizer
+
+#endif  // SANITIZER_MAC && (defined(__x86_64__) || defined(__aarch64__) ||
+        // defined(__i386))
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_stoptheworld_mac.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_stoptheworld_netbsd_libcdep.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_stoptheworld_netbsd_libcdep.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_stoptheworld_netbsd_libcdep.cc (revision 351984)
@@ -0,0 +1,356 @@
+//===-- sanitizer_stoptheworld_netbsd_libcdep.cc --------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// See sanitizer_stoptheworld.h for details.
+// This implementation was inspired by Markus Gutschke's linuxthreads.cc.
+//
+// This is a NetBSD variation of the Linux stoptheworld implementation.
+// See sanitizer_stoptheworld_linux_libcdep.cc for code comments.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+
+#if SANITIZER_NETBSD
+
+#include "sanitizer_stoptheworld.h"
+
+#include "sanitizer_atomic.h"
+#include "sanitizer_platform_limits_posix.h"
+
+#include <sys/types.h>
+
+#include <sys/ptrace.h>
+#include <sys/uio.h>
+#include <sys/wait.h>
+
+#include <machine/reg.h>
+
+#include <elf.h>
+#include <errno.h>
+#include <sched.h>
+#include <signal.h>
+#include <stddef.h>
+
+#define internal_sigaction_norestorer internal_sigaction
+
+#include "sanitizer_common.h"
+#include "sanitizer_flags.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_linux.h"
+#include "sanitizer_mutex.h"
+#include "sanitizer_placement_new.h"
+
+namespace __sanitizer {
+
+class SuspendedThreadsListNetBSD : public SuspendedThreadsList {
+ public:
+ SuspendedThreadsListNetBSD() { thread_ids_.reserve(1024); }
+
+ tid_t GetThreadID(uptr index) const;
+ uptr ThreadCount() const;
+ bool ContainsTid(tid_t thread_id) const;
+ void Append(tid_t tid);
+
+ PtraceRegistersStatus GetRegistersAndSP(uptr index, uptr *buffer,
+ uptr *sp) const;
+ uptr RegisterCount() const;
+
+ private:
+ InternalMmapVector<tid_t> thread_ids_;
+};
+
+struct TracerThreadArgument {
+ StopTheWorldCallback callback;
+ void *callback_argument;
+ BlockingMutex mutex;
+ atomic_uintptr_t done;
+ uptr parent_pid;
+};
+
+class ThreadSuspender {
+ public:
+ explicit ThreadSuspender(pid_t pid, TracerThreadArgument *arg)
+ : arg(arg), pid_(pid) {
+ CHECK_GE(pid, 0);
+ }
+ bool SuspendAllThreads();
+ void ResumeAllThreads();
+ void KillAllThreads();
+ SuspendedThreadsListNetBSD &suspended_threads_list() {
+ return suspended_threads_list_;
+ }
+ TracerThreadArgument *arg;
+
+ private:
+ SuspendedThreadsListNetBSD suspended_threads_list_;
+ pid_t pid_;
+};
+
+void ThreadSuspender::ResumeAllThreads() {
+ int pterrno;
+ if (!internal_iserror(internal_ptrace(PT_DETACH, pid_, (void *)(uptr)1, 0),
+ &pterrno)) {
+ VReport(2, "Detached from process %d.\n", pid_);
+ } else {
+ VReport(1, "Could not detach from process %d (errno %d).\n", pid_, pterrno);
+ }
+}
+
+void ThreadSuspender::KillAllThreads() {
+ internal_ptrace(PT_KILL, pid_, nullptr, 0);
+}
+
+bool ThreadSuspender::SuspendAllThreads() {
+ int pterrno;
+ if (internal_iserror(internal_ptrace(PT_ATTACH, pid_, nullptr, 0),
+ &pterrno)) {
+ Printf("Could not attach to process %d (errno %d).\n", pid_, pterrno);
+ return false;
+ }
+
+ int status;
+ uptr waitpid_status;
+ HANDLE_EINTR(waitpid_status, internal_waitpid(pid_, &status, 0));
+
+ VReport(2, "Attached to process %d.\n", pid_);
+
+ struct ptrace_lwpinfo pl;
+ int val;
+ pl.pl_lwpid = 0;
+ while ((val = ptrace(PT_LWPINFO, pid_, (void *)&pl, sizeof(pl))) != -1 &&
+ pl.pl_lwpid != 0) {
+ suspended_threads_list_.Append(pl.pl_lwpid);
+ VReport(2, "Appended thread %d in process %d.\n", pl.pl_lwpid, pid_);
+ }
+ return true;
+}
+
+// Pointer to the ThreadSuspender instance for use in signal handler.
+static ThreadSuspender *thread_suspender_instance = nullptr;
+
+// Synchronous signals that should not be blocked.
+static const int kSyncSignals[] = {SIGABRT, SIGILL, SIGFPE, SIGSEGV,
+ SIGBUS, SIGXCPU, SIGXFSZ};
+
+static void TracerThreadDieCallback() {
+ ThreadSuspender *inst = thread_suspender_instance;
+ if (inst && stoptheworld_tracer_pid == internal_getpid()) {
+ inst->KillAllThreads();
+ thread_suspender_instance = nullptr;
+ }
+}
+
+// Signal handler to wake up suspended threads when the tracer thread dies.
+static void TracerThreadSignalHandler(int signum, __sanitizer_siginfo *siginfo,
+ void *uctx) {
+ SignalContext ctx(siginfo, uctx);
+ Printf("Tracer caught signal %d: addr=0x%zx pc=0x%zx sp=0x%zx\n", signum,
+ ctx.addr, ctx.pc, ctx.sp);
+ ThreadSuspender *inst = thread_suspender_instance;
+ if (inst) {
+ if (signum == SIGABRT)
+ inst->KillAllThreads();
+ else
+ inst->ResumeAllThreads();
+ RAW_CHECK(RemoveDieCallback(TracerThreadDieCallback));
+ thread_suspender_instance = nullptr;
+ atomic_store(&inst->arg->done, 1, memory_order_relaxed);
+ }
+ internal__exit((signum == SIGABRT) ? 1 : 2);
+}
+
+// Size of alternative stack for signal handlers in the tracer thread.
+static const int kHandlerStackSize = 8192;
+
+// This function will be run as a cloned task.
+static int TracerThread(void *argument) {
+ TracerThreadArgument *tracer_thread_argument =
+ (TracerThreadArgument *)argument;
+
+ // Check if parent is already dead.
+ if (internal_getppid() != tracer_thread_argument->parent_pid)
+ internal__exit(4);
+
+ // Wait for the parent thread to finish preparations.
+ tracer_thread_argument->mutex.Lock();
+ tracer_thread_argument->mutex.Unlock();
+
+ RAW_CHECK(AddDieCallback(TracerThreadDieCallback));
+
+ ThreadSuspender thread_suspender(internal_getppid(), tracer_thread_argument);
+ // Global pointer for the signal handler.
+ thread_suspender_instance = &thread_suspender;
+
+ // Alternate stack for signal handling.
+ InternalMmapVector<char> handler_stack_memory(kHandlerStackSize);
+ stack_t handler_stack;
+ internal_memset(&handler_stack, 0, sizeof(handler_stack));
+ handler_stack.ss_sp = handler_stack_memory.data();
+ handler_stack.ss_size = kHandlerStackSize;
+ internal_sigaltstack(&handler_stack, nullptr);
+
+ // Install our handler for synchronous signals. Other signals should be
+ // blocked by the mask we inherited from the parent thread.
+ for (uptr i = 0; i < ARRAY_SIZE(kSyncSignals); i++) {
+ __sanitizer_sigaction act;
+ internal_memset(&act, 0, sizeof(act));
+ act.sigaction = TracerThreadSignalHandler;
+ act.sa_flags = SA_ONSTACK | SA_SIGINFO;
+ internal_sigaction_norestorer(kSyncSignals[i], &act, 0);
+ }
+
+ int exit_code = 0;
+ if (!thread_suspender.SuspendAllThreads()) {
+ VReport(1, "Failed suspending threads.\n");
+ exit_code = 3;
+ } else {
+ tracer_thread_argument->callback(thread_suspender.suspended_threads_list(),
+ tracer_thread_argument->callback_argument);
+ thread_suspender.ResumeAllThreads();
+ exit_code = 0;
+ }
+ RAW_CHECK(RemoveDieCallback(TracerThreadDieCallback));
+ thread_suspender_instance = nullptr;
+ atomic_store(&tracer_thread_argument->done, 1, memory_order_relaxed);
+ return exit_code;
+}
+
+class ScopedStackSpaceWithGuard {
+ public:
+ explicit ScopedStackSpaceWithGuard(uptr stack_size) {
+ stack_size_ = stack_size;
+ guard_size_ = GetPageSizeCached();
+ // FIXME: Omitting MAP_STACK here works in current kernels but might break
+ // in the future.
+ guard_start_ =
+ (uptr)MmapOrDie(stack_size_ + guard_size_, "ScopedStackWithGuard");
+ CHECK(MprotectNoAccess((uptr)guard_start_, guard_size_));
+ }
+ ~ScopedStackSpaceWithGuard() {
+ UnmapOrDie((void *)guard_start_, stack_size_ + guard_size_);
+ }
+ void *Bottom() const {
+ return (void *)(guard_start_ + stack_size_ + guard_size_);
+ }
+
+ private:
+ uptr stack_size_;
+ uptr guard_size_;
+ uptr guard_start_;
+};
+
+static __sanitizer_sigset_t blocked_sigset;
+static __sanitizer_sigset_t old_sigset;
+
+struct ScopedSetTracerPID {
+ explicit ScopedSetTracerPID(uptr tracer_pid) {
+ stoptheworld_tracer_pid = tracer_pid;
+ stoptheworld_tracer_ppid = internal_getpid();
+ }
+ ~ScopedSetTracerPID() {
+ stoptheworld_tracer_pid = 0;
+ stoptheworld_tracer_ppid = 0;
+ }
+};
+
+void StopTheWorld(StopTheWorldCallback callback, void *argument) {
+ // Prepare the arguments for TracerThread.
+ struct TracerThreadArgument tracer_thread_argument;
+ tracer_thread_argument.callback = callback;
+ tracer_thread_argument.callback_argument = argument;
+ tracer_thread_argument.parent_pid = internal_getpid();
+ atomic_store(&tracer_thread_argument.done, 0, memory_order_relaxed);
+ const uptr kTracerStackSize = 2 * 1024 * 1024;
+ ScopedStackSpaceWithGuard tracer_stack(kTracerStackSize);
+
+ tracer_thread_argument.mutex.Lock();
+
+ internal_sigfillset(&blocked_sigset);
+ for (uptr i = 0; i < ARRAY_SIZE(kSyncSignals); i++)
+ internal_sigdelset(&blocked_sigset, kSyncSignals[i]);
+ int rv = internal_sigprocmask(SIG_BLOCK, &blocked_sigset, &old_sigset);
+ CHECK_EQ(rv, 0);
+ uptr tracer_pid = internal_clone(TracerThread, tracer_stack.Bottom(),
+ CLONE_VM | CLONE_FS | CLONE_FILES,
+ &tracer_thread_argument);
+ internal_sigprocmask(SIG_SETMASK, &old_sigset, 0);
+ int local_errno = 0;
+ if (internal_iserror(tracer_pid, &local_errno)) {
+ VReport(1, "Failed spawning a tracer thread (errno %d).\n", local_errno);
+ tracer_thread_argument.mutex.Unlock();
+ } else {
+ ScopedSetTracerPID scoped_set_tracer_pid(tracer_pid);
+
+ tracer_thread_argument.mutex.Unlock();
+
+ while (atomic_load(&tracer_thread_argument.done, memory_order_relaxed) == 0)
+ sched_yield();
+
+ for (;;) {
+ uptr waitpid_status = internal_waitpid(tracer_pid, nullptr, __WALL);
+ if (!internal_iserror(waitpid_status, &local_errno))
+ break;
+ if (local_errno == EINTR)
+ continue;
+ VReport(1, "Waiting on the tracer thread failed (errno %d).\n",
+ local_errno);
+ break;
+ }
+ }
+}
+
+tid_t SuspendedThreadsListNetBSD::GetThreadID(uptr index) const {
+ CHECK_LT(index, thread_ids_.size());
+ return thread_ids_[index];
+}
+
+uptr SuspendedThreadsListNetBSD::ThreadCount() const {
+ return thread_ids_.size();
+}
+
+bool SuspendedThreadsListNetBSD::ContainsTid(tid_t thread_id) const {
+ for (uptr i = 0; i < thread_ids_.size(); i++) {
+ if (thread_ids_[i] == thread_id)
+ return true;
+ }
+ return false;
+}
+
+void SuspendedThreadsListNetBSD::Append(tid_t tid) {
+ thread_ids_.push_back(tid);
+}
+
+PtraceRegistersStatus SuspendedThreadsListNetBSD::GetRegistersAndSP(
+ uptr index, uptr *buffer, uptr *sp) const {
+ lwpid_t tid = GetThreadID(index);
+ pid_t ppid = internal_getppid();
+ struct reg regs;
+ int pterrno;
+ bool isErr =
+ internal_iserror(internal_ptrace(PT_GETREGS, ppid, &regs, tid), &pterrno);
+ if (isErr) {
+ VReport(1,
+ "Could not get registers from process %d thread %d (errno %d).\n",
+ ppid, tid, pterrno);
+ return pterrno == ESRCH ? REGISTERS_UNAVAILABLE_FATAL
+ : REGISTERS_UNAVAILABLE;
+ }
+
+ *sp = PTRACE_REG_SP(&regs);
+ internal_memcpy(buffer, &regs, sizeof(regs));
+
+ return REGISTERS_AVAILABLE;
+}
+
+uptr SuspendedThreadsListNetBSD::RegisterCount() const {
+ return sizeof(struct reg) / sizeof(uptr);
+}
+} // namespace __sanitizer
+
+#endif
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_suppressions.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_suppressions.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_suppressions.cc (revision 351984)
@@ -0,0 +1,181 @@
+//===-- sanitizer_suppressions.cc -----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Suppression parsing/matching code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_suppressions.h"
+
+#include "sanitizer_allocator_internal.h"
+#include "sanitizer_common.h"
+#include "sanitizer_flags.h"
+#include "sanitizer_file.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_placement_new.h"
+
+namespace __sanitizer {
+
+SuppressionContext::SuppressionContext(const char *suppression_types[],
+ int suppression_types_num)
+ : suppression_types_(suppression_types),
+ suppression_types_num_(suppression_types_num),
+ can_parse_(true) {
+ CHECK_LE(suppression_types_num_, kMaxSuppressionTypes);
+ internal_memset(has_suppression_type_, 0, suppression_types_num_);
+}
+
+#if !SANITIZER_FUCHSIA
+static bool GetPathAssumingFileIsRelativeToExec(const char *file_path,
+ /*out*/char *new_file_path,
+ uptr new_file_path_size) {
+ InternalScopedString exec(kMaxPathLength);
+ if (ReadBinaryNameCached(exec.data(), exec.size())) {
+ const char *file_name_pos = StripModuleName(exec.data());
+ uptr path_to_exec_len = file_name_pos - exec.data();
+ internal_strncat(new_file_path, exec.data(),
+ Min(path_to_exec_len, new_file_path_size - 1));
+ internal_strncat(new_file_path, file_path,
+ new_file_path_size - internal_strlen(new_file_path) - 1);
+ return true;
+ }
+ return false;
+}
+
+static const char *FindFile(const char *file_path,
+ /*out*/char *new_file_path,
+ uptr new_file_path_size) {
+ // If we cannot find the file, check if its location is relative to
+ // the location of the executable.
+ if (!FileExists(file_path) && !IsAbsolutePath(file_path) &&
+ GetPathAssumingFileIsRelativeToExec(file_path, new_file_path,
+ new_file_path_size)) {
+ return new_file_path;
+ }
+ return file_path;
+}
+#else
+static const char *FindFile(const char *file_path, char *, uptr) {
+ return file_path;
+}
+#endif
+
+void SuppressionContext::ParseFromFile(const char *filename) {
+ if (filename[0] == '\0')
+ return;
+
+ InternalScopedString new_file_path(kMaxPathLength);
+ filename = FindFile(filename, new_file_path.data(), new_file_path.size());
+
+ // Read the file.
+ VPrintf(1, "%s: reading suppressions file at %s\n",
+ SanitizerToolName, filename);
+ char *file_contents;
+ uptr buffer_size;
+ uptr contents_size;
+ if (!ReadFileToBuffer(filename, &file_contents, &buffer_size,
+ &contents_size)) {
+ Printf("%s: failed to read suppressions file '%s'\n", SanitizerToolName,
+ filename);
+ Die();
+ }
+
+ Parse(file_contents);
+}
+
+bool SuppressionContext::Match(const char *str, const char *type,
+ Suppression **s) {
+ can_parse_ = false;
+ if (!HasSuppressionType(type))
+ return false;
+ for (uptr i = 0; i < suppressions_.size(); i++) {
+ Suppression &cur = suppressions_[i];
+ if (0 == internal_strcmp(cur.type, type) && TemplateMatch(cur.templ, str)) {
+ *s = &cur;
+ return true;
+ }
+ }
+ return false;
+}
+
+static const char *StripPrefix(const char *str, const char *prefix) {
+ while (*str && *str == *prefix) {
+ str++;
+ prefix++;
+ }
+ if (!*prefix)
+ return str;
+ return 0;
+}
+
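+// Accepted input is a sequence of "type:template" lines; '#' starts a
+// comment line and surrounding whitespace is trimmed. For example, with a
+// (hypothetical) suppression type "leak", the line
+//   leak:*libfoo*
+// produces a Suppression with type "leak" and template "*libfoo*".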
+void SuppressionContext::Parse(const char *str) {
+ // Context must not mutate once Match has been called.
+ CHECK(can_parse_);
+ const char *line = str;
+ while (line) {
+ while (line[0] == ' ' || line[0] == '\t')
+ line++;
+ const char *end = internal_strchr(line, '\n');
+ if (end == 0)
+ end = line + internal_strlen(line);
+ if (line != end && line[0] != '#') {
+ const char *end2 = end;
+ while (line != end2 &&
+ (end2[-1] == ' ' || end2[-1] == '\t' || end2[-1] == '\r'))
+ end2--;
+ int type;
+ for (type = 0; type < suppression_types_num_; type++) {
+ const char *next_char = StripPrefix(line, suppression_types_[type]);
+ if (next_char && *next_char == ':') {
+ line = ++next_char;
+ break;
+ }
+ }
+ if (type == suppression_types_num_) {
+ Printf("%s: failed to parse suppressions\n", SanitizerToolName);
+ Die();
+ }
+ Suppression s;
+ s.type = suppression_types_[type];
+ s.templ = (char*)InternalAlloc(end2 - line + 1);
+ internal_memcpy(s.templ, line, end2 - line);
+ s.templ[end2 - line] = 0;
+ suppressions_.push_back(s);
+ has_suppression_type_[type] = true;
+ }
+ if (end[0] == 0)
+ break;
+ line = end + 1;
+ }
+}
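+
+// Illustrative input (not part of the original source): with suppression
+// types {"race", "leak"}, Parse() accepts text like the following, where
+// '#' starts a comment line and everything after "<type>:" is the template:
+//
+//   # ignore known issues in third-party code
+//   race:ThirdPartyLock
+//   leak:*libfoo.so
+//
+// Each matching line becomes one Suppression with the given type and templ;
+// a line whose type is not in suppression_types_ aborts via Die().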
+
+uptr SuppressionContext::SuppressionCount() const {
+ return suppressions_.size();
+}
+
+bool SuppressionContext::HasSuppressionType(const char *type) const {
+ for (int i = 0; i < suppression_types_num_; i++) {
+ if (0 == internal_strcmp(type, suppression_types_[i]))
+ return has_suppression_type_[i];
+ }
+ return false;
+}
+
+const Suppression *SuppressionContext::SuppressionAt(uptr i) const {
+ CHECK_LT(i, suppressions_.size());
+ return &suppressions_[i];
+}
+
+void SuppressionContext::GetMatched(
+ InternalMmapVector<Suppression *> *matched) {
+ for (uptr i = 0; i < suppressions_.size(); i++)
+ if (atomic_load_relaxed(&suppressions_[i].hit_count))
+ matched->push_back(&suppressions_[i]);
+}
+
+} // namespace __sanitizer
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_suppressions.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_suppressions.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_suppressions.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_suppressions.h (revision 351984)
@@ -0,0 +1,56 @@
+//===-- sanitizer_suppressions.h --------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Suppression parsing/matching code.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_SUPPRESSIONS_H
+#define SANITIZER_SUPPRESSIONS_H
+
+#include "sanitizer_common.h"
+#include "sanitizer_atomic.h"
+#include "sanitizer_internal_defs.h"
+
+namespace __sanitizer {
+
+struct Suppression {
+ Suppression() { internal_memset(this, 0, sizeof(*this)); }
+ const char *type;
+ char *templ;
+ atomic_uint32_t hit_count;
+ uptr weight;
+};
+
+class SuppressionContext {
+ public:
+ // Create new SuppressionContext capable of parsing given suppression types.
+  SuppressionContext(const char *suppression_types[],
+ int suppression_types_num);
+
+ void ParseFromFile(const char *filename);
+ void Parse(const char *str);
+
+ bool Match(const char *str, const char *type, Suppression **s);
+ uptr SuppressionCount() const;
+ bool HasSuppressionType(const char *type) const;
+ const Suppression *SuppressionAt(uptr i) const;
+ void GetMatched(InternalMmapVector<Suppression *> *matched);
+
+ private:
+ static const int kMaxSuppressionTypes = 32;
+ const char **const suppression_types_;
+ const int suppression_types_num_;
+
+ InternalMmapVector<Suppression> suppressions_;
+ bool has_suppression_type_[kMaxSuppressionTypes];
+ bool can_parse_;
+};
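+
+// A minimal usage sketch (illustrative only; the type list and file path are
+// hypothetical):
+//
+//   static const char *kMyTypes[] = {"race", "leak"};
+//   SuppressionContext ctx(kMyTypes, 2);
+//   ctx.ParseFromFile("/path/to/suppressions.txt");
+//   Suppression *s = nullptr;
+//   if (ctx.Match("ThirdPartyLock", "race", &s))
+//     atomic_fetch_add(&s->hit_count, 1, memory_order_relaxed);
+//
+// Match() sets can_parse_ to false, so all Parse*() calls must happen before
+// the first Match().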
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_SUPPRESSIONS_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_suppressions.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_symbolizer.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_symbolizer.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_symbolizer.cc (revision 351984)
@@ -0,0 +1,129 @@
+//===-- sanitizer_symbolizer.cc -------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_allocator_internal.h"
+#include "sanitizer_platform.h"
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_placement_new.h"
+#include "sanitizer_symbolizer_internal.h"
+
+namespace __sanitizer {
+
+AddressInfo::AddressInfo() {
+ internal_memset(this, 0, sizeof(AddressInfo));
+ function_offset = kUnknown;
+}
+
+void AddressInfo::Clear() {
+ InternalFree(module);
+ InternalFree(function);
+ InternalFree(file);
+ internal_memset(this, 0, sizeof(AddressInfo));
+ function_offset = kUnknown;
+}
+
+void AddressInfo::FillModuleInfo(const char *mod_name, uptr mod_offset,
+ ModuleArch mod_arch) {
+ module = internal_strdup(mod_name);
+ module_offset = mod_offset;
+ module_arch = mod_arch;
+}
+
+SymbolizedStack::SymbolizedStack() : next(nullptr), info() {}
+
+SymbolizedStack *SymbolizedStack::New(uptr addr) {
+ void *mem = InternalAlloc(sizeof(SymbolizedStack));
+ SymbolizedStack *res = new(mem) SymbolizedStack();
+ res->info.address = addr;
+ return res;
+}
+
+void SymbolizedStack::ClearAll() {
+ info.Clear();
+ if (next)
+ next->ClearAll();
+ InternalFree(this);
+}
+
+DataInfo::DataInfo() {
+ internal_memset(this, 0, sizeof(DataInfo));
+}
+
+void DataInfo::Clear() {
+ InternalFree(module);
+ InternalFree(file);
+ InternalFree(name);
+ internal_memset(this, 0, sizeof(DataInfo));
+}
+
+void FrameInfo::Clear() {
+ InternalFree(module);
+ for (LocalInfo &local : locals) {
+ InternalFree(local.function_name);
+ InternalFree(local.name);
+ InternalFree(local.decl_file);
+ }
+ locals.clear();
+}
+
+Symbolizer *Symbolizer::symbolizer_;
+StaticSpinMutex Symbolizer::init_mu_;
+LowLevelAllocator Symbolizer::symbolizer_allocator_;
+
+void Symbolizer::InvalidateModuleList() {
+ modules_fresh_ = false;
+}
+
+void Symbolizer::AddHooks(Symbolizer::StartSymbolizationHook start_hook,
+ Symbolizer::EndSymbolizationHook end_hook) {
+ CHECK(start_hook_ == 0 && end_hook_ == 0);
+ start_hook_ = start_hook;
+ end_hook_ = end_hook;
+}
+
+const char *Symbolizer::ModuleNameOwner::GetOwnedCopy(const char *str) {
+ mu_->CheckLocked();
+
+  // 'str' is often the same string as in the previous call; optimize this
+  // case.
+ if (last_match_ && !internal_strcmp(last_match_, str))
+ return last_match_;
+
+ // FIXME: this is linear search.
+ // We should optimize this further if this turns out to be a bottleneck later.
+ for (uptr i = 0; i < storage_.size(); ++i) {
+ if (!internal_strcmp(storage_[i], str)) {
+ last_match_ = storage_[i];
+ return last_match_;
+ }
+ }
+ last_match_ = internal_strdup(str);
+ storage_.push_back(last_match_);
+ return last_match_;
+}
+
+Symbolizer::Symbolizer(IntrusiveList<SymbolizerTool> tools)
+ : module_names_(&mu_), modules_(), modules_fresh_(false), tools_(tools),
+ start_hook_(0), end_hook_(0) {}
+
+Symbolizer::SymbolizerScope::SymbolizerScope(const Symbolizer *sym)
+ : sym_(sym) {
+ if (sym_->start_hook_)
+ sym_->start_hook_();
+}
+
+Symbolizer::SymbolizerScope::~SymbolizerScope() {
+ if (sym_->end_hook_)
+ sym_->end_hook_();
+}
+
+} // namespace __sanitizer
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_symbolizer.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_symbolizer.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_symbolizer.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_symbolizer.h (revision 351984)
@@ -0,0 +1,220 @@
+//===-- sanitizer_symbolizer.h ----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Symbolizer is used by sanitizers to map an instruction address to a
+// location in source code at run-time. Symbolizer either uses the
+// __sanitizer_symbolize_* functions defined in the program, or (if they are
+// missing) tries to find and launch the "llvm-symbolizer" command-line tool
+// in a separate process and communicate with it.
+//
+// Generally we should try to avoid calling system library functions during
+// symbolization (and use their replacements from sanitizer_libc.h instead).
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_SYMBOLIZER_H
+#define SANITIZER_SYMBOLIZER_H
+
+#include "sanitizer_common.h"
+#include "sanitizer_mutex.h"
+#include "sanitizer_vector.h"
+
+namespace __sanitizer {
+
+struct AddressInfo {
+ // Owns all the string members. Storage for them is
+ // (de)allocated using sanitizer internal allocator.
+ uptr address;
+
+ char *module;
+ uptr module_offset;
+ ModuleArch module_arch;
+
+ static const uptr kUnknown = ~(uptr)0;
+ char *function;
+ uptr function_offset;
+
+ char *file;
+ int line;
+ int column;
+
+ AddressInfo();
+ // Deletes all strings and resets all fields.
+ void Clear();
+ void FillModuleInfo(const char *mod_name, uptr mod_offset, ModuleArch arch);
+};
+
+// Linked list of symbolized frames (each frame is described by AddressInfo).
+struct SymbolizedStack {
+ SymbolizedStack *next;
+ AddressInfo info;
+ static SymbolizedStack *New(uptr addr);
+ // Deletes current, and all subsequent frames in the linked list.
+ // The object cannot be accessed after the call to this function.
+ void ClearAll();
+
+ private:
+ SymbolizedStack();
+};
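+
+// A minimal traversal sketch (illustrative only): walk every inlined frame
+// produced by Symbolizer::SymbolizePC() and release the whole chain:
+//
+//   SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(pc);
+//   for (SymbolizedStack *cur = frames; cur; cur = cur->next)
+//     Printf("%s %s:%d\n", cur->info.function ? cur->info.function : "??",
+//            cur->info.file ? cur->info.file : "??", cur->info.line);
+//   frames->ClearAll();  // Frees every node; the chain is dead after this.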
+
+// For now, DataInfo is used to describe a global variable.
+struct DataInfo {
+ // Owns all the string members. Storage for them is
+ // (de)allocated using sanitizer internal allocator.
+ char *module;
+ uptr module_offset;
+ ModuleArch module_arch;
+
+ char *file;
+ uptr line;
+ char *name;
+ uptr start;
+ uptr size;
+
+ DataInfo();
+ void Clear();
+};
+
+struct LocalInfo {
+ char *function_name = nullptr;
+ char *name = nullptr;
+ char *decl_file = nullptr;
+ unsigned decl_line = 0;
+
+ bool has_frame_offset = false;
+ bool has_size = false;
+ bool has_tag_offset = false;
+
+ sptr frame_offset;
+ uptr size;
+ uptr tag_offset;
+
+ void Clear();
+};
+
+struct FrameInfo {
+ char *module;
+ uptr module_offset;
+ ModuleArch module_arch;
+
+ InternalMmapVector<LocalInfo> locals;
+ void Clear();
+};
+
+class SymbolizerTool;
+
+class Symbolizer final {
+ public:
+ /// Initialize and return platform-specific implementation of symbolizer
+ /// (if it wasn't already initialized).
+ static Symbolizer *GetOrInit();
+ static void LateInitialize();
+ // Returns a list of symbolized frames for a given address (containing
+ // all inlined functions, if necessary).
+ SymbolizedStack *SymbolizePC(uptr address);
+ bool SymbolizeData(uptr address, DataInfo *info);
+ bool SymbolizeFrame(uptr address, FrameInfo *info);
+
+ // The module names Symbolizer returns are stable and unique for every given
+ // module. It is safe to store and compare them as pointers.
+ bool GetModuleNameAndOffsetForPC(uptr pc, const char **module_name,
+ uptr *module_address);
+ const char *GetModuleNameForPc(uptr pc) {
+ const char *module_name = nullptr;
+ uptr unused;
+ if (GetModuleNameAndOffsetForPC(pc, &module_name, &unused))
+ return module_name;
+ return nullptr;
+ }
+
+ // Release internal caches (if any).
+ void Flush();
+ // Attempts to demangle the provided C++ mangled name.
+ const char *Demangle(const char *name);
+
+ // Allow user to install hooks that would be called before/after Symbolizer
+ // does the actual file/line info fetching. Specific sanitizers may need this
+ // to distinguish system library calls made in user code from calls made
+ // during in-process symbolization.
+ typedef void (*StartSymbolizationHook)();
+ typedef void (*EndSymbolizationHook)();
+ // May be called at most once.
+ void AddHooks(StartSymbolizationHook start_hook,
+ EndSymbolizationHook end_hook);
+
+ void RefreshModules();
+ const LoadedModule *FindModuleForAddress(uptr address);
+
+ void InvalidateModuleList();
+
+ private:
+ // GetModuleNameAndOffsetForPC has to return a string to the caller.
+ // Since the corresponding module might get unloaded later, we should create
+ // our owned copies of the strings that we can safely return.
+  // ModuleNameOwner does not provide any synchronization, so calls to its
+  // methods should be protected by |mu_|.
+ class ModuleNameOwner {
+ public:
+ explicit ModuleNameOwner(BlockingMutex *synchronized_by)
+ : last_match_(nullptr), mu_(synchronized_by) {
+ storage_.reserve(kInitialCapacity);
+ }
+ const char *GetOwnedCopy(const char *str);
+
+ private:
+ static const uptr kInitialCapacity = 1000;
+ InternalMmapVector<const char*> storage_;
+ const char *last_match_;
+
+ BlockingMutex *mu_;
+ } module_names_;
+
+ /// Platform-specific function for creating a Symbolizer object.
+ static Symbolizer *PlatformInit();
+
+ bool FindModuleNameAndOffsetForAddress(uptr address, const char **module_name,
+ uptr *module_offset,
+ ModuleArch *module_arch);
+ ListOfModules modules_;
+ ListOfModules fallback_modules_;
+ // If stale, need to reload the modules before looking up addresses.
+ bool modules_fresh_;
+
+ // Platform-specific default demangler, must not return nullptr.
+ const char *PlatformDemangle(const char *name);
+
+ static Symbolizer *symbolizer_;
+ static StaticSpinMutex init_mu_;
+
+ // Mutex locked from public methods of |Symbolizer|, so that the internals
+ // (including individual symbolizer tools and platform-specific methods) are
+ // always synchronized.
+ BlockingMutex mu_;
+
+ IntrusiveList<SymbolizerTool> tools_;
+
+ explicit Symbolizer(IntrusiveList<SymbolizerTool> tools);
+
+ static LowLevelAllocator symbolizer_allocator_;
+
+ StartSymbolizationHook start_hook_;
+ EndSymbolizationHook end_hook_;
+ class SymbolizerScope {
+ public:
+ explicit SymbolizerScope(const Symbolizer *sym);
+ ~SymbolizerScope();
+ private:
+ const Symbolizer *sym_;
+ };
+};
+
+#ifdef SANITIZER_WINDOWS
+void InitializeDbgHelpIfNeeded();
+#endif
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_SYMBOLIZER_H
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_symbolizer_fuchsia.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_symbolizer_fuchsia.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_symbolizer_fuchsia.h (revision 351984)
@@ -0,0 +1,42 @@
+//===-- sanitizer_symbolizer_fuchsia.h -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between various sanitizers' runtime libraries.
+//
+// Define Fuchsia's string formats and limits for the markup symbolizer.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_SYMBOLIZER_FUCHSIA_H
+#define SANITIZER_SYMBOLIZER_FUCHSIA_H
+
+#include "sanitizer_internal_defs.h"
+
+namespace __sanitizer {
+
+// See the spec at:
+// https://fuchsia.googlesource.com/zircon/+/master/docs/symbolizer_markup.md
+
+// This is used by UBSan for type names, and by ASan for global variable names.
+constexpr const char *kFormatDemangle = "{{{symbol:%s}}}";
+constexpr uptr kFormatDemangleMax = 1024; // Arbitrary.
+
+// Function name or equivalent from PC location.
+constexpr const char *kFormatFunction = "{{{pc:%p}}}";
+constexpr uptr kFormatFunctionMax = 64; // More than big enough for 64-bit hex.
+
+// Global variable name or equivalent from data memory address.
+constexpr const char *kFormatData = "{{{data:%p}}}";
+
+// One frame in a backtrace (printed on a line by itself).
+constexpr const char *kFormatFrame = "{{{bt:%u:%p}}}";
+
+// Dump trigger element.
+#define FORMAT_DUMPFILE "{{{dumpfile:%s:%s}}}"
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_SYMBOLIZER_FUCHSIA_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_symbolizer_fuchsia.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_symbolizer_internal.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_symbolizer_internal.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_symbolizer_internal.h (revision 351984)
@@ -0,0 +1,157 @@
+//===-- sanitizer_symbolizer_internal.h -------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Header for internal classes and functions to be used by implementations of
+// symbolizers.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_SYMBOLIZER_INTERNAL_H
+#define SANITIZER_SYMBOLIZER_INTERNAL_H
+
+#include "sanitizer_symbolizer.h"
+#include "sanitizer_file.h"
+#include "sanitizer_vector.h"
+
+namespace __sanitizer {
+
+// Parsing helpers: 'str' is searched for delimiter(s) and a string or uptr
+// is extracted. When extracting a string, a newly allocated (using
+// InternalAlloc) and null-terminated buffer is returned. These helpers return
+// a pointer to the next character after the found delimiter.
+const char *ExtractToken(const char *str, const char *delims, char **result);
+const char *ExtractInt(const char *str, const char *delims, int *result);
+const char *ExtractUptr(const char *str, const char *delims, uptr *result);
+const char *ExtractTokenUpToDelimiter(const char *str, const char *delimiter,
+ char **result);
+
+const char *DemangleSwiftAndCXX(const char *name);
+
+// SymbolizerTool is an interface that is implemented by individual "tools"
+// that can perform symbolication (external llvm-symbolizer, libbacktrace,
+// Windows DbgHelp symbolizer, etc.).
+class SymbolizerTool {
+ public:
+ // The main |Symbolizer| class implements a "fallback chain" of symbolizer
+ // tools. In a request to symbolize an address, if one tool returns false,
+ // the next tool in the chain will be tried.
+ SymbolizerTool *next;
+
+ SymbolizerTool() : next(nullptr) { }
+
+ // Can't declare pure virtual functions in sanitizer runtimes:
+ // __cxa_pure_virtual might be unavailable.
+
+ // The |stack| parameter is inout. It is pre-filled with the address,
+ // module base and module offset values and is to be used to construct
+ // other stack frames.
+ virtual bool SymbolizePC(uptr addr, SymbolizedStack *stack) {
+ UNIMPLEMENTED();
+ }
+
+ // The |info| parameter is inout. It is pre-filled with the module base
+ // and module offset values.
+ virtual bool SymbolizeData(uptr addr, DataInfo *info) {
+ UNIMPLEMENTED();
+ }
+
+ virtual bool SymbolizeFrame(uptr addr, FrameInfo *info) {
+ return false;
+ }
+
+ virtual void Flush() {}
+
+  // Return nullptr to fall back to the default platform-specific demangler.
+ virtual const char *Demangle(const char *name) {
+ return nullptr;
+ }
+};
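+
+// A minimal tool sketch (illustrative only; "NoOpSymbolizer" is
+// hypothetical). Returning false defers to the next tool in the chain:
+//
+//   class NoOpSymbolizer : public SymbolizerTool {
+//    public:
+//     bool SymbolizePC(uptr addr, SymbolizedStack *stack) override {
+//       return false;  // Let the next tool in the fallback chain try.
+//     }
+//     bool SymbolizeData(uptr addr, DataInfo *info) override { return false; }
+//   };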
+
+// SymbolizerProcess encapsulates communication between the tool and an
+// external symbolizer program running in a separate subprocess.
+// SymbolizerProcess may not be used from two threads simultaneously.
+class SymbolizerProcess {
+ public:
+ explicit SymbolizerProcess(const char *path, bool use_forkpty = false);
+ const char *SendCommand(const char *command);
+
+ protected:
+ virtual bool ReachedEndOfOutput(const char *buffer, uptr length) const {
+ UNIMPLEMENTED();
+ }
+
+ /// The maximum number of arguments required to invoke a tool process.
+ enum { kArgVMax = 6 };
+
+ /// Fill in an argv array to invoke the child process.
+ virtual void GetArgV(const char *path_to_binary,
+ const char *(&argv)[kArgVMax]) const {
+ UNIMPLEMENTED();
+ }
+
+ virtual bool ReadFromSymbolizer(char *buffer, uptr max_length);
+
+ private:
+ bool Restart();
+ const char *SendCommandImpl(const char *command);
+ bool WriteToSymbolizer(const char *buffer, uptr length);
+ bool StartSymbolizerSubprocess();
+
+ const char *path_;
+ fd_t input_fd_;
+ fd_t output_fd_;
+
+ static const uptr kBufferSize = 16 * 1024;
+ char buffer_[kBufferSize];
+
+ static const uptr kMaxTimesRestarted = 5;
+ static const int kSymbolizerStartupTimeMillis = 10;
+ uptr times_restarted_;
+ bool failed_to_start_;
+ bool reported_invalid_path_;
+ bool use_forkpty_;
+};
+
+class LLVMSymbolizerProcess;
+
+// This tool invokes llvm-symbolizer in a subprocess. It should be as portable
+// as the llvm-symbolizer tool is.
+class LLVMSymbolizer : public SymbolizerTool {
+ public:
+ explicit LLVMSymbolizer(const char *path, LowLevelAllocator *allocator);
+
+ bool SymbolizePC(uptr addr, SymbolizedStack *stack) override;
+ bool SymbolizeData(uptr addr, DataInfo *info) override;
+ bool SymbolizeFrame(uptr addr, FrameInfo *info) override;
+
+ private:
+ const char *FormatAndSendCommand(const char *command_prefix,
+ const char *module_name, uptr module_offset,
+ ModuleArch arch);
+
+ LLVMSymbolizerProcess *symbolizer_process_;
+ static const uptr kBufferSize = 16 * 1024;
+ char buffer_[kBufferSize];
+};
+
+// Parses one or more two-line strings in the following format:
+// <function_name>
+// <file_name>:<line_number>[:<column_number>]
+// Used by LLVMSymbolizer, Addr2LinePool and InternalSymbolizer, since all of
+// them use the same output format.
+void ParseSymbolizePCOutput(const char *str, SymbolizedStack *res);
+
+// Parses a two-line string in the following format:
+// <symbol_name>
+// <start_address> <size>
+// Used by LLVMSymbolizer and InternalSymbolizer.
+void ParseSymbolizeDataOutput(const char *str, DataInfo *info);
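+
+// Illustrative inputs (not from the original source):
+//
+//   ParseSymbolizePCOutput("main\n/src/app.cc:12:3\n\n", stack);
+//     // one frame: function "main", file "/src/app.cc", line 12, column 3
+//   ParseSymbolizeDataOutput("g_counter\n4096 8\n", &info);
+//     // name "g_counter", start 4096, size 8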
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_SYMBOLIZER_INTERNAL_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_symbolizer_internal.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_symbolizer_libbacktrace.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_symbolizer_libbacktrace.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_symbolizer_libbacktrace.cc (revision 351984)
@@ -0,0 +1,209 @@
+//===-- sanitizer_symbolizer_libbacktrace.cc ------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries.
+// Libbacktrace implementation of symbolizer parts.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_symbolizer.h"
+#include "sanitizer_symbolizer_libbacktrace.h"
+
+#if SANITIZER_LIBBACKTRACE
+# include "backtrace-supported.h"
+# if SANITIZER_POSIX && BACKTRACE_SUPPORTED && !BACKTRACE_USES_MALLOC
+# include "backtrace.h"
+# if SANITIZER_CP_DEMANGLE
+# undef ARRAY_SIZE
+# include "demangle.h"
+# endif
+# else
+# define SANITIZER_LIBBACKTRACE 0
+# endif
+#endif
+
+namespace __sanitizer {
+
+static char *DemangleAlloc(const char *name, bool always_alloc);
+
+#if SANITIZER_LIBBACKTRACE
+
+namespace {
+
+# if SANITIZER_CP_DEMANGLE
+struct CplusV3DemangleData {
+ char *buf;
+ uptr size, allocated;
+};
+
+extern "C" {
+static void CplusV3DemangleCallback(const char *s, size_t l, void *vdata) {
+ CplusV3DemangleData *data = (CplusV3DemangleData *)vdata;
+ uptr needed = data->size + l + 1;
+ if (needed > data->allocated) {
+ data->allocated *= 2;
+ if (needed > data->allocated)
+ data->allocated = needed;
+ char *buf = (char *)InternalAlloc(data->allocated);
+ if (data->buf) {
+ internal_memcpy(buf, data->buf, data->size);
+ InternalFree(data->buf);
+ }
+ data->buf = buf;
+ }
+ internal_memcpy(data->buf + data->size, s, l);
+ data->buf[data->size + l] = '\0';
+ data->size += l;
+}
+} // extern "C"
+
+char *CplusV3Demangle(const char *name) {
+ CplusV3DemangleData data;
+ data.buf = 0;
+ data.size = 0;
+ data.allocated = 0;
+ if (cplus_demangle_v3_callback(name, DMGL_PARAMS | DMGL_ANSI,
+ CplusV3DemangleCallback, &data)) {
+ if (data.size + 64 > data.allocated)
+ return data.buf;
+ char *buf = internal_strdup(data.buf);
+ InternalFree(data.buf);
+ return buf;
+ }
+ if (data.buf)
+ InternalFree(data.buf);
+ return 0;
+}
+# endif // SANITIZER_CP_DEMANGLE
+
+struct SymbolizeCodeCallbackArg {
+ SymbolizedStack *first;
+ SymbolizedStack *last;
+ uptr frames_symbolized;
+
+ AddressInfo *get_new_frame(uintptr_t addr) {
+ CHECK(last);
+ if (frames_symbolized > 0) {
+ SymbolizedStack *cur = SymbolizedStack::New(addr);
+ AddressInfo *info = &cur->info;
+ info->FillModuleInfo(first->info.module, first->info.module_offset,
+ first->info.module_arch);
+ last->next = cur;
+ last = cur;
+ }
+ CHECK_EQ(addr, first->info.address);
+ CHECK_EQ(addr, last->info.address);
+ return &last->info;
+ }
+};
+
+extern "C" {
+static int SymbolizeCodePCInfoCallback(void *vdata, uintptr_t addr,
+ const char *filename, int lineno,
+ const char *function) {
+ SymbolizeCodeCallbackArg *cdata = (SymbolizeCodeCallbackArg *)vdata;
+ if (function) {
+ AddressInfo *info = cdata->get_new_frame(addr);
+ info->function = DemangleAlloc(function, /*always_alloc*/ true);
+ if (filename)
+ info->file = internal_strdup(filename);
+ info->line = lineno;
+ cdata->frames_symbolized++;
+ }
+ return 0;
+}
+
+static void SymbolizeCodeCallback(void *vdata, uintptr_t addr,
+ const char *symname, uintptr_t, uintptr_t) {
+ SymbolizeCodeCallbackArg *cdata = (SymbolizeCodeCallbackArg *)vdata;
+ if (symname) {
+ AddressInfo *info = cdata->get_new_frame(addr);
+ info->function = DemangleAlloc(symname, /*always_alloc*/ true);
+ cdata->frames_symbolized++;
+ }
+}
+
+static void SymbolizeDataCallback(void *vdata, uintptr_t, const char *symname,
+ uintptr_t symval, uintptr_t symsize) {
+ DataInfo *info = (DataInfo *)vdata;
+ if (symname && symval) {
+ info->name = DemangleAlloc(symname, /*always_alloc*/ true);
+ info->start = symval;
+ info->size = symsize;
+ }
+}
+
+static void ErrorCallback(void *, const char *, int) {}
+} // extern "C"
+
+} // namespace
+
+LibbacktraceSymbolizer *LibbacktraceSymbolizer::get(LowLevelAllocator *alloc) {
+ // State created in backtrace_create_state is leaked.
+ void *state = (void *)(backtrace_create_state("/proc/self/exe", 0,
+ ErrorCallback, NULL));
+ if (!state)
+ return 0;
+ return new(*alloc) LibbacktraceSymbolizer(state);
+}
+
+bool LibbacktraceSymbolizer::SymbolizePC(uptr addr, SymbolizedStack *stack) {
+ SymbolizeCodeCallbackArg data;
+ data.first = stack;
+ data.last = stack;
+ data.frames_symbolized = 0;
+ backtrace_pcinfo((backtrace_state *)state_, addr, SymbolizeCodePCInfoCallback,
+ ErrorCallback, &data);
+ if (data.frames_symbolized > 0)
+ return true;
+ backtrace_syminfo((backtrace_state *)state_, addr, SymbolizeCodeCallback,
+ ErrorCallback, &data);
+ return (data.frames_symbolized > 0);
+}
+
+bool LibbacktraceSymbolizer::SymbolizeData(uptr addr, DataInfo *info) {
+ backtrace_syminfo((backtrace_state *)state_, addr, SymbolizeDataCallback,
+ ErrorCallback, info);
+ return true;
+}
+
+#else // SANITIZER_LIBBACKTRACE
+
+LibbacktraceSymbolizer *LibbacktraceSymbolizer::get(LowLevelAllocator *alloc) {
+ return 0;
+}
+
+bool LibbacktraceSymbolizer::SymbolizePC(uptr addr, SymbolizedStack *stack) {
+ (void)state_;
+ return false;
+}
+
+bool LibbacktraceSymbolizer::SymbolizeData(uptr addr, DataInfo *info) {
+ return false;
+}
+
+#endif // SANITIZER_LIBBACKTRACE
+
+static char *DemangleAlloc(const char *name, bool always_alloc) {
+#if SANITIZER_LIBBACKTRACE && SANITIZER_CP_DEMANGLE
+ if (char *demangled = CplusV3Demangle(name))
+ return demangled;
+#endif
+ if (always_alloc)
+ return internal_strdup(name);
+ return 0;
+}
+
+const char *LibbacktraceSymbolizer::Demangle(const char *name) {
+ return DemangleAlloc(name, /*always_alloc*/ false);
+}
+
+} // namespace __sanitizer
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_symbolizer_libbacktrace.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_symbolizer_libbacktrace.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_symbolizer_libbacktrace.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_symbolizer_libbacktrace.h (revision 351984)
@@ -0,0 +1,49 @@
+//===-- sanitizer_symbolizer_libbacktrace.h ---------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries.
+// Header for libbacktrace symbolizer.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_SYMBOLIZER_LIBBACKTRACE_H
+#define SANITIZER_SYMBOLIZER_LIBBACKTRACE_H
+
+#include "sanitizer_platform.h"
+#include "sanitizer_common.h"
+#include "sanitizer_allocator_internal.h"
+#include "sanitizer_symbolizer_internal.h"
+
+#ifndef SANITIZER_LIBBACKTRACE
+# define SANITIZER_LIBBACKTRACE 0
+#endif
+
+#ifndef SANITIZER_CP_DEMANGLE
+# define SANITIZER_CP_DEMANGLE 0
+#endif
+
+namespace __sanitizer {
+
+class LibbacktraceSymbolizer : public SymbolizerTool {
+ public:
+ static LibbacktraceSymbolizer *get(LowLevelAllocator *alloc);
+
+ bool SymbolizePC(uptr addr, SymbolizedStack *stack) override;
+
+ bool SymbolizeData(uptr addr, DataInfo *info) override;
+
+ // May return NULL if demangling failed.
+ const char *Demangle(const char *name) override;
+
+ private:
+ explicit LibbacktraceSymbolizer(void *state) : state_(state) {}
+
+ void *state_; // Leaked.
+};
+
+} // namespace __sanitizer
+#endif // SANITIZER_SYMBOLIZER_LIBBACKTRACE_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_symbolizer_libbacktrace.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_symbolizer_libcdep.cc (revision 351984)
@@ -0,0 +1,556 @@
+//===-- sanitizer_symbolizer_libcdep.cc -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_allocator_internal.h"
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_symbolizer_internal.h"
+
+namespace __sanitizer {
+
+Symbolizer *Symbolizer::GetOrInit() {
+ SpinMutexLock l(&init_mu_);
+ if (symbolizer_)
+ return symbolizer_;
+ symbolizer_ = PlatformInit();
+ CHECK(symbolizer_);
+ return symbolizer_;
+}
+
+// See sanitizer_symbolizer_markup.cc.
+#if !SANITIZER_SYMBOLIZER_MARKUP
+
+const char *ExtractToken(const char *str, const char *delims, char **result) {
+ uptr prefix_len = internal_strcspn(str, delims);
+ *result = (char*)InternalAlloc(prefix_len + 1);
+ internal_memcpy(*result, str, prefix_len);
+ (*result)[prefix_len] = '\0';
+ const char *prefix_end = str + prefix_len;
+ if (*prefix_end != '\0') prefix_end++;
+ return prefix_end;
+}
+
+const char *ExtractInt(const char *str, const char *delims, int *result) {
+ char *buff;
+ const char *ret = ExtractToken(str, delims, &buff);
+ if (buff != 0) {
+ *result = (int)internal_atoll(buff);
+ }
+ InternalFree(buff);
+ return ret;
+}
+
+const char *ExtractUptr(const char *str, const char *delims, uptr *result) {
+ char *buff;
+ const char *ret = ExtractToken(str, delims, &buff);
+ if (buff != 0) {
+ *result = (uptr)internal_atoll(buff);
+ }
+ InternalFree(buff);
+ return ret;
+}
+
+const char *ExtractSptr(const char *str, const char *delims, sptr *result) {
+ char *buff;
+ const char *ret = ExtractToken(str, delims, &buff);
+ if (buff != 0) {
+ *result = (sptr)internal_atoll(buff);
+ }
+ InternalFree(buff);
+ return ret;
+}
+
+const char *ExtractTokenUpToDelimiter(const char *str, const char *delimiter,
+ char **result) {
+ const char *found_delimiter = internal_strstr(str, delimiter);
+ uptr prefix_len =
+ found_delimiter ? found_delimiter - str : internal_strlen(str);
+ *result = (char *)InternalAlloc(prefix_len + 1);
+ internal_memcpy(*result, str, prefix_len);
+ (*result)[prefix_len] = '\0';
+ const char *prefix_end = str + prefix_len;
+ if (*prefix_end != '\0') prefix_end += internal_strlen(delimiter);
+ return prefix_end;
+}
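+
+// A worked sketch (illustrative only):
+//
+//   char *tok;
+//   const char *rest = ExtractToken("foo.cc:12", ":", &tok);
+//   // tok == "foo.cc" (InternalAlloc'ed; release with InternalFree),
+//   // rest points at "12".
+//   int line;
+//   ExtractInt(rest, "\n", &line);  // line == 12
+//   InternalFree(tok);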
+
+SymbolizedStack *Symbolizer::SymbolizePC(uptr addr) {
+ BlockingMutexLock l(&mu_);
+ const char *module_name;
+ uptr module_offset;
+ ModuleArch arch;
+ SymbolizedStack *res = SymbolizedStack::New(addr);
+ if (!FindModuleNameAndOffsetForAddress(addr, &module_name, &module_offset,
+ &arch))
+ return res;
+ // Always fill data about module name and offset.
+ res->info.FillModuleInfo(module_name, module_offset, arch);
+ for (auto &tool : tools_) {
+ SymbolizerScope sym_scope(this);
+ if (tool.SymbolizePC(addr, res)) {
+ return res;
+ }
+ }
+ return res;
+}
+
+bool Symbolizer::SymbolizeData(uptr addr, DataInfo *info) {
+ BlockingMutexLock l(&mu_);
+ const char *module_name;
+ uptr module_offset;
+ ModuleArch arch;
+ if (!FindModuleNameAndOffsetForAddress(addr, &module_name, &module_offset,
+ &arch))
+ return false;
+ info->Clear();
+ info->module = internal_strdup(module_name);
+ info->module_offset = module_offset;
+ info->module_arch = arch;
+ for (auto &tool : tools_) {
+ SymbolizerScope sym_scope(this);
+ if (tool.SymbolizeData(addr, info)) {
+ return true;
+ }
+ }
+ return true;
+}
+
+bool Symbolizer::SymbolizeFrame(uptr addr, FrameInfo *info) {
+ BlockingMutexLock l(&mu_);
+ const char *module_name;
+ if (!FindModuleNameAndOffsetForAddress(
+ addr, &module_name, &info->module_offset, &info->module_arch))
+ return false;
+ info->module = internal_strdup(module_name);
+ for (auto &tool : tools_) {
+ SymbolizerScope sym_scope(this);
+ if (tool.SymbolizeFrame(addr, info)) {
+ return true;
+ }
+ }
+ return true;
+}
+
+bool Symbolizer::GetModuleNameAndOffsetForPC(uptr pc, const char **module_name,
+ uptr *module_address) {
+ BlockingMutexLock l(&mu_);
+ const char *internal_module_name = nullptr;
+ ModuleArch arch;
+ if (!FindModuleNameAndOffsetForAddress(pc, &internal_module_name,
+ module_address, &arch))
+ return false;
+
+ if (module_name)
+ *module_name = module_names_.GetOwnedCopy(internal_module_name);
+ return true;
+}
+
+void Symbolizer::Flush() {
+ BlockingMutexLock l(&mu_);
+ for (auto &tool : tools_) {
+ SymbolizerScope sym_scope(this);
+ tool.Flush();
+ }
+}
+
+const char *Symbolizer::Demangle(const char *name) {
+ BlockingMutexLock l(&mu_);
+ for (auto &tool : tools_) {
+ SymbolizerScope sym_scope(this);
+ if (const char *demangled = tool.Demangle(name))
+ return demangled;
+ }
+ return PlatformDemangle(name);
+}
+
+bool Symbolizer::FindModuleNameAndOffsetForAddress(uptr address,
+ const char **module_name,
+ uptr *module_offset,
+ ModuleArch *module_arch) {
+ const LoadedModule *module = FindModuleForAddress(address);
+ if (module == nullptr)
+ return false;
+ *module_name = module->full_name();
+ *module_offset = address - module->base_address();
+ *module_arch = module->arch();
+ return true;
+}
+
+void Symbolizer::RefreshModules() {
+ modules_.init();
+ fallback_modules_.fallbackInit();
+ RAW_CHECK(modules_.size() > 0);
+ modules_fresh_ = true;
+}
+
+static const LoadedModule *SearchForModule(const ListOfModules &modules,
+ uptr address) {
+ for (uptr i = 0; i < modules.size(); i++) {
+ if (modules[i].containsAddress(address)) {
+ return &modules[i];
+ }
+ }
+ return nullptr;
+}
+
+const LoadedModule *Symbolizer::FindModuleForAddress(uptr address) {
+ bool modules_were_reloaded = false;
+ if (!modules_fresh_) {
+ RefreshModules();
+ modules_were_reloaded = true;
+ }
+ const LoadedModule *module = SearchForModule(modules_, address);
+ if (module) return module;
+
+ // dlopen/dlclose interceptors invalidate the module list, but when
+ // interception is disabled, we need to retry if the lookup fails in
+ // case the module list changed.
+#if !SANITIZER_INTERCEPT_DLOPEN_DLCLOSE
+ if (!modules_were_reloaded) {
+ RefreshModules();
+ module = SearchForModule(modules_, address);
+ if (module) return module;
+ }
+#endif
+
+ if (fallback_modules_.size()) {
+ module = SearchForModule(fallback_modules_, address);
+ }
+ return module;
+}
+
+// For now we assume the following protocol:
+// For each request of the form
+// <module_name> <module_offset>
+// passed to STDIN, external symbolizer prints to STDOUT response:
+// <function_name>
+// <file_name>:<line_number>:<column_number>
+// <function_name>
+// <file_name>:<line_number>:<column_number>
+// ...
+// <empty line>
+class LLVMSymbolizerProcess : public SymbolizerProcess {
+ public:
+ explicit LLVMSymbolizerProcess(const char *path) : SymbolizerProcess(path) {}
+
+ private:
+ bool ReachedEndOfOutput(const char *buffer, uptr length) const override {
+ // Empty line marks the end of llvm-symbolizer output.
+ return length >= 2 && buffer[length - 1] == '\n' &&
+ buffer[length - 2] == '\n';
+ }
+
+ // When adding a new architecture, don't forget to also update
+ // script/asan_symbolize.py and sanitizer_common.h.
+ void GetArgV(const char *path_to_binary,
+ const char *(&argv)[kArgVMax]) const override {
+#if defined(__x86_64h__)
+ const char* const kSymbolizerArch = "--default-arch=x86_64h";
+#elif defined(__x86_64__)
+ const char* const kSymbolizerArch = "--default-arch=x86_64";
+#elif defined(__i386__)
+ const char* const kSymbolizerArch = "--default-arch=i386";
+#elif defined(__aarch64__)
+ const char* const kSymbolizerArch = "--default-arch=arm64";
+#elif defined(__arm__)
+ const char* const kSymbolizerArch = "--default-arch=arm";
+#elif defined(__powerpc64__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ const char* const kSymbolizerArch = "--default-arch=powerpc64";
+#elif defined(__powerpc64__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ const char* const kSymbolizerArch = "--default-arch=powerpc64le";
+#elif defined(__s390x__)
+ const char* const kSymbolizerArch = "--default-arch=s390x";
+#elif defined(__s390__)
+ const char* const kSymbolizerArch = "--default-arch=s390";
+#else
+ const char* const kSymbolizerArch = "--default-arch=unknown";
+#endif
+
+ const char *const inline_flag = common_flags()->symbolize_inline_frames
+ ? "--inlining=true"
+ : "--inlining=false";
+ int i = 0;
+ argv[i++] = path_to_binary;
+ argv[i++] = inline_flag;
+ argv[i++] = kSymbolizerArch;
+ argv[i++] = nullptr;
+ }
+};
+
+LLVMSymbolizer::LLVMSymbolizer(const char *path, LowLevelAllocator *allocator)
+ : symbolizer_process_(new(*allocator) LLVMSymbolizerProcess(path)) {}
+
+// Parse a <file>:<line>[:<column>] buffer. The file path may contain colons on
+// Windows, so extract tokens from the right hand side first. The column info is
+// also optional.
+static const char *ParseFileLineInfo(AddressInfo *info, const char *str) {
+ char *file_line_info = 0;
+ str = ExtractToken(str, "\n", &file_line_info);
+ CHECK(file_line_info);
+
+ if (uptr size = internal_strlen(file_line_info)) {
+ char *back = file_line_info + size - 1;
+ for (int i = 0; i < 2; ++i) {
+ while (back > file_line_info && IsDigit(*back)) --back;
+ if (*back != ':' || !IsDigit(back[1])) break;
+ info->column = info->line;
+ info->line = internal_atoll(back + 1);
+ // Truncate the string at the colon to keep only filename.
+ *back = '\0';
+ --back;
+ }
+ ExtractToken(file_line_info, "", &info->file);
+ }
+
+ InternalFree(file_line_info);
+ return str;
+}
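+
+// A worked sketch (illustrative only): parsing from the right keeps Windows
+// drive letters intact.
+//
+//   AddressInfo info;
+//   ParseFileLineInfo(&info, "C:\\src\\app.cc:12:3\n");
+//   // info.file == "C:\src\app.cc", info.line == 12, info.column == 3.
+//   // The leading "C:" survives because only trailing ":<digits>" groups
+//   // are stripped, and at most twice.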
+
+// Parses one or more two-line strings in the following format:
+// <function_name>
+// <file_name>:<line_number>[:<column_number>]
+// Used by LLVMSymbolizer, Addr2LinePool and InternalSymbolizer, since all of
+// them use the same output format.
+void ParseSymbolizePCOutput(const char *str, SymbolizedStack *res) {
+ bool top_frame = true;
+ SymbolizedStack *last = res;
+ while (true) {
+ char *function_name = 0;
+ str = ExtractToken(str, "\n", &function_name);
+ CHECK(function_name);
+ if (function_name[0] == '\0') {
+ // There are no more frames.
+ InternalFree(function_name);
+ break;
+ }
+ SymbolizedStack *cur;
+ if (top_frame) {
+ cur = res;
+ top_frame = false;
+ } else {
+ cur = SymbolizedStack::New(res->info.address);
+ cur->info.FillModuleInfo(res->info.module, res->info.module_offset,
+ res->info.module_arch);
+ last->next = cur;
+ last = cur;
+ }
+
+ AddressInfo *info = &cur->info;
+ info->function = function_name;
+ str = ParseFileLineInfo(info, str);
+
+ // Functions and filenames can be "??", in which case we write 0
+ // to address info to mark that names are unknown.
+ if (0 == internal_strcmp(info->function, "??")) {
+ InternalFree(info->function);
+ info->function = 0;
+ }
+ if (0 == internal_strcmp(info->file, "??")) {
+ InternalFree(info->file);
+ info->file = 0;
+ }
+ }
+}
+
+// Parses a two-line string in the following format:
+// <symbol_name>
+// <start_address> <size>
+// Used by LLVMSymbolizer and InternalSymbolizer.
+void ParseSymbolizeDataOutput(const char *str, DataInfo *info) {
+ str = ExtractToken(str, "\n", &info->name);
+ str = ExtractUptr(str, " ", &info->start);
+ str = ExtractUptr(str, "\n", &info->size);
+}
+
+static void ParseSymbolizeFrameOutput(const char *str,
+ InternalMmapVector<LocalInfo> *locals) {
+ if (internal_strncmp(str, "??", 2) == 0)
+ return;
+
+ while (*str) {
+ LocalInfo local;
+ str = ExtractToken(str, "\n", &local.function_name);
+ str = ExtractToken(str, "\n", &local.name);
+
+ AddressInfo addr;
+ str = ParseFileLineInfo(&addr, str);
+ local.decl_file = addr.file;
+ local.decl_line = addr.line;
+
+ local.has_frame_offset = internal_strncmp(str, "??", 2) != 0;
+ str = ExtractSptr(str, " ", &local.frame_offset);
+
+ local.has_size = internal_strncmp(str, "??", 2) != 0;
+ str = ExtractUptr(str, " ", &local.size);
+
+ local.has_tag_offset = internal_strncmp(str, "??", 2) != 0;
+ str = ExtractUptr(str, "\n", &local.tag_offset);
+
+ locals->push_back(local);
+ }
+}
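+
+// An illustrative FRAME response (not from the original source): each local
+// is four lines -- enclosing function, local name, "<file>:<line>", then
+// "<frame_offset> <size> <tag_offset>", with "??" marking a missing value:
+//
+//   main
+//   buf
+//   /src/app.cc:7
+//   -32 16 0
+//
+// parses to one LocalInfo with function_name "main", name "buf", decl_line 7,
+// frame_offset -32, size 16, tag_offset 0, and all has_* flags set.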
+
+bool LLVMSymbolizer::SymbolizePC(uptr addr, SymbolizedStack *stack) {
+ AddressInfo *info = &stack->info;
+ const char *buf = FormatAndSendCommand(
+ "CODE", info->module, info->module_offset, info->module_arch);
+ if (buf) {
+ ParseSymbolizePCOutput(buf, stack);
+ return true;
+ }
+ return false;
+}
+
+bool LLVMSymbolizer::SymbolizeData(uptr addr, DataInfo *info) {
+ const char *buf = FormatAndSendCommand(
+ "DATA", info->module, info->module_offset, info->module_arch);
+ if (buf) {
+ ParseSymbolizeDataOutput(buf, info);
+ info->start += (addr - info->module_offset); // Add the base address.
+ return true;
+ }
+ return false;
+}
+
+bool LLVMSymbolizer::SymbolizeFrame(uptr addr, FrameInfo *info) {
+ const char *buf = FormatAndSendCommand(
+ "FRAME", info->module, info->module_offset, info->module_arch);
+ if (buf) {
+ ParseSymbolizeFrameOutput(buf, &info->locals);
+ return true;
+ }
+ return false;
+}
+
+const char *LLVMSymbolizer::FormatAndSendCommand(const char *command_prefix,
+ const char *module_name,
+ uptr module_offset,
+ ModuleArch arch) {
+ CHECK(module_name);
+ if (arch == kModuleArchUnknown) {
+ if (internal_snprintf(buffer_, kBufferSize, "%s \"%s\" 0x%zx\n",
+ command_prefix, module_name,
+ module_offset) >= static_cast<int>(kBufferSize)) {
+ Report("WARNING: Command buffer too small");
+ return nullptr;
+ }
+ } else {
+ if (internal_snprintf(buffer_, kBufferSize, "%s \"%s:%s\" 0x%zx\n",
+ command_prefix, module_name, ModuleArchToString(arch),
+ module_offset) >= static_cast<int>(kBufferSize)) {
+ Report("WARNING: Command buffer too small");
+ return nullptr;
+ }
+ }
+ return symbolizer_process_->SendCommand(buffer_);
+}
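+
+// Illustrative commands produced here (module names hypothetical):
+//
+//   CODE "/usr/lib/libfoo.so" 0x1a2b        (arch unknown)
+//   DATA "/usr/lib/libfoo.so:x86_64" 0x40   (arch known)
+//
+// Each is a single line written to llvm-symbolizer's stdin via SendCommand().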
+
+SymbolizerProcess::SymbolizerProcess(const char *path, bool use_forkpty)
+ : path_(path),
+ input_fd_(kInvalidFd),
+ output_fd_(kInvalidFd),
+ times_restarted_(0),
+ failed_to_start_(false),
+ reported_invalid_path_(false),
+ use_forkpty_(use_forkpty) {
+ CHECK(path_);
+ CHECK_NE(path_[0], '\0');
+}
+
+static bool IsSameModule(const char* path) {
+ if (const char* ProcessName = GetProcessName()) {
+ if (const char* SymbolizerName = StripModuleName(path)) {
+ return !internal_strcmp(ProcessName, SymbolizerName);
+ }
+ }
+ return false;
+}
+
+const char *SymbolizerProcess::SendCommand(const char *command) {
+ if (failed_to_start_)
+ return nullptr;
+ if (IsSameModule(path_)) {
+ Report("WARNING: Symbolizer was blocked from starting itself!\n");
+ failed_to_start_ = true;
+ return nullptr;
+ }
+ for (; times_restarted_ < kMaxTimesRestarted; times_restarted_++) {
+ // Start or restart symbolizer if we failed to send command to it.
+ if (const char *res = SendCommandImpl(command))
+ return res;
+ Restart();
+ }
+ if (!failed_to_start_) {
+ Report("WARNING: Failed to use and restart external symbolizer!\n");
+ failed_to_start_ = true;
+ }
+ return 0;
+}
+
+const char *SymbolizerProcess::SendCommandImpl(const char *command) {
+ if (input_fd_ == kInvalidFd || output_fd_ == kInvalidFd)
+ return 0;
+ if (!WriteToSymbolizer(command, internal_strlen(command)))
+ return 0;
+ if (!ReadFromSymbolizer(buffer_, kBufferSize))
+ return 0;
+ return buffer_;
+}
+
+bool SymbolizerProcess::Restart() {
+ if (input_fd_ != kInvalidFd)
+ CloseFile(input_fd_);
+ if (output_fd_ != kInvalidFd)
+ CloseFile(output_fd_);
+ return StartSymbolizerSubprocess();
+}
+
+bool SymbolizerProcess::ReadFromSymbolizer(char *buffer, uptr max_length) {
+ if (max_length == 0)
+ return true;
+ uptr read_len = 0;
+ while (true) {
+ uptr just_read = 0;
+ bool success = ReadFromFile(input_fd_, buffer + read_len,
+ max_length - read_len - 1, &just_read);
+    // We can't read 0 bytes, as we don't expect the external symbolizer to
+    // close its stdout.
+ if (!success || just_read == 0) {
+ Report("WARNING: Can't read from symbolizer at fd %d\n", input_fd_);
+ return false;
+ }
+ read_len += just_read;
+ if (ReachedEndOfOutput(buffer, read_len))
+ break;
+ if (read_len + 1 == max_length) {
+ Report("WARNING: Symbolizer buffer too small\n");
+ read_len = 0;
+ break;
+ }
+ }
+ buffer[read_len] = '\0';
+ return true;
+}
+
+bool SymbolizerProcess::WriteToSymbolizer(const char *buffer, uptr length) {
+ if (length == 0)
+ return true;
+ uptr write_len = 0;
+ bool success = WriteToFile(output_fd_, buffer, length, &write_len);
+ if (!success || write_len != length) {
+ Report("WARNING: Can't write to symbolizer at fd %d\n", output_fd_);
+ return false;
+ }
+ return true;
+}
+
+#endif // !SANITIZER_SYMBOLIZER_MARKUP
+
+} // namespace __sanitizer
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_symbolizer_mac.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_symbolizer_mac.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_symbolizer_mac.cc (revision 351984)
@@ -0,0 +1,168 @@
+//===-- sanitizer_symbolizer_mac.cc ---------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between various sanitizers' runtime libraries.
+//
+// Implementation of Mac-specific "atos" symbolizer.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+#if SANITIZER_MAC
+
+#include "sanitizer_allocator_internal.h"
+#include "sanitizer_mac.h"
+#include "sanitizer_symbolizer_mac.h"
+
+#include <dlfcn.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <sys/wait.h>
+#include <unistd.h>
+#include <util.h>
+
+namespace __sanitizer {
+
+bool DlAddrSymbolizer::SymbolizePC(uptr addr, SymbolizedStack *stack) {
+ Dl_info info;
+ int result = dladdr((const void *)addr, &info);
+ if (!result) return false;
+ const char *demangled = DemangleSwiftAndCXX(info.dli_sname);
+ if (!demangled) return false;
+ stack->info.function = internal_strdup(demangled);
+ return true;
+}
+
+bool DlAddrSymbolizer::SymbolizeData(uptr addr, DataInfo *datainfo) {
+ Dl_info info;
+ int result = dladdr((const void *)addr, &info);
+ if (!result) return false;
+ const char *demangled = DemangleSwiftAndCXX(info.dli_sname);
+ datainfo->name = internal_strdup(demangled);
+ datainfo->start = (uptr)info.dli_saddr;
+ return true;
+}
+
+class AtosSymbolizerProcess : public SymbolizerProcess {
+ public:
+ explicit AtosSymbolizerProcess(const char *path, pid_t parent_pid)
+ : SymbolizerProcess(path, /*use_forkpty*/ true) {
+ // Put the string command line argument in the object so that it outlives
+ // the call to GetArgV.
+ internal_snprintf(pid_str_, sizeof(pid_str_), "%d", parent_pid);
+ }
+
+ private:
+ bool ReachedEndOfOutput(const char *buffer, uptr length) const override {
+ return (length >= 1 && buffer[length - 1] == '\n');
+ }
+
+ void GetArgV(const char *path_to_binary,
+ const char *(&argv)[kArgVMax]) const override {
+ int i = 0;
+ argv[i++] = path_to_binary;
+ argv[i++] = "-p";
+ argv[i++] = &pid_str_[0];
+ if (GetMacosVersion() == MACOS_VERSION_MAVERICKS) {
+ // On Mavericks atos prints a deprecation warning which we suppress by
+ // passing -d. The warning isn't present on other OSX versions, even the
+ // newer ones.
+ argv[i++] = "-d";
+ }
+ argv[i++] = nullptr;
+ }
+
+ char pid_str_[16];
+};
+
+static bool ParseCommandOutput(const char *str, uptr addr, char **out_name,
+ char **out_module, char **out_file, uptr *line,
+ uptr *start_address) {
+ // Trim ending newlines.
+ char *trim;
+ ExtractTokenUpToDelimiter(str, "\n", &trim);
+
+ // The line from `atos` is in one of these formats:
+ // myfunction (in library.dylib) (sourcefile.c:17)
+ // myfunction (in library.dylib) + 0x1fe
+ // myfunction (in library.dylib) + 15
+ // 0xdeadbeef (in library.dylib) + 0x1fe
+ // 0xdeadbeef (in library.dylib) + 15
+ // 0xdeadbeef (in library.dylib)
+ // 0xdeadbeef
+
+ const char *rest = trim;
+ char *symbol_name;
+ rest = ExtractTokenUpToDelimiter(rest, " (in ", &symbol_name);
+ if (rest[0] == '\0') {
+ InternalFree(symbol_name);
+ InternalFree(trim);
+ return false;
+ }
+
+ if (internal_strncmp(symbol_name, "0x", 2) != 0)
+ *out_name = symbol_name;
+ else
+ InternalFree(symbol_name);
+ rest = ExtractTokenUpToDelimiter(rest, ") ", out_module);
+
+ if (rest[0] == '(') {
+ if (out_file) {
+ rest++;
+ rest = ExtractTokenUpToDelimiter(rest, ":", out_file);
+ char *extracted_line_number;
+ rest = ExtractTokenUpToDelimiter(rest, ")", &extracted_line_number);
+ if (line) *line = (uptr)internal_atoll(extracted_line_number);
+ InternalFree(extracted_line_number);
+ }
+ } else if (rest[0] == '+') {
+ rest += 2;
+ uptr offset = internal_atoll(rest);
+ if (start_address) *start_address = addr - offset;
+ }
+
+ InternalFree(trim);
+ return true;
+}
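+
+// A worked sketch (illustrative only): for addr = 0x1010 and the atos line
+//   "myfunction (in library.dylib) + 16\n"
+// this yields *out_name = "myfunction", *out_module = "library.dylib" and
+// *start_address = addr - 16; a "(sourcefile.c:17)" suffix would instead
+// fill *out_file = "sourcefile.c" and *line = 17.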
+
+AtosSymbolizer::AtosSymbolizer(const char *path, LowLevelAllocator *allocator)
+ : process_(new(*allocator) AtosSymbolizerProcess(path, getpid())) {}
+
+bool AtosSymbolizer::SymbolizePC(uptr addr, SymbolizedStack *stack) {
+ if (!process_) return false;
+ if (addr == 0) return false;
+ char command[32];
+ internal_snprintf(command, sizeof(command), "0x%zx\n", addr);
+ const char *buf = process_->SendCommand(command);
+ if (!buf) return false;
+ uptr line;
+ if (!ParseCommandOutput(buf, addr, &stack->info.function, &stack->info.module,
+ &stack->info.file, &line, nullptr)) {
+ process_ = nullptr;
+ return false;
+ }
+ stack->info.line = (int)line;
+ return true;
+}
+
+bool AtosSymbolizer::SymbolizeData(uptr addr, DataInfo *info) {
+ if (!process_) return false;
+ char command[32];
+ internal_snprintf(command, sizeof(command), "0x%zx\n", addr);
+ const char *buf = process_->SendCommand(command);
+ if (!buf) return false;
+ if (!ParseCommandOutput(buf, addr, &info->name, &info->module, nullptr,
+ nullptr, &info->start)) {
+ process_ = nullptr;
+ return false;
+ }
+ return true;
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_MAC
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_symbolizer_mac.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_symbolizer_mac.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_symbolizer_mac.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_symbolizer_mac.h (revision 351984)
@@ -0,0 +1,47 @@
+//===-- sanitizer_symbolizer_mac.h ------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between various sanitizers' runtime libraries.
+//
+// Header for Mac-specific "atos" symbolizer.
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_SYMBOLIZER_MAC_H
+#define SANITIZER_SYMBOLIZER_MAC_H
+
+#include "sanitizer_platform.h"
+#if SANITIZER_MAC
+
+#include "sanitizer_symbolizer_internal.h"
+
+namespace __sanitizer {
+
+class DlAddrSymbolizer : public SymbolizerTool {
+ public:
+ bool SymbolizePC(uptr addr, SymbolizedStack *stack) override;
+ bool SymbolizeData(uptr addr, DataInfo *info) override;
+};
+
+class AtosSymbolizerProcess;
+
+class AtosSymbolizer : public SymbolizerTool {
+ public:
+ explicit AtosSymbolizer(const char *path, LowLevelAllocator *allocator);
+
+ bool SymbolizePC(uptr addr, SymbolizedStack *stack) override;
+ bool SymbolizeData(uptr addr, DataInfo *info) override;
+
+ private:
+ AtosSymbolizerProcess *process_;
+};
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_MAC
+
+#endif // SANITIZER_SYMBOLIZER_MAC_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_symbolizer_mac.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_symbolizer_markup.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_symbolizer_markup.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_symbolizer_markup.cc (revision 351984)
@@ -0,0 +1,144 @@
+//===-- sanitizer_symbolizer_markup.cc ------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between various sanitizers' runtime libraries.
+//
+// Implementation of offline markup symbolizer.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+#if SANITIZER_SYMBOLIZER_MARKUP
+
+#if SANITIZER_FUCHSIA
+#include "sanitizer_symbolizer_fuchsia.h"
+#elif SANITIZER_RTEMS
+#include "sanitizer_symbolizer_rtems.h"
+#endif
+#include "sanitizer_stacktrace.h"
+#include "sanitizer_symbolizer.h"
+
+#include <limits.h>
+#include <unwind.h>
+
+namespace __sanitizer {
+
+// This generic support for offline symbolizing is based on the
+// Fuchsia port. We don't do any actual symbolization per se.
+// Instead, we emit text containing raw addresses and raw linkage
+// symbol names, embedded in Fuchsia's symbolization markup format.
+// Fuchsia's logging infrastructure emits enough information about
+// process memory layout that a post-processing filter can do the
+// symbolization and pretty-print the markup. See the spec at:
+// https://fuchsia.googlesource.com/zircon/+/master/docs/symbolizer_markup.md
+
+// This is used by UBSan for type names, and by ASan for global variable names.
+// It's expected to return a static buffer that will be reused on each call.
+const char *Symbolizer::Demangle(const char *name) {
+ static char buffer[kFormatDemangleMax];
+ internal_snprintf(buffer, sizeof(buffer), kFormatDemangle, name);
+ return buffer;
+}
+
+// This is used mostly for suppression matching. Making it work
+// would enable "interceptor_via_lib" suppressions. It's also used
+// once in UBSan to say "in module ..." in a message that also
+// includes an address in the module, so the post-processor can already
+// pretty-print that to indicate the module.
+bool Symbolizer::GetModuleNameAndOffsetForPC(uptr pc, const char **module_name,
+ uptr *module_address) {
+ return false;
+}
+
+// This is used in some places for suppression checking, which we
+// don't really support for Fuchsia. It's also used in UBSan to
+// map a PC location to a function name, so we always fill in
+// the function member with a string containing markup around the PC
+// value.
+// TODO(mcgrathr): Under SANITIZER_GO, it's currently used by TSan
+// to render stack frames, but that should be changed to use
+// RenderStackFrame.
+SymbolizedStack *Symbolizer::SymbolizePC(uptr addr) {
+ SymbolizedStack *s = SymbolizedStack::New(addr);
+ char buffer[kFormatFunctionMax];
+ internal_snprintf(buffer, sizeof(buffer), kFormatFunction, addr);
+ s->info.function = internal_strdup(buffer);
+ return s;
+}
+
+// Always claim we succeeded, so that RenderDataInfo will be called.
+bool Symbolizer::SymbolizeData(uptr addr, DataInfo *info) {
+ info->Clear();
+ info->start = addr;
+ return true;
+}
+
+// We ignore the format argument to __sanitizer_symbolize_global.
+void RenderData(InternalScopedString *buffer, const char *format,
+ const DataInfo *DI, const char *strip_path_prefix) {
+ buffer->append(kFormatData, DI->start);
+}
+
+// We don't support the stack_trace_format flag at all.
+void RenderFrame(InternalScopedString *buffer, const char *format, int frame_no,
+ const AddressInfo &info, bool vs_style,
+ const char *strip_path_prefix, const char *strip_func_prefix) {
+ buffer->append(kFormatFrame, frame_no, info.address);
+}
+
+Symbolizer *Symbolizer::PlatformInit() {
+ return new (symbolizer_allocator_) Symbolizer({});
+}
+
+void Symbolizer::LateInitialize() { Symbolizer::GetOrInit(); }
+
+void StartReportDeadlySignal() {}
+void ReportDeadlySignal(const SignalContext &sig, u32 tid,
+ UnwindSignalStackCallbackType unwind,
+ const void *unwind_context) {}
+
+#if SANITIZER_CAN_SLOW_UNWIND
+struct UnwindTraceArg {
+ BufferedStackTrace *stack;
+ u32 max_depth;
+};
+
+_Unwind_Reason_Code Unwind_Trace(struct _Unwind_Context *ctx, void *param) {
+ UnwindTraceArg *arg = static_cast<UnwindTraceArg *>(param);
+ CHECK_LT(arg->stack->size, arg->max_depth);
+ uptr pc = _Unwind_GetIP(ctx);
+ if (pc < PAGE_SIZE) return _URC_NORMAL_STOP;
+ arg->stack->trace_buffer[arg->stack->size++] = pc;
+ return (arg->stack->size == arg->max_depth ? _URC_NORMAL_STOP
+ : _URC_NO_REASON);
+}
+
+void BufferedStackTrace::UnwindSlow(uptr pc, u32 max_depth) {
+ CHECK_GE(max_depth, 2);
+ size = 0;
+ UnwindTraceArg arg = {this, Min(max_depth + 1, kStackTraceMax)};
+ _Unwind_Backtrace(Unwind_Trace, &arg);
+ CHECK_GT(size, 0);
+ // We need to pop a few frames so that pc is on top.
+ uptr to_pop = LocatePcInTrace(pc);
+ // trace_buffer[0] belongs to the current function so we always pop it,
+ // unless there is only 1 frame in the stack trace (1 frame is always better
+ // than 0!).
+ PopStackFrames(Min(to_pop, static_cast<uptr>(1)));
+ trace_buffer[0] = pc;
+}
+
+void BufferedStackTrace::UnwindSlow(uptr pc, void *context, u32 max_depth) {
+ CHECK(context);
+ CHECK_GE(max_depth, 2);
+ UNREACHABLE("signal context doesn't exist");
+}
+#endif // SANITIZER_CAN_SLOW_UNWIND
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_SYMBOLIZER_MARKUP
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_symbolizer_markup.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
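Since the markup symbolizer only wraps raw addresses in fixed format strings,
its output is easy to reproduce. A standalone sketch using the RTEMS-style
constants defined later in this import ("{{{symbol:%s}}}", "{{{pc:%p}}}",
" [%u] IP: %p"); Fuchsia defines its own variants of these strings:

    #include <cstdio>

    int main() {
      void *pc = reinterpret_cast<void *>(0x1000);
      char frame[64];
      // One backtrace frame, printed on a line by itself.
      std::snprintf(frame, sizeof(frame), " [%u] IP: %p", 0u, pc);
      std::puts(frame);
      // "Function name" for a PC: just markup around the address.
      std::printf("{{{pc:%p}}}\n", pc);
      // Demangling: pass the raw linkage name through in markup.
      std::printf("{{{symbol:%s}}}\n", "_ZN3fooEv");
    }

The post-processing filter, not the runtime, resolves these markup elements to
names and source locations.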
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc (revision 351984)
@@ -0,0 +1,539 @@
+//===-- sanitizer_symbolizer_posix_libcdep.cc -----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries.
+// POSIX-specific implementation of symbolizer parts.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+#if SANITIZER_POSIX
+#include "sanitizer_allocator_internal.h"
+#include "sanitizer_common.h"
+#include "sanitizer_file.h"
+#include "sanitizer_flags.h"
+#include "sanitizer_internal_defs.h"
+#include "sanitizer_linux.h"
+#include "sanitizer_placement_new.h"
+#include "sanitizer_posix.h"
+#include "sanitizer_procmaps.h"
+#include "sanitizer_symbolizer_internal.h"
+#include "sanitizer_symbolizer_libbacktrace.h"
+#include "sanitizer_symbolizer_mac.h"
+
+#include <dlfcn.h> // for dlsym()
+#include <errno.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+#if SANITIZER_MAC
+#include <util.h> // for forkpty()
+#endif // SANITIZER_MAC
+
+// C++ demangling function, as required by Itanium C++ ABI. This is weak,
+// because we do not require a C++ ABI library to be linked to a program
+// using sanitizers; if it's not present, we'll just use the mangled name.
+namespace __cxxabiv1 {
+ extern "C" SANITIZER_WEAK_ATTRIBUTE
+ char *__cxa_demangle(const char *mangled, char *buffer,
+ size_t *length, int *status);
+}
+
+namespace __sanitizer {
+
+// Attempts to demangle the name via __cxa_demangle from __cxxabiv1.
+const char *DemangleCXXABI(const char *name) {
+ // FIXME: __cxa_demangle aggressively insists on allocating memory.
+ // There's not much we can do about that, short of providing our
+ // own demangler (libc++abi's implementation could be adapted so that
+ // it does not allocate). For now, we just call it anyway, and we leak
+ // the returned value.
+ if (&__cxxabiv1::__cxa_demangle)
+ if (const char *demangled_name =
+ __cxxabiv1::__cxa_demangle(name, 0, 0, 0))
+ return demangled_name;
+
+ return name;
+}
+
+// As of now, there are no headers for the Swift runtime. Once they are
+// present, we will weakly link since we do not require the Swift runtime to
+// be linked.
+typedef char *(*swift_demangle_ft)(const char *mangledName,
+ size_t mangledNameLength, char *outputBuffer,
+ size_t *outputBufferSize, uint32_t flags);
+static swift_demangle_ft swift_demangle_f;
+
+// This must not happen lazily at symbolication time, because dlsym uses
+// malloc and thread-local storage, which is not a good thing to do during
+// symbolication.
+static void InitializeSwiftDemangler() {
+ swift_demangle_f = (swift_demangle_ft)dlsym(RTLD_DEFAULT, "swift_demangle");
+ (void)dlerror(); // Cleanup error message in case of failure
+}
+
+// Attempts to demangle a Swift name. The demangler will return nullptr if a
+// non-Swift name is passed in.
+const char *DemangleSwift(const char *name) {
+ if (!name) return nullptr;
+
+ // Check if we are dealing with a Swift mangled name first.
+ if (name[0] != '_' || name[1] != 'T') {
+ return nullptr;
+ }
+
+ if (swift_demangle_f)
+ return swift_demangle_f(name, internal_strlen(name), 0, 0, 0);
+
+ return nullptr;
+}
+
+const char *DemangleSwiftAndCXX(const char *name) {
+ if (!name) return nullptr;
+ if (const char *swift_demangled_name = DemangleSwift(name))
+ return swift_demangled_name;
+ return DemangleCXXABI(name);
+}
+
+static bool CreateTwoHighNumberedPipes(int *infd_, int *outfd_) {
+ int *infd = NULL;
+ int *outfd = NULL;
+ // The client program may close its stdin and/or stdout and/or stderr
+  // thus allowing pipe(2) to reuse file descriptors 0, 1 or 2.
+ // In this case the communication between the forked processes may be
+ // broken if either the parent or the child tries to close or duplicate
+ // these descriptors. The loop below produces two pairs of file
+ // descriptors, each greater than 2 (stderr).
+ int sock_pair[5][2];
+ for (int i = 0; i < 5; i++) {
+ if (pipe(sock_pair[i]) == -1) {
+ for (int j = 0; j < i; j++) {
+ internal_close(sock_pair[j][0]);
+ internal_close(sock_pair[j][1]);
+ }
+ return false;
+ } else if (sock_pair[i][0] > 2 && sock_pair[i][1] > 2) {
+ if (infd == NULL) {
+ infd = sock_pair[i];
+ } else {
+ outfd = sock_pair[i];
+ for (int j = 0; j < i; j++) {
+ if (sock_pair[j] == infd) continue;
+ internal_close(sock_pair[j][0]);
+ internal_close(sock_pair[j][1]);
+ }
+ break;
+ }
+ }
+ }
+ CHECK(infd);
+ CHECK(outfd);
+ infd_[0] = infd[0];
+ infd_[1] = infd[1];
+ outfd_[0] = outfd[0];
+ outfd_[1] = outfd[1];
+ return true;
+}
+
+bool SymbolizerProcess::StartSymbolizerSubprocess() {
+ if (!FileExists(path_)) {
+ if (!reported_invalid_path_) {
+ Report("WARNING: invalid path to external symbolizer!\n");
+ reported_invalid_path_ = true;
+ }
+ return false;
+ }
+
+ int pid = -1;
+
+ int infd[2];
+ internal_memset(&infd, 0, sizeof(infd));
+ int outfd[2];
+ internal_memset(&outfd, 0, sizeof(outfd));
+ if (!CreateTwoHighNumberedPipes(infd, outfd)) {
+ Report("WARNING: Can't create a socket pair to start "
+ "external symbolizer (errno: %d)\n", errno);
+ return false;
+ }
+
+ if (use_forkpty_) {
+#if SANITIZER_MAC
+ fd_t fd = kInvalidFd;
+
+ // forkpty redirects stdout and stderr into a single stream, so we would
+ // receive error messages as standard replies. To avoid that, let's dup
+ // stderr and restore it in the child.
+ int saved_stderr = dup(STDERR_FILENO);
+ CHECK_GE(saved_stderr, 0);
+
+ // We only need one pipe, for stdin of the child.
+ close(outfd[0]);
+ close(outfd[1]);
+
+ // Use forkpty to disable buffering in the new terminal.
+ pid = internal_forkpty(&fd);
+ if (pid == -1) {
+ // forkpty() failed.
+ Report("WARNING: failed to fork external symbolizer (errno: %d)\n",
+ errno);
+ return false;
+ } else if (pid == 0) {
+ // Child subprocess.
+
+ // infd[0] is the child's reading end.
+ close(infd[1]);
+
+ // Set up stdin to read from the pipe.
+ CHECK_GE(dup2(infd[0], STDIN_FILENO), 0);
+ close(infd[0]);
+
+ // Restore stderr.
+ CHECK_GE(dup2(saved_stderr, STDERR_FILENO), 0);
+ close(saved_stderr);
+
+ const char *argv[kArgVMax];
+ GetArgV(path_, argv);
+ execv(path_, const_cast<char **>(&argv[0]));
+ internal__exit(1);
+ }
+
+ // Input for the child, infd[1] is our writing end.
+ output_fd_ = infd[1];
+ close(infd[0]);
+
+ // Continue execution in parent process.
+ input_fd_ = fd;
+
+ close(saved_stderr);
+
+ // Disable echo in the new terminal, disable CR.
+ struct termios termflags;
+ tcgetattr(fd, &termflags);
+ termflags.c_oflag &= ~ONLCR;
+ termflags.c_lflag &= ~ECHO;
+ tcsetattr(fd, TCSANOW, &termflags);
+#else // SANITIZER_MAC
+ UNIMPLEMENTED();
+#endif // SANITIZER_MAC
+ } else {
+ const char *argv[kArgVMax];
+ GetArgV(path_, argv);
+ pid = StartSubprocess(path_, argv, /* stdin */ outfd[0],
+ /* stdout */ infd[1]);
+ if (pid < 0) {
+ internal_close(infd[0]);
+ internal_close(outfd[1]);
+ return false;
+ }
+
+ input_fd_ = infd[0];
+ output_fd_ = outfd[1];
+ }
+
+ CHECK_GT(pid, 0);
+
+ // Check that symbolizer subprocess started successfully.
+ SleepForMillis(kSymbolizerStartupTimeMillis);
+ if (!IsProcessRunning(pid)) {
+ // Either waitpid failed, or child has already exited.
+ Report("WARNING: external symbolizer didn't start up correctly!\n");
+ return false;
+ }
+
+ return true;
+}
+
+class Addr2LineProcess : public SymbolizerProcess {
+ public:
+ Addr2LineProcess(const char *path, const char *module_name)
+ : SymbolizerProcess(path), module_name_(internal_strdup(module_name)) {}
+
+ const char *module_name() const { return module_name_; }
+
+ private:
+ void GetArgV(const char *path_to_binary,
+ const char *(&argv)[kArgVMax]) const override {
+ int i = 0;
+ argv[i++] = path_to_binary;
+ argv[i++] = "-iCfe";
+ argv[i++] = module_name_;
+ argv[i++] = nullptr;
+ }
+
+ bool ReachedEndOfOutput(const char *buffer, uptr length) const override;
+
+ bool ReadFromSymbolizer(char *buffer, uptr max_length) override {
+ if (!SymbolizerProcess::ReadFromSymbolizer(buffer, max_length))
+ return false;
+ // The returned buffer is empty when output is valid, but exceeds
+ // max_length.
+ if (*buffer == '\0')
+ return true;
+    // We should cut out output_terminator_ at the end of the given buffer,
+    // appended by addr2line to mark the end of its meaningful output.
+    // We cannot scan the buffer from its beginning, because it is legal for
+    // it to start with output_terminator_ when the given offset is invalid.
+    // So we scan from the second character.
+ char *garbage = internal_strstr(buffer + 1, output_terminator_);
+    // This should never be NULL since the buffer must end with
+    // output_terminator_.
+ CHECK(garbage);
+ // Trim the buffer.
+ garbage[0] = '\0';
+ return true;
+ }
+
+ const char *module_name_; // Owned, leaked.
+ static const char output_terminator_[];
+};
+
+const char Addr2LineProcess::output_terminator_[] = "??\n??:0\n";
+
+bool Addr2LineProcess::ReachedEndOfOutput(const char *buffer,
+ uptr length) const {
+ const size_t kTerminatorLen = sizeof(output_terminator_) - 1;
+  // Skip if we read only kTerminatorLen bytes, because addr2line output
+  // should consist of at least two pairs of lines:
+  // 1. The first pair corresponds to the given offset to be symbolized
+  //    (and may be equal to output_terminator_ if the offset is not valid).
+  // 2. The second pair is output_terminator_ itself, marking the end of
+  //    output.
+ if (length <= kTerminatorLen) return false;
+  // addr2line output should end with output_terminator_.
+ return !internal_memcmp(buffer + length - kTerminatorLen,
+ output_terminator_, kTerminatorLen);
+}
+
+class Addr2LinePool : public SymbolizerTool {
+ public:
+ explicit Addr2LinePool(const char *addr2line_path,
+ LowLevelAllocator *allocator)
+ : addr2line_path_(addr2line_path), allocator_(allocator) {
+ addr2line_pool_.reserve(16);
+ }
+
+ bool SymbolizePC(uptr addr, SymbolizedStack *stack) override {
+ if (const char *buf =
+ SendCommand(stack->info.module, stack->info.module_offset)) {
+ ParseSymbolizePCOutput(buf, stack);
+ return true;
+ }
+ return false;
+ }
+
+ bool SymbolizeData(uptr addr, DataInfo *info) override {
+ return false;
+ }
+
+ private:
+ const char *SendCommand(const char *module_name, uptr module_offset) {
+ Addr2LineProcess *addr2line = 0;
+ for (uptr i = 0; i < addr2line_pool_.size(); ++i) {
+ if (0 ==
+ internal_strcmp(module_name, addr2line_pool_[i]->module_name())) {
+ addr2line = addr2line_pool_[i];
+ break;
+ }
+ }
+ if (!addr2line) {
+ addr2line =
+ new(*allocator_) Addr2LineProcess(addr2line_path_, module_name);
+ addr2line_pool_.push_back(addr2line);
+ }
+ CHECK_EQ(0, internal_strcmp(module_name, addr2line->module_name()));
+ char buffer[kBufferSize];
+ internal_snprintf(buffer, kBufferSize, "0x%zx\n0x%zx\n",
+ module_offset, dummy_address_);
+ return addr2line->SendCommand(buffer);
+ }
+
+ static const uptr kBufferSize = 64;
+ const char *addr2line_path_;
+ LowLevelAllocator *allocator_;
+ InternalMmapVector<Addr2LineProcess*> addr2line_pool_;
+ static const uptr dummy_address_ =
+ FIRST_32_SECOND_64(UINT32_MAX, UINT64_MAX);
+};
+
+#if SANITIZER_SUPPORTS_WEAK_HOOKS
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+bool __sanitizer_symbolize_code(const char *ModuleName, u64 ModuleOffset,
+ char *Buffer, int MaxLength);
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+bool __sanitizer_symbolize_data(const char *ModuleName, u64 ModuleOffset,
+ char *Buffer, int MaxLength);
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_symbolize_flush();
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+int __sanitizer_symbolize_demangle(const char *Name, char *Buffer,
+ int MaxLength);
+} // extern "C"
+
+class InternalSymbolizer : public SymbolizerTool {
+ public:
+ static InternalSymbolizer *get(LowLevelAllocator *alloc) {
+ if (__sanitizer_symbolize_code != 0 &&
+ __sanitizer_symbolize_data != 0) {
+ return new(*alloc) InternalSymbolizer();
+ }
+ return 0;
+ }
+
+ bool SymbolizePC(uptr addr, SymbolizedStack *stack) override {
+ bool result = __sanitizer_symbolize_code(
+ stack->info.module, stack->info.module_offset, buffer_, kBufferSize);
+ if (result) ParseSymbolizePCOutput(buffer_, stack);
+ return result;
+ }
+
+ bool SymbolizeData(uptr addr, DataInfo *info) override {
+ bool result = __sanitizer_symbolize_data(info->module, info->module_offset,
+ buffer_, kBufferSize);
+ if (result) {
+ ParseSymbolizeDataOutput(buffer_, info);
+ info->start += (addr - info->module_offset); // Add the base address.
+ }
+ return result;
+ }
+
+ void Flush() override {
+ if (__sanitizer_symbolize_flush)
+ __sanitizer_symbolize_flush();
+ }
+
+ const char *Demangle(const char *name) override {
+ if (__sanitizer_symbolize_demangle) {
+ for (uptr res_length = 1024;
+ res_length <= InternalSizeClassMap::kMaxSize;) {
+ char *res_buff = static_cast<char*>(InternalAlloc(res_length));
+ uptr req_length =
+ __sanitizer_symbolize_demangle(name, res_buff, res_length);
+ if (req_length > res_length) {
+ res_length = req_length + 1;
+ InternalFree(res_buff);
+ continue;
+ }
+ return res_buff;
+ }
+ }
+ return name;
+ }
+
+ private:
+ InternalSymbolizer() { }
+
+ static const int kBufferSize = 16 * 1024;
+ char buffer_[kBufferSize];
+};
+#else // SANITIZER_SUPPORTS_WEAK_HOOKS
+
+class InternalSymbolizer : public SymbolizerTool {
+ public:
+ static InternalSymbolizer *get(LowLevelAllocator *alloc) { return 0; }
+};
+
+#endif // SANITIZER_SUPPORTS_WEAK_HOOKS
+
+const char *Symbolizer::PlatformDemangle(const char *name) {
+ return DemangleSwiftAndCXX(name);
+}
+
+static SymbolizerTool *ChooseExternalSymbolizer(LowLevelAllocator *allocator) {
+ const char *path = common_flags()->external_symbolizer_path;
+ const char *binary_name = path ? StripModuleName(path) : "";
+ if (path && path[0] == '\0') {
+ VReport(2, "External symbolizer is explicitly disabled.\n");
+ return nullptr;
+ } else if (!internal_strcmp(binary_name, "llvm-symbolizer")) {
+ VReport(2, "Using llvm-symbolizer at user-specified path: %s\n", path);
+ return new(*allocator) LLVMSymbolizer(path, allocator);
+ } else if (!internal_strcmp(binary_name, "atos")) {
+#if SANITIZER_MAC
+ VReport(2, "Using atos at user-specified path: %s\n", path);
+ return new(*allocator) AtosSymbolizer(path, allocator);
+#else // SANITIZER_MAC
+ Report("ERROR: Using `atos` is only supported on Darwin.\n");
+ Die();
+#endif // SANITIZER_MAC
+ } else if (!internal_strcmp(binary_name, "addr2line")) {
+ VReport(2, "Using addr2line at user-specified path: %s\n", path);
+ return new(*allocator) Addr2LinePool(path, allocator);
+ } else if (path) {
+ Report("ERROR: External symbolizer path is set to '%s' which isn't "
+ "a known symbolizer. Please set the path to the llvm-symbolizer "
+ "binary or other known tool.\n", path);
+ Die();
+ }
+
+  // Otherwise the symbolizer program is unknown; search $PATH.
+ CHECK(path == nullptr);
+#if SANITIZER_MAC
+ if (const char *found_path = FindPathToBinary("atos")) {
+ VReport(2, "Using atos found at: %s\n", found_path);
+ return new(*allocator) AtosSymbolizer(found_path, allocator);
+ }
+#endif // SANITIZER_MAC
+ if (const char *found_path = FindPathToBinary("llvm-symbolizer")) {
+ VReport(2, "Using llvm-symbolizer found at: %s\n", found_path);
+ return new(*allocator) LLVMSymbolizer(found_path, allocator);
+ }
+ if (common_flags()->allow_addr2line) {
+ if (const char *found_path = FindPathToBinary("addr2line")) {
+ VReport(2, "Using addr2line found at: %s\n", found_path);
+ return new(*allocator) Addr2LinePool(found_path, allocator);
+ }
+ }
+ return nullptr;
+}
+
+static void ChooseSymbolizerTools(IntrusiveList<SymbolizerTool> *list,
+ LowLevelAllocator *allocator) {
+ if (!common_flags()->symbolize) {
+ VReport(2, "Symbolizer is disabled.\n");
+ return;
+ }
+ if (IsAllocatorOutOfMemory()) {
+ VReport(2, "Cannot use internal symbolizer: out of memory\n");
+ } else if (SymbolizerTool *tool = InternalSymbolizer::get(allocator)) {
+ VReport(2, "Using internal symbolizer.\n");
+ list->push_back(tool);
+ return;
+ }
+ if (SymbolizerTool *tool = LibbacktraceSymbolizer::get(allocator)) {
+ VReport(2, "Using libbacktrace symbolizer.\n");
+ list->push_back(tool);
+ return;
+ }
+
+ if (SymbolizerTool *tool = ChooseExternalSymbolizer(allocator)) {
+ list->push_back(tool);
+ }
+
+#if SANITIZER_MAC
+ VReport(2, "Using dladdr symbolizer.\n");
+ list->push_back(new(*allocator) DlAddrSymbolizer());
+#endif // SANITIZER_MAC
+}
+
+Symbolizer *Symbolizer::PlatformInit() {
+ IntrusiveList<SymbolizerTool> list;
+ list.clear();
+ ChooseSymbolizerTools(&list, &symbolizer_allocator_);
+ return new(symbolizer_allocator_) Symbolizer(list);
+}
+
+void Symbolizer::LateInitialize() {
+ Symbolizer::GetOrInit();
+ InitializeSwiftDemangler();
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_POSIX
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_symbolizer_posix_libcdep.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
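The DemangleCXXABI fallback above has a public-header equivalent outside the
runtime: <cxxabi.h> declares the same __cxa_demangle. The runtime instead
declares the symbol weakly so that no C++ ABI library dependency is forced.
A minimal sketch which, like the runtime, leaks the returned buffer:

    #include <cstdio>
    #include <cxxabi.h>

    // Returns a demangled copy on success, otherwise the mangled name itself.
    static const char *Demangle(const char *name) {
      int status = 0;
      char *demangled = abi::__cxa_demangle(name, nullptr, nullptr, &status);
      return (status == 0 && demangled) ? demangled : name;
    }

    int main() {
      std::printf("%s\n", Demangle("_Z3fooi"));      // prints "foo(int)"
      std::printf("%s\n", Demangle("not_mangled"));  // falls back unchanged
    }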
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_symbolizer_report.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_symbolizer_report.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_symbolizer_report.cc (revision 351984)
@@ -0,0 +1,283 @@
+//===-- sanitizer_symbolizer_report.cc ------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// This file is shared between AddressSanitizer and other sanitizer run-time
+/// libraries and implements functions related to symbolized reports.
+///
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common.h"
+#include "sanitizer_file.h"
+#include "sanitizer_flags.h"
+#include "sanitizer_procmaps.h"
+#include "sanitizer_report_decorator.h"
+#include "sanitizer_stacktrace.h"
+#include "sanitizer_stacktrace_printer.h"
+#include "sanitizer_symbolizer.h"
+
+#if SANITIZER_POSIX
+# include "sanitizer_posix.h"
+# include <sys/mman.h>
+#endif
+
+namespace __sanitizer {
+
+#if !SANITIZER_GO
+void ReportErrorSummary(const char *error_type, const AddressInfo &info,
+ const char *alt_tool_name) {
+ if (!common_flags()->print_summary) return;
+ InternalScopedString buff(kMaxSummaryLength);
+ buff.append("%s ", error_type);
+ RenderFrame(&buff, "%L %F", 0, info, common_flags()->symbolize_vs_style,
+ common_flags()->strip_path_prefix);
+ ReportErrorSummary(buff.data(), alt_tool_name);
+}
+#endif
+
+#if !SANITIZER_FUCHSIA
+
+bool ReportFile::SupportsColors() {
+ SpinMutexLock l(mu);
+ ReopenIfNecessary();
+ return SupportsColoredOutput(fd);
+}
+
+static INLINE bool ReportSupportsColors() {
+ return report_file.SupportsColors();
+}
+
+#else // SANITIZER_FUCHSIA
+
+// Fuchsia's logs always go through post-processing that handles colorization.
+static INLINE bool ReportSupportsColors() { return true; }
+
+#endif // !SANITIZER_FUCHSIA
+
+bool ColorizeReports() {
+ // FIXME: Add proper Windows support to AnsiColorDecorator and re-enable color
+ // printing on Windows.
+ if (SANITIZER_WINDOWS)
+ return false;
+
+ const char *flag = common_flags()->color;
+ return internal_strcmp(flag, "always") == 0 ||
+ (internal_strcmp(flag, "auto") == 0 && ReportSupportsColors());
+}
+
+void ReportErrorSummary(const char *error_type, const StackTrace *stack,
+ const char *alt_tool_name) {
+#if !SANITIZER_GO
+ if (!common_flags()->print_summary)
+ return;
+ if (stack->size == 0) {
+ ReportErrorSummary(error_type);
+ return;
+ }
+  // Currently, we include the first stack frame in the report summary.
+ // Maybe sometimes we need to choose another frame (e.g. skip memcpy/etc).
+ uptr pc = StackTrace::GetPreviousInstructionPc(stack->trace[0]);
+ SymbolizedStack *frame = Symbolizer::GetOrInit()->SymbolizePC(pc);
+ ReportErrorSummary(error_type, frame->info, alt_tool_name);
+ frame->ClearAll();
+#endif
+}
+
+void ReportMmapWriteExec(int prot) {
+#if SANITIZER_POSIX && (!SANITIZER_GO && !SANITIZER_ANDROID)
+ if ((prot & (PROT_WRITE | PROT_EXEC)) != (PROT_WRITE | PROT_EXEC))
+ return;
+
+ ScopedErrorReportLock l;
+ SanitizerCommonDecorator d;
+
+ InternalMmapVector<BufferedStackTrace> stack_buffer(1);
+ BufferedStackTrace *stack = stack_buffer.data();
+ stack->Reset();
+ uptr top = 0;
+ uptr bottom = 0;
+ GET_CALLER_PC_BP_SP;
+ (void)sp;
+ bool fast = common_flags()->fast_unwind_on_fatal;
+ if (StackTrace::WillUseFastUnwind(fast)) {
+ GetThreadStackTopAndBottom(false, &top, &bottom);
+ stack->Unwind(kStackTraceMax, pc, bp, nullptr, top, bottom, true);
+ } else
+ stack->Unwind(kStackTraceMax, pc, 0, nullptr, 0, 0, false);
+
+ Printf("%s", d.Warning());
+ Report("WARNING: %s: writable-executable page usage\n", SanitizerToolName);
+ Printf("%s", d.Default());
+
+ stack->Print();
+ ReportErrorSummary("w-and-x-usage", stack);
+#endif
+}
+
+#if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS && !SANITIZER_GO
+void StartReportDeadlySignal() {
+ // Write the first message using fd=2, just in case.
+ // It may actually fail to write in case stderr is closed.
+ CatastrophicErrorWrite(SanitizerToolName, internal_strlen(SanitizerToolName));
+ static const char kDeadlySignal[] = ":DEADLYSIGNAL\n";
+ CatastrophicErrorWrite(kDeadlySignal, sizeof(kDeadlySignal) - 1);
+}
+
+static void MaybeReportNonExecRegion(uptr pc) {
+#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD
+ MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
+ MemoryMappedSegment segment;
+ while (proc_maps.Next(&segment)) {
+ if (pc >= segment.start && pc < segment.end && !segment.IsExecutable())
+ Report("Hint: PC is at a non-executable region. Maybe a wild jump?\n");
+ }
+#endif
+}
+
+static void PrintMemoryByte(InternalScopedString *str, const char *before,
+ u8 byte) {
+ SanitizerCommonDecorator d;
+ str->append("%s%s%x%x%s ", before, d.MemoryByte(), byte >> 4, byte & 15,
+ d.Default());
+}
+
+static void MaybeDumpInstructionBytes(uptr pc) {
+ if (!common_flags()->dump_instruction_bytes || (pc < GetPageSizeCached()))
+ return;
+ InternalScopedString str(1024);
+ str.append("First 16 instruction bytes at pc: ");
+ if (IsAccessibleMemoryRange(pc, 16)) {
+ for (int i = 0; i < 16; ++i) {
+ PrintMemoryByte(&str, "", ((u8 *)pc)[i]);
+ }
+ str.append("\n");
+ } else {
+ str.append("unaccessible\n");
+ }
+ Report("%s", str.data());
+}
+
+static void MaybeDumpRegisters(void *context) {
+ if (!common_flags()->dump_registers) return;
+ SignalContext::DumpAllRegisters(context);
+}
+
+static void ReportStackOverflowImpl(const SignalContext &sig, u32 tid,
+ UnwindSignalStackCallbackType unwind,
+ const void *unwind_context) {
+ SanitizerCommonDecorator d;
+ Printf("%s", d.Warning());
+ static const char kDescription[] = "stack-overflow";
+ Report("ERROR: %s: %s on address %p (pc %p bp %p sp %p T%d)\n",
+ SanitizerToolName, kDescription, (void *)sig.addr, (void *)sig.pc,
+ (void *)sig.bp, (void *)sig.sp, tid);
+ Printf("%s", d.Default());
+ InternalMmapVector<BufferedStackTrace> stack_buffer(1);
+ BufferedStackTrace *stack = stack_buffer.data();
+ stack->Reset();
+ unwind(sig, unwind_context, stack);
+ stack->Print();
+ ReportErrorSummary(kDescription, stack);
+}
+
+static void ReportDeadlySignalImpl(const SignalContext &sig, u32 tid,
+ UnwindSignalStackCallbackType unwind,
+ const void *unwind_context) {
+ SanitizerCommonDecorator d;
+ Printf("%s", d.Warning());
+ const char *description = sig.Describe();
+ Report("ERROR: %s: %s on unknown address %p (pc %p bp %p sp %p T%d)\n",
+ SanitizerToolName, description, (void *)sig.addr, (void *)sig.pc,
+ (void *)sig.bp, (void *)sig.sp, tid);
+ Printf("%s", d.Default());
+ if (sig.pc < GetPageSizeCached())
+ Report("Hint: pc points to the zero page.\n");
+ if (sig.is_memory_access) {
+ const char *access_type =
+ sig.write_flag == SignalContext::WRITE
+ ? "WRITE"
+ : (sig.write_flag == SignalContext::READ ? "READ" : "UNKNOWN");
+ Report("The signal is caused by a %s memory access.\n", access_type);
+ if (sig.addr < GetPageSizeCached())
+ Report("Hint: address points to the zero page.\n");
+ }
+ MaybeReportNonExecRegion(sig.pc);
+ InternalMmapVector<BufferedStackTrace> stack_buffer(1);
+ BufferedStackTrace *stack = stack_buffer.data();
+ stack->Reset();
+ unwind(sig, unwind_context, stack);
+ stack->Print();
+ MaybeDumpInstructionBytes(sig.pc);
+ MaybeDumpRegisters(sig.context);
+ Printf("%s can not provide additional info.\n", SanitizerToolName);
+ ReportErrorSummary(description, stack);
+}
+
+void ReportDeadlySignal(const SignalContext &sig, u32 tid,
+ UnwindSignalStackCallbackType unwind,
+ const void *unwind_context) {
+ if (sig.IsStackOverflow())
+ ReportStackOverflowImpl(sig, tid, unwind, unwind_context);
+ else
+ ReportDeadlySignalImpl(sig, tid, unwind, unwind_context);
+}
+
+void HandleDeadlySignal(void *siginfo, void *context, u32 tid,
+ UnwindSignalStackCallbackType unwind,
+ const void *unwind_context) {
+ StartReportDeadlySignal();
+ ScopedErrorReportLock rl;
+ SignalContext sig(siginfo, context);
+ ReportDeadlySignal(sig, tid, unwind, unwind_context);
+ Report("ABORTING\n");
+ Die();
+}
+
+#endif // !SANITIZER_FUCHSIA && !SANITIZER_RTEMS && !SANITIZER_GO
+
+static atomic_uintptr_t reporting_thread = {0};
+static StaticSpinMutex CommonSanitizerReportMutex;
+
+ScopedErrorReportLock::ScopedErrorReportLock() {
+ uptr current = GetThreadSelf();
+ for (;;) {
+ uptr expected = 0;
+ if (atomic_compare_exchange_strong(&reporting_thread, &expected, current,
+ memory_order_relaxed)) {
+ // We've claimed reporting_thread so proceed.
+ CommonSanitizerReportMutex.Lock();
+ return;
+ }
+
+ if (expected == current) {
+      // This is either an async signal or a nested error during error
+      // reporting. Fail simply to avoid deadlocks in Report().
+
+ // Can't use Report() here because of potential deadlocks in nested
+ // signal handlers.
+ CatastrophicErrorWrite(SanitizerToolName,
+ internal_strlen(SanitizerToolName));
+ static const char msg[] = ": nested bug in the same thread, aborting.\n";
+ CatastrophicErrorWrite(msg, sizeof(msg) - 1);
+
+ internal__exit(common_flags()->exitcode);
+ }
+
+ internal_sched_yield();
+ }
+}
+
+ScopedErrorReportLock::~ScopedErrorReportLock() {
+ CommonSanitizerReportMutex.Unlock();
+ atomic_store_relaxed(&reporting_thread, 0);
+}
+
+void ScopedErrorReportLock::CheckLocked() {
+ CommonSanitizerReportMutex.CheckLocked();
+}
+
+} // namespace __sanitizer
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_symbolizer_report.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
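The reporting_thread claim in ScopedErrorReportLock is an ordinary
compare-exchange spin. A standalone sketch of the same pattern with
std::atomic, where std::thread::id stands in for GetThreadSelf() and a nested
report on the same thread simply aborts:

    #include <atomic>
    #include <cstdlib>
    #include <thread>

    static std::atomic<std::thread::id> reporting_thread;

    void LockReport() {
      const std::thread::id self = std::this_thread::get_id();
      for (;;) {
        std::thread::id expected{};  // default-constructed id: "no thread"
        if (reporting_thread.compare_exchange_strong(
                expected, self, std::memory_order_relaxed))
          return;                    // claimed the reporter slot
        if (expected == self)
          std::abort();              // nested bug in the same thread
        std::this_thread::yield();   // another thread is reporting; wait
      }
    }

    void UnlockReport() { reporting_thread.store(std::thread::id{}); }

    int main() {
      LockReport();
      // ... emit the report ...
      UnlockReport();
    }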
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_symbolizer_rtems.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_symbolizer_rtems.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_symbolizer_rtems.h (revision 351984)
@@ -0,0 +1,40 @@
+//===-- sanitizer_symbolizer_rtems.h -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between various sanitizers' runtime libraries.
+//
+// Define RTEMS's string formats and limits for the markup symbolizer.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_SYMBOLIZER_RTEMS_H
+#define SANITIZER_SYMBOLIZER_RTEMS_H
+
+#include "sanitizer_internal_defs.h"
+
+namespace __sanitizer {
+
+// The Myriad RTEMS symbolizer currently only parses backtrace lines,
+// so use a format that the symbolizer understands. For other
+// markups, keep them the same as Fuchsia's.
+
+// This is used by UBSan for type names, and by ASan for global variable names.
+constexpr const char *kFormatDemangle = "{{{symbol:%s}}}";
+constexpr uptr kFormatDemangleMax = 1024; // Arbitrary.
+
+// Function name or equivalent from PC location.
+constexpr const char *kFormatFunction = "{{{pc:%p}}}";
+constexpr uptr kFormatFunctionMax = 64; // More than big enough for 64-bit hex.
+
+// Global variable name or equivalent from data memory address.
+constexpr const char *kFormatData = "{{{data:%p}}}";
+
+// One frame in a backtrace (printed on a line by itself).
+constexpr const char *kFormatFrame = " [%u] IP: %p";
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_SYMBOLIZER_RTEMS_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_symbolizer_rtems.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_symbolizer_win.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_symbolizer_win.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_symbolizer_win.cc (revision 351984)
@@ -0,0 +1,318 @@
+//===-- sanitizer_symbolizer_win.cc ---------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries.
+// Windows-specific implementation of symbolizer parts.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+#if SANITIZER_WINDOWS
+
+#include "sanitizer_dbghelp.h"
+#include "sanitizer_symbolizer_internal.h"
+
+namespace __sanitizer {
+
+decltype(::StackWalk64) *StackWalk64;
+decltype(::SymCleanup) *SymCleanup;
+decltype(::SymFromAddr) *SymFromAddr;
+decltype(::SymFunctionTableAccess64) *SymFunctionTableAccess64;
+decltype(::SymGetLineFromAddr64) *SymGetLineFromAddr64;
+decltype(::SymGetModuleBase64) *SymGetModuleBase64;
+decltype(::SymGetSearchPathW) *SymGetSearchPathW;
+decltype(::SymInitialize) *SymInitialize;
+decltype(::SymSetOptions) *SymSetOptions;
+decltype(::SymSetSearchPathW) *SymSetSearchPathW;
+decltype(::UnDecorateSymbolName) *UnDecorateSymbolName;
+
+namespace {
+
+class WinSymbolizerTool : public SymbolizerTool {
+ public:
+ // The constructor is provided to avoid synthesized memsets.
+ WinSymbolizerTool() {}
+
+ bool SymbolizePC(uptr addr, SymbolizedStack *stack) override;
+ bool SymbolizeData(uptr addr, DataInfo *info) override {
+ return false;
+ }
+ const char *Demangle(const char *name) override;
+};
+
+bool is_dbghelp_initialized = false;
+
+bool TrySymInitialize() {
+ SymSetOptions(SYMOPT_DEFERRED_LOADS | SYMOPT_UNDNAME | SYMOPT_LOAD_LINES);
+ return SymInitialize(GetCurrentProcess(), 0, TRUE);
+ // FIXME: We don't call SymCleanup() on exit yet - should we?
+}
+
+} // namespace
+
+// Initializes DbgHelp library, if it's not yet initialized. Calls to this
+// function should be synchronized with respect to other calls to DbgHelp API
+// (e.g. from WinSymbolizerTool).
+void InitializeDbgHelpIfNeeded() {
+ if (is_dbghelp_initialized)
+ return;
+
+ HMODULE dbghelp = LoadLibraryA("dbghelp.dll");
+ CHECK(dbghelp && "failed to load dbghelp.dll");
+
+#define DBGHELP_IMPORT(name) \
+ do { \
+ name = \
+ reinterpret_cast<decltype(::name) *>(GetProcAddress(dbghelp, #name)); \
+ CHECK(name != nullptr); \
+ } while (0)
+ DBGHELP_IMPORT(StackWalk64);
+ DBGHELP_IMPORT(SymCleanup);
+ DBGHELP_IMPORT(SymFromAddr);
+ DBGHELP_IMPORT(SymFunctionTableAccess64);
+ DBGHELP_IMPORT(SymGetLineFromAddr64);
+ DBGHELP_IMPORT(SymGetModuleBase64);
+ DBGHELP_IMPORT(SymGetSearchPathW);
+ DBGHELP_IMPORT(SymInitialize);
+ DBGHELP_IMPORT(SymSetOptions);
+ DBGHELP_IMPORT(SymSetSearchPathW);
+ DBGHELP_IMPORT(UnDecorateSymbolName);
+#undef DBGHELP_IMPORT
+
+ if (!TrySymInitialize()) {
+ // OK, maybe the client app has called SymInitialize already.
+ // That's a bit unfortunate for us as all the DbgHelp functions are
+ // single-threaded and we can't coordinate with the app.
+ // FIXME: Can we stop the other threads at this point?
+ // Anyways, we have to reconfigure stuff to make sure that SymInitialize
+ // has all the appropriate options set.
+ // Cross our fingers and reinitialize DbgHelp.
+ Report("*** WARNING: Failed to initialize DbgHelp! ***\n");
+ Report("*** Most likely this means that the app is already ***\n");
+ Report("*** using DbgHelp, possibly with incompatible flags. ***\n");
+ Report("*** Due to technical reasons, symbolization might crash ***\n");
+ Report("*** or produce wrong results. ***\n");
+ SymCleanup(GetCurrentProcess());
+ TrySymInitialize();
+ }
+ is_dbghelp_initialized = true;
+
+ // When an executable is run from a location different from the one where it
+ // was originally built, we may not see the nearby PDB files.
+ // To work around this, let's append the directory of the main module
+ // to the symbol search path. All the failures below are not fatal.
+ const size_t kSymPathSize = 2048;
+ static wchar_t path_buffer[kSymPathSize + 1 + MAX_PATH];
+ if (!SymGetSearchPathW(GetCurrentProcess(), path_buffer, kSymPathSize)) {
+ Report("*** WARNING: Failed to SymGetSearchPathW ***\n");
+ return;
+ }
+ size_t sz = wcslen(path_buffer);
+ if (sz) {
+ CHECK_EQ(0, wcscat_s(path_buffer, L";"));
+ sz++;
+ }
+ DWORD res = GetModuleFileNameW(NULL, path_buffer + sz, MAX_PATH);
+ if (res == 0 || res == MAX_PATH) {
+ Report("*** WARNING: Failed to getting the EXE directory ***\n");
+ return;
+ }
+ // Write the zero character in place of the last backslash to get the
+ // directory of the main module at the end of path_buffer.
+ wchar_t *last_bslash = wcsrchr(path_buffer + sz, L'\\');
+ CHECK_NE(last_bslash, 0);
+ *last_bslash = L'\0';
+ if (!SymSetSearchPathW(GetCurrentProcess(), path_buffer)) {
+ Report("*** WARNING: Failed to SymSetSearchPathW\n");
+ return;
+ }
+}
+
+bool WinSymbolizerTool::SymbolizePC(uptr addr, SymbolizedStack *frame) {
+ InitializeDbgHelpIfNeeded();
+
+ // See http://msdn.microsoft.com/en-us/library/ms680578(VS.85).aspx
+ char buffer[sizeof(SYMBOL_INFO) + MAX_SYM_NAME * sizeof(CHAR)];
+ PSYMBOL_INFO symbol = (PSYMBOL_INFO)buffer;
+ symbol->SizeOfStruct = sizeof(SYMBOL_INFO);
+ symbol->MaxNameLen = MAX_SYM_NAME;
+ DWORD64 offset = 0;
+ BOOL got_objname = SymFromAddr(GetCurrentProcess(),
+ (DWORD64)addr, &offset, symbol);
+ if (!got_objname)
+ return false;
+
+ DWORD unused;
+ IMAGEHLP_LINE64 line_info;
+ line_info.SizeOfStruct = sizeof(IMAGEHLP_LINE64);
+ BOOL got_fileline = SymGetLineFromAddr64(GetCurrentProcess(), (DWORD64)addr,
+ &unused, &line_info);
+ frame->info.function = internal_strdup(symbol->Name);
+ frame->info.function_offset = (uptr)offset;
+ if (got_fileline) {
+ frame->info.file = internal_strdup(line_info.FileName);
+ frame->info.line = line_info.LineNumber;
+ }
+ // Only consider this a successful symbolization attempt if we got file info.
+ // Otherwise, try llvm-symbolizer.
+ return got_fileline;
+}
+
+const char *WinSymbolizerTool::Demangle(const char *name) {
+ CHECK(is_dbghelp_initialized);
+ static char demangle_buffer[1000];
+ if (name[0] == '\01' &&
+ UnDecorateSymbolName(name + 1, demangle_buffer, sizeof(demangle_buffer),
+ UNDNAME_NAME_ONLY))
+ return demangle_buffer;
+ else
+ return name;
+}
+
+const char *Symbolizer::PlatformDemangle(const char *name) {
+ return name;
+}
+
+namespace {
+struct ScopedHandle {
+ ScopedHandle() : h_(nullptr) {}
+ explicit ScopedHandle(HANDLE h) : h_(h) {}
+ ~ScopedHandle() {
+ if (h_)
+ ::CloseHandle(h_);
+ }
+ HANDLE get() { return h_; }
+ HANDLE *receive() { return &h_; }
+ HANDLE release() {
+ HANDLE h = h_;
+ h_ = nullptr;
+ return h;
+ }
+ HANDLE h_;
+};
+} // namespace
+
+bool SymbolizerProcess::StartSymbolizerSubprocess() {
+ // Create inherited pipes for stdin and stdout.
+ ScopedHandle stdin_read, stdin_write;
+ ScopedHandle stdout_read, stdout_write;
+ SECURITY_ATTRIBUTES attrs;
+ attrs.nLength = sizeof(SECURITY_ATTRIBUTES);
+ attrs.bInheritHandle = TRUE;
+ attrs.lpSecurityDescriptor = nullptr;
+ if (!::CreatePipe(stdin_read.receive(), stdin_write.receive(), &attrs, 0) ||
+ !::CreatePipe(stdout_read.receive(), stdout_write.receive(), &attrs, 0)) {
+ VReport(2, "WARNING: %s CreatePipe failed (error code: %d)\n",
+ SanitizerToolName, path_, GetLastError());
+ return false;
+ }
+
+ // Don't inherit the writing end of stdin or the reading end of stdout.
+ if (!SetHandleInformation(stdin_write.get(), HANDLE_FLAG_INHERIT, 0) ||
+ !SetHandleInformation(stdout_read.get(), HANDLE_FLAG_INHERIT, 0)) {
+ VReport(2, "WARNING: %s SetHandleInformation failed (error code: %d)\n",
+ SanitizerToolName, path_, GetLastError());
+ return false;
+ }
+
+ // Compute the command line. Wrap double quotes around everything.
+ const char *argv[kArgVMax];
+ GetArgV(path_, argv);
+ InternalScopedString command_line(kMaxPathLength * 3);
+ for (int i = 0; argv[i]; i++) {
+ const char *arg = argv[i];
+ int arglen = internal_strlen(arg);
+ // Check that tool command lines are simple and that complete escaping is
+ // unnecessary.
+ CHECK(!internal_strchr(arg, '"') && "quotes in args unsupported");
+ CHECK(!internal_strstr(arg, "\\\\") &&
+ "double backslashes in args unsupported");
+ CHECK(arglen > 0 && arg[arglen - 1] != '\\' &&
+ "args ending in backslash and empty args unsupported");
+ command_line.append("\"%s\" ", arg);
+ }
+ VReport(3, "Launching symbolizer command: %s\n", command_line.data());
+
+ // Launch llvm-symbolizer with stdin and stdout redirected.
+ STARTUPINFOA si;
+ memset(&si, 0, sizeof(si));
+ si.cb = sizeof(si);
+ si.dwFlags |= STARTF_USESTDHANDLES;
+ si.hStdInput = stdin_read.get();
+ si.hStdOutput = stdout_write.get();
+ PROCESS_INFORMATION pi;
+ memset(&pi, 0, sizeof(pi));
+ if (!CreateProcessA(path_, // Executable
+ command_line.data(), // Command line
+ nullptr, // Process handle not inheritable
+ nullptr, // Thread handle not inheritable
+ TRUE, // Set handle inheritance to TRUE
+ 0, // Creation flags
+ nullptr, // Use parent's environment block
+ nullptr, // Use parent's starting directory
+ &si, &pi)) {
+ VReport(2, "WARNING: %s failed to create process for %s (error code: %d)\n",
+ SanitizerToolName, path_, GetLastError());
+ return false;
+ }
+
+ // Process creation succeeded, so transfer handle ownership into the fields.
+ input_fd_ = stdout_read.release();
+ output_fd_ = stdin_write.release();
+
+ // The llvm-symbolizer process is responsible for quitting itself when the
+ // stdin pipe is closed, so we don't need these handles. Close them to prevent
+ // leaks. If we ever want to try to kill the symbolizer process from the
+ // parent, we'll want to hang on to these handles.
+ CloseHandle(pi.hProcess);
+ CloseHandle(pi.hThread);
+ return true;
+}
+
+static void ChooseSymbolizerTools(IntrusiveList<SymbolizerTool> *list,
+ LowLevelAllocator *allocator) {
+ if (!common_flags()->symbolize) {
+ VReport(2, "Symbolizer is disabled.\n");
+ return;
+ }
+
+  // Add llvm-symbolizer in case the binary has DWARF debug info.
+ const char *user_path = common_flags()->external_symbolizer_path;
+ const char *path =
+ user_path ? user_path : FindPathToBinary("llvm-symbolizer.exe");
+ if (path) {
+ VReport(2, "Using llvm-symbolizer at %spath: %s\n",
+ user_path ? "user-specified " : "", path);
+ list->push_back(new(*allocator) LLVMSymbolizer(path, allocator));
+ } else {
+ if (user_path && user_path[0] == '\0') {
+ VReport(2, "External symbolizer is explicitly disabled.\n");
+ } else {
+ VReport(2, "External symbolizer is not present.\n");
+ }
+ }
+
+ // Add the dbghelp based symbolizer.
+ list->push_back(new(*allocator) WinSymbolizerTool());
+}
+
+Symbolizer *Symbolizer::PlatformInit() {
+ IntrusiveList<SymbolizerTool> list;
+ list.clear();
+ ChooseSymbolizerTools(&list, &symbolizer_allocator_);
+
+ return new(symbolizer_allocator_) Symbolizer(list);
+}
+
+void Symbolizer::LateInitialize() {
+ Symbolizer::GetOrInit();
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_WINDOWS
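The DBGHELP_IMPORT pattern above (a decltype-typed function pointer filled in
via GetProcAddress) works the same outside the runtime. A minimal Windows-only
sketch importing just SymInitialize and SymCleanup:

    #include <windows.h>
    #include <dbghelp.h>
    #include <cstdio>

    static decltype(::SymInitialize) *pSymInitialize;
    static decltype(::SymCleanup) *pSymCleanup;

    int main() {
      // Load dbghelp.dll at runtime; no import library is needed.
      HMODULE dbghelp = LoadLibraryA("dbghelp.dll");
      if (!dbghelp) return 1;
      pSymInitialize = reinterpret_cast<decltype(::SymInitialize) *>(
          GetProcAddress(dbghelp, "SymInitialize"));
      pSymCleanup = reinterpret_cast<decltype(::SymCleanup) *>(
          GetProcAddress(dbghelp, "SymCleanup"));
      if (!pSymInitialize || !pSymCleanup) return 1;
      if (pSymInitialize(GetCurrentProcess(), nullptr, TRUE)) {
        std::puts("dbghelp initialized");
        pSymCleanup(GetCurrentProcess());
      }
    }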
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_syscall_generic.inc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_syscall_generic.inc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_syscall_generic.inc (revision 351984)
@@ -0,0 +1,38 @@
+//===-- sanitizer_syscall_generic.inc ---------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Generic implementations of internal_syscall* and internal_iserror.
+//
+//===----------------------------------------------------------------------===//
+
+// NetBSD uses libc calls directly
+#if !SANITIZER_NETBSD
+
+#if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_OPENBSD || SANITIZER_SOLARIS
+# define SYSCALL(name) SYS_ ## name
+#else
+# define SYSCALL(name) __NR_ ## name
+#endif
+
+#if defined(__x86_64__) && (SANITIZER_FREEBSD || SANITIZER_MAC)
+# define internal_syscall __syscall
+# else
+# define internal_syscall syscall
+#endif
+
+#endif
+
+bool internal_iserror(uptr retval, int *rverrno) {
+ if (retval == (uptr)-1) {
+ if (rverrno)
+ *rverrno = errno;
+ return true;
+ } else {
+ return false;
+ }
+}
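The generic internal_iserror above encodes the usual libc contract: failure is
reported as -1 and the error code is left in errno. A minimal POSIX sketch of
the same check around a plain open(2) call:

    #include <cerrno>
    #include <cstdio>
    #include <fcntl.h>
    #include <unistd.h>

    // Same shape as internal_iserror: -1 means failure, details in errno.
    static bool IsError(long retval, int *rverrno) {
      if (retval == -1) {
        if (rverrno) *rverrno = errno;
        return true;
      }
      return false;
    }

    int main() {
      int err = 0;
      long fd = open("/definitely/missing/path", O_RDONLY);
      if (IsError(fd, &err))
        std::printf("open failed, errno=%d\n", err);  // typically ENOENT
      else
        close(static_cast<int>(fd));
    }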
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_syscall_linux_aarch64.inc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_syscall_linux_aarch64.inc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_syscall_linux_aarch64.inc (revision 351984)
@@ -0,0 +1,137 @@
+//===-- sanitizer_syscall_linux_aarch64.inc --------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementations of internal_syscall and internal_iserror for Linux/aarch64.
+//
+//===----------------------------------------------------------------------===//
+
+#define SYSCALL(name) __NR_ ## name
+
+static uptr __internal_syscall(u64 nr) {
+ register u64 x8 asm("x8") = nr;
+ register u64 x0 asm("x0");
+ asm volatile("svc 0"
+ : "=r"(x0)
+ : "r"(x8)
+ : "memory", "cc");
+ return x0;
+}
+#define __internal_syscall0(n) \
+ (__internal_syscall)(n)
+
+static uptr __internal_syscall(u64 nr, u64 arg1) {
+ register u64 x8 asm("x8") = nr;
+ register u64 x0 asm("x0") = arg1;
+ asm volatile("svc 0"
+ : "=r"(x0)
+ : "r"(x8), "0"(x0)
+ : "memory", "cc");
+ return x0;
+}
+#define __internal_syscall1(n, a1) \
+ (__internal_syscall)(n, (u64)(a1))
+
+static uptr __internal_syscall(u64 nr, u64 arg1, long arg2) {
+ register u64 x8 asm("x8") = nr;
+ register u64 x0 asm("x0") = arg1;
+ register u64 x1 asm("x1") = arg2;
+ asm volatile("svc 0"
+ : "=r"(x0)
+ : "r"(x8), "0"(x0), "r"(x1)
+ : "memory", "cc");
+ return x0;
+}
+#define __internal_syscall2(n, a1, a2) \
+ (__internal_syscall)(n, (u64)(a1), (long)(a2))
+
+static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3) {
+ register u64 x8 asm("x8") = nr;
+ register u64 x0 asm("x0") = arg1;
+ register u64 x1 asm("x1") = arg2;
+ register u64 x2 asm("x2") = arg3;
+ asm volatile("svc 0"
+ : "=r"(x0)
+ : "r"(x8), "0"(x0), "r"(x1), "r"(x2)
+ : "memory", "cc");
+ return x0;
+}
+#define __internal_syscall3(n, a1, a2, a3) \
+ (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3))
+
+static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3,
+ u64 arg4) {
+ register u64 x8 asm("x8") = nr;
+ register u64 x0 asm("x0") = arg1;
+ register u64 x1 asm("x1") = arg2;
+ register u64 x2 asm("x2") = arg3;
+ register u64 x3 asm("x3") = arg4;
+ asm volatile("svc 0"
+ : "=r"(x0)
+ : "r"(x8), "0"(x0), "r"(x1), "r"(x2), "r"(x3)
+ : "memory", "cc");
+ return x0;
+}
+#define __internal_syscall4(n, a1, a2, a3, a4) \
+ (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4))
+
+static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3,
+ u64 arg4, long arg5) {
+ register u64 x8 asm("x8") = nr;
+ register u64 x0 asm("x0") = arg1;
+ register u64 x1 asm("x1") = arg2;
+ register u64 x2 asm("x2") = arg3;
+ register u64 x3 asm("x3") = arg4;
+ register u64 x4 asm("x4") = arg5;
+ asm volatile("svc 0"
+ : "=r"(x0)
+ : "r"(x8), "0"(x0), "r"(x1), "r"(x2), "r"(x3), "r"(x4)
+ : "memory", "cc");
+ return x0;
+}
+#define __internal_syscall5(n, a1, a2, a3, a4, a5) \
+ (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \
+ (u64)(a5))
+
+static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3,
+ u64 arg4, long arg5, long arg6) {
+ register u64 x8 asm("x8") = nr;
+ register u64 x0 asm("x0") = arg1;
+ register u64 x1 asm("x1") = arg2;
+ register u64 x2 asm("x2") = arg3;
+ register u64 x3 asm("x3") = arg4;
+ register u64 x4 asm("x4") = arg5;
+ register u64 x5 asm("x5") = arg6;
+ asm volatile("svc 0"
+ : "=r"(x0)
+ : "r"(x8), "0"(x0), "r"(x1), "r"(x2), "r"(x3), "r"(x4), "r"(x5)
+ : "memory", "cc");
+ return x0;
+}
+#define __internal_syscall6(n, a1, a2, a3, a4, a5, a6) \
+ (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \
+ (u64)(a5), (long)(a6))
+
+#define __SYSCALL_NARGS_X(a1, a2, a3, a4, a5, a6, a7, a8, n, ...) n
+#define __SYSCALL_NARGS(...) \
+ __SYSCALL_NARGS_X(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0, )
+#define __SYSCALL_CONCAT_X(a, b) a##b
+#define __SYSCALL_CONCAT(a, b) __SYSCALL_CONCAT_X(a, b)
+#define __SYSCALL_DISP(b, ...) \
+ __SYSCALL_CONCAT(b, __SYSCALL_NARGS(__VA_ARGS__))(__VA_ARGS__)
+
+#define internal_syscall(...) __SYSCALL_DISP(__internal_syscall, __VA_ARGS__)
+
+// Helper function used to avoid clobbering errno.
+bool internal_iserror(uptr retval, int *rverrno) {
+ if (retval >= (uptr)-4095) {
+ if (rverrno)
+ *rverrno = -retval;
+ return true;
+ }
+ return false;
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_syscall_linux_aarch64.inc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_syscall_linux_arm.inc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_syscall_linux_arm.inc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_syscall_linux_arm.inc (revision 351984)
@@ -0,0 +1,137 @@
+//===-- sanitizer_syscall_linux_arm.inc -------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementations of internal_syscall and internal_iserror for Linux/arm.
+//
+//===----------------------------------------------------------------------===//
+
+#define SYSCALL(name) __NR_ ## name
+
+static uptr __internal_syscall(u32 nr) {
+ register u32 r8 asm("r7") = nr;
+ register u32 r0 asm("r0");
+ asm volatile("swi #0"
+ : "=r"(r0)
+ : "r"(r8)
+ : "memory", "cc");
+ return r0;
+}
+#define __internal_syscall0(n) \
+ (__internal_syscall)(n)
+
+static uptr __internal_syscall(u32 nr, u32 arg1) {
+ register u32 r8 asm("r7") = nr;
+ register u32 r0 asm("r0") = arg1;
+ asm volatile("swi #0"
+ : "=r"(r0)
+ : "r"(r8), "0"(r0)
+ : "memory", "cc");
+ return r0;
+}
+#define __internal_syscall1(n, a1) \
+ (__internal_syscall)(n, (u32)(a1))
+
+static uptr __internal_syscall(u32 nr, u32 arg1, long arg2) {
+ register u32 r8 asm("r7") = nr;
+ register u32 r0 asm("r0") = arg1;
+ register u32 r1 asm("r1") = arg2;
+ asm volatile("swi #0"
+ : "=r"(r0)
+ : "r"(r8), "0"(r0), "r"(r1)
+ : "memory", "cc");
+ return r0;
+}
+#define __internal_syscall2(n, a1, a2) \
+ (__internal_syscall)(n, (u32)(a1), (long)(a2))
+
+static uptr __internal_syscall(u32 nr, u32 arg1, long arg2, long arg3) {
+ register u32 r8 asm("r7") = nr;
+ register u32 r0 asm("r0") = arg1;
+ register u32 r1 asm("r1") = arg2;
+ register u32 r2 asm("r2") = arg3;
+ asm volatile("swi #0"
+ : "=r"(r0)
+ : "r"(r8), "0"(r0), "r"(r1), "r"(r2)
+ : "memory", "cc");
+ return r0;
+}
+#define __internal_syscall3(n, a1, a2, a3) \
+ (__internal_syscall)(n, (u32)(a1), (long)(a2), (long)(a3))
+
+static uptr __internal_syscall(u32 nr, u32 arg1, long arg2, long arg3,
+ u32 arg4) {
+ register u32 r8 asm("r7") = nr;
+ register u32 r0 asm("r0") = arg1;
+ register u32 r1 asm("r1") = arg2;
+ register u32 r2 asm("r2") = arg3;
+ register u32 r3 asm("r3") = arg4;
+ asm volatile("swi #0"
+ : "=r"(r0)
+ : "r"(r8), "0"(r0), "r"(r1), "r"(r2), "r"(r3)
+ : "memory", "cc");
+ return r0;
+}
+#define __internal_syscall4(n, a1, a2, a3, a4) \
+ (__internal_syscall)(n, (u32)(a1), (long)(a2), (long)(a3), (long)(a4))
+
+static uptr __internal_syscall(u32 nr, u32 arg1, long arg2, long arg3,
+ u32 arg4, long arg5) {
+ register u32 r8 asm("r7") = nr;
+ register u32 r0 asm("r0") = arg1;
+ register u32 r1 asm("r1") = arg2;
+ register u32 r2 asm("r2") = arg3;
+ register u32 r3 asm("r3") = arg4;
+ register u32 r4 asm("r4") = arg5;
+ asm volatile("swi #0"
+ : "=r"(r0)
+ : "r"(r8), "0"(r0), "r"(r1), "r"(r2), "r"(r3), "r"(r4)
+ : "memory", "cc");
+ return r0;
+}
+#define __internal_syscall5(n, a1, a2, a3, a4, a5) \
+ (__internal_syscall)(n, (u32)(a1), (long)(a2), (long)(a3), (long)(a4), \
+ (u32)(a5))
+
+static uptr __internal_syscall(u32 nr, u32 arg1, long arg2, long arg3,
+ u32 arg4, long arg5, long arg6) {
+ register u32 r8 asm("r7") = nr;
+ register u32 r0 asm("r0") = arg1;
+ register u32 r1 asm("r1") = arg2;
+ register u32 r2 asm("r2") = arg3;
+ register u32 r3 asm("r3") = arg4;
+ register u32 r4 asm("r4") = arg5;
+ register u32 r5 asm("r5") = arg6;
+ asm volatile("swi #0"
+ : "=r"(r0)
+ : "r"(r8), "0"(r0), "r"(r1), "r"(r2), "r"(r3), "r"(r4), "r"(r5)
+ : "memory", "cc");
+ return r0;
+}
+#define __internal_syscall6(n, a1, a2, a3, a4, a5, a6) \
+ (__internal_syscall)(n, (u32)(a1), (long)(a2), (long)(a3), (long)(a4), \
+ (u32)(a5), (long)(a6))
+
+#define __SYSCALL_NARGS_X(a1, a2, a3, a4, a5, a6, a7, a8, n, ...) n
+#define __SYSCALL_NARGS(...) \
+ __SYSCALL_NARGS_X(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0, )
+#define __SYSCALL_CONCAT_X(a, b) a##b
+#define __SYSCALL_CONCAT(a, b) __SYSCALL_CONCAT_X(a, b)
+#define __SYSCALL_DISP(b, ...) \
+ __SYSCALL_CONCAT(b, __SYSCALL_NARGS(__VA_ARGS__))(__VA_ARGS__)
+
+#define internal_syscall(...) __SYSCALL_DISP(__internal_syscall, __VA_ARGS__)
+
+// Helper function used to avoid clobbering errno.
+bool internal_iserror(uptr retval, int *rverrno) {
+ if (retval >= (uptr)-4095) {
+ if (rverrno)
+ *rverrno = -retval;
+ return true;
+ }
+ return false;
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_syscall_linux_arm.inc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_syscall_linux_x86_64.inc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_syscall_linux_x86_64.inc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_syscall_linux_x86_64.inc (revision 351984)
@@ -0,0 +1,90 @@
+//===-- sanitizer_syscall_linux_x86_64.inc ----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementations of internal_syscall and internal_iserror for Linux/x86_64.
+//
+//===----------------------------------------------------------------------===//
+
+#define SYSCALL(name) __NR_ ## name
+
+static uptr internal_syscall(u64 nr) {
+ u64 retval;
+ asm volatile("syscall" : "=a"(retval) : "a"(nr) : "rcx", "r11",
+ "memory", "cc");
+ return retval;
+}
+
+template <typename T1>
+static uptr internal_syscall(u64 nr, T1 arg1) {
+ u64 retval;
+ asm volatile("syscall" : "=a"(retval) : "a"(nr), "D"((u64)arg1) :
+ "rcx", "r11", "memory", "cc");
+ return retval;
+}
+
+template <typename T1, typename T2>
+static uptr internal_syscall(u64 nr, T1 arg1, T2 arg2) {
+ u64 retval;
+ asm volatile("syscall" : "=a"(retval) : "a"(nr), "D"((u64)arg1),
+ "S"((u64)arg2) : "rcx", "r11", "memory", "cc");
+ return retval;
+}
+
+template <typename T1, typename T2, typename T3>
+static uptr internal_syscall(u64 nr, T1 arg1, T2 arg2, T3 arg3) {
+ u64 retval;
+ asm volatile("syscall" : "=a"(retval) : "a"(nr), "D"((u64)arg1),
+ "S"((u64)arg2), "d"((u64)arg3) : "rcx", "r11", "memory", "cc");
+ return retval;
+}
+
+template <typename T1, typename T2, typename T3, typename T4>
+static uptr internal_syscall(u64 nr, T1 arg1, T2 arg2, T3 arg3, T4 arg4) {
+ u64 retval;
+ asm volatile("mov %5, %%r10;"
+ "syscall" : "=a"(retval) : "a"(nr), "D"((u64)arg1),
+ "S"((u64)arg2), "d"((u64)arg3), "r"((u64)arg4) :
+ "rcx", "r11", "r10", "memory", "cc");
+ return retval;
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5>
+static uptr internal_syscall(u64 nr, T1 arg1, T2 arg2, T3 arg3, T4 arg4,
+ T5 arg5) {
+ u64 retval;
+ asm volatile("mov %5, %%r10;"
+ "mov %6, %%r8;"
+ "syscall" : "=a"(retval) : "a"(nr), "D"((u64)arg1),
+ "S"((u64)arg2), "d"((u64)arg3), "r"((u64)arg4), "r"((u64)arg5) :
+ "rcx", "r11", "r10", "r8", "memory", "cc");
+ return retval;
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+ typename T6>
+static uptr internal_syscall(u64 nr, T1 arg1, T2 arg2, T3 arg3, T4 arg4,
+ T5 arg5, T6 arg6) {
+ u64 retval;
+ asm volatile("mov %5, %%r10;"
+ "mov %6, %%r8;"
+ "mov %7, %%r9;"
+ "syscall" : "=a"(retval) : "a"(nr), "D"((u64)arg1),
+ "S"((u64)arg2), "d"((u64)arg3), "r"((u64)arg4), "r"((u64)arg5),
+ "r"((u64)arg6) : "rcx", "r11", "r10", "r8", "r9",
+ "memory", "cc");
+ return retval;
+}
+
+bool internal_iserror(uptr retval, int *rverrno) {
+ if (retval >= (uptr)-4095) {
+ if (rverrno)
+ *rverrno = -retval;
+ return true;
+ }
+ return false;
+}
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_syscalls_netbsd.inc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_syscalls_netbsd.inc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_syscalls_netbsd.inc (revision 351984)
@@ -0,0 +1,3781 @@
+//===-- sanitizer_syscalls_netbsd.inc ---------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Common syscalls handlers for tools like AddressSanitizer,
+// ThreadSanitizer, MemorySanitizer, etc.
+//
+// This file should be included into the tool's interceptor file,
+// which has to define its own macros:
+// COMMON_SYSCALL_PRE_READ_RANGE
+// Called in prehook for regions that will be read by the kernel and
+// must be initialized.
+// COMMON_SYSCALL_PRE_WRITE_RANGE
+// Called in prehook for regions that will be written to by the kernel
+// and must be addressable. The actual write range may be smaller than
+// reported in the prehook. See POST_WRITE_RANGE.
+// COMMON_SYSCALL_POST_READ_RANGE
+// Called in posthook for regions that were read by the kernel. Rarely
+// useful, since the region was already checked in the prehook.
+// COMMON_SYSCALL_POST_WRITE_RANGE
+// Called in posthook for regions that were written to by the kernel
+// and are now initialized.
+// COMMON_SYSCALL_ACQUIRE(addr)
+// Acquire memory visibility from addr.
+// COMMON_SYSCALL_RELEASE(addr)
+// Release memory visibility to addr.
+// COMMON_SYSCALL_FD_CLOSE(fd)
+// Called before closing file descriptor fd.
+// COMMON_SYSCALL_FD_ACQUIRE(fd)
+// Acquire memory visibility from fd.
+// COMMON_SYSCALL_FD_RELEASE(fd)
+// Release memory visibility to fd.
+// COMMON_SYSCALL_PRE_FORK()
+// Called before fork syscall.
+// COMMON_SYSCALL_POST_FORK(long long res)
+// Called after fork syscall.
+//
+// DO NOT EDIT! THIS FILE HAS BEEN GENERATED!
+//
+// Generated with: generate_netbsd_syscalls.awk
+// Generated date: 2018-10-30
+// Generated from: syscalls.master,v 1.293 2018/07/31 13:00:13 rjs Exp
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+#if SANITIZER_NETBSD
+
+#include "sanitizer_libc.h"
+
+#define PRE_SYSCALL(name) \
+ SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_syscall_pre_impl_##name
+#define PRE_READ(p, s) COMMON_SYSCALL_PRE_READ_RANGE(p, s)
+#define PRE_WRITE(p, s) COMMON_SYSCALL_PRE_WRITE_RANGE(p, s)
+
+#define POST_SYSCALL(name) \
+ SANITIZER_INTERFACE_ATTRIBUTE void __sanitizer_syscall_post_impl_##name
+#define POST_READ(p, s) COMMON_SYSCALL_POST_READ_RANGE(p, s)
+#define POST_WRITE(p, s) COMMON_SYSCALL_POST_WRITE_RANGE(p, s)
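+
+// Illustrative expansion (editor's note, not part of the generated file):
+// PRE_SYSCALL(read)(long long fd_, void *buf_, long long nbyte_) declares the
+// exported hook
+//
+//   SANITIZER_INTERFACE_ATTRIBUTE
+//   void __sanitizer_syscall_pre_impl_read(long long fd_, void *buf_,
+//                                          long long nbyte_);
+//
+// which the public syscall-hook headers are expected to wrap for user code.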
+
+#ifndef COMMON_SYSCALL_ACQUIRE
+#define COMMON_SYSCALL_ACQUIRE(addr) ((void)(addr))
+#endif
+
+#ifndef COMMON_SYSCALL_RELEASE
+#define COMMON_SYSCALL_RELEASE(addr) ((void)(addr))
+#endif
+
+#ifndef COMMON_SYSCALL_FD_CLOSE
+#define COMMON_SYSCALL_FD_CLOSE(fd) ((void)(fd))
+#endif
+
+#ifndef COMMON_SYSCALL_FD_ACQUIRE
+#define COMMON_SYSCALL_FD_ACQUIRE(fd) ((void)(fd))
+#endif
+
+#ifndef COMMON_SYSCALL_FD_RELEASE
+#define COMMON_SYSCALL_FD_RELEASE(fd) ((void)(fd))
+#endif
+
+#ifndef COMMON_SYSCALL_PRE_FORK
+#define COMMON_SYSCALL_PRE_FORK() \
+ {}
+#endif
+
+#ifndef COMMON_SYSCALL_POST_FORK
+#define COMMON_SYSCALL_POST_FORK(res) \
+ {}
+#endif
+
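+// Editor's sketch (illustrative, not part of the generated file): the four
+// PRE/POST range macros above have no defaults and must come from the
+// including tool. Assuming hypothetical checker hooks CheckRead, CheckWrite
+// and MarkInitialized, a minimal setup could look like:
+//
+//   #define COMMON_SYSCALL_PRE_READ_RANGE(p, s) \
+//     do { if (p) CheckRead((const void *)(p), (uptr)(s)); } while (0)
+//   #define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) \
+//     do { if (p) CheckWrite((void *)(p), (uptr)(s)); } while (0)
+//   #define COMMON_SYSCALL_POST_READ_RANGE(p, s) ((void)(p), (void)(s))
+//   #define COMMON_SYSCALL_POST_WRITE_RANGE(p, s) \
+//     do { if (p) MarkInitialized((void *)(p), (uptr)(s)); } while (0)
+//   #include "sanitizer_syscalls_netbsd.inc"
+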
+// FIXME: do some kind of PRE_READ for all syscall arguments (int(s) and such).
+
+extern "C" {
+#define SYS_MAXSYSARGS 8
+PRE_SYSCALL(syscall)(long long code_, long long args_[SYS_MAXSYSARGS]) {
+ /* Nothing to do */
+}
+POST_SYSCALL(syscall)
+(long long res, long long code_, long long args_[SYS_MAXSYSARGS]) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(exit)(long long rval_) { /* Nothing to do */ }
+POST_SYSCALL(exit)(long long res, long long rval_) { /* Nothing to do */ }
+PRE_SYSCALL(fork)(void) { COMMON_SYSCALL_PRE_FORK(); }
+POST_SYSCALL(fork)(long long res) { COMMON_SYSCALL_POST_FORK(res); }
+PRE_SYSCALL(read)(long long fd_, void *buf_, long long nbyte_) {
+ if (buf_) {
+ PRE_WRITE(buf_, nbyte_);
+ }
+}
+POST_SYSCALL(read)(long long res, long long fd_, void *buf_, long long nbyte_) {
+ if (res > 0) {
+ POST_WRITE(buf_, res);
+ }
+}
+PRE_SYSCALL(write)(long long fd_, void *buf_, long long nbyte_) {
+ if (buf_) {
+ PRE_READ(buf_, nbyte_);
+ }
+}
+POST_SYSCALL(write)
+(long long res, long long fd_, void *buf_, long long nbyte_) {
+ if (res > 0) {
+ POST_READ(buf_, res);
+ }
+}
+PRE_SYSCALL(open)(void *path_, long long flags_, long long mode_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(open)
+(long long res, void *path_, long long flags_, long long mode_) {
+ if (res > 0) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ }
+}
+PRE_SYSCALL(close)(long long fd_) { COMMON_SYSCALL_FD_CLOSE((int)fd_); }
+POST_SYSCALL(close)(long long res, long long fd_) { /* Nothing to do */ }
+PRE_SYSCALL(compat_50_wait4)
+(long long pid_, void *status_, long long options_, void *rusage_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_50_wait4)
+(long long res, long long pid_, void *status_, long long options_,
+ void *rusage_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_43_ocreat)(void *path_, long long mode_) { /* TODO */ }
+POST_SYSCALL(compat_43_ocreat)(long long res, void *path_, long long mode_) {
+ /* TODO */
+}
+PRE_SYSCALL(link)(void *path_, void *link_) {
+ const char *path = (const char *)path_;
+ const char *link = (const char *)link_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ if (link) {
+    PRE_READ(link, __sanitizer::internal_strlen(link) + 1);
+ }
+}
+POST_SYSCALL(link)(long long res, void *path_, void *link_) {
+ if (res == 0) {
+ const char *path = (const char *)path_;
+ const char *link = (const char *)link_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ if (link) {
+      POST_READ(link, __sanitizer::internal_strlen(link) + 1);
+ }
+ }
+}
+PRE_SYSCALL(unlink)(void *path_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(unlink)(long long res, void *path_) {
+ if (res == 0) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ }
+}
+/* syscall 11 has been skipped */
+PRE_SYSCALL(chdir)(void *path_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(chdir)(long long res, void *path_) {
+ if (res == 0) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ }
+}
+PRE_SYSCALL(fchdir)(long long fd_) { /* Nothing to do */ }
+POST_SYSCALL(fchdir)(long long res, long long fd_) { /* Nothing to do */ }
+PRE_SYSCALL(compat_50_mknod)(void *path_, long long mode_, long long dev_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_50_mknod)
+(long long res, void *path_, long long mode_, long long dev_) {
+ /* TODO */
+}
+PRE_SYSCALL(chmod)(void *path_, long long mode_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(chmod)(long long res, void *path_, long long mode_) {
+ if (res == 0) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ }
+}
+PRE_SYSCALL(chown)(void *path_, long long uid_, long long gid_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(chown)
+(long long res, void *path_, long long uid_, long long gid_) {
+ if (res == 0) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ }
+}
+PRE_SYSCALL(break)(void *nsize_) { /* Nothing to do */ }
+POST_SYSCALL(break)(long long res, void *nsize_) { /* Nothing to do */ }
+PRE_SYSCALL(compat_20_getfsstat)
+(void *buf_, long long bufsize_, long long flags_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_20_getfsstat)
+(long long res, void *buf_, long long bufsize_, long long flags_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_43_olseek)
+(long long fd_, long long offset_, long long whence_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_43_olseek)
+(long long res, long long fd_, long long offset_, long long whence_) {
+ /* TODO */
+}
+PRE_SYSCALL(getpid)(void) { /* Nothing to do */ }
+POST_SYSCALL(getpid)(long long res) { /* Nothing to do */ }
+PRE_SYSCALL(compat_40_mount)
+(void *type_, void *path_, long long flags_, void *data_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_40_mount)
+(long long res, void *type_, void *path_, long long flags_, void *data_) {
+ /* TODO */
+}
+PRE_SYSCALL(unmount)(void *path_, long long flags_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(unmount)(long long res, void *path_, long long flags_) {
+ if (res == 0) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ }
+}
+PRE_SYSCALL(setuid)(long long uid_) { /* Nothing to do */ }
+POST_SYSCALL(setuid)(long long res, long long uid_) { /* Nothing to do */ }
+PRE_SYSCALL(getuid)(void) { /* Nothing to do */ }
+POST_SYSCALL(getuid)(long long res) { /* Nothing to do */ }
+PRE_SYSCALL(geteuid)(void) { /* Nothing to do */ }
+POST_SYSCALL(geteuid)(long long res) { /* Nothing to do */ }
+PRE_SYSCALL(ptrace)
+(long long req_, long long pid_, void *addr_, long long data_) {
+ if (req_ == ptrace_pt_io) {
+ struct __sanitizer_ptrace_io_desc *addr =
+ (struct __sanitizer_ptrace_io_desc *)addr_;
+ PRE_READ(addr, struct_ptrace_ptrace_io_desc_struct_sz);
+ if (addr->piod_op == ptrace_piod_write_d ||
+ addr->piod_op == ptrace_piod_write_i) {
+ PRE_READ(addr->piod_addr, addr->piod_len);
+ }
+ if (addr->piod_op == ptrace_piod_read_d ||
+ addr->piod_op == ptrace_piod_read_i ||
+ addr->piod_op == ptrace_piod_read_auxv) {
+ PRE_WRITE(addr->piod_addr, addr->piod_len);
+ }
+ } else if (req_ == ptrace_pt_lwpinfo) {
+ struct __sanitizer_ptrace_lwpinfo *addr =
+ (struct __sanitizer_ptrace_lwpinfo *)addr_;
+ PRE_READ(&addr->pl_lwpid, sizeof(__sanitizer_lwpid_t));
+ PRE_WRITE(addr, struct_ptrace_ptrace_lwpinfo_struct_sz);
+ } else if (req_ == ptrace_pt_set_event_mask) {
+ PRE_READ(addr_, struct_ptrace_ptrace_event_struct_sz);
+ } else if (req_ == ptrace_pt_get_event_mask) {
+ PRE_WRITE(addr_, struct_ptrace_ptrace_event_struct_sz);
+ } else if (req_ == ptrace_pt_set_siginfo) {
+ PRE_READ(addr_, struct_ptrace_ptrace_siginfo_struct_sz);
+ } else if (req_ == ptrace_pt_get_siginfo) {
+ PRE_WRITE(addr_, struct_ptrace_ptrace_siginfo_struct_sz);
+ } else if (req_ == ptrace_pt_setregs) {
+ PRE_READ(addr_, struct_ptrace_reg_struct_sz);
+ } else if (req_ == ptrace_pt_getregs) {
+ PRE_WRITE(addr_, struct_ptrace_reg_struct_sz);
+ } else if (req_ == ptrace_pt_setfpregs) {
+ PRE_READ(addr_, struct_ptrace_fpreg_struct_sz);
+ } else if (req_ == ptrace_pt_getfpregs) {
+ PRE_WRITE(addr_, struct_ptrace_fpreg_struct_sz);
+ } else if (req_ == ptrace_pt_setdbregs) {
+ PRE_READ(addr_, struct_ptrace_dbreg_struct_sz);
+ } else if (req_ == ptrace_pt_getdbregs) {
+ PRE_WRITE(addr_, struct_ptrace_dbreg_struct_sz);
+ }
+}
+POST_SYSCALL(ptrace)
+(long long res, long long req_, long long pid_, void *addr_, long long data_) {
+ if (res == 0) {
+ if (req_ == ptrace_pt_io) {
+ struct __sanitizer_ptrace_io_desc *addr =
+ (struct __sanitizer_ptrace_io_desc *)addr_;
+ POST_READ(addr, struct_ptrace_ptrace_io_desc_struct_sz);
+ if (addr->piod_op == ptrace_piod_write_d ||
+ addr->piod_op == ptrace_piod_write_i) {
+ POST_READ(addr->piod_addr, addr->piod_len);
+ }
+ if (addr->piod_op == ptrace_piod_read_d ||
+ addr->piod_op == ptrace_piod_read_i ||
+ addr->piod_op == ptrace_piod_read_auxv) {
+ POST_WRITE(addr->piod_addr, addr->piod_len);
+ }
+ } else if (req_ == ptrace_pt_lwpinfo) {
+ struct __sanitizer_ptrace_lwpinfo *addr =
+ (struct __sanitizer_ptrace_lwpinfo *)addr_;
+ POST_READ(&addr->pl_lwpid, sizeof(__sanitizer_lwpid_t));
+ POST_WRITE(addr, struct_ptrace_ptrace_lwpinfo_struct_sz);
+ } else if (req_ == ptrace_pt_set_event_mask) {
+ POST_READ(addr_, struct_ptrace_ptrace_event_struct_sz);
+ } else if (req_ == ptrace_pt_get_event_mask) {
+ POST_WRITE(addr_, struct_ptrace_ptrace_event_struct_sz);
+ } else if (req_ == ptrace_pt_set_siginfo) {
+ POST_READ(addr_, struct_ptrace_ptrace_siginfo_struct_sz);
+ } else if (req_ == ptrace_pt_get_siginfo) {
+ POST_WRITE(addr_, struct_ptrace_ptrace_siginfo_struct_sz);
+ } else if (req_ == ptrace_pt_setregs) {
+ POST_READ(addr_, struct_ptrace_reg_struct_sz);
+ } else if (req_ == ptrace_pt_getregs) {
+ POST_WRITE(addr_, struct_ptrace_reg_struct_sz);
+ } else if (req_ == ptrace_pt_setfpregs) {
+ POST_READ(addr_, struct_ptrace_fpreg_struct_sz);
+ } else if (req_ == ptrace_pt_getfpregs) {
+ POST_WRITE(addr_, struct_ptrace_fpreg_struct_sz);
+ } else if (req_ == ptrace_pt_setdbregs) {
+ POST_READ(addr_, struct_ptrace_dbreg_struct_sz);
+ } else if (req_ == ptrace_pt_getdbregs) {
+ POST_WRITE(addr_, struct_ptrace_dbreg_struct_sz);
+ }
+ }
+}
+PRE_SYSCALL(recvmsg)(long long s_, void *msg_, long long flags_) {
+ PRE_WRITE(msg_, sizeof(__sanitizer_msghdr));
+}
+POST_SYSCALL(recvmsg)
+(long long res, long long s_, void *msg_, long long flags_) {
+ if (res > 0) {
+ POST_WRITE(msg_, sizeof(__sanitizer_msghdr));
+ }
+}
+PRE_SYSCALL(sendmsg)(long long s_, void *msg_, long long flags_) {
+ PRE_READ(msg_, sizeof(__sanitizer_msghdr));
+}
+POST_SYSCALL(sendmsg)
+(long long res, long long s_, void *msg_, long long flags_) {
+ if (res > 0) {
+ POST_READ(msg_, sizeof(__sanitizer_msghdr));
+ }
+}
+PRE_SYSCALL(recvfrom)
+(long long s_, void *buf_, long long len_, long long flags_, void *from_,
+ void *fromlenaddr_) {
+ PRE_WRITE(buf_, len_);
+ PRE_WRITE(from_, struct_sockaddr_sz);
+ PRE_WRITE(fromlenaddr_, sizeof(__sanitizer_socklen_t));
+}
+POST_SYSCALL(recvfrom)
+(long long res, long long s_, void *buf_, long long len_, long long flags_,
+ void *from_, void *fromlenaddr_) {
+ if (res >= 0) {
+ POST_WRITE(buf_, res);
+ POST_WRITE(from_, struct_sockaddr_sz);
+ POST_WRITE(fromlenaddr_, sizeof(__sanitizer_socklen_t));
+ }
+}
+PRE_SYSCALL(accept)(long long s_, void *name_, void *anamelen_) {
+ PRE_WRITE(name_, struct_sockaddr_sz);
+ PRE_WRITE(anamelen_, sizeof(__sanitizer_socklen_t));
+}
+POST_SYSCALL(accept)
+(long long res, long long s_, void *name_, void *anamelen_) {
+ if (res == 0) {
+ POST_WRITE(name_, struct_sockaddr_sz);
+ POST_WRITE(anamelen_, sizeof(__sanitizer_socklen_t));
+ }
+}
+PRE_SYSCALL(getpeername)(long long fdes_, void *asa_, void *alen_) {
+ PRE_WRITE(asa_, struct_sockaddr_sz);
+ PRE_WRITE(alen_, sizeof(__sanitizer_socklen_t));
+}
+POST_SYSCALL(getpeername)
+(long long res, long long fdes_, void *asa_, void *alen_) {
+ if (res == 0) {
+ POST_WRITE(asa_, struct_sockaddr_sz);
+ POST_WRITE(alen_, sizeof(__sanitizer_socklen_t));
+ }
+}
+PRE_SYSCALL(getsockname)(long long fdes_, void *asa_, void *alen_) {
+ PRE_WRITE(asa_, struct_sockaddr_sz);
+ PRE_WRITE(alen_, sizeof(__sanitizer_socklen_t));
+}
+POST_SYSCALL(getsockname)
+(long long res, long long fdes_, void *asa_, void *alen_) {
+ if (res == 0) {
+ POST_WRITE(asa_, struct_sockaddr_sz);
+ POST_WRITE(alen_, sizeof(__sanitizer_socklen_t));
+ }
+}
+PRE_SYSCALL(access)(void *path_, long long flags_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(access)(long long res, void *path_, long long flags_) {
+ if (res == 0) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ }
+}
+PRE_SYSCALL(chflags)(void *path_, long long flags_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(chflags)(long long res, void *path_, long long flags_) {
+ if (res == 0) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ }
+}
+PRE_SYSCALL(fchflags)(long long fd_, long long flags_) { /* Nothing to do */ }
+POST_SYSCALL(fchflags)(long long res, long long fd_, long long flags_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(sync)(void) { /* Nothing to do */ }
+POST_SYSCALL(sync)(long long res) { /* Nothing to do */ }
+PRE_SYSCALL(kill)(long long pid_, long long signum_) { /* Nothing to do */ }
+POST_SYSCALL(kill)(long long res, long long pid_, long long signum_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(compat_43_stat43)(void *path_, void *ub_) { /* TODO */ }
+POST_SYSCALL(compat_43_stat43)(long long res, void *path_, void *ub_) {
+ /* TODO */
+}
+PRE_SYSCALL(getppid)(void) { /* Nothing to do */ }
+POST_SYSCALL(getppid)(long long res) { /* Nothing to do */ }
+PRE_SYSCALL(compat_43_lstat43)(void *path_, void *ub_) { /* TODO */ }
+POST_SYSCALL(compat_43_lstat43)(long long res, void *path_, void *ub_) {
+ /* TODO */
+}
+PRE_SYSCALL(dup)(long long fd_) { /* Nothing to do */ }
+POST_SYSCALL(dup)(long long res, long long fd_) { /* Nothing to do */ }
+PRE_SYSCALL(pipe)(void) {
+ /* pipe returns two descriptors through two returned values */
+}
+POST_SYSCALL(pipe)(long long res) {
+ /* pipe returns two descriptors through two returned values */
+}
+PRE_SYSCALL(getegid)(void) { /* Nothing to do */ }
+POST_SYSCALL(getegid)(long long res) { /* Nothing to do */ }
+PRE_SYSCALL(profil)
+(void *samples_, long long size_, long long offset_, long long scale_) {
+ if (samples_) {
+ PRE_WRITE(samples_, size_);
+ }
+}
+POST_SYSCALL(profil)
+(long long res, void *samples_, long long size_, long long offset_,
+ long long scale_) {
+ if (res == 0) {
+ if (samples_) {
+ POST_WRITE(samples_, size_);
+ }
+ }
+}
+PRE_SYSCALL(ktrace)
+(void *fname_, long long ops_, long long facs_, long long pid_) {
+ const char *fname = (const char *)fname_;
+ if (fname) {
+ PRE_READ(fname, __sanitizer::internal_strlen(fname) + 1);
+ }
+}
+POST_SYSCALL(ktrace)
+(long long res, void *fname_, long long ops_, long long facs_, long long pid_) {
+ const char *fname = (const char *)fname_;
+ if (res == 0) {
+ if (fname) {
+ POST_READ(fname, __sanitizer::internal_strlen(fname) + 1);
+ }
+ }
+}
+PRE_SYSCALL(compat_13_sigaction13)(long long signum_, void *nsa_, void *osa_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_13_sigaction13)
+(long long res, long long signum_, void *nsa_, void *osa_) {
+ /* TODO */
+}
+PRE_SYSCALL(getgid)(void) { /* Nothing to do */ }
+POST_SYSCALL(getgid)(long long res) { /* Nothing to do */ }
+PRE_SYSCALL(compat_13_sigprocmask13)(long long how_, long long mask_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_13_sigprocmask13)
+(long long res, long long how_, long long mask_) {
+ /* TODO */
+}
+PRE_SYSCALL(__getlogin)(void *namebuf_, long long namelen_) {
+ if (namebuf_) {
+ PRE_WRITE(namebuf_, namelen_);
+ }
+}
+POST_SYSCALL(__getlogin)(long long res, void *namebuf_, long long namelen_) {
+ if (res == 0) {
+ if (namebuf_) {
+ POST_WRITE(namebuf_, namelen_);
+ }
+ }
+}
+PRE_SYSCALL(__setlogin)(void *namebuf_) {
+ const char *namebuf = (const char *)namebuf_;
+ if (namebuf) {
+ PRE_READ(namebuf, __sanitizer::internal_strlen(namebuf) + 1);
+ }
+}
+POST_SYSCALL(__setlogin)(long long res, void *namebuf_) {
+ if (res == 0) {
+ const char *namebuf = (const char *)namebuf_;
+ if (namebuf) {
+ POST_READ(namebuf, __sanitizer::internal_strlen(namebuf) + 1);
+ }
+ }
+}
+PRE_SYSCALL(acct)(void *path_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(acct)(long long res, void *path_) {
+ if (res == 0) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ }
+}
+PRE_SYSCALL(compat_13_sigpending13)(void) { /* TODO */ }
+POST_SYSCALL(compat_13_sigpending13)(long long res) { /* TODO */ }
+PRE_SYSCALL(compat_13_sigaltstack13)(void *nss_, void *oss_) { /* TODO */ }
+POST_SYSCALL(compat_13_sigaltstack13)(long long res, void *nss_, void *oss_) {
+ /* TODO */
+}
+PRE_SYSCALL(ioctl)(long long fd_, long long com_, void *data_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(ioctl)(long long res, long long fd_, long long com_, void *data_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(compat_12_oreboot)(long long opt_) { /* TODO */ }
+POST_SYSCALL(compat_12_oreboot)(long long res, long long opt_) { /* TODO */ }
+PRE_SYSCALL(revoke)(void *path_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(revoke)(long long res, void *path_) {
+ if (res == 0) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ }
+}
+PRE_SYSCALL(symlink)(void *path_, void *link_) {
+ const char *path = (const char *)path_;
+ const char *link = (const char *)link_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ if (link) {
+ PRE_READ(link, __sanitizer::internal_strlen(link) + 1);
+ }
+}
+POST_SYSCALL(symlink)(long long res, void *path_, void *link_) {
+ if (res == 0) {
+ const char *path = (const char *)path_;
+ const char *link = (const char *)link_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ if (link) {
+ POST_READ(link, __sanitizer::internal_strlen(link) + 1);
+ }
+ }
+}
+PRE_SYSCALL(readlink)(void *path_, void *buf_, long long count_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ if (buf_) {
+ PRE_WRITE(buf_, count_);
+ }
+}
+POST_SYSCALL(readlink)
+(long long res, void *path_, void *buf_, long long count_) {
+ if (res > 0) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ if (buf_) {
+      POST_WRITE(buf_, res);
+ }
+ }
+}
+PRE_SYSCALL(execve)(void *path_, void *argp_, void *envp_) {
+ const char *path = (const char *)path_;
+ char **argp = (char **)argp_;
+ char **envp = (char **)envp_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+  if (argp) {
+    for (char **a = argp; *a; a++) {
+      PRE_READ(*a, __sanitizer::internal_strlen(*a) + 1);
+    }
+  }
+  if (envp) {
+    for (char **e = envp; *e; e++) {
+      PRE_READ(*e, __sanitizer::internal_strlen(*e) + 1);
+    }
+  }
+}
+POST_SYSCALL(execve)(long long res, void *path_, void *argp_, void *envp_) {
+ /* If we are here, something went wrong */
+ const char *path = (const char *)path_;
+ char **argp = (char **)argp_;
+ char **envp = (char **)envp_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+  if (argp) {
+    for (char **a = argp; *a; a++) {
+      POST_READ(*a, __sanitizer::internal_strlen(*a) + 1);
+    }
+  }
+  if (envp) {
+    for (char **e = envp; *e; e++) {
+      POST_READ(*e, __sanitizer::internal_strlen(*e) + 1);
+    }
+  }
+}
+PRE_SYSCALL(umask)(long long newmask_) { /* Nothing to do */ }
+POST_SYSCALL(umask)(long long res, long long newmask_) { /* Nothing to do */ }
+PRE_SYSCALL(chroot)(void *path_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(chroot)(long long res, void *path_) {
+ if (res == 0) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ }
+}
+PRE_SYSCALL(compat_43_fstat43)(long long fd_, void *sb_) { /* TODO */ }
+POST_SYSCALL(compat_43_fstat43)(long long res, long long fd_, void *sb_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_43_ogetkerninfo)
+(long long op_, void *where_, void *size_, long long arg_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_43_ogetkerninfo)
+(long long res, long long op_, void *where_, void *size_, long long arg_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_43_ogetpagesize)(void) { /* TODO */ }
+POST_SYSCALL(compat_43_ogetpagesize)(long long res) { /* TODO */ }
+PRE_SYSCALL(compat_12_msync)(void *addr_, long long len_) { /* TODO */ }
+POST_SYSCALL(compat_12_msync)(long long res, void *addr_, long long len_) {
+ /* TODO */
+}
+PRE_SYSCALL(vfork)(void) { /* Nothing to do */ }
+POST_SYSCALL(vfork)(long long res) { /* Nothing to do */ }
+/* syscall 67 has been skipped */
+/* syscall 68 has been skipped */
+/* syscall 69 has been skipped */
+/* syscall 70 has been skipped */
+PRE_SYSCALL(compat_43_ommap)
+(void *addr_, long long len_, long long prot_, long long flags_, long long fd_,
+ long long pos_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_43_ommap)
+(long long res, void *addr_, long long len_, long long prot_, long long flags_,
+ long long fd_, long long pos_) {
+ /* TODO */
+}
+PRE_SYSCALL(vadvise)(long long anom_) { /* Nothing to do */ }
+POST_SYSCALL(vadvise)(long long res, long long anom_) { /* Nothing to do */ }
+PRE_SYSCALL(munmap)(void *addr_, long long len_) { /* Nothing to do */ }
+POST_SYSCALL(munmap)(long long res, void *addr_, long long len_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(mprotect)(void *addr_, long long len_, long long prot_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(mprotect)
+(long long res, void *addr_, long long len_, long long prot_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(madvise)(void *addr_, long long len_, long long behav_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(madvise)
+(long long res, void *addr_, long long len_, long long behav_) {
+ /* Nothing to do */
+}
+/* syscall 76 has been skipped */
+/* syscall 77 has been skipped */
+PRE_SYSCALL(mincore)(void *addr_, long long len_, void *vec_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(mincore)(long long res, void *addr_, long long len_, void *vec_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(getgroups)(long long gidsetsize_, void *gidset_) {
+ unsigned int *gidset = (unsigned int *)gidset_;
+ if (gidset) {
+ PRE_WRITE(gidset, sizeof(*gidset) * gidsetsize_);
+ }
+}
+POST_SYSCALL(getgroups)(long long res, long long gidsetsize_, void *gidset_) {
+ if (res == 0) {
+ unsigned int *gidset = (unsigned int *)gidset_;
+ if (gidset) {
+ POST_WRITE(gidset, sizeof(*gidset) * gidsetsize_);
+ }
+ }
+}
+PRE_SYSCALL(setgroups)(long long gidsetsize_, void *gidset_) {
+ unsigned int *gidset = (unsigned int *)gidset_;
+ if (gidset) {
+ PRE_READ(gidset, sizeof(*gidset) * gidsetsize_);
+ }
+}
+POST_SYSCALL(setgroups)(long long res, long long gidsetsize_, void *gidset_) {
+ if (res == 0) {
+ unsigned int *gidset = (unsigned int *)gidset_;
+ if (gidset) {
+ POST_READ(gidset, sizeof(*gidset) * gidsetsize_);
+ }
+ }
+}
+PRE_SYSCALL(getpgrp)(void) { /* Nothing to do */ }
+POST_SYSCALL(getpgrp)(long long res) { /* Nothing to do */ }
+PRE_SYSCALL(setpgid)(long long pid_, long long pgid_) { /* Nothing to do */ }
+POST_SYSCALL(setpgid)(long long res, long long pid_, long long pgid_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(compat_50_setitimer)(long long which_, void *itv_, void *oitv_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_50_setitimer)
+(long long res, long long which_, void *itv_, void *oitv_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_43_owait)(void) { /* TODO */ }
+POST_SYSCALL(compat_43_owait)(long long res) { /* TODO */ }
+PRE_SYSCALL(compat_12_oswapon)(void *name_) { /* TODO */ }
+POST_SYSCALL(compat_12_oswapon)(long long res, void *name_) { /* TODO */ }
+PRE_SYSCALL(compat_50_getitimer)(long long which_, void *itv_) { /* TODO */ }
+POST_SYSCALL(compat_50_getitimer)(long long res, long long which_, void *itv_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_43_ogethostname)(void *hostname_, long long len_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_43_ogethostname)
+(long long res, void *hostname_, long long len_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_43_osethostname)(void *hostname_, long long len_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_43_osethostname)
+(long long res, void *hostname_, long long len_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_43_ogetdtablesize)(void) { /* TODO */ }
+POST_SYSCALL(compat_43_ogetdtablesize)(long long res) { /* TODO */ }
+PRE_SYSCALL(dup2)(long long from_, long long to_) { /* Nothing to do */ }
+POST_SYSCALL(dup2)(long long res, long long from_, long long to_) {
+ /* Nothing to do */
+}
+/* syscall 91 has been skipped */
+PRE_SYSCALL(fcntl)(long long fd_, long long cmd_, void *arg_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(fcntl)(long long res, long long fd_, long long cmd_, void *arg_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(compat_50_select)
+(long long nd_, void *in_, void *ou_, void *ex_, void *tv_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_50_select)
+(long long res, long long nd_, void *in_, void *ou_, void *ex_, void *tv_) {
+ /* TODO */
+}
+/* syscall 94 has been skipped */
+PRE_SYSCALL(fsync)(long long fd_) { /* Nothing to do */ }
+POST_SYSCALL(fsync)(long long res, long long fd_) { /* Nothing to do */ }
+PRE_SYSCALL(setpriority)(long long which_, long long who_, long long prio_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(setpriority)
+(long long res, long long which_, long long who_, long long prio_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(compat_30_socket)
+(long long domain_, long long type_, long long protocol_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_30_socket)
+(long long res, long long domain_, long long type_, long long protocol_) {
+ /* TODO */
+}
+PRE_SYSCALL(connect)(long long s_, void *name_, long long namelen_) {
+ PRE_READ(name_, namelen_);
+}
+POST_SYSCALL(connect)
+(long long res, long long s_, void *name_, long long namelen_) {
+ if (res == 0) {
+ POST_READ(name_, namelen_);
+ }
+}
+PRE_SYSCALL(compat_43_oaccept)(long long s_, void *name_, void *anamelen_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_43_oaccept)
+(long long res, long long s_, void *name_, void *anamelen_) {
+ /* TODO */
+}
+PRE_SYSCALL(getpriority)(long long which_, long long who_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(getpriority)(long long res, long long which_, long long who_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(compat_43_osend)
+(long long s_, void *buf_, long long len_, long long flags_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_43_osend)
+(long long res, long long s_, void *buf_, long long len_, long long flags_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_43_orecv)
+(long long s_, void *buf_, long long len_, long long flags_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_43_orecv)
+(long long res, long long s_, void *buf_, long long len_, long long flags_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_13_sigreturn13)(void *sigcntxp_) { /* TODO */ }
+POST_SYSCALL(compat_13_sigreturn13)(long long res, void *sigcntxp_) {
+ /* TODO */
+}
+PRE_SYSCALL(bind)(long long s_, void *name_, long long namelen_) {
+ PRE_READ(name_, namelen_);
+}
+POST_SYSCALL(bind)
+(long long res, long long s_, void *name_, long long namelen_) {
+ if (res == 0) {
+    POST_READ(name_, namelen_);
+ }
+}
+PRE_SYSCALL(setsockopt)
+(long long s_, long long level_, long long name_, void *val_,
+ long long valsize_) {
+ if (val_) {
+ PRE_READ(val_, valsize_);
+ }
+}
+POST_SYSCALL(setsockopt)
+(long long res, long long s_, long long level_, long long name_, void *val_,
+ long long valsize_) {
+ if (res == 0) {
+ if (val_) {
+ POST_READ(val_, valsize_);
+ }
+ }
+}
+PRE_SYSCALL(listen)(long long s_, long long backlog_) { /* Nothing to do */ }
+POST_SYSCALL(listen)(long long res, long long s_, long long backlog_) {
+ /* Nothing to do */
+}
+/* syscall 107 has been skipped */
+PRE_SYSCALL(compat_43_osigvec)(long long signum_, void *nsv_, void *osv_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_43_osigvec)
+(long long res, long long signum_, void *nsv_, void *osv_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_43_osigblock)(long long mask_) { /* TODO */ }
+POST_SYSCALL(compat_43_osigblock)(long long res, long long mask_) { /* TODO */ }
+PRE_SYSCALL(compat_43_osigsetmask)(long long mask_) { /* TODO */ }
+POST_SYSCALL(compat_43_osigsetmask)(long long res, long long mask_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_13_sigsuspend13)(long long mask_) { /* TODO */ }
+POST_SYSCALL(compat_13_sigsuspend13)(long long res, long long mask_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_43_osigstack)(void *nss_, void *oss_) { /* TODO */ }
+POST_SYSCALL(compat_43_osigstack)(long long res, void *nss_, void *oss_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_43_orecvmsg)(long long s_, void *msg_, long long flags_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_43_orecvmsg)
+(long long res, long long s_, void *msg_, long long flags_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_43_osendmsg)(long long s_, void *msg_, long long flags_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_43_osendmsg)
+(long long res, long long s_, void *msg_, long long flags_) {
+ /* TODO */
+}
+/* syscall 115 has been skipped */
+PRE_SYSCALL(compat_50_gettimeofday)(void *tp_, void *tzp_) { /* TODO */ }
+POST_SYSCALL(compat_50_gettimeofday)(long long res, void *tp_, void *tzp_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_50_getrusage)(long long who_, void *rusage_) { /* TODO */ }
+POST_SYSCALL(compat_50_getrusage)
+(long long res, long long who_, void *rusage_) {
+ /* TODO */
+}
+PRE_SYSCALL(getsockopt)
+(long long s_, long long level_, long long name_, void *val_, void *avalsize_) {
+ /* TODO */
+}
+POST_SYSCALL(getsockopt)
+(long long res, long long s_, long long level_, long long name_, void *val_,
+ void *avalsize_) {
+ /* TODO */
+}
+/* syscall 119 has been skipped */
+PRE_SYSCALL(readv)(long long fd_, void *iovp_, long long iovcnt_) {
+ struct __sanitizer_iovec *iovp = (struct __sanitizer_iovec *)iovp_;
+ int i;
+ if (iovp) {
+ PRE_READ(iovp, sizeof(struct __sanitizer_iovec) * iovcnt_);
+ for (i = 0; i < iovcnt_; i++) {
+ PRE_WRITE(iovp[i].iov_base, iovp[i].iov_len);
+ }
+ }
+}
+POST_SYSCALL(readv)
+(long long res, long long fd_, void *iovp_, long long iovcnt_) {
+ struct __sanitizer_iovec *iovp = (struct __sanitizer_iovec *)iovp_;
+ int i;
+ uptr m, n = res;
+ if (res > 0) {
+ if (iovp) {
+ POST_READ(iovp, sizeof(struct __sanitizer_iovec) * iovcnt_);
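+      // Mark as written only the res bytes the kernel actually filled,
+      // walking the iovecs in order.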
+ for (i = 0; i < iovcnt_ && n > 0; i++) {
+ m = n > iovp[i].iov_len ? iovp[i].iov_len : n;
+ POST_WRITE(iovp[i].iov_base, m);
+ n -= m;
+ }
+ }
+ }
+}
+PRE_SYSCALL(writev)(long long fd_, void *iovp_, long long iovcnt_) {
+ struct __sanitizer_iovec *iovp = (struct __sanitizer_iovec *)iovp_;
+ int i;
+ if (iovp) {
+ PRE_READ(iovp, sizeof(struct __sanitizer_iovec) * iovcnt_);
+ for (i = 0; i < iovcnt_; i++) {
+ PRE_READ(iovp[i].iov_base, iovp[i].iov_len);
+ }
+ }
+}
+POST_SYSCALL(writev)
+(long long res, long long fd_, void *iovp_, long long iovcnt_) {
+ struct __sanitizer_iovec *iovp = (struct __sanitizer_iovec *)iovp_;
+ int i;
+ uptr m, n = res;
+ if (res > 0) {
+ if (iovp) {
+ POST_READ(iovp, sizeof(struct __sanitizer_iovec) * iovcnt_);
+ for (i = 0; i < iovcnt_ && n > 0; i++) {
+ m = n > iovp[i].iov_len ? iovp[i].iov_len : n;
+ POST_READ(iovp[i].iov_base, m);
+ n -= m;
+ }
+ }
+ }
+}
+PRE_SYSCALL(compat_50_settimeofday)(void *tv_, void *tzp_) { /* TODO */ }
+POST_SYSCALL(compat_50_settimeofday)(long long res, void *tv_, void *tzp_) {
+ /* TODO */
+}
+PRE_SYSCALL(fchown)(long long fd_, long long uid_, long long gid_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(fchown)
+(long long res, long long fd_, long long uid_, long long gid_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(fchmod)(long long fd_, long long mode_) { /* Nothing to do */ }
+POST_SYSCALL(fchmod)(long long res, long long fd_, long long mode_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(compat_43_orecvfrom)
+(long long s_, void *buf_, long long len_, long long flags_, void *from_,
+ void *fromlenaddr_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_43_orecvfrom)
+(long long res, long long s_, void *buf_, long long len_, long long flags_,
+ void *from_, void *fromlenaddr_) {
+ /* TODO */
+}
+PRE_SYSCALL(setreuid)(long long ruid_, long long euid_) { /* Nothing to do */ }
+POST_SYSCALL(setreuid)(long long res, long long ruid_, long long euid_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(setregid)(long long rgid_, long long egid_) { /* Nothing to do */ }
+POST_SYSCALL(setregid)(long long res, long long rgid_, long long egid_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(rename)(void *from_, void *to_) {
+ const char *from = (const char *)from_;
+ const char *to = (const char *)to_;
+ if (from) {
+ PRE_READ(from, __sanitizer::internal_strlen(from) + 1);
+ }
+ if (to) {
+ PRE_READ(to, __sanitizer::internal_strlen(to) + 1);
+ }
+}
+POST_SYSCALL(rename)(long long res, void *from_, void *to_) {
+ if (res == 0) {
+ const char *from = (const char *)from_;
+ const char *to = (const char *)to_;
+ if (from) {
+ POST_READ(from, __sanitizer::internal_strlen(from) + 1);
+ }
+ if (to) {
+ POST_READ(to, __sanitizer::internal_strlen(to) + 1);
+ }
+ }
+}
+PRE_SYSCALL(compat_43_otruncate)(void *path_, long long length_) { /* TODO */ }
+POST_SYSCALL(compat_43_otruncate)
+(long long res, void *path_, long long length_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_43_oftruncate)(long long fd_, long long length_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_43_oftruncate)
+(long long res, long long fd_, long long length_) {
+ /* TODO */
+}
+PRE_SYSCALL(flock)(long long fd_, long long how_) { /* Nothing to do */ }
+POST_SYSCALL(flock)(long long res, long long fd_, long long how_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(mkfifo)(void *path_, long long mode_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(mkfifo)(long long res, void *path_, long long mode_) {
+ if (res == 0) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ }
+}
+PRE_SYSCALL(sendto)
+(long long s_, void *buf_, long long len_, long long flags_, void *to_,
+ long long tolen_) {
+ PRE_READ(buf_, len_);
+ PRE_READ(to_, tolen_);
+}
+POST_SYSCALL(sendto)
+(long long res, long long s_, void *buf_, long long len_, long long flags_,
+ void *to_, long long tolen_) {
+ if (res >= 0) {
+ POST_READ(buf_, len_);
+ POST_READ(to_, tolen_);
+ }
+}
+PRE_SYSCALL(shutdown)(long long s_, long long how_) { /* Nothing to do */ }
+POST_SYSCALL(shutdown)(long long res, long long s_, long long how_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(socketpair)
+(long long domain_, long long type_, long long protocol_, void *rsv_) {
+ PRE_WRITE(rsv_, 2 * sizeof(int));
+}
+POST_SYSCALL(socketpair)
+(long long res, long long domain_, long long type_, long long protocol_,
+ void *rsv_) {
+ if (res == 0) {
+ POST_WRITE(rsv_, 2 * sizeof(int));
+ }
+}
+PRE_SYSCALL(mkdir)(void *path_, long long mode_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(mkdir)(long long res, void *path_, long long mode_) {
+ if (res == 0) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ }
+}
+PRE_SYSCALL(rmdir)(void *path_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(rmdir)(long long res, void *path_) {
+ if (res == 0) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ }
+}
+PRE_SYSCALL(compat_50_utimes)(void *path_, void *tptr_) { /* TODO */ }
+POST_SYSCALL(compat_50_utimes)(long long res, void *path_, void *tptr_) {
+ /* TODO */
+}
+/* syscall 139 has been skipped */
+PRE_SYSCALL(compat_50_adjtime)(void *delta_, void *olddelta_) { /* TODO */ }
+POST_SYSCALL(compat_50_adjtime)(long long res, void *delta_, void *olddelta_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_43_ogetpeername)(long long fdes_, void *asa_, void *alen_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_43_ogetpeername)
+(long long res, long long fdes_, void *asa_, void *alen_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_43_ogethostid)(void) { /* TODO */ }
+POST_SYSCALL(compat_43_ogethostid)(long long res) { /* TODO */ }
+PRE_SYSCALL(compat_43_osethostid)(long long hostid_) { /* TODO */ }
+POST_SYSCALL(compat_43_osethostid)(long long res, long long hostid_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_43_ogetrlimit)(long long which_, void *rlp_) { /* TODO */ }
+POST_SYSCALL(compat_43_ogetrlimit)
+(long long res, long long which_, void *rlp_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_43_osetrlimit)(long long which_, void *rlp_) { /* TODO */ }
+POST_SYSCALL(compat_43_osetrlimit)
+(long long res, long long which_, void *rlp_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_43_okillpg)(long long pgid_, long long signum_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_43_okillpg)
+(long long res, long long pgid_, long long signum_) {
+ /* TODO */
+}
+PRE_SYSCALL(setsid)(void) { /* Nothing to do */ }
+POST_SYSCALL(setsid)(long long res) { /* Nothing to do */ }
+PRE_SYSCALL(compat_50_quotactl)
+(void *path_, long long cmd_, long long uid_, void *arg_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_50_quotactl)
+(long long res, void *path_, long long cmd_, long long uid_, void *arg_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_43_oquota)(void) { /* TODO */ }
+POST_SYSCALL(compat_43_oquota)(long long res) { /* TODO */ }
+PRE_SYSCALL(compat_43_ogetsockname)(long long fdec_, void *asa_, void *alen_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_43_ogetsockname)
+(long long res, long long fdec_, void *asa_, void *alen_) {
+ /* TODO */
+}
+/* syscall 151 has been skipped */
+/* syscall 152 has been skipped */
+/* syscall 153 has been skipped */
+/* syscall 154 has been skipped */
+PRE_SYSCALL(nfssvc)(long long flag_, void *argp_) { /* Nothing to do */ }
+POST_SYSCALL(nfssvc)(long long res, long long flag_, void *argp_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(compat_43_ogetdirentries)
+(long long fd_, void *buf_, long long count_, void *basep_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_43_ogetdirentries)
+(long long res, long long fd_, void *buf_, long long count_, void *basep_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_20_statfs)(void *path_, void *buf_) { /* TODO */ }
+POST_SYSCALL(compat_20_statfs)(long long res, void *path_, void *buf_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_20_fstatfs)(long long fd_, void *buf_) { /* TODO */ }
+POST_SYSCALL(compat_20_fstatfs)(long long res, long long fd_, void *buf_) {
+ /* TODO */
+}
+/* syscall 159 has been skipped */
+/* syscall 160 has been skipped */
+PRE_SYSCALL(compat_30_getfh)(void *fname_, void *fhp_) { /* TODO */ }
+POST_SYSCALL(compat_30_getfh)(long long res, void *fname_, void *fhp_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_09_ogetdomainname)(void *domainname_, long long len_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_09_ogetdomainname)
+(long long res, void *domainname_, long long len_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_09_osetdomainname)(void *domainname_, long long len_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_09_osetdomainname)
+(long long res, void *domainname_, long long len_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_09_ouname)(void *name_) { /* TODO */ }
+POST_SYSCALL(compat_09_ouname)(long long res, void *name_) { /* TODO */ }
+PRE_SYSCALL(sysarch)(long long op_, void *parms_) { /* TODO */ }
+POST_SYSCALL(sysarch)(long long res, long long op_, void *parms_) { /* TODO */ }
+/* syscall 166 has been skipped */
+/* syscall 167 has been skipped */
+/* syscall 168 has been skipped */
+#if !defined(_LP64)
+PRE_SYSCALL(compat_10_osemsys)
+(long long which_, long long a2_, long long a3_, long long a4_, long long a5_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_10_osemsys)
+(long long res, long long which_, long long a2_, long long a3_, long long a4_,
+ long long a5_) {
+ /* TODO */
+}
+#else
+/* syscall 169 has been skipped */
+#endif
+#if !defined(_LP64)
+PRE_SYSCALL(compat_10_omsgsys)
+(long long which_, long long a2_, long long a3_, long long a4_, long long a5_,
+ long long a6_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_10_omsgsys)
+(long long res, long long which_, long long a2_, long long a3_, long long a4_,
+ long long a5_, long long a6_) {
+ /* TODO */
+}
+#else
+/* syscall 170 has been skipped */
+#endif
+#if !defined(_LP64)
+PRE_SYSCALL(compat_10_oshmsys)
+(long long which_, long long a2_, long long a3_, long long a4_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_10_oshmsys)
+(long long res, long long which_, long long a2_, long long a3_, long long a4_) {
+ /* TODO */
+}
+#else
+/* syscall 171 has been skipped */
+#endif
+/* syscall 172 has been skipped */
+PRE_SYSCALL(pread)
+(long long fd_, void *buf_, long long nbyte_, long long PAD_,
+ long long offset_) {
+ if (buf_) {
+ PRE_WRITE(buf_, nbyte_);
+ }
+}
+POST_SYSCALL(pread)
+(long long res, long long fd_, void *buf_, long long nbyte_, long long PAD_,
+ long long offset_) {
+ if (res > 0) {
+ POST_WRITE(buf_, res);
+ }
+}
+PRE_SYSCALL(pwrite)
+(long long fd_, void *buf_, long long nbyte_, long long PAD_,
+ long long offset_) {
+ if (buf_) {
+ PRE_READ(buf_, nbyte_);
+ }
+}
+POST_SYSCALL(pwrite)
+(long long res, long long fd_, void *buf_, long long nbyte_, long long PAD_,
+ long long offset_) {
+ if (res > 0) {
+ POST_READ(buf_, res);
+ }
+}
+PRE_SYSCALL(compat_30_ntp_gettime)(void *ntvp_) { /* TODO */ }
+POST_SYSCALL(compat_30_ntp_gettime)(long long res, void *ntvp_) { /* TODO */ }
+#if defined(NTP) || !defined(_KERNEL_OPT)
+PRE_SYSCALL(ntp_adjtime)(void *tp_) { /* Nothing to do */ }
+POST_SYSCALL(ntp_adjtime)(long long res, void *tp_) { /* Nothing to do */ }
+#else
+/* syscall 176 has been skipped */
+#endif
+/* syscall 177 has been skipped */
+/* syscall 178 has been skipped */
+/* syscall 179 has been skipped */
+/* syscall 180 has been skipped */
+PRE_SYSCALL(setgid)(long long gid_) { /* Nothing to do */ }
+POST_SYSCALL(setgid)(long long res, long long gid_) { /* Nothing to do */ }
+PRE_SYSCALL(setegid)(long long egid_) { /* Nothing to do */ }
+POST_SYSCALL(setegid)(long long res, long long egid_) { /* Nothing to do */ }
+PRE_SYSCALL(seteuid)(long long euid_) { /* Nothing to do */ }
+POST_SYSCALL(seteuid)(long long res, long long euid_) { /* Nothing to do */ }
+PRE_SYSCALL(lfs_bmapv)(void *fsidp_, void *blkiov_, long long blkcnt_) {
+ /* TODO */
+}
+POST_SYSCALL(lfs_bmapv)
+(long long res, void *fsidp_, void *blkiov_, long long blkcnt_) {
+ /* TODO */
+}
+PRE_SYSCALL(lfs_markv)(void *fsidp_, void *blkiov_, long long blkcnt_) {
+ /* TODO */
+}
+POST_SYSCALL(lfs_markv)
+(long long res, void *fsidp_, void *blkiov_, long long blkcnt_) {
+ /* TODO */
+}
+PRE_SYSCALL(lfs_segclean)(void *fsidp_, long long segment_) { /* TODO */ }
+POST_SYSCALL(lfs_segclean)(long long res, void *fsidp_, long long segment_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_50_lfs_segwait)(void *fsidp_, void *tv_) { /* TODO */ }
+POST_SYSCALL(compat_50_lfs_segwait)(long long res, void *fsidp_, void *tv_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_12_stat12)(void *path_, void *ub_) { /* TODO */ }
+POST_SYSCALL(compat_12_stat12)(long long res, void *path_, void *ub_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_12_fstat12)(long long fd_, void *sb_) { /* TODO */ }
+POST_SYSCALL(compat_12_fstat12)(long long res, long long fd_, void *sb_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_12_lstat12)(void *path_, void *ub_) { /* TODO */ }
+POST_SYSCALL(compat_12_lstat12)(long long res, void *path_, void *ub_) {
+ /* TODO */
+}
+PRE_SYSCALL(pathconf)(void *path_, long long name_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(pathconf)(long long res, void *path_, long long name_) {
+ if (res != -1) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ }
+}
+PRE_SYSCALL(fpathconf)(long long fd_, long long name_) { /* Nothing to do */ }
+POST_SYSCALL(fpathconf)(long long res, long long fd_, long long name_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(getsockopt2)
+(long long s_, long long level_, long long name_, void *val_, void *avalsize_) {
+ /* TODO */
+}
+POST_SYSCALL(getsockopt2)
+(long long res, long long s_, long long level_, long long name_, void *val_,
+ void *avalsize_) {
+ /* TODO */
+}
+PRE_SYSCALL(getrlimit)(long long which_, void *rlp_) {
+ PRE_WRITE(rlp_, struct_rlimit_sz);
+}
+POST_SYSCALL(getrlimit)(long long res, long long which_, void *rlp_) {
+ if (res == 0) {
+ POST_WRITE(rlp_, struct_rlimit_sz);
+ }
+}
+PRE_SYSCALL(setrlimit)(long long which_, void *rlp_) {
+ PRE_READ(rlp_, struct_rlimit_sz);
+}
+POST_SYSCALL(setrlimit)(long long res, long long which_, void *rlp_) {
+ if (res == 0) {
+ POST_READ(rlp_, struct_rlimit_sz);
+ }
+}
+PRE_SYSCALL(compat_12_getdirentries)
+(long long fd_, void *buf_, long long count_, void *basep_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_12_getdirentries)
+(long long res, long long fd_, void *buf_, long long count_, void *basep_) {
+ /* TODO */
+}
+PRE_SYSCALL(mmap)
+(void *addr_, long long len_, long long prot_, long long flags_, long long fd_,
+ long long PAD_, long long pos_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(mmap)
+(long long res, void *addr_, long long len_, long long prot_, long long flags_,
+ long long fd_, long long PAD_, long long pos_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(__syscall)(long long code_, long long args_[SYS_MAXSYSARGS]) {
+ /* Nothing to do */
+}
+POST_SYSCALL(__syscall)
+(long long res, long long code_, long long args_[SYS_MAXSYSARGS]) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(lseek)
+(long long fd_, long long PAD_, long long offset_, long long whence_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(lseek)
+(long long res, long long fd_, long long PAD_, long long offset_,
+ long long whence_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(truncate)(void *path_, long long PAD_, long long length_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(truncate)
+(long long res, void *path_, long long PAD_, long long length_) {
+ if (res == 0) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ }
+}
+PRE_SYSCALL(ftruncate)(long long fd_, long long PAD_, long long length_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(ftruncate)
+(long long res, long long fd_, long long PAD_, long long length_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(__sysctl)
+(void *name_, long long namelen_, void *oldv_, void *oldlenp_, void *newv_,
+ long long newlen_) {
+ const int *name = (const int *)name_;
+ if (name) {
+ PRE_READ(name, namelen_ * sizeof(*name));
+ }
+ if (newv_) {
+    PRE_READ(newv_, newlen_);
+ }
+}
+POST_SYSCALL(__sysctl)
+(long long res, void *name_, long long namelen_, void *oldv_, void *oldlenp_,
+ void *newv_, long long newlen_) {
+ if (res == 0) {
+ const int *name = (const int *)name_;
+ if (name) {
+ POST_READ(name, namelen_ * sizeof(*name));
+ }
+ if (newv_) {
+      POST_READ(newv_, newlen_);
+ }
+ }
+}
+PRE_SYSCALL(mlock)(void *addr_, long long len_) { /* Nothing to do */ }
+POST_SYSCALL(mlock)(long long res, void *addr_, long long len_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(munlock)(void *addr_, long long len_) { /* Nothing to do */ }
+POST_SYSCALL(munlock)(long long res, void *addr_, long long len_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(undelete)(void *path_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(undelete)(long long res, void *path_) {
+ if (res == 0) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ }
+}
+PRE_SYSCALL(compat_50_futimes)(long long fd_, void *tptr_) { /* TODO */ }
+POST_SYSCALL(compat_50_futimes)(long long res, long long fd_, void *tptr_) {
+ /* TODO */
+}
+PRE_SYSCALL(getpgid)(long long pid_) { /* Nothing to do */ }
+POST_SYSCALL(getpgid)(long long res, long long pid_) { /* Nothing to do */ }
+PRE_SYSCALL(reboot)(long long opt_, void *bootstr_) {
+ const char *bootstr = (const char *)bootstr_;
+ if (bootstr) {
+ PRE_READ(bootstr, __sanitizer::internal_strlen(bootstr) + 1);
+ }
+}
+POST_SYSCALL(reboot)(long long res, long long opt_, void *bootstr_) {
+ /* This call should never return */
+ const char *bootstr = (const char *)bootstr_;
+ if (bootstr) {
+ POST_READ(bootstr, __sanitizer::internal_strlen(bootstr) + 1);
+ }
+}
+PRE_SYSCALL(poll)(void *fds_, long long nfds_, long long timeout_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(poll)
+(long long res, void *fds_, long long nfds_, long long timeout_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(afssys)
+(long long id_, long long a1_, long long a2_, long long a3_, long long a4_,
+ long long a5_, long long a6_) {
+ /* TODO */
+}
+POST_SYSCALL(afssys)
+(long long res, long long id_, long long a1_, long long a2_, long long a3_,
+ long long a4_, long long a5_, long long a6_) {
+ /* TODO */
+}
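+/* The "skipped" markers below correspond to empty slots in the NetBSD
+   syscall table; no hook is generated for them. */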
+/* syscall 211 has been skipped */
+/* syscall 212 has been skipped */
+/* syscall 213 has been skipped */
+/* syscall 214 has been skipped */
+/* syscall 215 has been skipped */
+/* syscall 216 has been skipped */
+/* syscall 217 has been skipped */
+/* syscall 218 has been skipped */
+/* syscall 219 has been skipped */
+PRE_SYSCALL(compat_14___semctl)
+(long long semid_, long long semnum_, long long cmd_, void *arg_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_14___semctl)
+(long long res, long long semid_, long long semnum_, long long cmd_,
+ void *arg_) {
+ /* TODO */
+}
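+/* System V IPC: semaphores, message queues and shared memory. */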
+PRE_SYSCALL(semget)(long long key_, long long nsems_, long long semflg_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(semget)
+(long long res, long long key_, long long nsems_, long long semflg_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(semop)(long long semid_, void *sops_, long long nsops_) {
+ if (sops_) {
+ PRE_READ(sops_, nsops_ * struct_sembuf_sz);
+ }
+}
+POST_SYSCALL(semop)
+(long long res, long long semid_, void *sops_, long long nsops_) {
+ if (res == 0) {
+ if (sops_) {
+ POST_READ(sops_, nsops_ * struct_sembuf_sz);
+ }
+ }
+}
+PRE_SYSCALL(semconfig)(long long flag_) { /* Nothing to do */ }
+POST_SYSCALL(semconfig)(long long res, long long flag_) { /* Nothing to do */ }
+PRE_SYSCALL(compat_14_msgctl)(long long msqid_, long long cmd_, void *buf_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_14_msgctl)
+(long long res, long long msqid_, long long cmd_, void *buf_) {
+ /* TODO */
+}
+PRE_SYSCALL(msgget)(long long key_, long long msgflg_) { /* Nothing to do */ }
+POST_SYSCALL(msgget)(long long res, long long key_, long long msgflg_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(msgsnd)
+(long long msqid_, void *msgp_, long long msgsz_, long long msgflg_) {
+ if (msgp_) {
+ PRE_READ(msgp_, msgsz_);
+ }
+}
+POST_SYSCALL(msgsnd)
+(long long res, long long msqid_, void *msgp_, long long msgsz_,
+ long long msgflg_) {
+ if (res == 0) {
+ if (msgp_) {
+ POST_READ(msgp_, msgsz_);
+ }
+ }
+}
+PRE_SYSCALL(msgrcv)
+(long long msqid_, void *msgp_, long long msgsz_, long long msgtyp_,
+ long long msgflg_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(msgrcv)
+(long long res, long long msqid_, void *msgp_, long long msgsz_,
+ long long msgtyp_, long long msgflg_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(shmat)(long long shmid_, void *shmaddr_, long long shmflg_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(shmat)
+(long long res, long long shmid_, void *shmaddr_, long long shmflg_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(compat_14_shmctl)(long long shmid_, long long cmd_, void *buf_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_14_shmctl)
+(long long res, long long shmid_, long long cmd_, void *buf_) {
+ /* TODO */
+}
+PRE_SYSCALL(shmdt)(void *shmaddr_) { /* Nothing to do */ }
+POST_SYSCALL(shmdt)(long long res, void *shmaddr_) { /* Nothing to do */ }
+PRE_SYSCALL(shmget)(long long key_, long long size_, long long shmflg_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(shmget)
+(long long res, long long key_, long long size_, long long shmflg_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(compat_50_clock_gettime)(long long clock_id_, void *tp_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_50_clock_gettime)
+(long long res, long long clock_id_, void *tp_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_50_clock_settime)(long long clock_id_, void *tp_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_50_clock_settime)
+(long long res, long long clock_id_, void *tp_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_50_clock_getres)(long long clock_id_, void *tp_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_50_clock_getres)
+(long long res, long long clock_id_, void *tp_) {
+ /* TODO */
+}
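+/* POSIX timers. */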
+PRE_SYSCALL(timer_create)(long long clock_id_, void *evp_, void *timerid_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(timer_create)
+(long long res, long long clock_id_, void *evp_, void *timerid_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(timer_delete)(long long timerid_) { /* Nothing to do */ }
+POST_SYSCALL(timer_delete)(long long res, long long timerid_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(compat_50_timer_settime)
+(long long timerid_, long long flags_, void *value_, void *ovalue_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_50_timer_settime)
+(long long res, long long timerid_, long long flags_, void *value_,
+ void *ovalue_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_50_timer_gettime)(long long timerid_, void *value_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_50_timer_gettime)
+(long long res, long long timerid_, void *value_) {
+ /* TODO */
+}
+PRE_SYSCALL(timer_getoverrun)(long long timerid_) { /* Nothing to do */ }
+POST_SYSCALL(timer_getoverrun)(long long res, long long timerid_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(compat_50_nanosleep)(void *rqtp_, void *rmtp_) { /* TODO */ }
+POST_SYSCALL(compat_50_nanosleep)(long long res, void *rqtp_, void *rmtp_) {
+ /* TODO */
+}
+PRE_SYSCALL(fdatasync)(long long fd_) { /* Nothing to do */ }
+POST_SYSCALL(fdatasync)(long long res, long long fd_) { /* Nothing to do */ }
+PRE_SYSCALL(mlockall)(long long flags_) { /* Nothing to do */ }
+POST_SYSCALL(mlockall)(long long res, long long flags_) { /* Nothing to do */ }
+PRE_SYSCALL(munlockall)(void) { /* Nothing to do */ }
+POST_SYSCALL(munlockall)(long long res) { /* Nothing to do */ }
+PRE_SYSCALL(compat_50___sigtimedwait)(void *set_, void *info_, void *timeout_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_50___sigtimedwait)
+(long long res, void *set_, void *info_, void *timeout_) {
+ /* TODO */
+}
+PRE_SYSCALL(sigqueueinfo)(long long pid_, void *info_) {
+ if (info_) {
+ PRE_READ(info_, siginfo_t_sz);
+ }
+}
+POST_SYSCALL(sigqueueinfo)(long long res, long long pid_, void *info_) {}
+PRE_SYSCALL(modctl)(long long cmd_, void *arg_) { /* TODO */ }
+POST_SYSCALL(modctl)(long long res, long long cmd_, void *arg_) { /* TODO */ }
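+/* _ksem_*: kernel primitives backing POSIX semaphores. */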
+PRE_SYSCALL(_ksem_init)(long long value_, void *idp_) { /* Nothing to do */ }
+POST_SYSCALL(_ksem_init)(long long res, long long value_, void *idp_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(_ksem_open)
+(void *name_, long long oflag_, long long mode_, long long value_, void *idp_) {
+ const char *name = (const char *)name_;
+ if (name) {
+ PRE_READ(name, __sanitizer::internal_strlen(name) + 1);
+ }
+}
+POST_SYSCALL(_ksem_open)
+(long long res, void *name_, long long oflag_, long long mode_,
+ long long value_, void *idp_) {
+ const char *name = (const char *)name_;
+ if (name) {
+ POST_READ(name, __sanitizer::internal_strlen(name) + 1);
+ }
+}
+PRE_SYSCALL(_ksem_unlink)(void *name_) {
+ const char *name = (const char *)name_;
+ if (name) {
+ PRE_READ(name, __sanitizer::internal_strlen(name) + 1);
+ }
+}
+POST_SYSCALL(_ksem_unlink)(long long res, void *name_) {
+ const char *name = (const char *)name_;
+ if (name) {
+ POST_READ(name, __sanitizer::internal_strlen(name) + 1);
+ }
+}
+PRE_SYSCALL(_ksem_close)(long long id_) { /* Nothing to do */ }
+POST_SYSCALL(_ksem_close)(long long res, long long id_) { /* Nothing to do */ }
+PRE_SYSCALL(_ksem_post)(long long id_) { /* Nothing to do */ }
+POST_SYSCALL(_ksem_post)(long long res, long long id_) { /* Nothing to do */ }
+PRE_SYSCALL(_ksem_wait)(long long id_) { /* Nothing to do */ }
+POST_SYSCALL(_ksem_wait)(long long res, long long id_) { /* Nothing to do */ }
+PRE_SYSCALL(_ksem_trywait)(long long id_) { /* Nothing to do */ }
+POST_SYSCALL(_ksem_trywait)(long long res, long long id_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(_ksem_getvalue)(long long id_, void *value_) { /* Nothing to do */ }
+POST_SYSCALL(_ksem_getvalue)(long long res, long long id_, void *value_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(_ksem_destroy)(long long id_) { /* Nothing to do */ }
+POST_SYSCALL(_ksem_destroy)(long long res, long long id_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(_ksem_timedwait)(long long id_, void *abstime_) {
+ if (abstime_) {
+ PRE_READ(abstime_, struct_timespec_sz);
+ }
+}
+POST_SYSCALL(_ksem_timedwait)(long long res, long long id_, void *abstime_) {}
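+/* POSIX message queues (mq_*). */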
+PRE_SYSCALL(mq_open)
+(void *name_, long long oflag_, long long mode_, void *attr_) {
+ const char *name = (const char *)name_;
+ if (name) {
+ PRE_READ(name, __sanitizer::internal_strlen(name) + 1);
+ }
+}
+POST_SYSCALL(mq_open)
+(long long res, void *name_, long long oflag_, long long mode_, void *attr_) {
+ const char *name = (const char *)name_;
+ if (name) {
+ POST_READ(name, __sanitizer::internal_strlen(name) + 1);
+ }
+}
+PRE_SYSCALL(mq_close)(long long mqdes_) { /* Nothing to do */ }
+POST_SYSCALL(mq_close)(long long res, long long mqdes_) { /* Nothing to do */ }
+PRE_SYSCALL(mq_unlink)(void *name_) {
+ const char *name = (const char *)name_;
+ if (name) {
+ PRE_READ(name, __sanitizer::internal_strlen(name) + 1);
+ }
+}
+POST_SYSCALL(mq_unlink)(long long res, void *name_) {
+ const char *name = (const char *)name_;
+ if (name) {
+ POST_READ(name, __sanitizer::internal_strlen(name) + 1);
+ }
+}
+PRE_SYSCALL(mq_getattr)(long long mqdes_, void *mqstat_) { /* Nothing to do */ }
+POST_SYSCALL(mq_getattr)(long long res, long long mqdes_, void *mqstat_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(mq_setattr)(long long mqdes_, void *mqstat_, void *omqstat_) {
+ if (mqstat_) {
+ PRE_READ(mqstat_, struct_mq_attr_sz);
+ }
+}
+POST_SYSCALL(mq_setattr)
+(long long res, long long mqdes_, void *mqstat_, void *omqstat_) {}
+PRE_SYSCALL(mq_notify)(long long mqdes_, void *notification_) {
+ if (notification_) {
+ PRE_READ(notification_, struct_sigevent_sz);
+ }
+}
+POST_SYSCALL(mq_notify)(long long res, long long mqdes_, void *notification_) {}
+PRE_SYSCALL(mq_send)
+(long long mqdes_, void *msg_ptr_, long long msg_len_, long long msg_prio_) {
+ if (msg_ptr_) {
+ PRE_READ(msg_ptr_, msg_len_);
+ }
+}
+POST_SYSCALL(mq_send)
+(long long res, long long mqdes_, void *msg_ptr_, long long msg_len_,
+ long long msg_prio_) {}
+PRE_SYSCALL(mq_receive)
+(long long mqdes_, void *msg_ptr_, long long msg_len_, void *msg_prio_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(mq_receive)
+(long long res, long long mqdes_, void *msg_ptr_, long long msg_len_,
+ void *msg_prio_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(compat_50_mq_timedsend)
+(long long mqdes_, void *msg_ptr_, long long msg_len_, long long msg_prio_,
+ void *abs_timeout_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_50_mq_timedsend)
+(long long res, long long mqdes_, void *msg_ptr_, long long msg_len_,
+ long long msg_prio_, void *abs_timeout_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_50_mq_timedreceive)
+(long long mqdes_, void *msg_ptr_, long long msg_len_, void *msg_prio_,
+ void *abs_timeout_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_50_mq_timedreceive)
+(long long res, long long mqdes_, void *msg_ptr_, long long msg_len_,
+ void *msg_prio_, void *abs_timeout_) {
+ /* TODO */
+}
+/* syscall 267 has been skipped */
+/* syscall 268 has been skipped */
+/* syscall 269 has been skipped */
+PRE_SYSCALL(__posix_rename)(void *from_, void *to_) {
+ const char *from = (const char *)from_;
+ const char *to = (const char *)to_;
+  if (from) {
+ PRE_READ(from, __sanitizer::internal_strlen(from) + 1);
+ }
+ if (to) {
+ PRE_READ(to, __sanitizer::internal_strlen(to) + 1);
+ }
+}
+POST_SYSCALL(__posix_rename)(long long res, void *from_, void *to_) {
+ const char *from = (const char *)from_;
+ const char *to = (const char *)to_;
+ if (from) {
+ POST_READ(from, __sanitizer::internal_strlen(from) + 1);
+ }
+ if (to) {
+ POST_READ(to, __sanitizer::internal_strlen(to) + 1);
+ }
+}
+PRE_SYSCALL(swapctl)(long long cmd_, void *arg_, long long misc_) { /* TODO */ }
+POST_SYSCALL(swapctl)
+(long long res, long long cmd_, void *arg_, long long misc_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_30_getdents)(long long fd_, void *buf_, long long count_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_30_getdents)
+(long long res, long long fd_, void *buf_, long long count_) {
+ /* TODO */
+}
+PRE_SYSCALL(minherit)(void *addr_, long long len_, long long inherit_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(minherit)
+(long long res, void *addr_, long long len_, long long inherit_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(lchmod)(void *path_, long long mode_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(lchmod)(long long res, void *path_, long long mode_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+PRE_SYSCALL(lchown)(void *path_, long long uid_, long long gid_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(lchown)
+(long long res, void *path_, long long uid_, long long gid_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+PRE_SYSCALL(compat_50_lutimes)(void *path_, void *tptr_) { /* TODO */ }
+POST_SYSCALL(compat_50_lutimes)(long long res, void *path_, void *tptr_) {
+ /* TODO */
+}
+PRE_SYSCALL(__msync13)(void *addr_, long long len_, long long flags_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(__msync13)
+(long long res, void *addr_, long long len_, long long flags_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(compat_30___stat13)(void *path_, void *ub_) { /* TODO */ }
+POST_SYSCALL(compat_30___stat13)(long long res, void *path_, void *ub_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_30___fstat13)(long long fd_, void *sb_) { /* TODO */ }
+POST_SYSCALL(compat_30___fstat13)(long long res, long long fd_, void *sb_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_30___lstat13)(void *path_, void *ub_) { /* TODO */ }
+POST_SYSCALL(compat_30___lstat13)(long long res, void *path_, void *ub_) {
+ /* TODO */
+}
+PRE_SYSCALL(__sigaltstack14)(void *nss_, void *oss_) {
+ if (nss_) {
+ PRE_READ(nss_, struct_sigaltstack_sz);
+ }
+  if (oss_) {
+    /* oss_ receives the previous stack description; it is written, not
+       read, by the kernel. */
+    PRE_WRITE(oss_, struct_sigaltstack_sz);
+  }
+}
+POST_SYSCALL(__sigaltstack14)(long long res, void *nss_, void *oss_) {}
+PRE_SYSCALL(__vfork14)(void) { /* Nothing to do */ }
+POST_SYSCALL(__vfork14)(long long res) { /* Nothing to do */ }
+PRE_SYSCALL(__posix_chown)(void *path_, long long uid_, long long gid_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(__posix_chown)
+(long long res, void *path_, long long uid_, long long gid_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+PRE_SYSCALL(__posix_fchown)(long long fd_, long long uid_, long long gid_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(__posix_fchown)
+(long long res, long long fd_, long long uid_, long long gid_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(__posix_lchown)(void *path_, long long uid_, long long gid_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(__posix_lchown)
+(long long res, void *path_, long long uid_, long long gid_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+PRE_SYSCALL(getsid)(long long pid_) { /* Nothing to do */ }
+POST_SYSCALL(getsid)(long long res, long long pid_) { /* Nothing to do */ }
+PRE_SYSCALL(__clone)(long long flags_, void *stack_) { /* Nothing to do */ }
+POST_SYSCALL(__clone)(long long res, long long flags_, void *stack_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(fktrace)
+(long long fd_, long long ops_, long long facs_, long long pid_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(fktrace)
+(long long res, long long fd_, long long ops_, long long facs_,
+ long long pid_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(preadv)
+(long long fd_, void *iovp_, long long iovcnt_, long long PAD_,
+ long long offset_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(preadv)
+(long long res, long long fd_, void *iovp_, long long iovcnt_, long long PAD_,
+ long long offset_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(pwritev)
+(long long fd_, void *iovp_, long long iovcnt_, long long PAD_,
+ long long offset_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(pwritev)
+(long long res, long long fd_, void *iovp_, long long iovcnt_, long long PAD_,
+ long long offset_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(compat_16___sigaction14)
+(long long signum_, void *nsa_, void *osa_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_16___sigaction14)
+(long long res, long long signum_, void *nsa_, void *osa_) {
+ /* TODO */
+}
+PRE_SYSCALL(__sigpending14)(void *set_) { /* Nothing to do */ }
+POST_SYSCALL(__sigpending14)(long long res, void *set_) { /* Nothing to do */ }
+PRE_SYSCALL(__sigprocmask14)(long long how_, void *set_, void *oset_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(__sigprocmask14)
+(long long res, long long how_, void *set_, void *oset_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(__sigsuspend14)(void *set_) {
+ if (set_) {
+ PRE_READ(set_, sizeof(__sanitizer_sigset_t));
+ }
+}
+POST_SYSCALL(__sigsuspend14)(long long res, void *set_) {
+ if (set_) {
+    POST_READ(set_, sizeof(__sanitizer_sigset_t));
+ }
+}
+PRE_SYSCALL(compat_16___sigreturn14)(void *sigcntxp_) { /* TODO */ }
+POST_SYSCALL(compat_16___sigreturn14)(long long res, void *sigcntxp_) {
+ /* TODO */
+}
+PRE_SYSCALL(__getcwd)(void *bufp_, long long length_) { /* Nothing to do */ }
+POST_SYSCALL(__getcwd)(long long res, void *bufp_, long long length_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(fchroot)(long long fd_) { /* Nothing to do */ }
+POST_SYSCALL(fchroot)(long long res, long long fd_) { /* Nothing to do */ }
+PRE_SYSCALL(compat_30_fhopen)(void *fhp_, long long flags_) { /* TODO */ }
+POST_SYSCALL(compat_30_fhopen)(long long res, void *fhp_, long long flags_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_30_fhstat)(void *fhp_, void *sb_) { /* TODO */ }
+POST_SYSCALL(compat_30_fhstat)(long long res, void *fhp_, void *sb_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_20_fhstatfs)(void *fhp_, void *buf_) { /* TODO */ }
+POST_SYSCALL(compat_20_fhstatfs)(long long res, void *fhp_, void *buf_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_50_____semctl13)
+(long long semid_, long long semnum_, long long cmd_, void *arg_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_50_____semctl13)
+(long long res, long long semid_, long long semnum_, long long cmd_,
+ void *arg_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_50___msgctl13)
+(long long msqid_, long long cmd_, void *buf_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_50___msgctl13)
+(long long res, long long msqid_, long long cmd_, void *buf_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_50___shmctl13)
+(long long shmid_, long long cmd_, void *buf_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_50___shmctl13)
+(long long res, long long shmid_, long long cmd_, void *buf_) {
+ /* TODO */
+}
+PRE_SYSCALL(lchflags)(void *path_, long long flags_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(lchflags)(long long res, void *path_, long long flags_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+PRE_SYSCALL(issetugid)(void) { /* Nothing to do */ }
+POST_SYSCALL(issetugid)(long long res) { /* Nothing to do */ }
+PRE_SYSCALL(utrace)(void *label_, void *addr_, long long len_) {
+ const char *label = (const char *)label_;
+ if (label) {
+ PRE_READ(label, __sanitizer::internal_strlen(label) + 1);
+ }
+ if (addr_) {
+ PRE_READ(addr_, len_);
+ }
+}
+POST_SYSCALL(utrace)(long long res, void *label_, void *addr_, long long len_) {
+ const char *label = (const char *)label_;
+ if (label) {
+ POST_READ(label, __sanitizer::internal_strlen(label) + 1);
+ }
+ if (addr_) {
+ POST_READ(addr_, len_);
+ }
+}
+PRE_SYSCALL(getcontext)(void *ucp_) { /* Nothing to do */ }
+POST_SYSCALL(getcontext)(long long res, void *ucp_) { /* Nothing to do */ }
+PRE_SYSCALL(setcontext)(void *ucp_) {
+ if (ucp_) {
+ PRE_READ(ucp_, ucontext_t_sz);
+ }
+}
+POST_SYSCALL(setcontext)(long long res, void *ucp_) {}
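+/* _lwp_*: NetBSD light-weight process (thread) syscalls. */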
+PRE_SYSCALL(_lwp_create)(void *ucp_, long long flags_, void *new_lwp_) {
+ if (ucp_) {
+ PRE_READ(ucp_, ucontext_t_sz);
+ }
+}
+POST_SYSCALL(_lwp_create)
+(long long res, void *ucp_, long long flags_, void *new_lwp_) {}
+PRE_SYSCALL(_lwp_exit)(void) { /* Nothing to do */ }
+POST_SYSCALL(_lwp_exit)(long long res) { /* Nothing to do */ }
+PRE_SYSCALL(_lwp_self)(void) { /* Nothing to do */ }
+POST_SYSCALL(_lwp_self)(long long res) { /* Nothing to do */ }
+PRE_SYSCALL(_lwp_wait)(long long wait_for_, void *departed_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(_lwp_wait)(long long res, long long wait_for_, void *departed_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(_lwp_suspend)(long long target_) { /* Nothing to do */ }
+POST_SYSCALL(_lwp_suspend)(long long res, long long target_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(_lwp_continue)(long long target_) { /* Nothing to do */ }
+POST_SYSCALL(_lwp_continue)(long long res, long long target_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(_lwp_wakeup)(long long target_) { /* Nothing to do */ }
+POST_SYSCALL(_lwp_wakeup)(long long res, long long target_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(_lwp_getprivate)(void) { /* Nothing to do */ }
+POST_SYSCALL(_lwp_getprivate)(long long res) { /* Nothing to do */ }
+PRE_SYSCALL(_lwp_setprivate)(void *ptr_) { /* Nothing to do */ }
+POST_SYSCALL(_lwp_setprivate)(long long res, void *ptr_) { /* Nothing to do */ }
+PRE_SYSCALL(_lwp_kill)(long long target_, long long signo_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(_lwp_kill)(long long res, long long target_, long long signo_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(_lwp_detach)(long long target_) { /* Nothing to do */ }
+POST_SYSCALL(_lwp_detach)(long long res, long long target_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(compat_50__lwp_park)
+(void *ts_, long long unpark_, void *hint_, void *unparkhint_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_50__lwp_park)
+(long long res, void *ts_, long long unpark_, void *hint_, void *unparkhint_) {
+ /* TODO */
+}
+PRE_SYSCALL(_lwp_unpark)(long long target_, void *hint_) { /* Nothing to do */ }
+POST_SYSCALL(_lwp_unpark)(long long res, long long target_, void *hint_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(_lwp_unpark_all)(void *targets_, long long ntargets_, void *hint_) {
+ if (targets_) {
+ PRE_READ(targets_, ntargets_ * sizeof(__sanitizer_lwpid_t));
+ }
+}
+POST_SYSCALL(_lwp_unpark_all)
+(long long res, void *targets_, long long ntargets_, void *hint_) {}
+PRE_SYSCALL(_lwp_setname)(long long target_, void *name_) {
+ const char *name = (const char *)name_;
+ if (name) {
+ PRE_READ(name, __sanitizer::internal_strlen(name) + 1);
+ }
+}
+POST_SYSCALL(_lwp_setname)(long long res, long long target_, void *name_) {
+ const char *name = (const char *)name_;
+ if (name) {
+ POST_READ(name, __sanitizer::internal_strlen(name) + 1);
+ }
+}
+PRE_SYSCALL(_lwp_getname)(long long target_, void *name_, long long len_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(_lwp_getname)
+(long long res, long long target_, void *name_, long long len_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(_lwp_ctl)(long long features_, void **address_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(_lwp_ctl)(long long res, long long features_, void **address_) {
+ /* Nothing to do */
+}
+/* syscall 326 has been skipped */
+/* syscall 327 has been skipped */
+/* syscall 328 has been skipped */
+/* syscall 329 has been skipped */
+PRE_SYSCALL(compat_60_sa_register)
+(void *newv_, void **oldv_, long long flags_, long long stackinfo_offset_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_60_sa_register)
+(long long res, void *newv_, void **oldv_, long long flags_,
+ long long stackinfo_offset_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_60_sa_stacks)(long long num_, void *stacks_) { /* TODO */ }
+POST_SYSCALL(compat_60_sa_stacks)
+(long long res, long long num_, void *stacks_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_60_sa_enable)(void) { /* TODO */ }
+POST_SYSCALL(compat_60_sa_enable)(long long res) { /* TODO */ }
+PRE_SYSCALL(compat_60_sa_setconcurrency)(long long concurrency_) { /* TODO */ }
+POST_SYSCALL(compat_60_sa_setconcurrency)
+(long long res, long long concurrency_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_60_sa_yield)(void) { /* TODO */ }
+POST_SYSCALL(compat_60_sa_yield)(long long res) { /* TODO */ }
+PRE_SYSCALL(compat_60_sa_preempt)(long long sa_id_) { /* TODO */ }
+POST_SYSCALL(compat_60_sa_preempt)(long long res, long long sa_id_) {
+ /* TODO */
+}
+/* syscall 336 has been skipped */
+/* syscall 337 has been skipped */
+/* syscall 338 has been skipped */
+/* syscall 339 has been skipped */
+PRE_SYSCALL(__sigaction_sigtramp)
+(long long signum_, void *nsa_, void *osa_, void *tramp_, long long vers_) {
+ if (nsa_) {
+ PRE_READ(nsa_, sizeof(__sanitizer_sigaction));
+ }
+}
+POST_SYSCALL(__sigaction_sigtramp)
+(long long res, long long signum_, void *nsa_, void *osa_, void *tramp_,
+ long long vers_) {
+ if (nsa_) {
+    POST_READ(nsa_, sizeof(__sanitizer_sigaction));
+ }
+}
+/* syscall 341 has been skipped */
+/* syscall 342 has been skipped */
+PRE_SYSCALL(rasctl)(void *addr_, long long len_, long long op_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(rasctl)
+(long long res, void *addr_, long long len_, long long op_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(kqueue)(void) { /* Nothing to do */ }
+POST_SYSCALL(kqueue)(long long res) { /* Nothing to do */ }
+PRE_SYSCALL(compat_50_kevent)
+(long long fd_, void *changelist_, long long nchanges_, void *eventlist_,
+ long long nevents_, void *timeout_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_50_kevent)
+(long long res, long long fd_, void *changelist_, long long nchanges_,
+ void *eventlist_, long long nevents_, void *timeout_) {
+ /* TODO */
+}
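+/* Thread scheduling: priority, policy and CPU affinity. */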
+PRE_SYSCALL(_sched_setparam)
+(long long pid_, long long lid_, long long policy_, void *params_) {
+ if (params_) {
+ PRE_READ(params_, struct_sched_param_sz);
+ }
+}
+POST_SYSCALL(_sched_setparam)
+(long long res, long long pid_, long long lid_, long long policy_,
+ void *params_) {
+ if (params_) {
+    POST_READ(params_, struct_sched_param_sz);
+ }
+}
+PRE_SYSCALL(_sched_getparam)
+(long long pid_, long long lid_, void *policy_, void *params_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(_sched_getparam)
+(long long res, long long pid_, long long lid_, void *policy_, void *params_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(_sched_setaffinity)
+(long long pid_, long long lid_, long long size_, void *cpuset_) {
+ if (cpuset_) {
+ PRE_READ(cpuset_, size_);
+ }
+}
+POST_SYSCALL(_sched_setaffinity)
+(long long res, long long pid_, long long lid_, long long size_,
+ void *cpuset_) {
+ if (cpuset_) {
+    POST_READ(cpuset_, size_);
+ }
+}
+PRE_SYSCALL(_sched_getaffinity)
+(long long pid_, long long lid_, long long size_, void *cpuset_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(_sched_getaffinity)
+(long long res, long long pid_, long long lid_, long long size_,
+ void *cpuset_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(sched_yield)(void) { /* Nothing to do */ }
+POST_SYSCALL(sched_yield)(long long res) { /* Nothing to do */ }
+PRE_SYSCALL(_sched_protect)(long long priority_) { /* Nothing to do */ }
+POST_SYSCALL(_sched_protect)(long long res, long long priority_) {
+ /* Nothing to do */
+}
+/* syscall 352 has been skipped */
+/* syscall 353 has been skipped */
+PRE_SYSCALL(fsync_range)
+(long long fd_, long long flags_, long long start_, long long length_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(fsync_range)
+(long long res, long long fd_, long long flags_, long long start_,
+ long long length_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(uuidgen)(void *store_, long long count_) { /* Nothing to do */ }
+POST_SYSCALL(uuidgen)(long long res, void *store_, long long count_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(getvfsstat)(void *buf_, long long bufsize_, long long flags_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(getvfsstat)
+(long long res, void *buf_, long long bufsize_, long long flags_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(statvfs1)(void *path_, void *buf_, long long flags_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(statvfs1)
+(long long res, void *path_, void *buf_, long long flags_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+PRE_SYSCALL(fstatvfs1)(long long fd_, void *buf_, long long flags_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(fstatvfs1)
+(long long res, long long fd_, void *buf_, long long flags_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(compat_30_fhstatvfs1)(void *fhp_, void *buf_, long long flags_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_30_fhstatvfs1)
+(long long res, void *fhp_, void *buf_, long long flags_) {
+ /* TODO */
+}
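+/* Extended attributes (extattr_*); path arguments are NUL-terminated
+   strings read by the kernel. */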
+PRE_SYSCALL(extattrctl)
+(void *path_, long long cmd_, void *filename_, long long attrnamespace_,
+ void *attrname_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(extattrctl)
+(long long res, void *path_, long long cmd_, void *filename_,
+ long long attrnamespace_, void *attrname_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+PRE_SYSCALL(extattr_set_file)
+(void *path_, long long attrnamespace_, void *attrname_, void *data_,
+ long long nbytes_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(extattr_set_file)
+(long long res, void *path_, long long attrnamespace_, void *attrname_,
+ void *data_, long long nbytes_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+PRE_SYSCALL(extattr_get_file)
+(void *path_, long long attrnamespace_, void *attrname_, void *data_,
+ long long nbytes_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(extattr_get_file)
+(long long res, void *path_, long long attrnamespace_, void *attrname_,
+ void *data_, long long nbytes_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+PRE_SYSCALL(extattr_delete_file)
+(void *path_, long long attrnamespace_, void *attrname_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(extattr_delete_file)
+(long long res, void *path_, long long attrnamespace_, void *attrname_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+PRE_SYSCALL(extattr_set_fd)
+(long long fd_, long long attrnamespace_, void *attrname_, void *data_,
+ long long nbytes_) {
+ /* TODO */
+}
+POST_SYSCALL(extattr_set_fd)
+(long long res, long long fd_, long long attrnamespace_, void *attrname_,
+ void *data_, long long nbytes_) {
+ /* TODO */
+}
+PRE_SYSCALL(extattr_get_fd)
+(long long fd_, long long attrnamespace_, void *attrname_, void *data_,
+ long long nbytes_) {
+ /* TODO */
+}
+POST_SYSCALL(extattr_get_fd)
+(long long res, long long fd_, long long attrnamespace_, void *attrname_,
+ void *data_, long long nbytes_) {
+ /* TODO */
+}
+PRE_SYSCALL(extattr_delete_fd)
+(long long fd_, long long attrnamespace_, void *attrname_) {
+ /* TODO */
+}
+POST_SYSCALL(extattr_delete_fd)
+(long long res, long long fd_, long long attrnamespace_, void *attrname_) {
+ /* TODO */
+}
+PRE_SYSCALL(extattr_set_link)
+(void *path_, long long attrnamespace_, void *attrname_, void *data_,
+ long long nbytes_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(extattr_set_link)
+(long long res, void *path_, long long attrnamespace_, void *attrname_,
+ void *data_, long long nbytes_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+PRE_SYSCALL(extattr_get_link)
+(void *path_, long long attrnamespace_, void *attrname_, void *data_,
+ long long nbytes_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(extattr_get_link)
+(long long res, void *path_, long long attrnamespace_, void *attrname_,
+ void *data_, long long nbytes_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+PRE_SYSCALL(extattr_delete_link)
+(void *path_, long long attrnamespace_, void *attrname_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(extattr_delete_link)
+(long long res, void *path_, long long attrnamespace_, void *attrname_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+PRE_SYSCALL(extattr_list_fd)
+(long long fd_, long long attrnamespace_, void *data_, long long nbytes_) {
+ /* TODO */
+}
+POST_SYSCALL(extattr_list_fd)
+(long long res, long long fd_, long long attrnamespace_, void *data_,
+ long long nbytes_) {
+ /* TODO */
+}
+PRE_SYSCALL(extattr_list_file)
+(void *path_, long long attrnamespace_, void *data_, long long nbytes_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(extattr_list_file)
+(long long res, void *path_, long long attrnamespace_, void *data_,
+ long long nbytes_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+PRE_SYSCALL(extattr_list_link)
+(void *path_, long long attrnamespace_, void *data_, long long nbytes_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(extattr_list_link)
+(long long res, void *path_, long long attrnamespace_, void *data_,
+ long long nbytes_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+PRE_SYSCALL(compat_50_pselect)
+(long long nd_, void *in_, void *ou_, void *ex_, void *ts_, void *mask_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_50_pselect)
+(long long res, long long nd_, void *in_, void *ou_, void *ex_, void *ts_,
+ void *mask_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_50_pollts)
+(void *fds_, long long nfds_, void *ts_, void *mask_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_50_pollts)
+(long long res, void *fds_, long long nfds_, void *ts_, void *mask_) {
+ /* TODO */
+}
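+/* Linux-style xattr interface layered over extended attributes. */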
+PRE_SYSCALL(setxattr)
+(void *path_, void *name_, void *value_, long long size_, long long flags_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(setxattr)
+(long long res, void *path_, void *name_, void *value_, long long size_,
+ long long flags_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+PRE_SYSCALL(lsetxattr)
+(void *path_, void *name_, void *value_, long long size_, long long flags_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(lsetxattr)
+(long long res, void *path_, void *name_, void *value_, long long size_,
+ long long flags_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+PRE_SYSCALL(fsetxattr)
+(long long fd_, void *name_, void *value_, long long size_, long long flags_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(fsetxattr)
+(long long res, long long fd_, void *name_, void *value_, long long size_,
+ long long flags_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(getxattr)(void *path_, void *name_, void *value_, long long size_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(getxattr)
+(long long res, void *path_, void *name_, void *value_, long long size_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+PRE_SYSCALL(lgetxattr)
+(void *path_, void *name_, void *value_, long long size_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(lgetxattr)
+(long long res, void *path_, void *name_, void *value_, long long size_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+PRE_SYSCALL(fgetxattr)
+(long long fd_, void *name_, void *value_, long long size_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(fgetxattr)
+(long long res, long long fd_, void *name_, void *value_, long long size_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(listxattr)(void *path_, void *list_, long long size_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(listxattr)
+(long long res, void *path_, void *list_, long long size_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+PRE_SYSCALL(llistxattr)(void *path_, void *list_, long long size_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(llistxattr)
+(long long res, void *path_, void *list_, long long size_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+PRE_SYSCALL(flistxattr)(long long fd_, void *list_, long long size_) {
+ /* TODO */
+}
+POST_SYSCALL(flistxattr)
+(long long res, long long fd_, void *list_, long long size_) {
+ /* TODO */
+}
+PRE_SYSCALL(removexattr)(void *path_, void *name_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(removexattr)(long long res, void *path_, void *name_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+PRE_SYSCALL(lremovexattr)(void *path_, void *name_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(lremovexattr)(long long res, void *path_, void *name_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+PRE_SYSCALL(fremovexattr)(long long fd_, void *name_) { /* TODO */ }
+POST_SYSCALL(fremovexattr)(long long res, long long fd_, void *name_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_50___stat30)(void *path_, void *ub_) { /* TODO */ }
+POST_SYSCALL(compat_50___stat30)(long long res, void *path_, void *ub_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_50___fstat30)(long long fd_, void *sb_) { /* TODO */ }
+POST_SYSCALL(compat_50___fstat30)(long long res, long long fd_, void *sb_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_50___lstat30)(void *path_, void *ub_) { /* TODO */ }
+POST_SYSCALL(compat_50___lstat30)(long long res, void *path_, void *ub_) {
+ /* TODO */
+}
+PRE_SYSCALL(__getdents30)(long long fd_, void *buf_, long long count_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(__getdents30)
+(long long res, long long fd_, void *buf_, long long count_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(posix_fadvise)(long long) { /* Nothing to do */ }
+POST_SYSCALL(posix_fadvise)(long long res, long long) { /* Nothing to do */ }
+PRE_SYSCALL(compat_30___fhstat30)(void *fhp_, void *sb_) { /* TODO */ }
+POST_SYSCALL(compat_30___fhstat30)(long long res, void *fhp_, void *sb_) {
+ /* TODO */
+}
+PRE_SYSCALL(compat_50___ntp_gettime30)(void *ntvp_) { /* TODO */ }
+POST_SYSCALL(compat_50___ntp_gettime30)(long long res, void *ntvp_) {
+ /* TODO */
+}
+PRE_SYSCALL(__socket30)
+(long long domain_, long long type_, long long protocol_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(__socket30)
+(long long res, long long domain_, long long type_, long long protocol_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(__getfh30)(void *fname_, void *fhp_, void *fh_size_) {
+ const char *fname = (const char *)fname_;
+ if (fname) {
+ PRE_READ(fname, __sanitizer::internal_strlen(fname) + 1);
+ }
+}
+POST_SYSCALL(__getfh30)
+(long long res, void *fname_, void *fhp_, void *fh_size_) {
+ const char *fname = (const char *)fname_;
+ if (res == 0) {
+ if (fname) {
+ POST_READ(fname, __sanitizer::internal_strlen(fname) + 1);
+ }
+ }
+}
+PRE_SYSCALL(__fhopen40)(void *fhp_, long long fh_size_, long long flags_) {
+ if (fhp_) {
+ PRE_READ(fhp_, fh_size_);
+ }
+}
+POST_SYSCALL(__fhopen40)
+(long long res, void *fhp_, long long fh_size_, long long flags_) {}
+PRE_SYSCALL(__fhstatvfs140)
+(void *fhp_, long long fh_size_, void *buf_, long long flags_) {
+ if (fhp_) {
+ PRE_READ(fhp_, fh_size_);
+ }
+}
+POST_SYSCALL(__fhstatvfs140)
+(long long res, void *fhp_, long long fh_size_, void *buf_, long long flags_) {}
+PRE_SYSCALL(compat_50___fhstat40)(void *fhp_, long long fh_size_, void *sb_) {
+ if (fhp_) {
+ PRE_READ(fhp_, fh_size_);
+ }
+}
+POST_SYSCALL(compat_50___fhstat40)
+(long long res, void *fhp_, long long fh_size_, void *sb_) {}
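+/* POSIX asynchronous I/O (aio_*); each hook marks the caller's aiocb
+   control block as read. */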
+PRE_SYSCALL(aio_cancel)(long long fildes_, void *aiocbp_) {
+ if (aiocbp_) {
+ PRE_READ(aiocbp_, sizeof(struct __sanitizer_aiocb));
+ }
+}
+POST_SYSCALL(aio_cancel)(long long res, long long fildes_, void *aiocbp_) {}
+PRE_SYSCALL(aio_error)(void *aiocbp_) {
+ if (aiocbp_) {
+ PRE_READ(aiocbp_, sizeof(struct __sanitizer_aiocb));
+ }
+}
+POST_SYSCALL(aio_error)(long long res, void *aiocbp_) {}
+PRE_SYSCALL(aio_fsync)(long long op_, void *aiocbp_) {
+ if (aiocbp_) {
+ PRE_READ(aiocbp_, sizeof(struct __sanitizer_aiocb));
+ }
+}
+POST_SYSCALL(aio_fsync)(long long res, long long op_, void *aiocbp_) {}
+PRE_SYSCALL(aio_read)(void *aiocbp_) {
+ if (aiocbp_) {
+ PRE_READ(aiocbp_, sizeof(struct __sanitizer_aiocb));
+ }
+}
+POST_SYSCALL(aio_read)(long long res, void *aiocbp_) {}
+PRE_SYSCALL(aio_return)(void *aiocbp_) {
+ if (aiocbp_) {
+ PRE_READ(aiocbp_, sizeof(struct __sanitizer_aiocb));
+ }
+}
+POST_SYSCALL(aio_return)(long long res, void *aiocbp_) {}
+PRE_SYSCALL(compat_50_aio_suspend)
+(void *list_, long long nent_, void *timeout_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_50_aio_suspend)
+(long long res, void *list_, long long nent_, void *timeout_) {
+ /* TODO */
+}
+PRE_SYSCALL(aio_write)(void *aiocbp_) {
+ if (aiocbp_) {
+ PRE_READ(aiocbp_, sizeof(struct __sanitizer_aiocb));
+ }
+}
+POST_SYSCALL(aio_write)(long long res, void *aiocbp_) {}
+PRE_SYSCALL(lio_listio)
+(long long mode_, void *list_, long long nent_, void *sig_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(lio_listio)
+(long long res, long long mode_, void *list_, long long nent_, void *sig_) {
+ /* Nothing to do */
+}
+/* syscall 407 has been skipped */
+/* syscall 408 has been skipped */
+/* syscall 409 has been skipped */
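+/* Hooks with a numeric suffix (__mount50, __stat50, ...) cover the current
+   revision of syscalls whose argument structures changed in an earlier
+   NetBSD release; the compat_NN_* hooks above handle the old revisions. */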
+PRE_SYSCALL(__mount50)
+(void *type_, void *path_, long long flags_, void *data_, long long data_len_) {
+ const char *type = (const char *)type_;
+ const char *path = (const char *)path_;
+ if (type) {
+ PRE_READ(type, __sanitizer::internal_strlen(type) + 1);
+ }
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ if (data_) {
+ PRE_READ(data_, data_len_);
+ }
+}
+POST_SYSCALL(__mount50)
+(long long res, void *type_, void *path_, long long flags_, void *data_,
+ long long data_len_) {
+ const char *type = (const char *)type_;
+ const char *path = (const char *)path_;
+ if (type) {
+ POST_READ(type, __sanitizer::internal_strlen(type) + 1);
+ }
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ if (data_) {
+ POST_READ(data_, data_len_);
+ }
+}
+PRE_SYSCALL(mremap)
+(void *old_address_, long long old_size_, void *new_address_,
+ long long new_size_, long long flags_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(mremap)
+(long long res, void *old_address_, long long old_size_, void *new_address_,
+ long long new_size_, long long flags_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(pset_create)(void *psid_) { /* Nothing to do */ }
+POST_SYSCALL(pset_create)(long long res, void *psid_) { /* Nothing to do */ }
+PRE_SYSCALL(pset_destroy)(long long psid_) { /* Nothing to do */ }
+POST_SYSCALL(pset_destroy)(long long res, long long psid_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(pset_assign)(long long psid_, long long cpuid_, void *opsid_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(pset_assign)
+(long long res, long long psid_, long long cpuid_, void *opsid_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(_pset_bind)
+(long long idtype_, long long first_id_, long long second_id_, long long psid_,
+ void *opsid_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(_pset_bind)
+(long long res, long long idtype_, long long first_id_, long long second_id_,
+ long long psid_, void *opsid_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(__posix_fadvise50)
+(long long fd_, long long PAD_, long long offset_, long long len_,
+ long long advice_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(__posix_fadvise50)
+(long long res, long long fd_, long long PAD_, long long offset_,
+ long long len_, long long advice_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(__select50)
+(long long nd_, void *in_, void *ou_, void *ex_, void *tv_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(__select50)
+(long long res, long long nd_, void *in_, void *ou_, void *ex_, void *tv_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(__gettimeofday50)(void *tp_, void *tzp_) { /* Nothing to do */ }
+POST_SYSCALL(__gettimeofday50)(long long res, void *tp_, void *tzp_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(__settimeofday50)(void *tv_, void *tzp_) {
+ if (tv_) {
+ PRE_READ(tv_, timeval_sz);
+ }
+ if (tzp_) {
+ PRE_READ(tzp_, struct_timezone_sz);
+ }
+}
+POST_SYSCALL(__settimeofday50)(long long res, void *tv_, void *tzp_) {}
+PRE_SYSCALL(__utimes50)(void *path_, void *tptr_) {
+  const char *path = (const char *)path_;
+  if (path) {
+    PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+  }
+  if (tptr_) {
+    /* tptr_ points at an array of two struct timeval values, not an array
+       of pointers. */
+    PRE_READ(tptr_, 2 * timeval_sz);
+  }
+}
+POST_SYSCALL(__utimes50)(long long res, void *path_, void *tptr_) {}
+PRE_SYSCALL(__adjtime50)(void *delta_, void *olddelta_) {
+ if (delta_) {
+ PRE_READ(delta_, timeval_sz);
+ }
+}
+POST_SYSCALL(__adjtime50)(long long res, void *delta_, void *olddelta_) {}
+PRE_SYSCALL(__lfs_segwait50)(void *fsidp_, void *tv_) { /* TODO */ }
+POST_SYSCALL(__lfs_segwait50)(long long res, void *fsidp_, void *tv_) {
+ /* TODO */
+}
+PRE_SYSCALL(__futimes50)(long long fd_, void *tptr_) {
+  if (tptr_) {
+    PRE_READ(tptr_, 2 * timeval_sz);
+  }
+}
+POST_SYSCALL(__futimes50)(long long res, long long fd_, void *tptr_) {}
+PRE_SYSCALL(__lutimes50)(void *path_, void *tptr_) {
+  const char *path = (const char *)path_;
+  if (path) {
+    PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+  }
+  if (tptr_) {
+    PRE_READ(tptr_, 2 * timeval_sz);
+  }
+}
+POST_SYSCALL(__lutimes50)(long long res, void *path_, void *tptr_) {
+  const char *path = (const char *)path_;
+  if (path) {
+    POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+  }
+  if (tptr_) {
+    POST_READ(tptr_, 2 * timeval_sz);
+  }
+}
+PRE_SYSCALL(__setitimer50)(long long which_, void *itv_, void *oitv_) {
+ struct __sanitizer_itimerval *itv = (struct __sanitizer_itimerval *)itv_;
+ if (itv) {
+ PRE_READ(&itv->it_interval.tv_sec, sizeof(__sanitizer_time_t));
+ PRE_READ(&itv->it_interval.tv_usec, sizeof(__sanitizer_suseconds_t));
+ PRE_READ(&itv->it_value.tv_sec, sizeof(__sanitizer_time_t));
+ PRE_READ(&itv->it_value.tv_usec, sizeof(__sanitizer_suseconds_t));
+ }
+}
+POST_SYSCALL(__setitimer50)
+(long long res, long long which_, void *itv_, void *oitv_) {}
+PRE_SYSCALL(__getitimer50)(long long which_, void *itv_) { /* Nothing to do */ }
+POST_SYSCALL(__getitimer50)(long long res, long long which_, void *itv_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(__clock_gettime50)(long long clock_id_, void *tp_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(__clock_gettime50)(long long res, long long clock_id_, void *tp_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(__clock_settime50)(long long clock_id_, void *tp_) {
+ if (tp_) {
+ PRE_READ(tp_, struct_timespec_sz);
+ }
+}
+POST_SYSCALL(__clock_settime50)
+(long long res, long long clock_id_, void *tp_) {}
+PRE_SYSCALL(__clock_getres50)(long long clock_id_, void *tp_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(__clock_getres50)(long long res, long long clock_id_, void *tp_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(__nanosleep50)(void *rqtp_, void *rmtp_) {
+ if (rqtp_) {
+ PRE_READ(rqtp_, struct_timespec_sz);
+ }
+}
+POST_SYSCALL(__nanosleep50)(long long res, void *rqtp_, void *rmtp_) {}
+PRE_SYSCALL(____sigtimedwait50)(void *set_, void *info_, void *timeout_) {
+ if (set_) {
+ PRE_READ(set_, sizeof(__sanitizer_sigset_t));
+ }
+ if (timeout_) {
+ PRE_READ(timeout_, struct_timespec_sz);
+ }
+}
+POST_SYSCALL(____sigtimedwait50)
+(long long res, void *set_, void *info_, void *timeout_) {}
+PRE_SYSCALL(__mq_timedsend50)
+(long long mqdes_, void *msg_ptr_, long long msg_len_, long long msg_prio_,
+ void *abs_timeout_) {
+ if (msg_ptr_) {
+ PRE_READ(msg_ptr_, msg_len_);
+ }
+ if (abs_timeout_) {
+ PRE_READ(abs_timeout_, struct_timespec_sz);
+ }
+}
+POST_SYSCALL(__mq_timedsend50)
+(long long res, long long mqdes_, void *msg_ptr_, long long msg_len_,
+ long long msg_prio_, void *abs_timeout_) {}
+PRE_SYSCALL(__mq_timedreceive50)
+(long long mqdes_, void *msg_ptr_, long long msg_len_, void *msg_prio_,
+ void *abs_timeout_) {
+ if (msg_ptr_) {
+ PRE_READ(msg_ptr_, msg_len_);
+ }
+ if (abs_timeout_) {
+ PRE_READ(abs_timeout_, struct_timespec_sz);
+ }
+}
+POST_SYSCALL(__mq_timedreceive50)
+(long long res, long long mqdes_, void *msg_ptr_, long long msg_len_,
+ void *msg_prio_, void *abs_timeout_) {}
+PRE_SYSCALL(compat_60__lwp_park)
+(void *ts_, long long unpark_, void *hint_, void *unparkhint_) {
+ /* TODO */
+}
+POST_SYSCALL(compat_60__lwp_park)
+(long long res, void *ts_, long long unpark_, void *hint_, void *unparkhint_) {
+ /* TODO */
+}
+PRE_SYSCALL(__kevent50)
+(long long fd_, void *changelist_, long long nchanges_, void *eventlist_,
+ long long nevents_, void *timeout_) {
+ if (changelist_) {
+ PRE_READ(changelist_, nchanges_ * struct_kevent_sz);
+ }
+ if (timeout_) {
+ PRE_READ(timeout_, struct_timespec_sz);
+ }
+}
+POST_SYSCALL(__kevent50)
+(long long res, long long fd_, void *changelist_, long long nchanges_,
+ void *eventlist_, long long nevents_, void *timeout_) {}
+PRE_SYSCALL(__pselect50)
+(long long nd_, void *in_, void *ou_, void *ex_, void *ts_, void *mask_) {
+ if (ts_) {
+ PRE_READ(ts_, struct_timespec_sz);
+ }
+ if (mask_) {
+ PRE_READ(mask_, sizeof(struct __sanitizer_sigset_t));
+ }
+}
+POST_SYSCALL(__pselect50)
+(long long res, long long nd_, void *in_, void *ou_, void *ex_, void *ts_,
+ void *mask_) {}
+PRE_SYSCALL(__pollts50)(void *fds_, long long nfds_, void *ts_, void *mask_) {
+ if (ts_) {
+ PRE_READ(ts_, struct_timespec_sz);
+ }
+ if (mask_) {
+ PRE_READ(mask_, sizeof(struct __sanitizer_sigset_t));
+ }
+}
+POST_SYSCALL(__pollts50)
+(long long res, void *fds_, long long nfds_, void *ts_, void *mask_) {}
+PRE_SYSCALL(__aio_suspend50)(void *list_, long long nent_, void *timeout_) {
+ int i;
+  const struct __sanitizer_aiocb *const *list =
+      (const struct __sanitizer_aiocb *const *)list_;
+ if (list) {
+ for (i = 0; i < nent_; i++) {
+ if (list[i]) {
+ PRE_READ(list[i], sizeof(struct __sanitizer_aiocb));
+ }
+ }
+ }
+ if (timeout_) {
+ PRE_READ(timeout_, struct_timespec_sz);
+ }
+}
+POST_SYSCALL(__aio_suspend50)
+(long long res, void *list_, long long nent_, void *timeout_) {}
+PRE_SYSCALL(__stat50)(void *path_, void *ub_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(__stat50)(long long res, void *path_, void *ub_) {
+ const char *path = (const char *)path_;
+ if (res == 0) {
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ }
+}
+PRE_SYSCALL(__fstat50)(long long fd_, void *sb_) { /* Nothing to do */ }
+POST_SYSCALL(__fstat50)(long long res, long long fd_, void *sb_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(__lstat50)(void *path_, void *ub_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(__lstat50)(long long res, void *path_, void *ub_) {
+ const char *path = (const char *)path_;
+ if (res == 0) {
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ }
+}
+PRE_SYSCALL(____semctl50)
+(long long semid_, long long semnum_, long long cmd_, void *arg_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(____semctl50)
+(long long res, long long semid_, long long semnum_, long long cmd_,
+ void *arg_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(__shmctl50)(long long shmid_, long long cmd_, void *buf_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(__shmctl50)
+(long long res, long long shmid_, long long cmd_, void *buf_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(__msgctl50)(long long msqid_, long long cmd_, void *buf_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(__msgctl50)
+(long long res, long long msqid_, long long cmd_, void *buf_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(__getrusage50)(long long who_, void *rusage_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(__getrusage50)(long long res, long long who_, void *rusage_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(__timer_settime50)
+(long long timerid_, long long flags_, void *value_, void *ovalue_) {
+ struct __sanitizer_itimerval *value = (struct __sanitizer_itimerval *)value_;
+ if (value) {
+ PRE_READ(&value->it_interval.tv_sec, sizeof(__sanitizer_time_t));
+ PRE_READ(&value->it_interval.tv_usec, sizeof(__sanitizer_suseconds_t));
+ PRE_READ(&value->it_value.tv_sec, sizeof(__sanitizer_time_t));
+ PRE_READ(&value->it_value.tv_usec, sizeof(__sanitizer_suseconds_t));
+ }
+}
+POST_SYSCALL(__timer_settime50)
+(long long res, long long timerid_, long long flags_, void *value_,
+ void *ovalue_) {
+ struct __sanitizer_itimerval *value = (struct __sanitizer_itimerval *)value_;
+ if (res == 0) {
+ if (value) {
+ POST_READ(&value->it_interval.tv_sec, sizeof(__sanitizer_time_t));
+ POST_READ(&value->it_interval.tv_usec, sizeof(__sanitizer_suseconds_t));
+ POST_READ(&value->it_value.tv_sec, sizeof(__sanitizer_time_t));
+ POST_READ(&value->it_value.tv_usec, sizeof(__sanitizer_suseconds_t));
+ }
+ }
+}
+PRE_SYSCALL(__timer_gettime50)(long long timerid_, void *value_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(__timer_gettime50)
+(long long res, long long timerid_, void *value_) {
+ /* Nothing to do */
+}
+#if defined(NTP) || !defined(_KERNEL_OPT)
+PRE_SYSCALL(__ntp_gettime50)(void *ntvp_) { /* Nothing to do */ }
+POST_SYSCALL(__ntp_gettime50)(long long res, void *ntvp_) {
+ /* Nothing to do */
+}
+#else
+/* syscall 448 has been skipped */
+#endif
+PRE_SYSCALL(__wait450)
+(long long pid_, void *status_, long long options_, void *rusage_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(__wait450)
+(long long res, long long pid_, void *status_, long long options_,
+ void *rusage_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(__mknod50)(void *path_, long long mode_, long long dev_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(__mknod50)
+(long long res, void *path_, long long mode_, long long dev_) {
+ const char *path = (const char *)path_;
+ if (res == 0) {
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ }
+}
+PRE_SYSCALL(__fhstat50)(void *fhp_, long long fh_size_, void *sb_) {
+ if (fhp_) {
+ PRE_READ(fhp_, fh_size_);
+ }
+}
+POST_SYSCALL(__fhstat50)
+(long long res, void *fhp_, long long fh_size_, void *sb_) {
+ if (res == 0) {
+ if (fhp_) {
+ POST_READ(fhp_, fh_size_);
+ }
+ }
+}
+/* syscall 452 has been skipped */
+PRE_SYSCALL(pipe2)(void *fildes_, long long flags_) { /* Nothing to do */ }
+POST_SYSCALL(pipe2)(long long res, void *fildes_, long long flags_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(dup3)(long long from_, long long to_, long long flags_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(dup3)
+(long long res, long long from_, long long to_, long long flags_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(kqueue1)(long long flags_) { /* Nothing to do */ }
+POST_SYSCALL(kqueue1)(long long res, long long flags_) { /* Nothing to do */ }
+PRE_SYSCALL(paccept)
+(long long s_, void *name_, void *anamelen_, void *mask_, long long flags_) {
+ if (mask_) {
+ PRE_READ(mask_, sizeof(__sanitizer_sigset_t));
+ }
+}
+POST_SYSCALL(paccept)
+(long long res, long long s_, void *name_, void *anamelen_, void *mask_,
+ long long flags_) {
+ if (res >= 0) {
+ if (mask_) {
+      POST_READ(mask_, sizeof(__sanitizer_sigset_t));
+ }
+ }
+}
+PRE_SYSCALL(linkat)
+(long long fd1_, void *name1_, long long fd2_, void *name2_, long long flags_) {
+ const char *name1 = (const char *)name1_;
+ const char *name2 = (const char *)name2_;
+ if (name1) {
+ PRE_READ(name1, __sanitizer::internal_strlen(name1) + 1);
+ }
+ if (name2) {
+ PRE_READ(name2, __sanitizer::internal_strlen(name2) + 1);
+ }
+}
+POST_SYSCALL(linkat)
+(long long res, long long fd1_, void *name1_, long long fd2_, void *name2_,
+ long long flags_) {
+ const char *name1 = (const char *)name1_;
+ const char *name2 = (const char *)name2_;
+ if (res == 0) {
+ if (name1) {
+ POST_READ(name1, __sanitizer::internal_strlen(name1) + 1);
+ }
+ if (name2) {
+ POST_READ(name2, __sanitizer::internal_strlen(name2) + 1);
+ }
+ }
+}
+PRE_SYSCALL(renameat)
+(long long fromfd_, void *from_, long long tofd_, void *to_) {
+ const char *from = (const char *)from_;
+ const char *to = (const char *)to_;
+ if (from) {
+ PRE_READ(from, __sanitizer::internal_strlen(from) + 1);
+ }
+ if (to) {
+ PRE_READ(to, __sanitizer::internal_strlen(to) + 1);
+ }
+}
+POST_SYSCALL(renameat)
+(long long res, long long fromfd_, void *from_, long long tofd_, void *to_) {
+ const char *from = (const char *)from_;
+ const char *to = (const char *)to_;
+ if (res == 0) {
+ if (from) {
+ POST_READ(from, __sanitizer::internal_strlen(from) + 1);
+ }
+ if (to) {
+ POST_READ(to, __sanitizer::internal_strlen(to) + 1);
+ }
+ }
+}
+PRE_SYSCALL(mkfifoat)(long long fd_, void *path_, long long mode_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(mkfifoat)
+(long long res, long long fd_, void *path_, long long mode_) {
+ const char *path = (const char *)path_;
+ if (res == 0) {
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ }
+}
+PRE_SYSCALL(mknodat)
+(long long fd_, void *path_, long long mode_, long long PAD_, long long dev_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(mknodat)
+(long long res, long long fd_, void *path_, long long mode_, long long PAD_,
+ long long dev_) {
+ const char *path = (const char *)path_;
+ if (res == 0) {
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ }
+}
+PRE_SYSCALL(mkdirat)(long long fd_, void *path_, long long mode_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(mkdirat)
+(long long res, long long fd_, void *path_, long long mode_) {
+ const char *path = (const char *)path_;
+ if (res == 0) {
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ }
+}
+PRE_SYSCALL(faccessat)
+(long long fd_, void *path_, long long amode_, long long flag_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(faccessat)
+(long long res, long long fd_, void *path_, long long amode_, long long flag_) {
+ const char *path = (const char *)path_;
+ if (res == 0) {
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ }
+}
+PRE_SYSCALL(fchmodat)
+(long long fd_, void *path_, long long mode_, long long flag_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(fchmodat)
+(long long res, long long fd_, void *path_, long long mode_, long long flag_) {
+ const char *path = (const char *)path_;
+ if (res == 0) {
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ }
+}
+PRE_SYSCALL(fchownat)
+(long long fd_, void *path_, long long owner_, long long group_,
+ long long flag_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(fchownat)
+(long long res, long long fd_, void *path_, long long owner_, long long group_,
+ long long flag_) {
+ const char *path = (const char *)path_;
+ if (res == 0) {
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ }
+}
+PRE_SYSCALL(fexecve)(long long fd_, void *argp_, void *envp_) { /* TODO */ }
+POST_SYSCALL(fexecve)(long long res, long long fd_, void *argp_, void *envp_) {
+ /* TODO */
+}
+PRE_SYSCALL(fstatat)(long long fd_, void *path_, void *buf_, long long flag_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(fstatat)
+(long long res, long long fd_, void *path_, void *buf_, long long flag_) {
+  const char *path = (const char *)path_;
+  if (res == 0) {
+    if (path) {
+      POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+    }
+  }
+}
+PRE_SYSCALL(utimensat)
+(long long fd_, void *path_, void *tptr_, long long flag_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ if (tptr_) {
+ PRE_READ(tptr_, struct_timespec_sz);
+ }
+}
+POST_SYSCALL(utimensat)
+(long long res, long long fd_, void *path_, void *tptr_, long long flag_) {
+ const char *path = (const char *)path_;
+  if (res == 0) {
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ if (tptr_) {
+ POST_READ(tptr_, struct_timespec_sz);
+ }
+ }
+}
+PRE_SYSCALL(openat)
+(long long fd_, void *path_, long long oflags_, long long mode_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(openat)
+(long long res, long long fd_, void *path_, long long oflags_,
+ long long mode_) {
+ const char *path = (const char *)path_;
+ if (res > 0) {
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ }
+}
+PRE_SYSCALL(readlinkat)
+(long long fd_, void *path_, void *buf_, long long bufsize_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(readlinkat)
+(long long res, long long fd_, void *path_, void *buf_, long long bufsize_) {
+ const char *path = (const char *)path_;
+ if (res > 0) {
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ }
+}
+PRE_SYSCALL(symlinkat)(void *path1_, long long fd_, void *path2_) {
+ const char *path1 = (const char *)path1_;
+ const char *path2 = (const char *)path2_;
+ if (path1) {
+ PRE_READ(path1, __sanitizer::internal_strlen(path1) + 1);
+ }
+ if (path2) {
+ PRE_READ(path2, __sanitizer::internal_strlen(path2) + 1);
+ }
+}
+POST_SYSCALL(symlinkat)
+(long long res, void *path1_, long long fd_, void *path2_) {
+ const char *path1 = (const char *)path1_;
+ const char *path2 = (const char *)path2_;
+ if (res == 0) {
+ if (path1) {
+ POST_READ(path1, __sanitizer::internal_strlen(path1) + 1);
+ }
+ if (path2) {
+ POST_READ(path2, __sanitizer::internal_strlen(path2) + 1);
+ }
+ }
+}
+PRE_SYSCALL(unlinkat)(long long fd_, void *path_, long long flag_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(unlinkat)
+(long long res, long long fd_, void *path_, long long flag_) {
+ const char *path = (const char *)path_;
+ if (res == 0) {
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ }
+}
+PRE_SYSCALL(futimens)(long long fd_, void *tptr_) {
+  // times points to an array of two timespecs (access and modification
+  // times), not to an array of pointers.
+  struct __sanitizer_timespec *tptr = (struct __sanitizer_timespec *)tptr_;
+  if (tptr) {
+    PRE_READ(&tptr[0], struct_timespec_sz);
+    PRE_READ(&tptr[1], struct_timespec_sz);
+ }
+}
+POST_SYSCALL(futimens)(long long res, long long fd_, void *tptr_) {
+  struct __sanitizer_timespec *tptr = (struct __sanitizer_timespec *)tptr_;
+  if (res == 0) {
+    if (tptr) {
+      POST_READ(&tptr[0], struct_timespec_sz);
+      POST_READ(&tptr[1], struct_timespec_sz);
+ }
+ }
+}
+PRE_SYSCALL(__quotactl)(void *path_, void *args_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(__quotactl)(long long res, void *path_, void *args_) {
+ const char *path = (const char *)path_;
+ if (res == 0) {
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ }
+}
+PRE_SYSCALL(posix_spawn)
+(void *pid_, void *path_, void *file_actions_, void *attrp_, void *argv_,
+ void *envp_) {
+ const char *path = (const char *)path_;
+ if (path) {
+ PRE_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+}
+POST_SYSCALL(posix_spawn)
+(long long res, void *pid_, void *path_, void *file_actions_, void *attrp_,
+ void *argv_, void *envp_) {
+ const char *path = (const char *)path_;
+ if (pid_) {
+ if (path) {
+ POST_READ(path, __sanitizer::internal_strlen(path) + 1);
+ }
+ }
+}
+PRE_SYSCALL(recvmmsg)
+(long long s_, void *mmsg_, long long vlen_, long long flags_, void *timeout_) {
+ if (timeout_) {
+ PRE_READ(timeout_, struct_timespec_sz);
+ }
+}
+POST_SYSCALL(recvmmsg)
+(long long res, long long s_, void *mmsg_, long long vlen_, long long flags_,
+ void *timeout_) {
+ if (res >= 0) {
+ if (timeout_) {
+ POST_READ(timeout_, struct_timespec_sz);
+ }
+ }
+}
+PRE_SYSCALL(sendmmsg)
+(long long s_, void *mmsg_, long long vlen_, long long flags_) {
+ struct __sanitizer_mmsghdr *mmsg = (struct __sanitizer_mmsghdr *)mmsg_;
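+  // Only the first 1024 message headers are checked; larger vlen_ values are
+  // clamped below.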
+ if (mmsg) {
+ PRE_READ(mmsg, sizeof(struct __sanitizer_mmsghdr) *
+ (vlen_ > 1024 ? 1024 : vlen_));
+ }
+}
+POST_SYSCALL(sendmmsg)
+(long long res, long long s_, void *mmsg_, long long vlen_, long long flags_) {
+ struct __sanitizer_mmsghdr *mmsg = (struct __sanitizer_mmsghdr *)mmsg_;
+ if (res >= 0) {
+ if (mmsg) {
+ POST_READ(mmsg, sizeof(struct __sanitizer_mmsghdr) *
+ (vlen_ > 1024 ? 1024 : vlen_));
+ }
+ }
+}
+PRE_SYSCALL(clock_nanosleep)
+(long long clock_id_, long long flags_, void *rqtp_, void *rmtp_) {
+ if (rqtp_) {
+ PRE_READ(rqtp_, struct_timespec_sz);
+ }
+}
+POST_SYSCALL(clock_nanosleep)
+(long long res, long long clock_id_, long long flags_, void *rqtp_,
+ void *rmtp_) {
+ if (rqtp_) {
+ POST_READ(rqtp_, struct_timespec_sz);
+ }
+}
+PRE_SYSCALL(___lwp_park60)
+(long long clock_id_, long long flags_, void *ts_, long long unpark_,
+ void *hint_, void *unparkhint_) {
+ if (ts_) {
+ PRE_READ(ts_, struct_timespec_sz);
+ }
+}
+POST_SYSCALL(___lwp_park60)
+(long long res, long long clock_id_, long long flags_, void *ts_,
+ long long unpark_, void *hint_, void *unparkhint_) {
+ if (res == 0) {
+ if (ts_) {
+ POST_READ(ts_, struct_timespec_sz);
+ }
+ }
+}
+PRE_SYSCALL(posix_fallocate)
+(long long fd_, long long PAD_, long long pos_, long long len_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(posix_fallocate)
+(long long res, long long fd_, long long PAD_, long long pos_, long long len_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(fdiscard)
+(long long fd_, long long PAD_, long long pos_, long long len_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(fdiscard)
+(long long res, long long fd_, long long PAD_, long long pos_, long long len_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(wait6)
+(long long idtype_, long long id_, void *status_, long long options_,
+ void *wru_, void *info_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(wait6)
+(long long res, long long idtype_, long long id_, void *status_,
+ long long options_, void *wru_, void *info_) {
+ /* Nothing to do */
+}
+PRE_SYSCALL(clock_getcpuclockid2)
+(long long idtype_, long long id_, void *clock_id_) {
+ /* Nothing to do */
+}
+POST_SYSCALL(clock_getcpuclockid2)
+(long long res, long long idtype_, long long id_, void *clock_id_) {
+ /* Nothing to do */
+}
+#undef SYS_MAXSYSARGS
+} // extern "C"
+
+#undef PRE_SYSCALL
+#undef PRE_READ
+#undef PRE_WRITE
+#undef POST_SYSCALL
+#undef POST_READ
+#undef POST_WRITE
+
+#endif // SANITIZER_NETBSD
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_syscalls_netbsd.inc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_termination.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_termination.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_termination.cc (revision 351984)
@@ -0,0 +1,94 @@
+//===-- sanitizer_termination.cc --------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// This file contains the Sanitizer termination functions CheckFailed and Die,
+/// and the callback functionalities associated with them.
+///
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common.h"
+#include "sanitizer_libc.h"
+
+namespace __sanitizer {
+
+static const int kMaxNumOfInternalDieCallbacks = 5;
+static DieCallbackType InternalDieCallbacks[kMaxNumOfInternalDieCallbacks];
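+// Callbacks are registered into the first free slot; Die() invokes them in
+// reverse registration order.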
+
+bool AddDieCallback(DieCallbackType callback) {
+ for (int i = 0; i < kMaxNumOfInternalDieCallbacks; i++) {
+ if (InternalDieCallbacks[i] == nullptr) {
+ InternalDieCallbacks[i] = callback;
+ return true;
+ }
+ }
+ return false;
+}
+
+bool RemoveDieCallback(DieCallbackType callback) {
+ for (int i = 0; i < kMaxNumOfInternalDieCallbacks; i++) {
+ if (InternalDieCallbacks[i] == callback) {
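+      // Shift the remaining callbacks down over the removed entry and clear
+      // the last slot.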
+ internal_memmove(&InternalDieCallbacks[i], &InternalDieCallbacks[i + 1],
+ sizeof(InternalDieCallbacks[0]) *
+ (kMaxNumOfInternalDieCallbacks - i - 1));
+ InternalDieCallbacks[kMaxNumOfInternalDieCallbacks - 1] = nullptr;
+ return true;
+ }
+ }
+ return false;
+}
+
+static DieCallbackType UserDieCallback;
+void SetUserDieCallback(DieCallbackType callback) {
+ UserDieCallback = callback;
+}
+
+void NORETURN Die() {
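+  // Run the user-supplied callback first, then the internal callbacks in
+  // reverse registration order.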
+ if (UserDieCallback)
+ UserDieCallback();
+ for (int i = kMaxNumOfInternalDieCallbacks - 1; i >= 0; i--) {
+ if (InternalDieCallbacks[i])
+ InternalDieCallbacks[i]();
+ }
+ if (common_flags()->abort_on_error)
+ Abort();
+ internal__exit(common_flags()->exitcode);
+}
+
+static CheckFailedCallbackType CheckFailedCallback;
+void SetCheckFailedCallback(CheckFailedCallbackType callback) {
+ CheckFailedCallback = callback;
+}
+
+const int kSecondsToSleepWhenRecursiveCheckFailed = 2;
+
+void NORETURN CheckFailed(const char *file, int line, const char *cond,
+ u64 v1, u64 v2) {
+ static atomic_uint32_t num_calls;
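+  // More than 10 (possibly recursive) CHECK failures: sleep so that any
+  // other reporting threads can finish, then trap.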
+ if (atomic_fetch_add(&num_calls, 1, memory_order_relaxed) > 10) {
+ SleepForSeconds(kSecondsToSleepWhenRecursiveCheckFailed);
+ Trap();
+ }
+
+ if (CheckFailedCallback) {
+ CheckFailedCallback(file, line, cond, v1, v2);
+ }
+ Report("Sanitizer CHECK failed: %s:%d %s (%lld, %lld)\n", file, line, cond,
+ v1, v2);
+ Die();
+}
+
+} // namespace __sanitizer
+
+using namespace __sanitizer; // NOLINT
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_set_death_callback(void (*callback)(void)) {
+ SetUserDieCallback(callback);
+}
+} // extern "C"
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_termination.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_thread_registry.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_thread_registry.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_thread_registry.cc (revision 351984)
@@ -0,0 +1,351 @@
+//===-- sanitizer_thread_registry.cc --------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between sanitizer tools.
+//
+// General thread bookkeeping functionality.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_thread_registry.h"
+
+namespace __sanitizer {
+
+ThreadContextBase::ThreadContextBase(u32 tid)
+ : tid(tid), unique_id(0), reuse_count(), os_id(0), user_id(0),
+ status(ThreadStatusInvalid), detached(false),
+ thread_type(ThreadType::Regular), parent_tid(0), next(0) {
+ name[0] = '\0';
+ atomic_store(&thread_destroyed, 0, memory_order_release);
+}
+
+ThreadContextBase::~ThreadContextBase() {
+ // ThreadContextBase should never be deleted.
+ CHECK(0);
+}
+
+void ThreadContextBase::SetName(const char *new_name) {
+ name[0] = '\0';
+ if (new_name) {
+ internal_strncpy(name, new_name, sizeof(name));
+ name[sizeof(name) - 1] = '\0';
+ }
+}
+
+void ThreadContextBase::SetDead() {
+ CHECK(status == ThreadStatusRunning ||
+ status == ThreadStatusFinished);
+ status = ThreadStatusDead;
+ user_id = 0;
+ OnDead();
+}
+
+void ThreadContextBase::SetDestroyed() {
+ atomic_store(&thread_destroyed, 1, memory_order_release);
+}
+
+bool ThreadContextBase::GetDestroyed() {
+ return !!atomic_load(&thread_destroyed, memory_order_acquire);
+}
+
+void ThreadContextBase::SetJoined(void *arg) {
+ // FIXME(dvyukov): print message and continue (it's user error).
+ CHECK_EQ(false, detached);
+ CHECK_EQ(ThreadStatusFinished, status);
+ status = ThreadStatusDead;
+ user_id = 0;
+ OnJoined(arg);
+}
+
+void ThreadContextBase::SetFinished() {
+ // ThreadRegistry::FinishThread calls here in ThreadStatusCreated state
+ // for a thread that never actually started. In that case the thread
+ // should go to ThreadStatusFinished regardless of whether it was created
+ // as detached.
+ if (!detached || status == ThreadStatusCreated) status = ThreadStatusFinished;
+ OnFinished();
+}
+
+void ThreadContextBase::SetStarted(tid_t _os_id, ThreadType _thread_type,
+ void *arg) {
+ status = ThreadStatusRunning;
+ os_id = _os_id;
+ thread_type = _thread_type;
+ OnStarted(arg);
+}
+
+void ThreadContextBase::SetCreated(uptr _user_id, u64 _unique_id,
+ bool _detached, u32 _parent_tid, void *arg) {
+ status = ThreadStatusCreated;
+ user_id = _user_id;
+ unique_id = _unique_id;
+ detached = _detached;
+ // Parent tid makes no sense for the main thread.
+ if (tid != 0)
+ parent_tid = _parent_tid;
+ OnCreated(arg);
+}
+
+void ThreadContextBase::Reset() {
+ status = ThreadStatusInvalid;
+ SetName(0);
+ atomic_store(&thread_destroyed, 0, memory_order_release);
+ OnReset();
+}
+
+// ThreadRegistry implementation.
+
+const u32 ThreadRegistry::kUnknownTid = ~0U;
+
+ThreadRegistry::ThreadRegistry(ThreadContextFactory factory, u32 max_threads,
+ u32 thread_quarantine_size, u32 max_reuse)
+ : context_factory_(factory),
+ max_threads_(max_threads),
+ thread_quarantine_size_(thread_quarantine_size),
+ max_reuse_(max_reuse),
+ mtx_(),
+ n_contexts_(0),
+ total_threads_(0),
+ alive_threads_(0),
+ max_alive_threads_(0),
+ running_threads_(0) {
+ threads_ = (ThreadContextBase **)MmapOrDie(max_threads_ * sizeof(threads_[0]),
+ "ThreadRegistry");
+ dead_threads_.clear();
+ invalid_threads_.clear();
+}
+
+void ThreadRegistry::GetNumberOfThreads(uptr *total, uptr *running,
+ uptr *alive) {
+ BlockingMutexLock l(&mtx_);
+ if (total) *total = n_contexts_;
+ if (running) *running = running_threads_;
+ if (alive) *alive = alive_threads_;
+}
+
+uptr ThreadRegistry::GetMaxAliveThreads() {
+ BlockingMutexLock l(&mtx_);
+ return max_alive_threads_;
+}
+
+u32 ThreadRegistry::CreateThread(uptr user_id, bool detached, u32 parent_tid,
+ void *arg) {
+ BlockingMutexLock l(&mtx_);
+ u32 tid = kUnknownTid;
+ ThreadContextBase *tctx = QuarantinePop();
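+  // Reuse a quarantined context when one is available; otherwise allocate
+  // a fresh context and tid.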
+ if (tctx) {
+ tid = tctx->tid;
+ } else if (n_contexts_ < max_threads_) {
+ // Allocate new thread context and tid.
+ tid = n_contexts_++;
+ tctx = context_factory_(tid);
+ threads_[tid] = tctx;
+ } else {
+#if !SANITIZER_GO
+ Report("%s: Thread limit (%u threads) exceeded. Dying.\n",
+ SanitizerToolName, max_threads_);
+#else
+ Printf("race: limit on %u simultaneously alive goroutines is exceeded,"
+ " dying\n", max_threads_);
+#endif
+ Die();
+ }
+ CHECK_NE(tctx, 0);
+ CHECK_NE(tid, kUnknownTid);
+ CHECK_LT(tid, max_threads_);
+ CHECK_EQ(tctx->status, ThreadStatusInvalid);
+ alive_threads_++;
+ if (max_alive_threads_ < alive_threads_) {
+ max_alive_threads_++;
+ CHECK_EQ(alive_threads_, max_alive_threads_);
+ }
+ tctx->SetCreated(user_id, total_threads_++, detached,
+ parent_tid, arg);
+ return tid;
+}
+
+void ThreadRegistry::RunCallbackForEachThreadLocked(ThreadCallback cb,
+ void *arg) {
+ CheckLocked();
+ for (u32 tid = 0; tid < n_contexts_; tid++) {
+ ThreadContextBase *tctx = threads_[tid];
+ if (tctx == 0)
+ continue;
+ cb(tctx, arg);
+ }
+}
+
+u32 ThreadRegistry::FindThread(FindThreadCallback cb, void *arg) {
+ BlockingMutexLock l(&mtx_);
+ for (u32 tid = 0; tid < n_contexts_; tid++) {
+ ThreadContextBase *tctx = threads_[tid];
+ if (tctx != 0 && cb(tctx, arg))
+ return tctx->tid;
+ }
+ return kUnknownTid;
+}
+
+ThreadContextBase *
+ThreadRegistry::FindThreadContextLocked(FindThreadCallback cb, void *arg) {
+ CheckLocked();
+ for (u32 tid = 0; tid < n_contexts_; tid++) {
+ ThreadContextBase *tctx = threads_[tid];
+ if (tctx != 0 && cb(tctx, arg))
+ return tctx;
+ }
+ return 0;
+}
+
+static bool FindThreadContextByOsIdCallback(ThreadContextBase *tctx,
+ void *arg) {
+ return (tctx->os_id == (uptr)arg && tctx->status != ThreadStatusInvalid &&
+ tctx->status != ThreadStatusDead);
+}
+
+ThreadContextBase *ThreadRegistry::FindThreadContextByOsIDLocked(tid_t os_id) {
+ return FindThreadContextLocked(FindThreadContextByOsIdCallback,
+ (void *)os_id);
+}
+
+void ThreadRegistry::SetThreadName(u32 tid, const char *name) {
+ BlockingMutexLock l(&mtx_);
+ CHECK_LT(tid, n_contexts_);
+ ThreadContextBase *tctx = threads_[tid];
+ CHECK_NE(tctx, 0);
+ CHECK_EQ(SANITIZER_FUCHSIA ? ThreadStatusCreated : ThreadStatusRunning,
+ tctx->status);
+ tctx->SetName(name);
+}
+
+void ThreadRegistry::SetThreadNameByUserId(uptr user_id, const char *name) {
+ BlockingMutexLock l(&mtx_);
+ for (u32 tid = 0; tid < n_contexts_; tid++) {
+ ThreadContextBase *tctx = threads_[tid];
+ if (tctx != 0 && tctx->user_id == user_id &&
+ tctx->status != ThreadStatusInvalid) {
+ tctx->SetName(name);
+ return;
+ }
+ }
+}
+
+void ThreadRegistry::DetachThread(u32 tid, void *arg) {
+ BlockingMutexLock l(&mtx_);
+ CHECK_LT(tid, n_contexts_);
+ ThreadContextBase *tctx = threads_[tid];
+ CHECK_NE(tctx, 0);
+ if (tctx->status == ThreadStatusInvalid) {
+ Report("%s: Detach of non-existent thread\n", SanitizerToolName);
+ return;
+ }
+ tctx->OnDetached(arg);
+ if (tctx->status == ThreadStatusFinished) {
+ tctx->SetDead();
+ QuarantinePush(tctx);
+ } else {
+ tctx->detached = true;
+ }
+}
+
+void ThreadRegistry::JoinThread(u32 tid, void *arg) {
+ bool destroyed = false;
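+  // Spin (yielding the CPU) until the target thread is marked destroyed;
+  // this resolves the race between joining and finishing a thread.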
+ do {
+ {
+ BlockingMutexLock l(&mtx_);
+ CHECK_LT(tid, n_contexts_);
+ ThreadContextBase *tctx = threads_[tid];
+ CHECK_NE(tctx, 0);
+ if (tctx->status == ThreadStatusInvalid) {
+ Report("%s: Join of non-existent thread\n", SanitizerToolName);
+ return;
+ }
+ if ((destroyed = tctx->GetDestroyed())) {
+ tctx->SetJoined(arg);
+ QuarantinePush(tctx);
+ }
+ }
+ if (!destroyed)
+ internal_sched_yield();
+ } while (!destroyed);
+}
+
+// Normally this is called when the thread is about to exit. If
+// called in ThreadStatusCreated state, then this thread was never
+// really started. We just did CreateThread for a prospective new
+// thread before trying to create it, and then failed to actually
+// create it, and so never called StartThread.
+void ThreadRegistry::FinishThread(u32 tid) {
+ BlockingMutexLock l(&mtx_);
+ CHECK_GT(alive_threads_, 0);
+ alive_threads_--;
+ CHECK_LT(tid, n_contexts_);
+ ThreadContextBase *tctx = threads_[tid];
+ CHECK_NE(tctx, 0);
+ bool dead = tctx->detached;
+ if (tctx->status == ThreadStatusRunning) {
+ CHECK_GT(running_threads_, 0);
+ running_threads_--;
+ } else {
+ // The thread never really existed.
+ CHECK_EQ(tctx->status, ThreadStatusCreated);
+ dead = true;
+ }
+ tctx->SetFinished();
+ if (dead) {
+ tctx->SetDead();
+ QuarantinePush(tctx);
+ }
+ tctx->SetDestroyed();
+}
+
+void ThreadRegistry::StartThread(u32 tid, tid_t os_id, ThreadType thread_type,
+ void *arg) {
+ BlockingMutexLock l(&mtx_);
+ running_threads_++;
+ CHECK_LT(tid, n_contexts_);
+ ThreadContextBase *tctx = threads_[tid];
+ CHECK_NE(tctx, 0);
+ CHECK_EQ(ThreadStatusCreated, tctx->status);
+ tctx->SetStarted(os_id, thread_type, arg);
+}
+
+void ThreadRegistry::QuarantinePush(ThreadContextBase *tctx) {
+ if (tctx->tid == 0)
+ return; // Don't reuse the main thread. It's a special snowflake.
+ dead_threads_.push_back(tctx);
+ if (dead_threads_.size() <= thread_quarantine_size_)
+ return;
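+  // The quarantine overflowed: recycle the oldest dead context, unless it
+  // has reached the reuse limit, in which case it is retired for good.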
+ tctx = dead_threads_.front();
+ dead_threads_.pop_front();
+ CHECK_EQ(tctx->status, ThreadStatusDead);
+ tctx->Reset();
+ tctx->reuse_count++;
+ if (max_reuse_ > 0 && tctx->reuse_count >= max_reuse_)
+ return;
+ invalid_threads_.push_back(tctx);
+}
+
+ThreadContextBase *ThreadRegistry::QuarantinePop() {
+ if (invalid_threads_.size() == 0)
+ return 0;
+ ThreadContextBase *tctx = invalid_threads_.front();
+ invalid_threads_.pop_front();
+ return tctx;
+}
+
+void ThreadRegistry::SetThreadUserId(u32 tid, uptr user_id) {
+ BlockingMutexLock l(&mtx_);
+ CHECK_LT(tid, n_contexts_);
+ ThreadContextBase *tctx = threads_[tid];
+ CHECK_NE(tctx, 0);
+ CHECK_NE(tctx->status, ThreadStatusInvalid);
+ CHECK_NE(tctx->status, ThreadStatusDead);
+ CHECK_EQ(tctx->user_id, 0);
+ tctx->user_id = user_id;
+}
+
+} // namespace __sanitizer
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_thread_registry.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_thread_registry.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_thread_registry.h (revision 351984)
@@ -0,0 +1,160 @@
+//===-- sanitizer_thread_registry.h -----------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between sanitizer tools.
+//
+// General thread bookkeeping functionality.
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_THREAD_REGISTRY_H
+#define SANITIZER_THREAD_REGISTRY_H
+
+#include "sanitizer_common.h"
+#include "sanitizer_list.h"
+#include "sanitizer_mutex.h"
+
+namespace __sanitizer {
+
+enum ThreadStatus {
+ ThreadStatusInvalid, // Non-existent thread, data is invalid.
+ ThreadStatusCreated, // Created but not yet running.
+ ThreadStatusRunning, // The thread is currently running.
+ ThreadStatusFinished, // Joinable thread is finished but not yet joined.
+ ThreadStatusDead // Joined, but some info is still available.
+};
+
+enum class ThreadType {
+ Regular, // Normal thread
+ Worker, // macOS Grand Central Dispatch (GCD) worker thread
+ Fiber, // Fiber
+};
+
+// Generic thread context. Specific sanitizer tools may inherit from it.
+// If a thread is dead, its context may optionally be reused for a new thread.
+class ThreadContextBase {
+ public:
+ explicit ThreadContextBase(u32 tid);
+ ~ThreadContextBase(); // Should never be called.
+
+ const u32 tid; // Thread ID. Main thread should have tid = 0.
+ u64 unique_id; // Unique thread ID.
+ u32 reuse_count; // Number of times this tid was reused.
+ tid_t os_id; // PID (used for reporting).
+ uptr user_id; // Some opaque user thread id (e.g. pthread_t).
+ char name[64]; // As annotated by user.
+
+ ThreadStatus status;
+ bool detached;
+ ThreadType thread_type;
+
+ u32 parent_tid;
+ ThreadContextBase *next; // For storing thread contexts in a list.
+
+ atomic_uint32_t thread_destroyed; // To address race of Joined vs Finished
+
+ void SetName(const char *new_name);
+
+ void SetDead();
+ void SetJoined(void *arg);
+ void SetFinished();
+ void SetStarted(tid_t _os_id, ThreadType _thread_type, void *arg);
+ void SetCreated(uptr _user_id, u64 _unique_id, bool _detached,
+ u32 _parent_tid, void *arg);
+ void Reset();
+
+ void SetDestroyed();
+ bool GetDestroyed();
+
+  // The following methods may be overridden by subclasses.
+  // Some of them take an opaque arg that may optionally be used
+  // by subclasses.
+ virtual void OnDead() {}
+ virtual void OnJoined(void *arg) {}
+ virtual void OnFinished() {}
+ virtual void OnStarted(void *arg) {}
+ virtual void OnCreated(void *arg) {}
+ virtual void OnReset() {}
+ virtual void OnDetached(void *arg) {}
+};
+
+typedef ThreadContextBase* (*ThreadContextFactory)(u32 tid);
+
+class ThreadRegistry {
+ public:
+ static const u32 kUnknownTid;
+
+ ThreadRegistry(ThreadContextFactory factory, u32 max_threads,
+ u32 thread_quarantine_size, u32 max_reuse = 0);
+ void GetNumberOfThreads(uptr *total = nullptr, uptr *running = nullptr,
+ uptr *alive = nullptr);
+ uptr GetMaxAliveThreads();
+
+ void Lock() { mtx_.Lock(); }
+ void CheckLocked() { mtx_.CheckLocked(); }
+ void Unlock() { mtx_.Unlock(); }
+
+ // Should be guarded by ThreadRegistryLock.
+ ThreadContextBase *GetThreadLocked(u32 tid) {
+ DCHECK_LT(tid, n_contexts_);
+ return threads_[tid];
+ }
+
+ u32 CreateThread(uptr user_id, bool detached, u32 parent_tid, void *arg);
+
+ typedef void (*ThreadCallback)(ThreadContextBase *tctx, void *arg);
+ // Invokes callback with a specified arg for each thread context.
+ // Should be guarded by ThreadRegistryLock.
+ void RunCallbackForEachThreadLocked(ThreadCallback cb, void *arg);
+
+ typedef bool (*FindThreadCallback)(ThreadContextBase *tctx, void *arg);
+ // Finds a thread using the provided callback. Returns kUnknownTid if no
+ // thread is found.
+ u32 FindThread(FindThreadCallback cb, void *arg);
+  // Should be guarded by ThreadRegistryLock. Returns 0 if no thread
+  // is found.
+ ThreadContextBase *FindThreadContextLocked(FindThreadCallback cb,
+ void *arg);
+ ThreadContextBase *FindThreadContextByOsIDLocked(tid_t os_id);
+
+ void SetThreadName(u32 tid, const char *name);
+ void SetThreadNameByUserId(uptr user_id, const char *name);
+ void DetachThread(u32 tid, void *arg);
+ void JoinThread(u32 tid, void *arg);
+ void FinishThread(u32 tid);
+ void StartThread(u32 tid, tid_t os_id, ThreadType thread_type, void *arg);
+ void SetThreadUserId(u32 tid, uptr user_id);
+
+ private:
+ const ThreadContextFactory context_factory_;
+ const u32 max_threads_;
+ const u32 thread_quarantine_size_;
+ const u32 max_reuse_;
+
+ BlockingMutex mtx_;
+
+ u32 n_contexts_; // Number of created thread contexts,
+ // at most max_threads_.
+ u64 total_threads_; // Total number of created threads. May be greater than
+ // max_threads_ if contexts were reused.
+ uptr alive_threads_; // Created or running.
+ uptr max_alive_threads_;
+ uptr running_threads_;
+
+ ThreadContextBase **threads_; // Array of thread contexts is leaked.
+ IntrusiveList<ThreadContextBase> dead_threads_;
+ IntrusiveList<ThreadContextBase> invalid_threads_;
+
+ void QuarantinePush(ThreadContextBase *tctx);
+ ThreadContextBase *QuarantinePop();
+};
+
+typedef GenericScopedLock<ThreadRegistry> ThreadRegistryLock;
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_THREAD_REGISTRY_H
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_tls_get_addr.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_tls_get_addr.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_tls_get_addr.cc (revision 351984)
@@ -0,0 +1,154 @@
+//===-- sanitizer_tls_get_addr.cc -----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Handle the __tls_get_addr call.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_tls_get_addr.h"
+
+#include "sanitizer_flags.h"
+#include "sanitizer_platform_interceptors.h"
+
+namespace __sanitizer {
+#if SANITIZER_INTERCEPT_TLS_GET_ADDR
+
+// The actual parameter that comes to __tls_get_addr
+// is a pointer to a struct with two words in it:
+struct TlsGetAddrParam {
+ uptr dso_id;
+ uptr offset;
+};
+
+// Glibc starting from 2.19 allocates tls using __signal_safe_memalign,
+// which has such header.
+struct Glibc_2_19_tls_header {
+ uptr size;
+ uptr start;
+};
+
+// This must be static TLS
+__attribute__((tls_model("initial-exec")))
+static __thread DTLS dtls;
+
+// Make sure we properly destroy the DTLS objects:
+// this counter should never get too large.
+static atomic_uintptr_t number_of_live_dtls;
+
+static const uptr kDestroyedThread = -1;
+
+static inline void DTLS_Deallocate(DTLS::DTV *dtv, uptr size) {
+ if (!size) return;
+ VReport(2, "__tls_get_addr: DTLS_Deallocate %p %zd\n", dtv, size);
+ UnmapOrDie(dtv, size * sizeof(DTLS::DTV));
+ atomic_fetch_sub(&number_of_live_dtls, 1, memory_order_relaxed);
+}
+
+static inline void DTLS_Resize(uptr new_size) {
+ if (dtls.dtv_size >= new_size) return;
+ new_size = RoundUpToPowerOfTwo(new_size);
+ new_size = Max(new_size, 4096UL / sizeof(DTLS::DTV));
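+  // Grow in power-of-two steps, by at least one page worth of entries, to
+  // amortize the remapping cost.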
+ DTLS::DTV *new_dtv =
+ (DTLS::DTV *)MmapOrDie(new_size * sizeof(DTLS::DTV), "DTLS_Resize");
+ uptr num_live_dtls =
+ atomic_fetch_add(&number_of_live_dtls, 1, memory_order_relaxed);
+ VReport(2, "__tls_get_addr: DTLS_Resize %p %zd\n", &dtls, num_live_dtls);
+ CHECK_LT(num_live_dtls, 1 << 20);
+ uptr old_dtv_size = dtls.dtv_size;
+ DTLS::DTV *old_dtv = dtls.dtv;
+ if (old_dtv_size)
+ internal_memcpy(new_dtv, dtls.dtv, dtls.dtv_size * sizeof(DTLS::DTV));
+ dtls.dtv = new_dtv;
+ dtls.dtv_size = new_size;
+ if (old_dtv_size)
+ DTLS_Deallocate(old_dtv, old_dtv_size);
+}
+
+void DTLS_Destroy() {
+ if (!common_flags()->intercept_tls_get_addr) return;
+ VReport(2, "__tls_get_addr: DTLS_Destroy %p %zd\n", &dtls, dtls.dtv_size);
+ uptr s = dtls.dtv_size;
+ dtls.dtv_size = kDestroyedThread; // Do this before unmap for AS-safety.
+ DTLS_Deallocate(dtls.dtv, s);
+}
+
+#if defined(__powerpc64__) || defined(__mips__)
+// This is glibc's TLS_DTV_OFFSET:
+// "Dynamic thread vector pointers point 0x8000 past the start of each
+// TLS block."
+static const uptr kDtvOffset = 0x8000;
+#else
+static const uptr kDtvOffset = 0;
+#endif
+
+DTLS::DTV *DTLS_on_tls_get_addr(void *arg_void, void *res,
+ uptr static_tls_begin, uptr static_tls_end) {
+ if (!common_flags()->intercept_tls_get_addr) return 0;
+ TlsGetAddrParam *arg = reinterpret_cast<TlsGetAddrParam *>(arg_void);
+ uptr dso_id = arg->dso_id;
+ if (dtls.dtv_size == kDestroyedThread) return 0;
+ DTLS_Resize(dso_id + 1);
+ if (dtls.dtv[dso_id].beg) return 0;
+ uptr tls_size = 0;
+ uptr tls_beg = reinterpret_cast<uptr>(res) - arg->offset - kDtvOffset;
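+  // res points at the requested variable inside the module's TLS block;
+  // subtracting the offset (and the arch-specific DTV bias) recovers the
+  // beginning of that block.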
+ VReport(2, "__tls_get_addr: %p {%p,%p} => %p; tls_beg: %p; sp: %p "
+ "num_live_dtls %zd\n",
+ arg, arg->dso_id, arg->offset, res, tls_beg, &tls_beg,
+ atomic_load(&number_of_live_dtls, memory_order_relaxed));
+ if (dtls.last_memalign_ptr == tls_beg) {
+ tls_size = dtls.last_memalign_size;
+ VReport(2, "__tls_get_addr: glibc <=2.18 suspected; tls={%p,%p}\n",
+ tls_beg, tls_size);
+ } else if (tls_beg >= static_tls_begin && tls_beg < static_tls_end) {
+ // This is the static TLS block which was initialized / unpoisoned at thread
+ // creation.
+ VReport(2, "__tls_get_addr: static tls: %p\n", tls_beg);
+ tls_size = 0;
+ } else if ((tls_beg % 4096) == sizeof(Glibc_2_19_tls_header)) {
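+    // Blocks from __signal_safe_memalign start right after their header,
+    // so their start address is offset by the header size within a page.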
+ // We may want to check gnu_get_libc_version().
+ Glibc_2_19_tls_header *header = (Glibc_2_19_tls_header *)tls_beg - 1;
+ tls_size = header->size;
+ tls_beg = header->start;
+ VReport(2, "__tls_get_addr: glibc >=2.19 suspected; tls={%p %p}\n",
+ tls_beg, tls_size);
+ } else {
+ VReport(2, "__tls_get_addr: Can't guess glibc version\n");
+ // This may happen inside the DTOR of main thread, so just ignore it.
+ tls_size = 0;
+ }
+ dtls.dtv[dso_id].beg = tls_beg;
+ dtls.dtv[dso_id].size = tls_size;
+ return dtls.dtv + dso_id;
+}
+
+void DTLS_on_libc_memalign(void *ptr, uptr size) {
+ if (!common_flags()->intercept_tls_get_addr) return;
+ VReport(2, "DTLS_on_libc_memalign: %p %p\n", ptr, size);
+ dtls.last_memalign_ptr = reinterpret_cast<uptr>(ptr);
+ dtls.last_memalign_size = size;
+}
+
+DTLS *DTLS_Get() { return &dtls; }
+
+bool DTLSInDestruction(DTLS *dtls) {
+ return dtls->dtv_size == kDestroyedThread;
+}
+
+#else
+void DTLS_on_libc_memalign(void *ptr, uptr size) {}
+DTLS::DTV *DTLS_on_tls_get_addr(void *arg, void *res,
+ unsigned long, unsigned long) { return 0; }
+DTLS *DTLS_Get() { return 0; }
+void DTLS_Destroy() {}
+bool DTLSInDestruction(DTLS *dtls) {
+ UNREACHABLE("dtls is unsupported on this platform!");
+}
+
+#endif // SANITIZER_INTERCEPT_TLS_GET_ADDR
+
+} // namespace __sanitizer
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_tls_get_addr.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_tls_get_addr.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_tls_get_addr.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_tls_get_addr.h (revision 351984)
@@ -0,0 +1,62 @@
+//===-- sanitizer_tls_get_addr.h --------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Handle the __tls_get_addr call.
+//
+// All this magic is specific to glibc and is required to work around
+// the lack of an interface that would tell us about the Dynamic TLS (DTLS).
+// https://sourceware.org/bugzilla/show_bug.cgi?id=16291
+//
+// Matters get worse because the glibc implementation changed between
+// 2.18 and 2.19:
+// https://groups.google.com/forum/#!topic/address-sanitizer/BfwYD8HMxTM
+//
+// Before 2.19, every DTLS chunk is allocated with __libc_memalign,
+// which we intercept and thus know where the DTLS is.
+// Since 2.19, DTLS chunks are allocated with __signal_safe_memalign,
+// which is an internal function that wraps a mmap call, neither of which
+// we can intercept. Luckily, __signal_safe_memalign has a simple parseable
+// header which we can use.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_TLS_GET_ADDR_H
+#define SANITIZER_TLS_GET_ADDR_H
+
+#include "sanitizer_common.h"
+
+namespace __sanitizer {
+
+struct DTLS {
+  // Array of DTLS chunks for the current thread.
+ // If beg == 0, the chunk is unused.
+ struct DTV {
+ uptr beg, size;
+ };
+
+ uptr dtv_size;
+ DTV *dtv; // dtv_size elements, allocated by MmapOrDie.
+
+ // Auxiliary fields, don't access them outside sanitizer_tls_get_addr.cc
+ uptr last_memalign_size;
+ uptr last_memalign_ptr;
+};
+
+// Returns pointer and size of a linker-allocated TLS block.
+// Each block is returned exactly once.
+DTLS::DTV *DTLS_on_tls_get_addr(void *arg, void *res, uptr static_tls_begin,
+ uptr static_tls_end);
+void DTLS_on_libc_memalign(void *ptr, uptr size);
+DTLS *DTLS_Get();
+void DTLS_Destroy(); // Make sure to call this before the thread is destroyed.
+// Returns true if DTLS of suspended thread is in destruction process.
+bool DTLSInDestruction(DTLS *dtls);
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_TLS_GET_ADDR_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_tls_get_addr.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_type_traits.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_type_traits.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_type_traits.cc (revision 351984)
@@ -0,0 +1,20 @@
+//===-- sanitizer_type_traits.cc --------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Implements a subset of C++ type traits. This is so we can avoid depending
+// on system C++ headers.
+//
+//===----------------------------------------------------------------------===//
+#include "sanitizer_type_traits.h"
+
+namespace __sanitizer {
+
+const bool true_type::value;
+const bool false_type::value;
+
+} // namespace __sanitizer
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_type_traits.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_type_traits.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_type_traits.h (revision 351984)
@@ -0,0 +1,62 @@
+//===-- sanitizer_type_traits.h ---------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Implements a subset of C++ type traits. This is so we can avoid depending
+// on system C++ headers.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_TYPE_TRAITS_H
+#define SANITIZER_TYPE_TRAITS_H
+
+namespace __sanitizer {
+
+struct true_type {
+ static const bool value = true;
+};
+
+struct false_type {
+ static const bool value = false;
+};
+
+// is_same<T, U>
+//
+// Type trait to compare if types are the same.
+// E.g.
+//
+// ```
+// is_same<int,int>::value - True
+// is_same<int,char>::value - False
+// ```
+template <typename T, typename U>
+struct is_same : public false_type {};
+
+template <typename T>
+struct is_same<T, T> : public true_type {};
+
+// conditional<B, T, F>
+//
+// Defines type as T if B is true or as F otherwise.
+// E.g. the following is true
+//
+// ```
+// is_same<int, conditional<true, int, double>::type>::value
+// is_same<double, conditional<false, int, double>::type>::value
+// ```
+template <bool B, class T, class F>
+struct conditional {
+ using type = T;
+};
+
+template <class T, class F>
+struct conditional<false, T, F> {
+ using type = F;
+};
+
+} // namespace __sanitizer
+
+#endif
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_unwind_linux_libcdep.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_unwind_linux_libcdep.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_unwind_linux_libcdep.cc (revision 351984)
@@ -0,0 +1,176 @@
+//===-- sanitizer_unwind_linux_libcdep.cc ---------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains the unwind.h-based (aka "slow") stack unwinding routines
+// available to the tools on Linux, Android, NetBSD, FreeBSD, and Solaris.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD || \
+ SANITIZER_SOLARIS
+#include "sanitizer_common.h"
+#include "sanitizer_stacktrace.h"
+
+#if SANITIZER_ANDROID
+#include <dlfcn.h> // for dlopen()
+#endif
+
+#if SANITIZER_FREEBSD
+#define _GNU_SOURCE // to declare _Unwind_Backtrace() from <unwind.h>
+#endif
+#include <unwind.h>
+
+namespace __sanitizer {
+
+//---------------------------- UnwindSlow --------------------------------------
+
+typedef struct {
+ uptr absolute_pc;
+ uptr stack_top;
+ uptr stack_size;
+} backtrace_frame_t;
+
+extern "C" {
+typedef void *(*acquire_my_map_info_list_func)();
+typedef void (*release_my_map_info_list_func)(void *map);
+typedef sptr (*unwind_backtrace_signal_arch_func)(
+ void *siginfo, void *sigcontext, void *map_info_list,
+ backtrace_frame_t *backtrace, uptr ignore_depth, uptr max_depth);
+acquire_my_map_info_list_func acquire_my_map_info_list;
+release_my_map_info_list_func release_my_map_info_list;
+unwind_backtrace_signal_arch_func unwind_backtrace_signal_arch;
+} // extern "C"
+
+#if SANITIZER_ANDROID
+void SanitizerInitializeUnwinder() {
+ if (AndroidGetApiLevel() >= ANDROID_LOLLIPOP_MR1) return;
+
+  // Pre-Lollipop Android cannot unwind through signal handler frames with
+  // the libgcc unwinder, but it has a libcorkscrew.so library with the necessary
+ // workarounds.
+ void *p = dlopen("libcorkscrew.so", RTLD_LAZY);
+ if (!p) {
+ VReport(1,
+ "Failed to open libcorkscrew.so. You may see broken stack traces "
+ "in SEGV reports.");
+ return;
+ }
+ acquire_my_map_info_list =
+ (acquire_my_map_info_list_func)(uptr)dlsym(p, "acquire_my_map_info_list");
+ release_my_map_info_list =
+ (release_my_map_info_list_func)(uptr)dlsym(p, "release_my_map_info_list");
+ unwind_backtrace_signal_arch = (unwind_backtrace_signal_arch_func)(uptr)dlsym(
+ p, "unwind_backtrace_signal_arch");
+ if (!acquire_my_map_info_list || !release_my_map_info_list ||
+ !unwind_backtrace_signal_arch) {
+ VReport(1,
+ "Failed to find one of the required symbols in libcorkscrew.so. "
+ "You may see broken stack traces in SEGV reports.");
+ acquire_my_map_info_list = 0;
+ unwind_backtrace_signal_arch = 0;
+ release_my_map_info_list = 0;
+ }
+}
+#endif
+
+#if defined(__arm__) && !SANITIZER_NETBSD
+// NetBSD uses dwarf EH
+#define UNWIND_STOP _URC_END_OF_STACK
+#define UNWIND_CONTINUE _URC_NO_REASON
+#else
+#define UNWIND_STOP _URC_NORMAL_STOP
+#define UNWIND_CONTINUE _URC_NO_REASON
+#endif
+
+uptr Unwind_GetIP(struct _Unwind_Context *ctx) {
+#if defined(__arm__) && !SANITIZER_MAC
+ uptr val;
+ _Unwind_VRS_Result res = _Unwind_VRS_Get(ctx, _UVRSC_CORE,
+ 15 /* r15 = PC */, _UVRSD_UINT32, &val);
+ CHECK(res == _UVRSR_OK && "_Unwind_VRS_Get failed");
+ // Clear the Thumb bit.
+ return val & ~(uptr)1;
+#else
+ return (uptr)_Unwind_GetIP(ctx);
+#endif
+}
+
+struct UnwindTraceArg {
+ BufferedStackTrace *stack;
+ u32 max_depth;
+};
+
+_Unwind_Reason_Code Unwind_Trace(struct _Unwind_Context *ctx, void *param) {
+ UnwindTraceArg *arg = (UnwindTraceArg*)param;
+ CHECK_LT(arg->stack->size, arg->max_depth);
+ uptr pc = Unwind_GetIP(ctx);
+ const uptr kPageSize = GetPageSizeCached();
+ // Let's assume that any pointer in the 0th page (i.e. <0x1000 on i386 and
+ // x86_64) is invalid and stop unwinding here. If we're adding support for
+ // a platform where this isn't true, we need to reconsider this check.
+ if (pc < kPageSize) return UNWIND_STOP;
+ arg->stack->trace_buffer[arg->stack->size++] = pc;
+ if (arg->stack->size == arg->max_depth) return UNWIND_STOP;
+ return UNWIND_CONTINUE;
+}
+
+void BufferedStackTrace::UnwindSlow(uptr pc, u32 max_depth) {
+ CHECK_GE(max_depth, 2);
+ size = 0;
+ UnwindTraceArg arg = {this, Min(max_depth + 1, kStackTraceMax)};
+ _Unwind_Backtrace(Unwind_Trace, &arg);
+ // We need to pop a few frames so that pc is on top.
+ uptr to_pop = LocatePcInTrace(pc);
+ // trace_buffer[0] belongs to the current function so we always pop it,
+ // unless there is only 1 frame in the stack trace (1 frame is always better
+ // than 0!).
+ // 1-frame stacks don't normally happen, but this depends on the actual
+ // unwinder implementation (libgcc, libunwind, etc) which is outside of our
+ // control.
+ if (to_pop == 0 && size > 1)
+ to_pop = 1;
+ PopStackFrames(to_pop);
+#if defined(__GNUC__) && defined(__sparc__)
+ // __builtin_return_address returns the address of the call instruction
+ // on the SPARC and not the return address, so we need to compensate.
+ trace_buffer[0] = GetNextInstructionPc(pc);
+#else
+ trace_buffer[0] = pc;
+#endif
+}
+
+void BufferedStackTrace::UnwindSlow(uptr pc, void *context, u32 max_depth) {
+ CHECK(context);
+ CHECK_GE(max_depth, 2);
+ if (!unwind_backtrace_signal_arch) {
+ UnwindSlow(pc, max_depth);
+ return;
+ }
+
+ void *map = acquire_my_map_info_list();
+ CHECK(map);
+ InternalMmapVector<backtrace_frame_t> frames(kStackTraceMax);
+ // siginfo argument appears to be unused.
+ sptr res = unwind_backtrace_signal_arch(/* siginfo */ 0, context, map,
+ frames.data(),
+ /* ignore_depth */ 0, max_depth);
+ release_my_map_info_list(map);
+ if (res < 0) return;
+ CHECK_LE((uptr)res, kStackTraceMax);
+
+ size = 0;
+  // +2 compensates for the libcorkscrew unwinder returning addresses of call
+ // instructions instead of raw return addresses.
+ for (sptr i = 0; i < res; ++i)
+ trace_buffer[size++] = frames[i].absolute_pc + 2;
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD ||
+ // SANITIZER_SOLARIS
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_unwind_linux_libcdep.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_unwind_win.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_unwind_win.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_unwind_win.cc (revision 351984)
@@ -0,0 +1,75 @@
+//===-- sanitizer_unwind_win.cc -------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// Windows-specific sanitizer unwind functions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+#if SANITIZER_WINDOWS
+
+#define WIN32_LEAN_AND_MEAN
+#define NOGDI
+#include <windows.h>
+
+#include "sanitizer_dbghelp.h" // for StackWalk64
+#include "sanitizer_stacktrace.h"
+#include "sanitizer_symbolizer.h" // for InitializeDbgHelpIfNeeded
+
+using namespace __sanitizer;
+
+#if !SANITIZER_GO
+void BufferedStackTrace::UnwindSlow(uptr pc, u32 max_depth) {
+ CHECK_GE(max_depth, 2);
+ // FIXME: CaptureStackBackTrace might be too slow for us.
+ // FIXME: Compare with StackWalk64.
+ // FIXME: Look at LLVMUnhandledExceptionFilter in Signals.inc
+ size = CaptureStackBackTrace(1, Min(max_depth, kStackTraceMax),
+ (void **)&trace_buffer[0], 0);
+ if (size == 0)
+ return;
+
+ // Skip the RTL frames by searching for the PC in the stacktrace.
+ uptr pc_location = LocatePcInTrace(pc);
+ PopStackFrames(pc_location);
+}
+
+void BufferedStackTrace::UnwindSlow(uptr pc, void *context, u32 max_depth) {
+ CHECK(context);
+ CHECK_GE(max_depth, 2);
+ CONTEXT ctx = *(CONTEXT *)context;
+ STACKFRAME64 stack_frame;
+ memset(&stack_frame, 0, sizeof(stack_frame));
+
+ InitializeDbgHelpIfNeeded();
+
+ size = 0;
+#if defined(_WIN64)
+ int machine_type = IMAGE_FILE_MACHINE_AMD64;
+ stack_frame.AddrPC.Offset = ctx.Rip;
+ stack_frame.AddrFrame.Offset = ctx.Rbp;
+ stack_frame.AddrStack.Offset = ctx.Rsp;
+#else
+ int machine_type = IMAGE_FILE_MACHINE_I386;
+ stack_frame.AddrPC.Offset = ctx.Eip;
+ stack_frame.AddrFrame.Offset = ctx.Ebp;
+ stack_frame.AddrStack.Offset = ctx.Esp;
+#endif
+ stack_frame.AddrPC.Mode = AddrModeFlat;
+ stack_frame.AddrFrame.Mode = AddrModeFlat;
+ stack_frame.AddrStack.Mode = AddrModeFlat;
+ while (StackWalk64(machine_type, GetCurrentProcess(), GetCurrentThread(),
+ &stack_frame, &ctx, NULL, SymFunctionTableAccess64,
+ SymGetModuleBase64, NULL) &&
+ size < Min(max_depth, kStackTraceMax)) {
+ trace_buffer[size++] = (uptr)stack_frame.AddrPC.Offset;
+ }
+}
+#endif // #if !SANITIZER_GO
+
+#endif // SANITIZER_WINDOWS
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_unwind_win.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
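
The context-less Windows path above leans on CaptureStackBackTrace, then trims RTL frames by locating the PC. A minimal sketch of the same call outside the runtime, with symbolization and error handling omitted:

    // Sketch only: fast Windows unwind via CaptureStackBackTrace.
    #include <windows.h>
    #include <cstdio>

    int main() {
      void *frames[62];  // older Windows versions cap skip+capture below 63
      // Skip one frame so the caller, not this call site, is on top,
      // mirroring the runtime's CaptureStackBackTrace(1, ...) usage.
      USHORT n = CaptureStackBackTrace(1, 62, frames, nullptr);
      for (USHORT i = 0; i < n; i++)
        std::printf("#%u %p\n", i, frames[i]);
    }
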
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_vector.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_vector.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_vector.h (revision 351984)
@@ -0,0 +1,128 @@
+//===-- sanitizer_vector.h -------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between sanitizers run-time libraries.
+//
+//===----------------------------------------------------------------------===//
+
+// Low-fat STL-like vector container.
+
+#ifndef SANITIZER_VECTOR_H
+#define SANITIZER_VECTOR_H
+
+#include "sanitizer_common/sanitizer_allocator_internal.h"
+#include "sanitizer_common/sanitizer_libc.h"
+
+namespace __sanitizer {
+
+template<typename T>
+class Vector {
+ public:
+ explicit Vector()
+ : begin_()
+ , end_()
+ , last_() {
+ }
+
+ ~Vector() {
+ if (begin_)
+ InternalFree(begin_);
+ }
+
+ void Reset() {
+ if (begin_)
+ InternalFree(begin_);
+ begin_ = 0;
+ end_ = 0;
+ last_ = 0;
+ }
+
+ uptr Size() const {
+ return end_ - begin_;
+ }
+
+ T &operator[](uptr i) {
+ DCHECK_LT(i, end_ - begin_);
+ return begin_[i];
+ }
+
+ const T &operator[](uptr i) const {
+ DCHECK_LT(i, end_ - begin_);
+ return begin_[i];
+ }
+
+ T *PushBack() {
+ EnsureSize(Size() + 1);
+ T *p = &end_[-1];
+ internal_memset(p, 0, sizeof(*p));
+ return p;
+ }
+
+ T *PushBack(const T& v) {
+ EnsureSize(Size() + 1);
+ T *p = &end_[-1];
+ internal_memcpy(p, &v, sizeof(*p));
+ return p;
+ }
+
+ void PopBack() {
+ DCHECK_GT(end_, begin_);
+ end_--;
+ }
+
+ void Resize(uptr size) {
+ if (size == 0) {
+ end_ = begin_;
+ return;
+ }
+ uptr old_size = Size();
+ if (size <= old_size) {
+ end_ = begin_ + size;
+ return;
+ }
+ EnsureSize(size);
+ if (old_size < size) {
+ for (uptr i = old_size; i < size; i++)
+ internal_memset(&begin_[i], 0, sizeof(begin_[i]));
+ }
+ }
+
+ private:
+ T *begin_;
+ T *end_;
+ T *last_;
+
+ void EnsureSize(uptr size) {
+ if (size <= Size())
+ return;
+ if (size <= (uptr)(last_ - begin_)) {
+ end_ = begin_ + size;
+ return;
+ }
+ uptr cap0 = last_ - begin_;
+ uptr cap = cap0 * 5 / 4; // 25% growth
+ if (cap == 0)
+ cap = 16;
+ if (cap < size)
+ cap = size;
+ T *p = (T*)InternalAlloc(cap * sizeof(T));
+ if (cap0) {
+ internal_memcpy(p, begin_, cap0 * sizeof(T));
+ InternalFree(begin_);
+ }
+ begin_ = p;
+ end_ = begin_ + size;
+ last_ = begin_ + cap;
+ }
+
+ Vector(const Vector&);
+ void operator=(const Vector&);
+};
+} // namespace __sanitizer
+
+#endif // #ifndef SANITIZER_VECTOR_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_vector.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
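
Intended use of the container is the usual push/index/resize cycle. A hypothetical call site, sketch only: the vector is backed by InternalAlloc, so it is usable only once the internal allocator is initialized.

    // Sketch only: hypothetical runtime call site for __sanitizer::Vector.
    #include "sanitizer_vector.h"

    void VectorExample() {
      __sanitizer::Vector<int> v;
      *v.PushBack() = 1;   // PushBack() hands back a zero-initialized slot
      v.PushBack(2);       // PushBack(v) copies an existing value in
      v.Resize(8);         // grows with zero-fill; shrinking keeps capacity
      int sum = 0;
      for (__sanitizer::uptr i = 0; i < v.Size(); i++) sum += v[i];
      (void)sum;           // sum == 3: elements 1, 2, and six zeros
      v.Reset();           // releases the backing store
    }
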
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_win.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_win.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_win.cc (revision 351984)
@@ -0,0 +1,1115 @@
+//===-- sanitizer_win.cc --------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between AddressSanitizer and ThreadSanitizer
+// run-time libraries and implements windows-specific functions from
+// sanitizer_libc.h.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+#if SANITIZER_WINDOWS
+
+#define WIN32_LEAN_AND_MEAN
+#define NOGDI
+#include <windows.h>
+#include <io.h>
+#include <psapi.h>
+#include <stdlib.h>
+
+#include "sanitizer_common.h"
+#include "sanitizer_file.h"
+#include "sanitizer_libc.h"
+#include "sanitizer_mutex.h"
+#include "sanitizer_placement_new.h"
+#include "sanitizer_win_defs.h"
+
+#if defined(PSAPI_VERSION) && PSAPI_VERSION == 1
+#pragma comment(lib, "psapi")
+#endif
+#if SANITIZER_WIN_TRACE
+#include <traceloggingprovider.h>
+// Windows trace logging provider init
+#pragma comment(lib, "advapi32.lib")
+TRACELOGGING_DECLARE_PROVIDER(g_asan_provider);
+// GUID must be the same in utils/AddressSanitizerLoggingProvider.wprp
+TRACELOGGING_DEFINE_PROVIDER(g_asan_provider, "AddressSanitizerLoggingProvider",
+ (0x6c6c766d, 0x3846, 0x4e6a, 0xa4, 0xfb, 0x5b,
+ 0x53, 0x0b, 0xd0, 0xf3, 0xfa));
+#else
+#define TraceLoggingUnregister(x)
+#endif
+
+// A macro to tell the compiler that this part of the code cannot be reached,
+// if the compiler supports this feature. Since we're using this in
+// code that is called when terminating the process, the expansion of the
+// macro should not terminate the process to avoid infinite recursion.
+#if defined(__clang__)
+# define BUILTIN_UNREACHABLE() __builtin_unreachable()
+#elif defined(__GNUC__) && \
+ (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 5))
+# define BUILTIN_UNREACHABLE() __builtin_unreachable()
+#elif defined(_MSC_VER)
+# define BUILTIN_UNREACHABLE() __assume(0)
+#else
+# define BUILTIN_UNREACHABLE()
+#endif
+
+namespace __sanitizer {
+
+#include "sanitizer_syscall_generic.inc"
+
+// --------------------- sanitizer_common.h
+uptr GetPageSize() {
+ SYSTEM_INFO si;
+ GetSystemInfo(&si);
+ return si.dwPageSize;
+}
+
+uptr GetMmapGranularity() {
+ SYSTEM_INFO si;
+ GetSystemInfo(&si);
+ return si.dwAllocationGranularity;
+}
+
+uptr GetMaxUserVirtualAddress() {
+ SYSTEM_INFO si;
+ GetSystemInfo(&si);
+ return (uptr)si.lpMaximumApplicationAddress;
+}
+
+uptr GetMaxVirtualAddress() {
+ return GetMaxUserVirtualAddress();
+}
+
+bool FileExists(const char *filename) {
+ return ::GetFileAttributesA(filename) != INVALID_FILE_ATTRIBUTES;
+}
+
+uptr internal_getpid() {
+ return GetProcessId(GetCurrentProcess());
+}
+
+// In contrast to POSIX, on Windows GetCurrentThreadId()
+// returns a system-unique identifier.
+tid_t GetTid() {
+ return GetCurrentThreadId();
+}
+
+uptr GetThreadSelf() {
+ return GetTid();
+}
+
+#if !SANITIZER_GO
+void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
+ uptr *stack_bottom) {
+ CHECK(stack_top);
+ CHECK(stack_bottom);
+ MEMORY_BASIC_INFORMATION mbi;
+ CHECK_NE(VirtualQuery(&mbi /* on stack */, &mbi, sizeof(mbi)), 0);
+ // FIXME: is it possible for the stack to not be a single allocation?
+ // Are these values what ASan expects to get (reserved, not committed;
+ // including stack guard page) ?
+ *stack_top = (uptr)mbi.BaseAddress + mbi.RegionSize;
+ *stack_bottom = (uptr)mbi.AllocationBase;
+}
+#endif // #if !SANITIZER_GO
+
+void *MmapOrDie(uptr size, const char *mem_type, bool raw_report) {
+ void *rv = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
+ if (rv == 0)
+ ReportMmapFailureAndDie(size, mem_type, "allocate",
+ GetLastError(), raw_report);
+ return rv;
+}
+
+void UnmapOrDie(void *addr, uptr size) {
+ if (!size || !addr)
+ return;
+
+ MEMORY_BASIC_INFORMATION mbi;
+ CHECK(VirtualQuery(addr, &mbi, sizeof(mbi)));
+
+ // MEM_RELEASE can only be used to unmap whole regions previously mapped with
+ // VirtualAlloc. So we first try MEM_RELEASE since it is better, and if that
+ // fails try MEM_DECOMMIT.
+ if (VirtualFree(addr, 0, MEM_RELEASE) == 0) {
+ if (VirtualFree(addr, size, MEM_DECOMMIT) == 0) {
+ Report("ERROR: %s failed to "
+ "deallocate 0x%zx (%zd) bytes at address %p (error code: %d)\n",
+ SanitizerToolName, size, size, addr, GetLastError());
+ CHECK("unable to unmap" && 0);
+ }
+ }
+}
+
+static void *ReturnNullptrOnOOMOrDie(uptr size, const char *mem_type,
+ const char *mmap_type) {
+ error_t last_error = GetLastError();
+ if (last_error == ERROR_NOT_ENOUGH_MEMORY)
+ return nullptr;
+ ReportMmapFailureAndDie(size, mem_type, mmap_type, last_error);
+}
+
+void *MmapOrDieOnFatalError(uptr size, const char *mem_type) {
+ void *rv = VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
+ if (rv == 0)
+ return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate");
+ return rv;
+}
+
+// We want to map a chunk of address space aligned to 'alignment'.
+void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
+ const char *mem_type) {
+ CHECK(IsPowerOfTwo(size));
+ CHECK(IsPowerOfTwo(alignment));
+
+ // Windows will align our allocations to at least 64K.
+ alignment = Max(alignment, GetMmapGranularity());
+
+ uptr mapped_addr =
+ (uptr)VirtualAlloc(0, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
+ if (!mapped_addr)
+ return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned");
+
+ // If we got it right on the first try, return. Otherwise, unmap it and go to
+ // the slow path.
+ if (IsAligned(mapped_addr, alignment))
+ return (void*)mapped_addr;
+ if (VirtualFree((void *)mapped_addr, 0, MEM_RELEASE) == 0)
+ ReportMmapFailureAndDie(size, mem_type, "deallocate", GetLastError());
+
+ // If we didn't get an aligned address, overallocate, find an aligned address,
+ // unmap, and try to allocate at that aligned address.
+ int retries = 0;
+ const int kMaxRetries = 10;
+ for (; retries < kMaxRetries &&
+ (mapped_addr == 0 || !IsAligned(mapped_addr, alignment));
+ retries++) {
+ // Overallocate size + alignment bytes.
+ mapped_addr =
+ (uptr)VirtualAlloc(0, size + alignment, MEM_RESERVE, PAGE_NOACCESS);
+ if (!mapped_addr)
+ return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned");
+
+ // Find the aligned address.
+ uptr aligned_addr = RoundUpTo(mapped_addr, alignment);
+
+ // Free the overallocation.
+ if (VirtualFree((void *)mapped_addr, 0, MEM_RELEASE) == 0)
+ ReportMmapFailureAndDie(size, mem_type, "deallocate", GetLastError());
+
+ // Attempt to allocate exactly the number of bytes we need at the aligned
+ // address. This may fail for a number of reasons, in which case we continue
+ // the loop.
+ mapped_addr = (uptr)VirtualAlloc((void *)aligned_addr, size,
+ MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
+ }
+
+ // Fail if we can't make this work quickly.
+ if (retries == kMaxRetries && mapped_addr == 0)
+ return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate aligned");
+
+ return (void *)mapped_addr;
+}
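
The retry loop in MmapAlignedOrDieOnFatalError reduces to one piece of arithmetic: round the overallocated base up to the next alignment boundary, release the reservation, and re-reserve exactly there. A worked sketch of that round-up, using a hypothetical helper with the same math as the runtime's RoundUpTo:

    // Sketch only: power-of-two round-up used to pick the aligned address
    // inside an overallocation of size + alignment bytes.
    #include <cassert>
    #include <cstdint>

    inline uintptr_t RoundUp(uintptr_t p, uintptr_t align) {
      return (p + align - 1) & ~(align - 1);  // align must be a power of two
    }

    int main() {
      assert(RoundUp(0x12345000, 0x10000) == 0x12350000);  // unaligned base
      assert(RoundUp(0x20000, 0x10000) == 0x20000);        // aligned: unchanged
      // The rounded address is < base + alignment, so 'size' bytes starting
      // there always fit inside the size + alignment reservation.
    }
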
+
+bool MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name) {
+ // FIXME: is this really "NoReserve"? On Win32 this does not matter much,
+ // but on Win64 it does.
+ (void)name; // unsupported
+#if !SANITIZER_GO && SANITIZER_WINDOWS64
+ // On ASan/Windows64, using MEM_COMMIT would result in error
+ // 1455: ERROR_COMMITMENT_LIMIT.
+ // ASan uses an exception handler to commit pages on demand.
+ void *p = VirtualAlloc((LPVOID)fixed_addr, size, MEM_RESERVE, PAGE_READWRITE);
+#else
+ void *p = VirtualAlloc((LPVOID)fixed_addr, size, MEM_RESERVE | MEM_COMMIT,
+ PAGE_READWRITE);
+#endif
+ if (p == 0) {
+ Report("ERROR: %s failed to "
+ "allocate %p (%zd) bytes at %p (error code: %d)\n",
+ SanitizerToolName, size, size, fixed_addr, GetLastError());
+ return false;
+ }
+ return true;
+}
+
+// Memory space mapped by 'MmapFixedOrDie' must have been reserved by
+// 'MmapFixedNoAccess'.
+void *MmapFixedOrDie(uptr fixed_addr, uptr size, const char *name) {
+ void *p = VirtualAlloc((LPVOID)fixed_addr, size,
+ MEM_COMMIT, PAGE_READWRITE);
+ if (p == 0) {
+ char mem_type[30];
+ internal_snprintf(mem_type, sizeof(mem_type), "memory at address 0x%zx",
+ fixed_addr);
+ ReportMmapFailureAndDie(size, mem_type, "allocate", GetLastError());
+ }
+ return p;
+}
+
+// Uses fixed_addr for now.
+// Will use offset instead once we've implemented this function for real.
+uptr ReservedAddressRange::Map(uptr fixed_addr, uptr size, const char *name) {
+ return reinterpret_cast<uptr>(MmapFixedOrDieOnFatalError(fixed_addr, size));
+}
+
+uptr ReservedAddressRange::MapOrDie(uptr fixed_addr, uptr size,
+ const char *name) {
+ return reinterpret_cast<uptr>(MmapFixedOrDie(fixed_addr, size));
+}
+
+void ReservedAddressRange::Unmap(uptr addr, uptr size) {
+ // Only unmap if it covers the entire range.
+ CHECK((addr == reinterpret_cast<uptr>(base_)) && (size == size_));
+ // We unmap the whole range, just null out the base.
+ base_ = nullptr;
+ size_ = 0;
+ UnmapOrDie(reinterpret_cast<void*>(addr), size);
+}
+
+void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size, const char *name) {
+ void *p = VirtualAlloc((LPVOID)fixed_addr, size,
+ MEM_COMMIT, PAGE_READWRITE);
+ if (p == 0) {
+ char mem_type[30];
+ internal_snprintf(mem_type, sizeof(mem_type), "memory at address 0x%zx",
+ fixed_addr);
+ return ReturnNullptrOnOOMOrDie(size, mem_type, "allocate");
+ }
+ return p;
+}
+
+void *MmapNoReserveOrDie(uptr size, const char *mem_type) {
+ // FIXME: make this really NoReserve?
+ return MmapOrDie(size, mem_type);
+}
+
+uptr ReservedAddressRange::Init(uptr size, const char *name, uptr fixed_addr) {
+ base_ = fixed_addr ? MmapFixedNoAccess(fixed_addr, size) : MmapNoAccess(size);
+ size_ = size;
+ name_ = name;
+ (void)os_handle_; // unsupported
+ return reinterpret_cast<uptr>(base_);
+}
+
+
+void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {
+ (void)name; // unsupported
+ void *res = VirtualAlloc((LPVOID)fixed_addr, size,
+ MEM_RESERVE, PAGE_NOACCESS);
+ if (res == 0)
+ Report("WARNING: %s failed to "
+ "mprotect %p (%zd) bytes at %p (error code: %d)\n",
+ SanitizerToolName, size, size, fixed_addr, GetLastError());
+ return res;
+}
+
+void *MmapNoAccess(uptr size) {
+ void *res = VirtualAlloc(nullptr, size, MEM_RESERVE, PAGE_NOACCESS);
+ if (res == 0)
+ Report("WARNING: %s failed to "
+ "mprotect %p (%zd) bytes (error code: %d)\n",
+ SanitizerToolName, size, size, GetLastError());
+ return res;
+}
+
+bool MprotectNoAccess(uptr addr, uptr size) {
+ DWORD old_protection;
+ return VirtualProtect((LPVOID)addr, size, PAGE_NOACCESS, &old_protection);
+}
+
+void ReleaseMemoryPagesToOS(uptr beg, uptr end) {
+ // This is almost useless on 32-bit Windows.
+ // FIXME: add a madvise analog when we move to 64 bits.
+}
+
+bool NoHugePagesInRegion(uptr addr, uptr size) {
+ // FIXME: probably similar to ReleaseMemoryToOS.
+ return true;
+}
+
+bool DontDumpShadowMemory(uptr addr, uptr length) {
+ // This is almost useless on 32-bit Windows.
+ // FIXME: add a madvise analog when we move to 64 bits.
+ return true;
+}
+
+uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
+ uptr *largest_gap_found,
+ uptr *max_occupied_addr) {
+ uptr address = 0;
+ while (true) {
+ MEMORY_BASIC_INFORMATION info;
+ if (!::VirtualQuery((void*)address, &info, sizeof(info)))
+ return 0;
+
+ if (info.State == MEM_FREE) {
+ uptr shadow_address = RoundUpTo((uptr)info.BaseAddress + left_padding,
+ alignment);
+ if (shadow_address + size < (uptr)info.BaseAddress + info.RegionSize)
+ return shadow_address;
+ }
+
+ // Move to the next region.
+ address = (uptr)info.BaseAddress + info.RegionSize;
+ }
+ return 0;
+}
+
+bool MemoryRangeIsAvailable(uptr range_start, uptr range_end) {
+ MEMORY_BASIC_INFORMATION mbi;
+ CHECK(VirtualQuery((void *)range_start, &mbi, sizeof(mbi)));
+ return mbi.Protect == PAGE_NOACCESS &&
+ (uptr)mbi.BaseAddress + mbi.RegionSize >= range_end;
+}
+
+void *MapFileToMemory(const char *file_name, uptr *buff_size) {
+ UNIMPLEMENTED();
+}
+
+void *MapWritableFileToMemory(void *addr, uptr size, fd_t fd, OFF_T offset) {
+ UNIMPLEMENTED();
+}
+
+static const int kMaxEnvNameLength = 128;
+static const DWORD kMaxEnvValueLength = 32767;
+
+namespace {
+
+struct EnvVariable {
+ char name[kMaxEnvNameLength];
+ char value[kMaxEnvValueLength];
+};
+
+} // namespace
+
+static const int kEnvVariables = 5;
+static EnvVariable env_vars[kEnvVariables];
+static int num_env_vars;
+
+const char *GetEnv(const char *name) {
+ // Note: this implementation caches the values of the environment variables
+ // and limits their quantity.
+ for (int i = 0; i < num_env_vars; i++) {
+ if (0 == internal_strcmp(name, env_vars[i].name))
+ return env_vars[i].value;
+ }
+ CHECK_LT(num_env_vars, kEnvVariables);
+ DWORD rv = GetEnvironmentVariableA(name, env_vars[num_env_vars].value,
+ kMaxEnvValueLength);
+ if (rv > 0 && rv < kMaxEnvValueLength) {
+ CHECK_LT(internal_strlen(name), kMaxEnvNameLength);
+ internal_strncpy(env_vars[num_env_vars].name, name, kMaxEnvNameLength);
+ num_env_vars++;
+ return env_vars[num_env_vars - 1].value;
+ }
+ return 0;
+}
+
+const char *GetPwd() {
+ UNIMPLEMENTED();
+}
+
+u32 GetUid() {
+ UNIMPLEMENTED();
+}
+
+namespace {
+struct ModuleInfo {
+ const char *filepath;
+ uptr base_address;
+ uptr end_address;
+};
+
+#if !SANITIZER_GO
+int CompareModulesBase(const void *pl, const void *pr) {
+ const ModuleInfo *l = (const ModuleInfo *)pl, *r = (const ModuleInfo *)pr;
+ if (l->base_address < r->base_address)
+ return -1;
+ return l->base_address > r->base_address;
+}
+#endif
+} // namespace
+
+#if !SANITIZER_GO
+void DumpProcessMap() {
+ Report("Dumping process modules:\n");
+ ListOfModules modules;
+ modules.init();
+ uptr num_modules = modules.size();
+
+ InternalMmapVector<ModuleInfo> module_infos(num_modules);
+ for (size_t i = 0; i < num_modules; ++i) {
+ module_infos[i].filepath = modules[i].full_name();
+ module_infos[i].base_address = modules[i].ranges().front()->beg;
+ module_infos[i].end_address = modules[i].ranges().back()->end;
+ }
+ qsort(module_infos.data(), num_modules, sizeof(ModuleInfo),
+ CompareModulesBase);
+
+ for (size_t i = 0; i < num_modules; ++i) {
+ const ModuleInfo &mi = module_infos[i];
+ if (mi.end_address != 0) {
+ Printf("\t%p-%p %s\n", mi.base_address, mi.end_address,
+ mi.filepath[0] ? mi.filepath : "[no name]");
+ } else if (mi.filepath[0]) {
+ Printf("\t??\?-??? %s\n", mi.filepath);
+ } else {
+ Printf("\t???\n");
+ }
+ }
+}
+#endif
+
+void PrintModuleMap() { }
+
+void DisableCoreDumperIfNecessary() {
+ // Do nothing.
+}
+
+void ReExec() {
+ UNIMPLEMENTED();
+}
+
+void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {}
+
+bool StackSizeIsUnlimited() {
+ UNIMPLEMENTED();
+}
+
+void SetStackSizeLimitInBytes(uptr limit) {
+ UNIMPLEMENTED();
+}
+
+bool AddressSpaceIsUnlimited() {
+ UNIMPLEMENTED();
+}
+
+void SetAddressSpaceUnlimited() {
+ UNIMPLEMENTED();
+}
+
+bool IsPathSeparator(const char c) {
+ return c == '\\' || c == '/';
+}
+
+static bool IsAlpha(char c) {
+ c = ToLower(c);
+ return c >= 'a' && c <= 'z';
+}
+
+bool IsAbsolutePath(const char *path) {
+ return path != nullptr && IsAlpha(path[0]) && path[1] == ':' &&
+ IsPathSeparator(path[2]);
+}
+
+void SleepForSeconds(int seconds) {
+ Sleep(seconds * 1000);
+}
+
+void SleepForMillis(int millis) {
+ Sleep(millis);
+}
+
+u64 NanoTime() {
+ static LARGE_INTEGER frequency = {};
+ LARGE_INTEGER counter;
+ if (UNLIKELY(frequency.QuadPart == 0)) {
+ QueryPerformanceFrequency(&frequency);
+ CHECK_NE(frequency.QuadPart, 0);
+ }
+ QueryPerformanceCounter(&counter);
+ counter.QuadPart *= 1000ULL * 1000000ULL;
+ counter.QuadPart /= frequency.QuadPart;
+ return counter.QuadPart;
+}
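
The conversion in NanoTime is plain proportion arithmetic, ticks * 10^9 / frequency. A worked example with an illustrative frequency; the real value comes from QueryPerformanceFrequency:

    // Sketch only: the tick-to-nanosecond scaling performed by NanoTime().
    #include <cassert>
    #include <cstdint>

    int main() {
      uint64_t freq = 10000000;  // 10 MHz, a common QPC rate; illustrative
      uint64_t ticks = 12345;
      uint64_t ns = ticks * 1000ULL * 1000000ULL / freq;
      assert(ns == 1234500);     // 100 ns per tick at 10 MHz
    }
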
+
+u64 MonotonicNanoTime() { return NanoTime(); }
+
+void Abort() {
+ internal__exit(3);
+}
+
+#if !SANITIZER_GO
+// Read the file to extract the ImageBase field from the PE header. If ASLR is
+// disabled and this virtual address is available, the loader will typically
+// load the image at this address. Therefore, we call it the preferred base. Any
+// addresses in the DWARF typically assume that the object has been loaded at
+// this address.
+static uptr GetPreferredBase(const char *modname) {
+ fd_t fd = OpenFile(modname, RdOnly, nullptr);
+ if (fd == kInvalidFd)
+ return 0;
+ FileCloser closer(fd);
+
+ // Read just the DOS header.
+ IMAGE_DOS_HEADER dos_header;
+ uptr bytes_read;
+ if (!ReadFromFile(fd, &dos_header, sizeof(dos_header), &bytes_read) ||
+ bytes_read != sizeof(dos_header))
+ return 0;
+
+ // The file should start with the right signature.
+ if (dos_header.e_magic != IMAGE_DOS_SIGNATURE)
+ return 0;
+
+ // The layout at e_lfanew is:
+ // "PE\0\0"
+ // IMAGE_FILE_HEADER
+ // IMAGE_OPTIONAL_HEADER
+ // Seek to e_lfanew and read all that data.
+ char buf[4 + sizeof(IMAGE_FILE_HEADER) + sizeof(IMAGE_OPTIONAL_HEADER)];
+ if (::SetFilePointer(fd, dos_header.e_lfanew, nullptr, FILE_BEGIN) ==
+ INVALID_SET_FILE_POINTER)
+ return 0;
+ if (!ReadFromFile(fd, &buf[0], sizeof(buf), &bytes_read) ||
+ bytes_read != sizeof(buf))
+ return 0;
+
+ // Check for "PE\0\0" before the PE header.
+ char *pe_sig = &buf[0];
+ if (internal_memcmp(pe_sig, "PE\0\0", 4) != 0)
+ return 0;
+
+ // Skip over IMAGE_FILE_HEADER. We could do more validation here if we wanted.
+ IMAGE_OPTIONAL_HEADER *pe_header =
+ (IMAGE_OPTIONAL_HEADER *)(pe_sig + 4 + sizeof(IMAGE_FILE_HEADER));
+
+ // Check for more magic in the PE header.
+ if (pe_header->Magic != IMAGE_NT_OPTIONAL_HDR_MAGIC)
+ return 0;
+
+ // Finally, return the ImageBase.
+ return (uptr)pe_header->ImageBase;
+}
+
+void ListOfModules::init() {
+ clearOrInit();
+ HANDLE cur_process = GetCurrentProcess();
+
+ // Query the list of modules. Start by assuming there are no more than 256
+ // modules and retry if that's not sufficient.
+ HMODULE *hmodules = 0;
+ uptr modules_buffer_size = sizeof(HMODULE) * 256;
+ DWORD bytes_required;
+ while (!hmodules) {
+ hmodules = (HMODULE *)MmapOrDie(modules_buffer_size, __FUNCTION__);
+ CHECK(EnumProcessModules(cur_process, hmodules, modules_buffer_size,
+ &bytes_required));
+ if (bytes_required > modules_buffer_size) {
+ // Either there turned out to be more than 256 hmodules, or new hmodules
+ // could have loaded since the last try. Retry.
+ UnmapOrDie(hmodules, modules_buffer_size);
+ hmodules = 0;
+ modules_buffer_size = bytes_required;
+ }
+ }
+
+ // |num_modules| is the number of modules actually present.
+ size_t num_modules = bytes_required / sizeof(HMODULE);
+ for (size_t i = 0; i < num_modules; ++i) {
+ HMODULE handle = hmodules[i];
+ MODULEINFO mi;
+ if (!GetModuleInformation(cur_process, handle, &mi, sizeof(mi)))
+ continue;
+
+ // Get the UTF-16 path and convert to UTF-8.
+ wchar_t modname_utf16[kMaxPathLength];
+ int modname_utf16_len =
+ GetModuleFileNameW(handle, modname_utf16, kMaxPathLength);
+ if (modname_utf16_len == 0)
+ modname_utf16[0] = '\0';
+ char module_name[kMaxPathLength];
+ int module_name_len =
+ ::WideCharToMultiByte(CP_UTF8, 0, modname_utf16, modname_utf16_len + 1,
+ &module_name[0], kMaxPathLength, NULL, NULL);
+ module_name[module_name_len] = '\0';
+
+ uptr base_address = (uptr)mi.lpBaseOfDll;
+ uptr end_address = (uptr)mi.lpBaseOfDll + mi.SizeOfImage;
+
+ // Adjust the base address of the module so that we get a VA instead of an
+ // RVA when computing the module offset. This helps llvm-symbolizer find the
+ // right DWARF CU. In the common case that the image is loaded at its
+ // preferred address, we will now print normal virtual addresses.
+ uptr preferred_base = GetPreferredBase(&module_name[0]);
+ uptr adjusted_base = base_address - preferred_base;
+
+ LoadedModule cur_module;
+ cur_module.set(module_name, adjusted_base);
+ // We add the whole module as one single address range.
+ cur_module.addAddressRange(base_address, end_address, /*executable*/ true,
+ /*writable*/ true);
+ modules_.push_back(cur_module);
+ }
+ UnmapOrDie(hmodules, modules_buffer_size);
+}
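
The point of the adjusted base is that a module offset computed later as pc minus adjusted_base equals preferred_base plus the RVA, i.e. the virtual address the debug info was built against. A worked sketch with made-up addresses:

    // Sketch only: why subtracting (actual_base - preferred_base) yields
    // DWARF-friendly virtual addresses. All addresses are illustrative.
    #include <cassert>
    #include <cstdint>

    int main() {
      uint64_t preferred_base = 0x140000000;   // ImageBase from the PE header
      uint64_t actual_base = 0x7ff600000000;   // where the loader put the image
      uint64_t adjusted_base = actual_base - preferred_base;
      uint64_t pc = actual_base + 0x1234;      // some code address in the module
      assert(pc - adjusted_base == preferred_base + 0x1234);
    }
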
+
+void ListOfModules::fallbackInit() { clear(); }
+
+// We can't use atexit() directly at __asan_init time as the CRT is not fully
+// initialized at this point. Place the functions into a vector and use
+// atexit() as soon as it is ready for use (i.e. after .CRT$XIC initializers).
+InternalMmapVectorNoCtor<void (*)(void)> atexit_functions;
+
+int Atexit(void (*function)(void)) {
+ atexit_functions.push_back(function);
+ return 0;
+}
+
+static int RunAtexit() {
+ TraceLoggingUnregister(g_asan_provider);
+ int ret = 0;
+ for (uptr i = 0; i < atexit_functions.size(); ++i) {
+ ret |= atexit(atexit_functions[i]);
+ }
+ return ret;
+}
+
+#pragma section(".CRT$XID", long, read) // NOLINT
+__declspec(allocate(".CRT$XID")) int (*__run_atexit)() = RunAtexit;
+#endif
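
Planting a function pointer in a .CRT$XI* section is the MSVC CRT's mechanism for running C initializers; .CRT$XID runs after .CRT$XIC, by which point atexit() is usable. The same pattern in isolation, MSVC-specific sketch only:

    // Sketch only: run code during CRT C initialization by placing a pointer
    // in a .CRT$XI* section, as RunAtexit is registered above. MSVC-specific.
    #include <cstdio>

    static int EarlyInit() {
      std::puts("runs before main(), after the CRT's own .CRT$XIC initializers");
      return 0;  // a nonzero return makes CRT startup fail
    }

    #pragma section(".CRT$XID", long, read)
    __declspec(allocate(".CRT$XID")) static int (*early_init_ptr)() = EarlyInit;

    int main() { std::puts("main"); }
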
+
+// ------------------ sanitizer_libc.h
+fd_t OpenFile(const char *filename, FileAccessMode mode, error_t *last_error) {
+ // FIXME: Use the wide variants to handle Unicode filenames.
+ fd_t res;
+ if (mode == RdOnly) {
+ res = CreateFileA(filename, GENERIC_READ,
+ FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
+ nullptr, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, nullptr);
+ } else if (mode == WrOnly) {
+ res = CreateFileA(filename, GENERIC_WRITE, 0, nullptr, CREATE_ALWAYS,
+ FILE_ATTRIBUTE_NORMAL, nullptr);
+ } else {
+ UNIMPLEMENTED();
+ }
+ CHECK(res != kStdoutFd || kStdoutFd == kInvalidFd);
+ CHECK(res != kStderrFd || kStderrFd == kInvalidFd);
+ if (res == kInvalidFd && last_error)
+ *last_error = GetLastError();
+ return res;
+}
+
+void CloseFile(fd_t fd) {
+ CloseHandle(fd);
+}
+
+bool ReadFromFile(fd_t fd, void *buff, uptr buff_size, uptr *bytes_read,
+ error_t *error_p) {
+ CHECK(fd != kInvalidFd);
+
+ // bytes_read can't be passed directly to ReadFile:
+ // uptr is unsigned long long on 64-bit Windows.
+ unsigned long num_read_long;
+
+ bool success = ::ReadFile(fd, buff, buff_size, &num_read_long, nullptr);
+ if (!success && error_p)
+ *error_p = GetLastError();
+ if (bytes_read)
+ *bytes_read = num_read_long;
+ return success;
+}
+
+bool SupportsColoredOutput(fd_t fd) {
+ // FIXME: support colored output.
+ return false;
+}
+
+bool WriteToFile(fd_t fd, const void *buff, uptr buff_size, uptr *bytes_written,
+ error_t *error_p) {
+ CHECK(fd != kInvalidFd);
+
+ // Handle null optional parameters.
+ error_t dummy_error;
+ error_p = error_p ? error_p : &dummy_error;
+ uptr dummy_bytes_written;
+ bytes_written = bytes_written ? bytes_written : &dummy_bytes_written;
+
+ // Initialize output parameters in case we fail.
+ *error_p = 0;
+ *bytes_written = 0;
+
+ // Map the conventional Unix fds 1 and 2 to Windows handles. They might be
+ // closed, in which case this will fail.
+ if (fd == kStdoutFd || fd == kStderrFd) {
+ fd = GetStdHandle(fd == kStdoutFd ? STD_OUTPUT_HANDLE : STD_ERROR_HANDLE);
+ if (fd == 0) {
+ *error_p = ERROR_INVALID_HANDLE;
+ return false;
+ }
+ }
+
+ DWORD bytes_written_32;
+ if (!WriteFile(fd, buff, buff_size, &bytes_written_32, 0)) {
+ *error_p = GetLastError();
+ return false;
+ } else {
+ *bytes_written = bytes_written_32;
+ return true;
+ }
+}
+
+uptr internal_sched_yield() {
+ Sleep(0);
+ return 0;
+}
+
+void internal__exit(int exitcode) {
+ TraceLoggingUnregister(g_asan_provider);
+ // ExitProcess runs some finalizers, so use TerminateProcess to avoid that.
+ // The debugger doesn't stop on TerminateProcess like it does on ExitProcess,
+ // so add our own breakpoint here.
+ if (::IsDebuggerPresent())
+ __debugbreak();
+ TerminateProcess(GetCurrentProcess(), exitcode);
+ BUILTIN_UNREACHABLE();
+}
+
+uptr internal_ftruncate(fd_t fd, uptr size) {
+ UNIMPLEMENTED();
+}
+
+uptr GetRSS() {
+ PROCESS_MEMORY_COUNTERS counters;
+ if (!GetProcessMemoryInfo(GetCurrentProcess(), &counters, sizeof(counters)))
+ return 0;
+ return counters.WorkingSetSize;
+}
+
+void *internal_start_thread(void (*func)(void *arg), void *arg) { return 0; }
+void internal_join_thread(void *th) { }
+
+// ---------------------- BlockingMutex ---------------- {{{1
+
+BlockingMutex::BlockingMutex() {
+ CHECK(sizeof(SRWLOCK) <= sizeof(opaque_storage_));
+ internal_memset(this, 0, sizeof(*this));
+}
+
+void BlockingMutex::Lock() {
+ AcquireSRWLockExclusive((PSRWLOCK)opaque_storage_);
+ CHECK_EQ(owner_, 0);
+ owner_ = GetThreadSelf();
+}
+
+void BlockingMutex::Unlock() {
+ CheckLocked();
+ owner_ = 0;
+ ReleaseSRWLockExclusive((PSRWLOCK)opaque_storage_);
+}
+
+void BlockingMutex::CheckLocked() {
+ CHECK_EQ(owner_, GetThreadSelf());
+}
+
+uptr GetTlsSize() {
+ return 0;
+}
+
+void InitTlsSize() {
+}
+
+void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
+ uptr *tls_addr, uptr *tls_size) {
+#if SANITIZER_GO
+ *stk_addr = 0;
+ *stk_size = 0;
+ *tls_addr = 0;
+ *tls_size = 0;
+#else
+ uptr stack_top, stack_bottom;
+ GetThreadStackTopAndBottom(main, &stack_top, &stack_bottom);
+ *stk_addr = stack_bottom;
+ *stk_size = stack_top - stack_bottom;
+ *tls_addr = 0;
+ *tls_size = 0;
+#endif
+}
+
+void ReportFile::Write(const char *buffer, uptr length) {
+ SpinMutexLock l(mu);
+ ReopenIfNecessary();
+ if (!WriteToFile(fd, buffer, length)) {
+ // stderr may be closed, but we may be able to print to the debugger
+ // instead. This is the case when launching a program from Visual Studio,
+ // and the following routine should write to its console.
+ OutputDebugStringA(buffer);
+ }
+}
+
+void SetAlternateSignalStack() {
+ // FIXME: Decide what to do on Windows.
+}
+
+void UnsetAlternateSignalStack() {
+ // FIXME: Decide what to do on Windows.
+}
+
+void InstallDeadlySignalHandlers(SignalHandlerType handler) {
+ (void)handler;
+ // FIXME: Decide what to do on Windows.
+}
+
+HandleSignalMode GetHandleSignalMode(int signum) {
+ // FIXME: Decide what to do on Windows.
+ return kHandleSignalNo;
+}
+
+// Check based on flags if we should handle this exception.
+bool IsHandledDeadlyException(DWORD exceptionCode) {
+ switch (exceptionCode) {
+ case EXCEPTION_ACCESS_VIOLATION:
+ case EXCEPTION_ARRAY_BOUNDS_EXCEEDED:
+ case EXCEPTION_STACK_OVERFLOW:
+ case EXCEPTION_DATATYPE_MISALIGNMENT:
+ case EXCEPTION_IN_PAGE_ERROR:
+ return common_flags()->handle_segv;
+ case EXCEPTION_ILLEGAL_INSTRUCTION:
+ case EXCEPTION_PRIV_INSTRUCTION:
+ case EXCEPTION_BREAKPOINT:
+ return common_flags()->handle_sigill;
+ case EXCEPTION_FLT_DENORMAL_OPERAND:
+ case EXCEPTION_FLT_DIVIDE_BY_ZERO:
+ case EXCEPTION_FLT_INEXACT_RESULT:
+ case EXCEPTION_FLT_INVALID_OPERATION:
+ case EXCEPTION_FLT_OVERFLOW:
+ case EXCEPTION_FLT_STACK_CHECK:
+ case EXCEPTION_FLT_UNDERFLOW:
+ case EXCEPTION_INT_DIVIDE_BY_ZERO:
+ case EXCEPTION_INT_OVERFLOW:
+ return common_flags()->handle_sigfpe;
+ }
+ return false;
+}
+
+bool IsAccessibleMemoryRange(uptr beg, uptr size) {
+ SYSTEM_INFO si;
+ GetNativeSystemInfo(&si);
+ uptr page_size = si.dwPageSize;
+ uptr page_mask = ~(page_size - 1);
+
+ for (uptr page = beg & page_mask, end = (beg + size - 1) & page_mask;
+ page <= end;) {
+ MEMORY_BASIC_INFORMATION info;
+ if (VirtualQuery((LPCVOID)page, &info, sizeof(info)) != sizeof(info))
+ return false;
+
+ if (info.Protect == 0 || info.Protect == PAGE_NOACCESS ||
+ info.Protect == PAGE_EXECUTE)
+ return false;
+
+ if (info.RegionSize == 0)
+ return false;
+
+ page += info.RegionSize;
+ }
+
+ return true;
+}
+
+bool SignalContext::IsStackOverflow() const {
+ return (DWORD)GetType() == EXCEPTION_STACK_OVERFLOW;
+}
+
+void SignalContext::InitPcSpBp() {
+ EXCEPTION_RECORD *exception_record = (EXCEPTION_RECORD *)siginfo;
+ CONTEXT *context_record = (CONTEXT *)context;
+
+ pc = (uptr)exception_record->ExceptionAddress;
+#ifdef _WIN64
+ bp = (uptr)context_record->Rbp;
+ sp = (uptr)context_record->Rsp;
+#else
+ bp = (uptr)context_record->Ebp;
+ sp = (uptr)context_record->Esp;
+#endif
+}
+
+uptr SignalContext::GetAddress() const {
+ EXCEPTION_RECORD *exception_record = (EXCEPTION_RECORD *)siginfo;
+ return exception_record->ExceptionInformation[1];
+}
+
+bool SignalContext::IsMemoryAccess() const {
+ return GetWriteFlag() != SignalContext::UNKNOWN;
+}
+
+SignalContext::WriteFlag SignalContext::GetWriteFlag() const {
+ EXCEPTION_RECORD *exception_record = (EXCEPTION_RECORD *)siginfo;
+ // The contents of this array are documented at
+ // https://msdn.microsoft.com/en-us/library/windows/desktop/aa363082(v=vs.85).aspx
+ // The first element indicates read as 0, write as 1, or execute as 8. The
+ // second element is the faulting address.
+ switch (exception_record->ExceptionInformation[0]) {
+ case 0:
+ return SignalContext::READ;
+ case 1:
+ return SignalContext::WRITE;
+ case 8:
+ return SignalContext::UNKNOWN;
+ }
+ return SignalContext::UNKNOWN;
+}
+
+void SignalContext::DumpAllRegisters(void *context) {
+ // FIXME: Implement this.
+}
+
+int SignalContext::GetType() const {
+ return static_cast<const EXCEPTION_RECORD *>(siginfo)->ExceptionCode;
+}
+
+const char *SignalContext::Describe() const {
+ unsigned code = GetType();
+ // Get the string description of the exception if this is a known deadly
+ // exception.
+ switch (code) {
+ case EXCEPTION_ACCESS_VIOLATION:
+ return "access-violation";
+ case EXCEPTION_ARRAY_BOUNDS_EXCEEDED:
+ return "array-bounds-exceeded";
+ case EXCEPTION_STACK_OVERFLOW:
+ return "stack-overflow";
+ case EXCEPTION_DATATYPE_MISALIGNMENT:
+ return "datatype-misalignment";
+ case EXCEPTION_IN_PAGE_ERROR:
+ return "in-page-error";
+ case EXCEPTION_ILLEGAL_INSTRUCTION:
+ return "illegal-instruction";
+ case EXCEPTION_PRIV_INSTRUCTION:
+ return "priv-instruction";
+ case EXCEPTION_BREAKPOINT:
+ return "breakpoint";
+ case EXCEPTION_FLT_DENORMAL_OPERAND:
+ return "flt-denormal-operand";
+ case EXCEPTION_FLT_DIVIDE_BY_ZERO:
+ return "flt-divide-by-zero";
+ case EXCEPTION_FLT_INEXACT_RESULT:
+ return "flt-inexact-result";
+ case EXCEPTION_FLT_INVALID_OPERATION:
+ return "flt-invalid-operation";
+ case EXCEPTION_FLT_OVERFLOW:
+ return "flt-overflow";
+ case EXCEPTION_FLT_STACK_CHECK:
+ return "flt-stack-check";
+ case EXCEPTION_FLT_UNDERFLOW:
+ return "flt-underflow";
+ case EXCEPTION_INT_DIVIDE_BY_ZERO:
+ return "int-divide-by-zero";
+ case EXCEPTION_INT_OVERFLOW:
+ return "int-overflow";
+ }
+ return "unknown exception";
+}
+
+uptr ReadBinaryName(/*out*/char *buf, uptr buf_len) {
+ // FIXME: Actually implement this function.
+ CHECK_GT(buf_len, 0);
+ buf[0] = 0;
+ return 0;
+}
+
+uptr ReadLongProcessName(/*out*/char *buf, uptr buf_len) {
+ return ReadBinaryName(buf, buf_len);
+}
+
+void CheckVMASize() {
+ // Do nothing.
+}
+
+void InitializePlatformEarly() {
+ // Do nothing.
+}
+
+void MaybeReexec() {
+ // No need to re-exec on Windows.
+}
+
+void CheckASLR() {
+ // Do nothing
+}
+
+void CheckMPROTECT() {
+ // Do nothing
+}
+
+char **GetArgv() {
+ // FIXME: Actually implement this function.
+ return 0;
+}
+
+char **GetEnviron() {
+ // FIXME: Actually implement this function.
+ return 0;
+}
+
+pid_t StartSubprocess(const char *program, const char *const argv[],
+ fd_t stdin_fd, fd_t stdout_fd, fd_t stderr_fd) {
+ // FIXME: implement on this platform
+ // Should be implemented based on
+ // SymbolizerProcess::StartSymbolizerSubprocess
+ // from lib/sanitizer_common/sanitizer_symbolizer_win.cc.
+ return -1;
+}
+
+bool IsProcessRunning(pid_t pid) {
+ // FIXME: implement on this platform.
+ return false;
+}
+
+int WaitForProcess(pid_t pid) { return -1; }
+
+// FIXME implement on this platform.
+void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) { }
+
+void CheckNoDeepBind(const char *filename, int flag) {
+ // Do nothing.
+}
+
+// FIXME: implement on this platform.
+bool GetRandom(void *buffer, uptr length, bool blocking) {
+ UNIMPLEMENTED();
+}
+
+u32 GetNumberOfCPUs() {
+ SYSTEM_INFO sysinfo = {};
+ GetNativeSystemInfo(&sysinfo);
+ return sysinfo.dwNumberOfProcessors;
+}
+
+#if SANITIZER_WIN_TRACE
+// TODO(mcgov): Rename this project-wide to PlatformLogInit
+void AndroidLogInit(void) {
+ HRESULT hr = TraceLoggingRegister(g_asan_provider);
+ if (!SUCCEEDED(hr))
+ return;
+}
+
+void SetAbortMessage(const char *) {}
+
+void LogFullErrorReport(const char *buffer) {
+ if (common_flags()->log_to_syslog) {
+ InternalMmapVector<wchar_t> filename;
+ DWORD filename_length = 0;
+ do {
+ filename.resize(filename.size() + 0x100);
+ filename_length =
+ GetModuleFileNameW(NULL, filename.begin(), filename.size());
+ } while (filename_length >= filename.size());
+ TraceLoggingWrite(g_asan_provider, "AsanReportEvent",
+ TraceLoggingValue(filename.begin(), "ExecutableName"),
+ TraceLoggingValue(buffer, "AsanReportContents"));
+ }
+}
+#endif // SANITIZER_WIN_TRACE
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_WINDOWS
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_win.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_win.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_win.h (revision 351984)
@@ -0,0 +1,25 @@
+//===-- sanitizer_win.h -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Windows-specific declarations.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_WIN_H
+#define SANITIZER_WIN_H
+
+#include "sanitizer_platform.h"
+#if SANITIZER_WINDOWS
+#include "sanitizer_internal_defs.h"
+
+namespace __sanitizer {
+// Check based on flags if we should handle the exception.
+bool IsHandledDeadlyException(DWORD exceptionCode);
+} // namespace __sanitizer
+
+#endif // SANITIZER_WINDOWS
+#endif // SANITIZER_WIN_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_win.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_win_defs.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_win_defs.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_win_defs.h (revision 351984)
@@ -0,0 +1,162 @@
+//===-- sanitizer_win_defs.h ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Common definitions for Windows-specific code.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_WIN_DEFS_H
+#define SANITIZER_WIN_DEFS_H
+
+#include "sanitizer_platform.h"
+#if SANITIZER_WINDOWS
+
+#ifndef WINAPI
+#if defined(_M_IX86) || defined(__i386__)
+#define WINAPI __stdcall
+#else
+#define WINAPI
+#endif
+#endif
+
+#if defined(_M_IX86) || defined(__i386__)
+#define WIN_SYM_PREFIX "_"
+#else
+#define WIN_SYM_PREFIX
+#endif
+
+// For MinGW, the /export: directives contain undecorated symbols, contrary to
+// link/lld-link. The GNU linker doesn't support /alternatename and /include,
+// though, so lld-link in MinGW mode interprets them in the same way as
+// in the default mode.
+#ifdef __MINGW32__
+#define WIN_EXPORT_PREFIX
+#else
+#define WIN_EXPORT_PREFIX WIN_SYM_PREFIX
+#endif
+
+// Intermediate macro to ensure the parameter is expanded before stringified.
+#define STRINGIFY_(A) #A
+#define STRINGIFY(A) STRINGIFY_(A)
+
+// ----------------- A workaround for the absence of weak symbols --------------
+// We don't have a direct equivalent of weak symbols when using MSVC, but we can
+// use the /alternatename directive to tell the linker to default a specific
+// symbol to a specific value.
+// Take into account that this is a pragma directive for the linker, so it will
+// be ignored by the compiler and the function will be marked as UNDEF in the
+// symbol table of the resulting object file. The linker won't find the default
+// implementation until it links with that object file.
+// So, suppose we provide a default implementation "fundef" for "fun", and this
+// is compiled into the object file "test.obj" including the pragma directive.
+// If we have some code with references to "fun" and we link that code directly
+// with "test.obj", it will work, because the linker always links object files.
+// But if "test.obj" is included in a static library, like "test.lib", then the
+// linker will only pull in "test.obj" when necessary. If the code only
+// references "fun", the linker won't pull in "test.obj" (from test.lib),
+// because "fun" appears there as UNDEF: the symbol "fun" is never resolved,
+// and linking fails (the linker never sees the pragma directive).
+// So, a workaround is to force linkage with the modules that include weak
+// definitions, with the following macro: WIN_FORCE_LINK()
+
+#define WIN_WEAK_ALIAS(Name, Default) \
+ __pragma(comment(linker, "/alternatename:" WIN_SYM_PREFIX STRINGIFY(Name) "="\
+ WIN_SYM_PREFIX STRINGIFY(Default)))
+
+#define WIN_FORCE_LINK(Name) \
+ __pragma(comment(linker, "/include:" WIN_SYM_PREFIX STRINGIFY(Name)))
+
+#define WIN_EXPORT(ExportedName, Name) \
+ __pragma(comment(linker, "/export:" WIN_EXPORT_PREFIX STRINGIFY(ExportedName)\
+ "=" WIN_EXPORT_PREFIX STRINGIFY(Name)))
+
+// We cannot define weak functions on Windows, but we can use WIN_WEAK_ALIAS()
+// which defines an alias to a default implementation, and only works when
+// linking statically.
+// So, to define a weak function "fun", we define a default implementation with
+// a different name "fun__def" and we create a "weak alias" fun = fun__def.
+// Then, users can override it just by defining "fun".
+// We require extern "C" because otherwise WIN_WEAK_ALIAS() would fail due to
+// C++ name mangling.
+
+// Dummy name for default implementation of weak function.
+# define WEAK_DEFAULT_NAME(Name) Name##__def
+// Name for exported implementation of weak function.
+# define WEAK_EXPORT_NAME(Name) Name##__dll
+
+// Use this macro when you need to define and export a weak function from a
+// library. For example:
+// WIN_WEAK_EXPORT_DEF(bool, compare, int a, int b) { return a > b; }
+# define WIN_WEAK_EXPORT_DEF(ReturnType, Name, ...) \
+ WIN_WEAK_ALIAS(Name, WEAK_DEFAULT_NAME(Name)) \
+ WIN_EXPORT(WEAK_EXPORT_NAME(Name), Name) \
+ extern "C" ReturnType Name(__VA_ARGS__); \
+ extern "C" ReturnType WEAK_DEFAULT_NAME(Name)(__VA_ARGS__)
+
+// Use this macro when you need to import a weak function from a library. It
+// defines a weak alias to the imported function from the dll. For example:
+// WIN_WEAK_IMPORT_DEF(compare)
+# define WIN_WEAK_IMPORT_DEF(Name) \
+ WIN_WEAK_ALIAS(Name, WEAK_EXPORT_NAME(Name))
+
+// So, for Windows we provide something similar to weak symbols on Linux, with
+// some differences:
+// + A default implementation must always be provided.
+//
+// + When linking statically it works quite similarly. For example:
+//
+// // libExample.cc
+// WIN_WEAK_EXPORT_DEF(bool, compare, int a, int b) { return a > b; }
+//
+// // client.cc
+// // We can use the default implementation from the library:
+// compare(1, 2);
+// // Or we can override it:
+// extern "C" bool compare (int a, int b) { return a >= b; }
+//
+// And it will work fine. If we don't override the function, we need to ensure
+// that the linker includes the object file with the default implementation.
+// We can do so with the linker option "-wholearchive:".
+//
+// + When linking dynamically with a library (dll), weak functions are exported
+// with "__dll" suffix. Clients can use the macro WIN_WEAK_IMPORT_DEF(fun)
+// which defines a "weak alias" fun = fun__dll.
+//
+// // libExample.cc
+// WIN_WEAK_EXPORT_DEF(bool, compare, int a, int b) { return a > b; }
+//
+// // client.cc
+// WIN_WEAK_IMPORT_DEF(compare)
+// // We can use the default implementation from the library:
+// compare(1, 2);
+// // Or we can override it:
+// extern "C" bool compare (int a, int b) { return a >= b; }
+//
+// But if we override the function, the dlls don't have access to it (unlike
+// on Linux). If that is desired, the strong definition must be
+// exported and interception can be used from the rest of the dlls.
+//
+// // libExample.cc
+// WIN_WEAK_EXPORT_DEF(bool, compare, int a, int b) { return a > b; }
+// // When initialized, check if the main executable defined "compare".
+// int libExample_init() {
+// uptr fnptr = __interception::InternalGetProcAddress(
+// (void *)GetModuleHandleA(0), "compare");
+// if (fnptr && !__interception::OverrideFunction((uptr)compare, fnptr, 0))
+// abort();
+// return 0;
+// }
+//
+// // client.cc
+// WIN_WEAK_IMPORT_DEF(compare)
+// // We override and export compare:
+// extern "C" __declspec(dllexport) bool compare (int a, int b) {
+// return a >= b;
+// }
+//
+#endif // SANITIZER_WINDOWS
+#endif // SANITIZER_WIN_DEFS_H
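
Concretely, the workaround boils down to linker comment directives. A manual expansion for one function on i386, where WIN_SYM_PREFIX is "_" (on x64 both names would be unprefixed); sketch only:

    // Sketch only: what WIN_WEAK_ALIAS(compare, compare__def) amounts to.
    extern "C" bool compare(int a, int b);        // the "weak" symbol
    extern "C" bool compare__def(int a, int b) {  // the default implementation
      return a > b;
    }
    #pragma comment(linker, "/alternatename:_compare=_compare__def")
    // And WIN_FORCE_LINK(compare) would emit:
    //   #pragma comment(linker, "/include:_compare")
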
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_win_defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_win_dll_thunk.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_win_dll_thunk.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_win_dll_thunk.cc (revision 351984)
@@ -0,0 +1,101 @@
+//===-- sanitizer_win_dll_thunk.cc ----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// This file defines a family of thunks that should be statically linked into
+// the DLLs that have instrumentation in order to delegate the calls to the
+// shared runtime that lives in the main binary.
+// See https://github.com/google/sanitizers/issues/209 for the details.
+//===----------------------------------------------------------------------===//
+
+#ifdef SANITIZER_DLL_THUNK
+#include "sanitizer_win_defs.h"
+#include "sanitizer_win_dll_thunk.h"
+#include "interception/interception.h"
+
+extern "C" {
+void *WINAPI GetModuleHandleA(const char *module_name);
+void abort();
+}
+
+namespace __sanitizer {
+uptr dllThunkGetRealAddrOrDie(const char *name) {
+ uptr ret =
+ __interception::InternalGetProcAddress((void *)GetModuleHandleA(0), name);
+ if (!ret)
+ abort();
+ return ret;
+}
+
+int dllThunkIntercept(const char* main_function, uptr dll_function) {
+ uptr wrapper = dllThunkGetRealAddrOrDie(main_function);
+ if (!__interception::OverrideFunction(dll_function, wrapper, 0))
+ abort();
+ return 0;
+}
+
+int dllThunkInterceptWhenPossible(const char* main_function,
+ const char* default_function, uptr dll_function) {
+ uptr wrapper = __interception::InternalGetProcAddress(
+ (void *)GetModuleHandleA(0), main_function);
+ if (!wrapper)
+ wrapper = dllThunkGetRealAddrOrDie(default_function);
+ if (!__interception::OverrideFunction(dll_function, wrapper, 0))
+ abort();
+ return 0;
+}
+} // namespace __sanitizer
+
+// Include Sanitizer Common interface.
+#define INTERFACE_FUNCTION(Name) INTERCEPT_SANITIZER_FUNCTION(Name)
+#define INTERFACE_WEAK_FUNCTION(Name) INTERCEPT_SANITIZER_WEAK_FUNCTION(Name)
+#include "sanitizer_common_interface.inc"
+
+#pragma section(".DLLTH$A", read) // NOLINT
+#pragma section(".DLLTH$Z", read) // NOLINT
+
+typedef void (*DllThunkCB)();
+extern "C" {
+__declspec(allocate(".DLLTH$A")) DllThunkCB __start_dll_thunk;
+__declspec(allocate(".DLLTH$Z")) DllThunkCB __stop_dll_thunk;
+}
+
+// Disable compiler warnings that show up if we declare our own version
+// of a compiler intrinsic (e.g. strlen).
+#pragma warning(disable: 4391)
+#pragma warning(disable: 4392)
+
+extern "C" int __dll_thunk_init() {
+ static bool flag = false;
+ // __dll_thunk_init is expected to be called by only one thread.
+ if (flag) return 0;
+ flag = true;
+
+ for (DllThunkCB *it = &__start_dll_thunk; it < &__stop_dll_thunk; ++it)
+ if (*it)
+ (*it)();
+
+ // In DLLs, the callbacks are expected to return 0,
+ // otherwise CRT initialization fails.
+ return 0;
+}
+
+// We want to call dll_thunk_init before C/C++ initializers / constructors are
+// executed, otherwise functions like memset might be invoked.
+#pragma section(".CRT$XIB", long, read) // NOLINT
+__declspec(allocate(".CRT$XIB")) int (*__dll_thunk_preinit)() =
+ __dll_thunk_init;
+
+static void WINAPI dll_thunk_thread_init(void *mod, unsigned long reason,
+ void *reserved) {
+ if (reason == /*DLL_PROCESS_ATTACH=*/1) __dll_thunk_init();
+}
+
+#pragma section(".CRT$XLAB", long, read) // NOLINT
+__declspec(allocate(".CRT$XLAB")) void (WINAPI *__dll_thunk_tls_init)(void *,
+ unsigned long, void *) = dll_thunk_thread_init;
+
+#endif // SANITIZER_DLL_THUNK
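
The $A/$M/$Z scheme works because the MSVC linker sorts fragments of a section alphabetically by the suffix after the $, so everything allocated into .DLLTH$M lands between the two bracketing pointers. The same registry pattern in miniature, an MSVC-specific sketch with made-up section names:

    // Sketch only: a self-registering callback list built from alphabetically
    // sorted section fragments, mirroring .DLLTH$A / .DLLTH$M / .DLLTH$Z.
    #include <cstdio>

    typedef void (*Callback)();
    #pragma section(".MYCB$A", read)
    #pragma section(".MYCB$M", read)
    #pragma section(".MYCB$Z", read)
    __declspec(allocate(".MYCB$A")) static Callback cb_start;
    __declspec(allocate(".MYCB$Z")) static Callback cb_stop;

    static void Hello() { std::puts("registered callback ran"); }
    __declspec(allocate(".MYCB$M")) static Callback cb_hello = Hello;

    int main() {
      for (Callback *it = &cb_start; it < &cb_stop; ++it)
        if (*it) (*it)();  // markers and linker padding are null, so skipped
    }
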
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_win_dll_thunk.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_win_dll_thunk.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_win_dll_thunk.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_win_dll_thunk.h (revision 351984)
@@ -0,0 +1,181 @@
+//===-- sanitizer_win_dll_thunk.h -----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// This header provides helper macros to delegate calls to the shared runtime
+// that lives in the main executable. It should be included in dll thunks that
+// will be linked into the dlls, when the sanitizer is a static library included
+// in the main executable.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_WIN_DLL_THUNK_H
+#define SANITIZER_WIN_DLL_THUNK_H
+#include "sanitizer_internal_defs.h"
+
+namespace __sanitizer {
+uptr dllThunkGetRealAddrOrDie(const char *name);
+
+int dllThunkIntercept(const char* main_function, uptr dll_function);
+
+int dllThunkInterceptWhenPossible(const char* main_function,
+ const char* default_function, uptr dll_function);
+}
+
+extern "C" int __dll_thunk_init();
+
+// ----------------- Function interception helper macros -------------------- //
+// Override dll_function with main_function from main executable.
+#define INTERCEPT_OR_DIE(main_function, dll_function) \
+ static int intercept_##dll_function() { \
+ return __sanitizer::dllThunkIntercept(main_function, (__sanitizer::uptr) \
+ dll_function); \
+ } \
+ __pragma(section(".DLLTH$M", long, read)) \
+ __declspec(allocate(".DLLTH$M")) int (*__dll_thunk_##dll_function)() = \
+ intercept_##dll_function;
+
+// Try to override dll_function with main_function from main executable.
+// If main_function is not present, override dll_function with default_function.
+#define INTERCEPT_WHEN_POSSIBLE(main_function, default_function, dll_function) \
+ static int intercept_##dll_function() { \
+ return __sanitizer::dllThunkInterceptWhenPossible(main_function, \
+ default_function, (__sanitizer::uptr)dll_function); \
+ } \
+ __pragma(section(".DLLTH$M", long, read)) \
+ __declspec(allocate(".DLLTH$M")) int (*__dll_thunk_##dll_function)() = \
+ intercept_##dll_function;
+
+// -------------------- Function interception macros ------------------------ //
+// Special case of hooks -- ASan own interface functions. Those are only called
+// after __asan_init, thus an empty implementation is sufficient.
+#define INTERCEPT_SANITIZER_FUNCTION(name) \
+ extern "C" __declspec(noinline) void name() { \
+ volatile int prevent_icf = (__LINE__ << 8) ^ __COUNTER__; \
+ static const char function_name[] = #name; \
+ for (const char* ptr = &function_name[0]; *ptr; ++ptr) \
+ prevent_icf ^= *ptr; \
+ (void)prevent_icf; \
+ __debugbreak(); \
+ } \
+ INTERCEPT_OR_DIE(#name, name)
+
+// Special case of hooks -- weak functions. They can be redefined in the main
+// executable, but that is not required, so we shouldn't die if we cannot find
+// a reference. Instead, when the function is not present in the main executable
+// we fall back to the default implementation provided by the ASan library.
+#define INTERCEPT_SANITIZER_WEAK_FUNCTION(name) \
+ extern "C" __declspec(noinline) void name() { \
+ volatile int prevent_icf = (__LINE__ << 8) ^ __COUNTER__; \
+ static const char function_name[] = #name; \
+ for (const char* ptr = &function_name[0]; *ptr; ++ptr) \
+ prevent_icf ^= *ptr; \
+ (void)prevent_icf; \
+ __debugbreak(); \
+ } \
+ INTERCEPT_WHEN_POSSIBLE(#name, STRINGIFY(WEAK_EXPORT_NAME(name)), name)
+
+// We can't define our own version of strlen etc. because that would lead to
+// link-time or even type mismatch errors. Instead, we can declare a function
+// just to be able to get its address. We may miss the first few calls to the
+// functions since they can be called before __dll_thunk_init, but that only
+// leads to false negatives in the startup code before the user's global
+// initializers, which isn't a big deal.
+#define INTERCEPT_LIBRARY_FUNCTION(name) \
+ extern "C" void name(); \
+ INTERCEPT_OR_DIE(WRAPPER_NAME(name), name)
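+// For example, the CRT strlen in a dll can be redirected to the interceptor
+// exported by the main executable with
+//   INTERCEPT_LIBRARY_FUNCTION(strlen);
+// where WRAPPER_NAME(strlen) evaluates to the exported interceptor name (an
+// "__interceptor_strlen"-style symbol; the exact prefix comes from the
+// interception headers).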
+
+// Use these macros for functions that could be called before __dll_thunk_init()
+// is executed and that don't lead to errors if defined (free, malloc, etc.).
+#define INTERCEPT_WRAP_V_V(name) \
+ extern "C" void name() { \
+ typedef decltype(name) *fntype; \
+ static fntype fn = (fntype)__sanitizer::dllThunkGetRealAddrOrDie(#name); \
+ fn(); \
+ } \
+ INTERCEPT_OR_DIE(#name, name);
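+// Expansion sketch for a hypothetical foo(): the dll gets a real definition of
+// foo() that forwards to the implementation in the main executable, resolving
+// its address once (via the function-local static) on the first call, so foo()
+// keeps working even if it is called before __dll_thunk_init() runs.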
+
+#define INTERCEPT_WRAP_V_W(name) \
+ extern "C" void name(void *arg) { \
+ typedef decltype(name) *fntype; \
+ static fntype fn = (fntype)__sanitizer::dllThunkGetRealAddrOrDie(#name); \
+ fn(arg); \
+ } \
+ INTERCEPT_OR_DIE(#name, name);
+
+#define INTERCEPT_WRAP_V_WW(name) \
+ extern "C" void name(void *arg1, void *arg2) { \
+ typedef decltype(name) *fntype; \
+ static fntype fn = (fntype)__sanitizer::dllThunkGetRealAddrOrDie(#name); \
+ fn(arg1, arg2); \
+ } \
+ INTERCEPT_OR_DIE(#name, name);
+
+#define INTERCEPT_WRAP_V_WWW(name) \
+ extern "C" void name(void *arg1, void *arg2, void *arg3) { \
+ typedef decltype(name) *fntype; \
+ static fntype fn = (fntype)__sanitizer::dllThunkGetRealAddrOrDie(#name); \
+ fn(arg1, arg2, arg3); \
+ } \
+ INTERCEPT_OR_DIE(#name, name);
+
+#define INTERCEPT_WRAP_W_V(name) \
+ extern "C" void *name() { \
+ typedef decltype(name) *fntype; \
+ static fntype fn = (fntype)__sanitizer::dllThunkGetRealAddrOrDie(#name); \
+ return fn(); \
+ } \
+ INTERCEPT_OR_DIE(#name, name);
+
+#define INTERCEPT_WRAP_W_W(name) \
+ extern "C" void *name(void *arg) { \
+ typedef decltype(name) *fntype; \
+ static fntype fn = (fntype)__sanitizer::dllThunkGetRealAddrOrDie(#name); \
+ return fn(arg); \
+ } \
+ INTERCEPT_OR_DIE(#name, name);
+
+#define INTERCEPT_WRAP_W_WW(name) \
+ extern "C" void *name(void *arg1, void *arg2) { \
+ typedef decltype(name) *fntype; \
+ static fntype fn = (fntype)__sanitizer::dllThunkGetRealAddrOrDie(#name); \
+ return fn(arg1, arg2); \
+ } \
+ INTERCEPT_OR_DIE(#name, name);
+
+#define INTERCEPT_WRAP_W_WWW(name) \
+ extern "C" void *name(void *arg1, void *arg2, void *arg3) { \
+ typedef decltype(name) *fntype; \
+ static fntype fn = (fntype)__sanitizer::dllThunkGetRealAddrOrDie(#name); \
+ return fn(arg1, arg2, arg3); \
+ } \
+ INTERCEPT_OR_DIE(#name, name);
+
+#define INTERCEPT_WRAP_W_WWWW(name) \
+ extern "C" void *name(void *arg1, void *arg2, void *arg3, void *arg4) { \
+ typedef decltype(name) *fntype; \
+ static fntype fn = (fntype)__sanitizer::dllThunkGetRealAddrOrDie(#name); \
+ return fn(arg1, arg2, arg3, arg4); \
+ } \
+ INTERCEPT_OR_DIE(#name, name);
+
+#define INTERCEPT_WRAP_W_WWWWW(name) \
+ extern "C" void *name(void *arg1, void *arg2, void *arg3, void *arg4, \
+ void *arg5) { \
+ typedef decltype(name) *fntype; \
+ static fntype fn = (fntype)__sanitizer::dllThunkGetRealAddrOrDie(#name); \
+ return fn(arg1, arg2, arg3, arg4, arg5); \
+ } \
+ INTERCEPT_OR_DIE(#name, name);
+
+#define INTERCEPT_WRAP_W_WWWWWW(name) \
+ extern "C" void *name(void *arg1, void *arg2, void *arg3, void *arg4, \
+ void *arg5, void *arg6) { \
+ typedef decltype(name) *fntype; \
+ static fntype fn = (fntype)__sanitizer::dllThunkGetRealAddrOrDie(#name); \
+ return fn(arg1, arg2, arg3, arg4, arg5, arg6); \
+ } \
+ INTERCEPT_OR_DIE(#name, name);
+
+#endif // SANITIZER_WIN_DLL_THUNK_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_win_dll_thunk.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_win_dynamic_runtime_thunk.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_win_dynamic_runtime_thunk.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_win_dynamic_runtime_thunk.cc (revision 351984)
@@ -0,0 +1,26 @@
+//===-- sanitizer_win_dynamic_runtime_thunk.cc ----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines things that need to be present in application modules in
+// order to interact with Sanitizer Common when it is included in a dll.
+//
+//===----------------------------------------------------------------------===//
+#ifdef SANITIZER_DYNAMIC_RUNTIME_THUNK
+#define SANITIZER_IMPORT_INTERFACE 1
+#include "sanitizer_win_defs.h"
+// Define weak aliases for all weak functions imported from sanitizer common.
+#define INTERFACE_FUNCTION(Name)
+#define INTERFACE_WEAK_FUNCTION(Name) WIN_WEAK_IMPORT_DEF(Name)
+#include "sanitizer_common_interface.inc"
+#endif // SANITIZER_DYNAMIC_RUNTIME_THUNK
+
+namespace __sanitizer {
+// Add one, otherwise unused, external symbol to this object file so that the
+// Visual C++ linker includes it and reads the .drective section.
+void ForceWholeArchiveIncludeForSanitizerCommon() {}
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_win_dynamic_runtime_thunk.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_win_weak_interception.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_win_weak_interception.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_win_weak_interception.cc (revision 351984)
@@ -0,0 +1,93 @@
+//===-- sanitizer_win_weak_interception.cc --------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// This module should be included in the sanitizer when it is implemented as a
+// shared library on Windows (dll), in order to delegate calls to weak
+// functions to the implementation in the main executable when a strong
+// definition is provided.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+#if SANITIZER_WINDOWS && SANITIZER_DYNAMIC
+#include "sanitizer_win_weak_interception.h"
+#include "sanitizer_allocator_interface.h"
+#include "sanitizer_interface_internal.h"
+#include "sanitizer_win_defs.h"
+#include "interception/interception.h"
+
+extern "C" {
+void *WINAPI GetModuleHandleA(const char *module_name);
+void abort();
+}
+
+namespace __sanitizer {
+// Try to get a pointer to real_function in the main module and override
+// dll_function with that pointer. If the function isn't found, nothing changes.
+int interceptWhenPossible(uptr dll_function, const char *real_function) {
+ uptr real = __interception::InternalGetProcAddress(
+ (void *)GetModuleHandleA(0), real_function);
+ if (real && !__interception::OverrideFunction((uptr)dll_function, real, 0))
+ abort();
+ return 0;
+}
+} // namespace __sanitizer
+
+// Declare weak hooks.
+extern "C" {
+void __sanitizer_weak_hook_memcmp(uptr called_pc, const void *s1,
+ const void *s2, uptr n, int result);
+void __sanitizer_weak_hook_strcmp(uptr called_pc, const char *s1,
+ const char *s2, int result);
+void __sanitizer_weak_hook_strncmp(uptr called_pc, const char *s1,
+ const char *s2, uptr n, int result);
+void __sanitizer_weak_hook_strstr(uptr called_pc, const char *s1,
+ const char *s2, char *result);
+}
+
+// Include Sanitizer Common interface.
+#define INTERFACE_FUNCTION(Name)
+#define INTERFACE_WEAK_FUNCTION(Name) INTERCEPT_SANITIZER_WEAK_FUNCTION(Name)
+#include "sanitizer_common_interface.inc"
+
+#pragma section(".WEAK$A", read) // NOLINT
+#pragma section(".WEAK$Z", read) // NOLINT
+
+typedef void (*InterceptCB)();
+extern "C" {
+__declspec(allocate(".WEAK$A")) InterceptCB __start_weak_list;
+__declspec(allocate(".WEAK$Z")) InterceptCB __stop_weak_list;
+}
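+// The MSVC linker merges sections whose names share a prefix in alphabetical
+// order of the suffix, so every pointer placed in ".WEAK$M" by
+// INTERCEPT_SANITIZER_WEAK_FUNCTION lands between the ".WEAK$A" and ".WEAK$Z"
+// markers above, and the loop below can walk the callbacks as an ordinary
+// array delimited by &__start_weak_list and &__stop_weak_list.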
+
+static int weak_intercept_init() {
+ static bool flag = false;
+  // weak_intercept_init is expected to be called by only one thread.
+ if (flag) return 0;
+ flag = true;
+
+ for (InterceptCB *it = &__start_weak_list; it < &__stop_weak_list; ++it)
+ if (*it)
+ (*it)();
+
+ // In DLLs, the callbacks are expected to return 0,
+ // otherwise CRT initialization fails.
+ return 0;
+}
+
+#pragma section(".CRT$XIB", long, read) // NOLINT
+__declspec(allocate(".CRT$XIB")) int (*__weak_intercept_preinit)() =
+ weak_intercept_init;
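+// ".CRT$XI*" entries form the CRT's table of C initializers, so placing this
+// pointer in ".CRT$XIB" runs weak_intercept_init() early during CRT startup,
+// before any C++ constructors.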
+
+static void WINAPI weak_intercept_thread_init(void *mod, unsigned long reason,
+ void *reserved) {
+ if (reason == /*DLL_PROCESS_ATTACH=*/1) weak_intercept_init();
+}
+
+#pragma section(".CRT$XLAB", long, read) // NOLINT
+__declspec(allocate(".CRT$XLAB")) void(WINAPI *__weak_intercept_tls_init)(
+ void *, unsigned long, void *) = weak_intercept_thread_init;
+
+#endif // SANITIZER_WINDOWS && SANITIZER_DYNAMIC
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_win_weak_interception.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_win_weak_interception.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_win_weak_interception.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_win_weak_interception.h (revision 351984)
@@ -0,0 +1,32 @@
+//===-- sanitizer_win_weak_interception.h ---------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// This header provides helper macros to delegate calls of weak functions to
+// the implementation in the main executable when a strong definition is
+// present.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_WIN_WEAK_INTERCEPTION_H
+#define SANITIZER_WIN_WEAK_INTERCEPTION_H
+#include "sanitizer_internal_defs.h"
+
+namespace __sanitizer {
+int interceptWhenPossible(uptr dll_function, const char *real_function);
+}
+
+// ----------------- Function interception helper macros -------------------- //
+// Weak functions can be redefined in the main executable, but that is not
+// necessary, so we shouldn't die if we cannot find a reference.
+#define INTERCEPT_WEAK(Name) interceptWhenPossible((uptr) Name, #Name);
+
+#define INTERCEPT_SANITIZER_WEAK_FUNCTION(Name) \
+ static int intercept_##Name() { \
+ return __sanitizer::interceptWhenPossible((__sanitizer::uptr) Name, #Name);\
+ } \
+ __pragma(section(".WEAK$M", long, read)) \
+ __declspec(allocate(".WEAK$M")) int (*__weak_intercept_##Name)() = \
+ intercept_##Name;
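+// Usage sketch (hypothetical name): INTERCEPT_SANITIZER_WEAK_FUNCTION(
+// __sanitizer_example_hook) plants a registration callback in the ".WEAK$M"
+// section; weak_intercept_init() in sanitizer_win_weak_interception.cc later
+// walks that section and redirects the dll's copy to the strong definition in
+// the main executable, if one exists.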
+
+#endif // SANITIZER_WIN_WEAK_INTERCEPTION_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/sanitizer_win_weak_interception.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/symbolizer/sanitizer_symbolize.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/symbolizer/sanitizer_symbolize.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/symbolizer/sanitizer_symbolize.cc (revision 351984)
@@ -0,0 +1,80 @@
+//===-- sanitizer_symbolize.cc ----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementation of weak hooks from sanitizer_symbolizer_posix_libcdep.cc.
+//
+//===----------------------------------------------------------------------===//
+
+#include <stdio.h>
+#include <string>
+
+#include "llvm/DebugInfo/Symbolize/DIPrinter.h"
+#include "llvm/DebugInfo/Symbolize/Symbolize.h"
+
+static llvm::symbolize::LLVMSymbolizer *getDefaultSymbolizer() {
+ static llvm::symbolize::LLVMSymbolizer *DefaultSymbolizer =
+ new llvm::symbolize::LLVMSymbolizer();
+ return DefaultSymbolizer;
+}
+
+namespace __sanitizer {
+int internal_snprintf(char *buffer, unsigned long length, const char *format,
+ ...);
+} // namespace __sanitizer
+
+extern "C" {
+
+typedef uint64_t u64;
+
+bool __sanitizer_symbolize_code(const char *ModuleName, uint64_t ModuleOffset,
+ char *Buffer, int MaxLength) {
+ std::string Result;
+ {
+ llvm::raw_string_ostream OS(Result);
+ llvm::symbolize::DIPrinter Printer(OS);
+    // TODO: it is necessary to set the proper SectionIndex here.
+    // object::SectionedAddress::UndefSection works only for absolute addresses.
+ auto ResOrErr = getDefaultSymbolizer()->symbolizeInlinedCode(
+ ModuleName,
+ {ModuleOffset, llvm::object::SectionedAddress::UndefSection});
+ Printer << (ResOrErr ? ResOrErr.get() : llvm::DIInliningInfo());
+ }
+ return __sanitizer::internal_snprintf(Buffer, MaxLength, "%s",
+ Result.c_str()) < MaxLength;
+}
+
+bool __sanitizer_symbolize_data(const char *ModuleName, uint64_t ModuleOffset,
+ char *Buffer, int MaxLength) {
+ std::string Result;
+ {
+ llvm::raw_string_ostream OS(Result);
+ llvm::symbolize::DIPrinter Printer(OS);
+    // TODO: it is necessary to set the proper SectionIndex here.
+    // object::SectionedAddress::UndefSection works only for absolute addresses.
+ auto ResOrErr = getDefaultSymbolizer()->symbolizeData(
+ ModuleName,
+ {ModuleOffset, llvm::object::SectionedAddress::UndefSection});
+ Printer << (ResOrErr ? ResOrErr.get() : llvm::DIGlobal());
+ }
+ return __sanitizer::internal_snprintf(Buffer, MaxLength, "%s",
+ Result.c_str()) < MaxLength;
+}
+
+void __sanitizer_symbolize_flush() { getDefaultSymbolizer()->flush(); }
+
+int __sanitizer_symbolize_demangle(const char *Name, char *Buffer,
+ int MaxLength) {
+ std::string Result =
+ llvm::symbolize::LLVMSymbolizer::DemangleName(Name, nullptr);
+ return __sanitizer::internal_snprintf(Buffer, MaxLength, "%s",
+ Result.c_str()) < MaxLength
+ ? static_cast<int>(Result.size() + 1)
+ : 0;
+}
+
+} // extern "C"
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/symbolizer/sanitizer_symbolize.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/symbolizer/sanitizer_wrappers.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/symbolizer/sanitizer_wrappers.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/symbolizer/sanitizer_wrappers.cc (revision 351984)
@@ -0,0 +1,198 @@
+//===-- sanitizer_wrappers.cc -----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Redirect some functions to sanitizer interceptors.
+//
+//===----------------------------------------------------------------------===//
+
+#include <dlfcn.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <unistd.h>
+
+#include <tuple>
+
+// Need to match ../sanitizer_common/sanitizer_internal_defs.h
+#if defined(ARCH_PPC)
+#define OFF_T unsigned long
+#else
+#define OFF_T unsigned long long
+#endif
+
+namespace __sanitizer {
+unsigned long internal_open(const char *filename, int flags);
+unsigned long internal_open(const char *filename, int flags, unsigned mode);
+unsigned long internal_close(int fd);
+unsigned long internal_stat(const char *path, void *buf);
+unsigned long internal_lstat(const char *path, void *buf);
+unsigned long internal_fstat(int fd, void *buf);
+size_t internal_strlen(const char *s);
+unsigned long internal_mmap(void *addr, unsigned long length, int prot,
+ int flags, int fd, OFF_T offset);
+void *internal_memcpy(void *dest, const void *src, unsigned long n);
+// Used to propagate errno.
+bool internal_iserror(unsigned long retval, int *rverrno = 0);
+} // namespace __sanitizer
+
+namespace {
+
+template <typename T>
+struct GetTypes;
+
+template <typename R, typename... Args>
+struct GetTypes<R(Args...)> {
+ using Result = R;
+ template <size_t i>
+ struct Arg {
+ using Type = typename std::tuple_element<i, std::tuple<Args...>>::type;
+ };
+};
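+// Worked example: with GetTypes<ssize_t(int, void *, size_t)>, Result is
+// ssize_t, Arg<0>::Type is int, Arg<1>::Type is void *, and Arg<2>::Type is
+// size_t -- exactly the pieces the interceptor macros below paste into the
+// declarations they generate.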
+
+#define LLVM_SYMBOLIZER_GET_FUNC(Function) \
+ ((__interceptor_##Function) \
+ ? (__interceptor_##Function) \
+ : reinterpret_cast<decltype(&Function)>(dlsym(RTLD_NEXT, #Function)))
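+// __interceptor_##Function is declared weak, so it is non-null only when a
+// sanitizer runtime linked into the process actually defines an interceptor;
+// otherwise the call falls back to the next definition in link order via
+// dlsym(RTLD_NEXT, ...).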
+
+#define LLVM_SYMBOLIZER_INTERCEPTOR1(Function, ...) \
+ GetTypes<__VA_ARGS__>::Result __interceptor_##Function( \
+ GetTypes<__VA_ARGS__>::Arg<0>::Type) __attribute__((weak)); \
+ GetTypes<__VA_ARGS__>::Result Function( \
+ GetTypes<__VA_ARGS__>::Arg<0>::Type arg0) { \
+ return LLVM_SYMBOLIZER_GET_FUNC(Function)(arg0); \
+ }
+
+#define LLVM_SYMBOLIZER_INTERCEPTOR2(Function, ...) \
+ GetTypes<__VA_ARGS__>::Result __interceptor_##Function( \
+ GetTypes<__VA_ARGS__>::Arg<0>::Type, \
+ GetTypes<__VA_ARGS__>::Arg<1>::Type) __attribute__((weak)); \
+ GetTypes<__VA_ARGS__>::Result Function( \
+ GetTypes<__VA_ARGS__>::Arg<0>::Type arg0, \
+ GetTypes<__VA_ARGS__>::Arg<1>::Type arg1) { \
+ return LLVM_SYMBOLIZER_GET_FUNC(Function)(arg0, arg1); \
+ }
+
+#define LLVM_SYMBOLIZER_INTERCEPTOR3(Function, ...) \
+ GetTypes<__VA_ARGS__>::Result __interceptor_##Function( \
+ GetTypes<__VA_ARGS__>::Arg<0>::Type, \
+ GetTypes<__VA_ARGS__>::Arg<1>::Type, \
+ GetTypes<__VA_ARGS__>::Arg<2>::Type) __attribute__((weak)); \
+ GetTypes<__VA_ARGS__>::Result Function( \
+ GetTypes<__VA_ARGS__>::Arg<0>::Type arg0, \
+ GetTypes<__VA_ARGS__>::Arg<1>::Type arg1, \
+ GetTypes<__VA_ARGS__>::Arg<2>::Type arg2) { \
+ return LLVM_SYMBOLIZER_GET_FUNC(Function)(arg0, arg1, arg2); \
+ }
+
+#define LLVM_SYMBOLIZER_INTERCEPTOR4(Function, ...) \
+ GetTypes<__VA_ARGS__>::Result __interceptor_##Function( \
+ GetTypes<__VA_ARGS__>::Arg<0>::Type, \
+ GetTypes<__VA_ARGS__>::Arg<1>::Type, \
+ GetTypes<__VA_ARGS__>::Arg<2>::Type, \
+ GetTypes<__VA_ARGS__>::Arg<3>::Type) __attribute__((weak)); \
+ GetTypes<__VA_ARGS__>::Result Function( \
+ GetTypes<__VA_ARGS__>::Arg<0>::Type arg0, \
+ GetTypes<__VA_ARGS__>::Arg<1>::Type arg1, \
+ GetTypes<__VA_ARGS__>::Arg<2>::Type arg2, \
+ GetTypes<__VA_ARGS__>::Arg<3>::Type arg3) { \
+ return LLVM_SYMBOLIZER_GET_FUNC(Function)(arg0, arg1, arg2, arg3); \
+ }
+
+} // namespace
+
+// C-style interface around internal sanitizer libc functions.
+extern "C" {
+
+#define RETURN_OR_SET_ERRNO(T, res) \
+ int rverrno; \
+ if (__sanitizer::internal_iserror(res, &rverrno)) { \
+ errno = rverrno; \
+ return (T)-1; \
+ } \
+ return (T)res;
+
+int open(const char *filename, int flags, ...) {
+ unsigned long res;
+  // O_CREAT is a bit flag, so test it with & (a bitwise | here would always
+  // be true); only when it is set is the optional mode argument present.
+  if (flags & O_CREAT) {
+ va_list va;
+ va_start(va, flags);
+ unsigned mode = va_arg(va, unsigned);
+ va_end(va);
+ res = __sanitizer::internal_open(filename, flags, mode);
+ } else {
+ res = __sanitizer::internal_open(filename, flags);
+ }
+ RETURN_OR_SET_ERRNO(int, res);
+}
+
+int close(int fd) {
+ unsigned long res = __sanitizer::internal_close(fd);
+ RETURN_OR_SET_ERRNO(int, res);
+}
+
+#define STAT(func, arg, buf) \
+ unsigned long res = __sanitizer::internal_##func(arg, buf); \
+ RETURN_OR_SET_ERRNO(int, res);
+
+int stat(const char *path, struct stat *buf) { STAT(stat, path, buf); }
+
+int lstat(const char *path, struct stat *buf) { STAT(lstat, path, buf); }
+
+int fstat(int fd, struct stat *buf) { STAT(fstat, fd, buf); }
+
+// Redirect the versioned stat functions to __sanitizer::internal_*() as well.
+int __xstat(int version, const char *path, struct stat *buf) {
+ STAT(stat, path, buf);
+}
+
+int __lxstat(int version, const char *path, struct stat *buf) {
+ STAT(lstat, path, buf);
+}
+
+int __fxstat(int version, int fd, struct stat *buf) { STAT(fstat, fd, buf); }
+
+size_t strlen(const char *s) { return __sanitizer::internal_strlen(s); }
+
+void *mmap(void *addr, size_t length, int prot, int flags, int fd,
+ off_t offset) {
+ unsigned long res = __sanitizer::internal_mmap(
+ addr, (unsigned long)length, prot, flags, fd, (unsigned long long)offset);
+ RETURN_OR_SET_ERRNO(void *, res);
+}
+
+LLVM_SYMBOLIZER_INTERCEPTOR3(read, ssize_t(int, void *, size_t))
+LLVM_SYMBOLIZER_INTERCEPTOR4(pread, ssize_t(int, void *, size_t, off_t))
+LLVM_SYMBOLIZER_INTERCEPTOR4(pread64, ssize_t(int, void *, size_t, off64_t))
+LLVM_SYMBOLIZER_INTERCEPTOR2(realpath, char *(const char *, char *))
+
+LLVM_SYMBOLIZER_INTERCEPTOR1(pthread_cond_broadcast, int(pthread_cond_t *))
+LLVM_SYMBOLIZER_INTERCEPTOR2(pthread_cond_wait,
+ int(pthread_cond_t *, pthread_mutex_t *))
+LLVM_SYMBOLIZER_INTERCEPTOR1(pthread_mutex_lock, int(pthread_mutex_t *))
+LLVM_SYMBOLIZER_INTERCEPTOR1(pthread_mutex_unlock, int(pthread_mutex_t *))
+LLVM_SYMBOLIZER_INTERCEPTOR1(pthread_mutex_destroy, int(pthread_mutex_t *))
+LLVM_SYMBOLIZER_INTERCEPTOR2(pthread_mutex_init,
+ int(pthread_mutex_t *,
+ const pthread_mutexattr_t *))
+LLVM_SYMBOLIZER_INTERCEPTOR1(pthread_mutexattr_destroy,
+ int(pthread_mutexattr_t *))
+LLVM_SYMBOLIZER_INTERCEPTOR1(pthread_mutexattr_init, int(pthread_mutexattr_t *))
+LLVM_SYMBOLIZER_INTERCEPTOR2(pthread_mutexattr_settype,
+ int(pthread_mutexattr_t *, int))
+LLVM_SYMBOLIZER_INTERCEPTOR1(pthread_getspecific, void *(pthread_key_t))
+LLVM_SYMBOLIZER_INTERCEPTOR2(pthread_key_create,
+ int(pthread_key_t *, void (*)(void *)))
+LLVM_SYMBOLIZER_INTERCEPTOR2(pthread_once,
+ int(pthread_once_t *, void (*)(void)))
+LLVM_SYMBOLIZER_INTERCEPTOR2(pthread_setspecific,
+ int(pthread_key_t, const void *))
+LLVM_SYMBOLIZER_INTERCEPTOR3(pthread_sigmask,
+ int(int, const sigset_t *, sigset_t *))
+
+} // extern "C"
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/symbolizer/sanitizer_wrappers.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/symbolizer/scripts/ar_to_bc.sh
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/symbolizer/scripts/ar_to_bc.sh (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/symbolizer/scripts/ar_to_bc.sh (revision 351984)
@@ -0,0 +1,39 @@
+#!/bin/bash
+
+function usage() {
+ echo "Usage: $0 INPUT... OUTPUT"
+ exit 1
+}
+
+if [ "$#" -le 1 ]; then
+ usage
+fi
+
+[[ $AR == /* ]] || AR=$PWD/$AR
+[[ $LINK == /* ]] || LINK=$PWD/$LINK
+
+INPUTS=
+OUTPUT=
+for ARG in $@; do
+ INPUTS="$INPUTS $OUTPUT"
+ OUTPUT=$(readlink -f $ARG)
+done
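+# The loop above leaves every argument except the last in INPUTS and the last
+# one in OUTPUT, e.g. "ar_to_bc.sh a.a b.a all.bc" yields INPUTS=" a.a b.a"
+# and OUTPUT=all.bc.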
+
+echo Inputs: $INPUTS
+echo Output: $OUTPUT
+
+SCRATCH_DIR=$(mktemp -d)
+ln -s $INPUTS $SCRATCH_DIR/
+
+pushd $SCRATCH_DIR
+
+for INPUT in *; do
+ for OBJ in $($AR t $INPUT); do
+ $AR x $INPUT $OBJ
+ mv -f $OBJ $(basename $INPUT).$OBJ
+ done
+done
+
+$LINK *.o -o $OUTPUT
+
+rm -rf $SCRATCH_DIR
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/symbolizer/scripts/ar_to_bc.sh
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:executable
## -0,0 +1 ##
+*
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/symbolizer/scripts/global_symbols.txt
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/symbolizer/scripts/global_symbols.txt (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/symbolizer/scripts/global_symbols.txt (revision 351984)
@@ -0,0 +1,154 @@
+_GLOBAL_OFFSET_TABLE_ U
+_ZN11__sanitizer13internal_mmapEPvmiiiy U
+_ZN11__sanitizer13internal_openEPKcij U
+_ZN11__sanitizer13internal_statEPKcPv U
+_ZN11__sanitizer14internal_closeEi U
+_ZN11__sanitizer14internal_fstatEiPv U
+_ZN11__sanitizer14internal_lstatEPKcPv U
+_ZN11__sanitizer15internal_strlenEPKc U
+_ZN11__sanitizer16internal_iserrorEmPi U
+_ZN11__sanitizer17internal_snprintfEPcmPKcz U
+__ctype_b_loc U
+__ctype_get_mb_cur_max U
+__cxa_atexit U
+__divdi3 U
+__dso_handle U
+__errno_location U
+__interceptor_pread w
+__interceptor_pthread_cond_broadcast w
+__interceptor_pthread_cond_wait w
+__interceptor_pthread_getspecific w
+__interceptor_pthread_key_create w
+__interceptor_pthread_mutex_lock w
+__interceptor_pthread_mutex_unlock w
+__interceptor_pthread_once w
+__interceptor_pthread_setspecific w
+__interceptor_read w
+__interceptor_realpath w
+__moddi3 U
+__sanitizer_symbolize_code T
+__sanitizer_symbolize_data T
+__sanitizer_symbolize_demangle T
+__sanitizer_symbolize_flush T
+__strdup U
+__udivdi3 U
+__umoddi3 U
+_exit U
+abort U
+access U
+bcmp U
+calloc U
+catclose U
+catgets U
+catopen U
+ceil U
+clock_gettime U
+cfgetospeed U
+dl_iterate_phdr U
+dlsym U
+dup U
+dup2 U
+environ U
+execv U
+execve U
+exit U
+fclose U
+fflush U
+fileno U
+fopen U
+fork U
+fprintf U
+fputc U
+free U
+freelocale U
+fwrite U
+getc U
+getcwd U
+getenv U
+getpagesize U
+getpid U
+getrlimit U
+gettimeofday U
+ioctl U
+isalpha U
+isatty U
+islower U
+isprint U
+isupper U
+isxdigit U
+log10 U
+lseek U
+lseek64 U
+malloc U
+mbrlen U
+mbrtowc U
+mbsnrtowcs U
+mbsrtowcs U
+mbtowc U
+memchr U
+memcmp U
+memcpy U
+memmove U
+memset U
+mkdir U
+munmap U
+newlocale U
+perror U
+posix_spawn U
+posix_spawn_file_actions_adddup2 U
+posix_spawn_file_actions_addopen U
+posix_spawn_file_actions_destroy U
+posix_spawn_file_actions_init U
+qsort U
+rand U
+readlink U
+realloc U
+remove U
+setrlimit U
+setvbuf U
+sigfillset U
+sigprocmask U
+snprintf U
+sprintf U
+srand U
+sscanf U
+stderr U
+stdin U
+stdout U
+strcat U
+strchr U
+strcmp U
+strcpy U
+strdup U
+strerror U
+strerror_r U
+strftime_l U
+strncmp U
+strncpy U
+strrchr U
+strsep U
+strtod_l U
+strtof_l U
+strtok_r U
+strtol U
+strtold_l U
+strtoll_l U
+strtoull_l U
+syscall U
+tcgetattr U
+uname U
+ungetc U
+unlink U
+uselocale U
+vasprintf U
+vfprintf U
+vsnprintf U
+vsscanf U
+waitpid U
+wcrtomb U
+wcslen U
+wcsnrtombs U
+wmemcpy U
+wmemmove U
+wmemset U
+write U
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/symbolizer/scripts/global_symbols.txt
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/symbolizer/scripts/build_symbolizer.sh
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/symbolizer/scripts/build_symbolizer.sh (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/symbolizer/scripts/build_symbolizer.sh (revision 351984)
@@ -0,0 +1,184 @@
+#!/bin/bash -eu
+#
+# Run as: CLANG=bin/clang ZLIB_SRC=src/zlib \
+# build_symbolizer.sh runtime_build/lib/clang/4.0.0/lib/linux/
+# zlib can be downloaded from http://www.zlib.net.
+#
+# The script compiles a self-contained object file with symbolization code and
+# injects it into the given set of runtime libraries. It updates only libraries
+# which have unresolved __sanitizer_symbolize_* symbols and match the
+# architecture. The object file is compiled from LLVM sources together with
+# dependencies like libc++ and zlib. The script then internalizes the symbols
+# in the file so that it can be linked into arbitrary programs, avoiding
+# conflicts with the program's own symbols and avoiding dependencies on any
+# program symbols. The only acceptable dependencies are libc and
+# __sanitizer::internal_* from the sanitizer runtime.
+#
+# Symbols exported by the object file will be used by Sanitizer runtime
+# libraries to symbolize code/data in-process.
+#
+# The script will modify the output directory which is given as the first
+# argument to the script.
+#
+# FIXME: We should really be using a simpler approach to building this object
+# file, and it should be available as a regular cmake rule. Conceptually, we
+# want to be doing "ld -r" followed by "objcopy -G" to create a relocatable
+# object file with only our entry points exposed. However, this does not work at
+# present, see PR30750.
+
+SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
+SRC_DIR=$(readlink -f $SCRIPT_DIR/..)
+TARGET_DIR=$(readlink -f $1)
+
+LLVM_SRC="${LLVM_SRC:-$SCRIPT_DIR/../../../../../..}"
+LLVM_SRC=$(readlink -f $LLVM_SRC)
+
+if [[ ! -d "${LLVM_SRC}/projects/libcxxabi" ||
+ ! -d "${LLVM_SRC}/projects/libcxx" ]]; then
+ echo "Missing or incomplete LLVM_SRC"
+ exit 1
+fi
+
+if [[ "$ZLIB_SRC" == "" ||
+ ! -x "${ZLIB_SRC}/configure" ||
+ ! -f "${ZLIB_SRC}/zlib.h" ]]; then
+ echo "Missing or incomplete ZLIB_SRC"
+ exit 1
+fi
+ZLIB_SRC=$(readlink -f $ZLIB_SRC)
+
+J="${J:-50}"
+
+CLANG="${CLANG:-`which clang`}"
+CLANG_DIR=$(readlink -f $(dirname "$CLANG"))
+
+BUILD_DIR=$(readlink -f ./symbolizer)
+mkdir -p $BUILD_DIR
+cd $BUILD_DIR
+
+CC=$CLANG_DIR/clang
+CXX=$CLANG_DIR/clang++
+TBLGEN=$CLANG_DIR/llvm-tblgen
+OPT=$CLANG_DIR/opt
+export AR=$CLANG_DIR/llvm-ar
+export LINK=$CLANG_DIR/llvm-link
+
+for F in $CC $CXX $TBLGEN $LINK $OPT $AR; do
+ if [[ ! -x "$F" ]]; then
+ echo "Missing $F"
+ exit 1
+ fi
+done
+
+ZLIB_BUILD=${BUILD_DIR}/zlib
+LIBCXX_BUILD=${BUILD_DIR}/libcxx
+LLVM_BUILD=${BUILD_DIR}/llvm
+SYMBOLIZER_BUILD=${BUILD_DIR}/symbolizer
+
+FLAGS=${FLAGS:-}
+FLAGS="$FLAGS -fPIC -flto -Os -g0 -DNDEBUG"
+
+# Build zlib.
+mkdir -p ${ZLIB_BUILD}
+cd ${ZLIB_BUILD}
+cp -r ${ZLIB_SRC}/* .
+CC=$CC CFLAGS="$FLAGS" RANLIB=/bin/true ./configure --static
+make -j${J} libz.a
+
+# Build and install libcxxabi and libcxx.
+if [[ ! -d ${LIBCXX_BUILD} ]]; then
+ mkdir -p ${LIBCXX_BUILD}
+ cd ${LIBCXX_BUILD}
+ LIBCXX_FLAGS="${FLAGS} -Wno-macro-redefined -I${LLVM_SRC}/projects/libcxxabi/include"
+ cmake -GNinja \
+ -DCMAKE_BUILD_TYPE=Release \
+ -DCMAKE_C_COMPILER=$CC \
+ -DCMAKE_CXX_COMPILER=$CXX \
+ -DCMAKE_C_FLAGS_RELEASE="${LIBCXX_FLAGS}" \
+ -DCMAKE_CXX_FLAGS_RELEASE="${LIBCXX_FLAGS}" \
+ -DLIBCXXABI_ENABLE_ASSERTIONS=OFF \
+ -DLIBCXXABI_ENABLE_EXCEPTIONS=OFF \
+ -DLIBCXXABI_ENABLE_SHARED=OFF \
+ -DLIBCXX_ENABLE_ASSERTIONS=OFF \
+ -DLIBCXX_ENABLE_EXCEPTIONS=OFF \
+ -DLIBCXX_ENABLE_RTTI=OFF \
+ -DLIBCXX_ENABLE_SHARED=OFF \
+ $LLVM_SRC
+fi
+cd ${LIBCXX_BUILD}
+ninja cxx cxxabi
+
+FLAGS="${FLAGS} -fno-rtti -fno-exceptions"
+LLVM_FLAGS="${FLAGS} -nostdinc++ -I${ZLIB_BUILD} -I${LIBCXX_BUILD}/include/c++/v1"
+
+# Build LLVM.
+if [[ ! -d ${LLVM_BUILD} ]]; then
+ mkdir -p ${LLVM_BUILD}
+ cd ${LLVM_BUILD}
+ cmake -GNinja \
+ -DCMAKE_BUILD_TYPE=Release \
+ -DCMAKE_C_COMPILER=$CC \
+ -DCMAKE_CXX_COMPILER=$CXX \
+ -DCMAKE_C_FLAGS_RELEASE="${LLVM_FLAGS}" \
+ -DCMAKE_CXX_FLAGS_RELEASE="${LLVM_FLAGS}" \
+ -DLLVM_TABLEGEN=$TBLGEN \
+ -DLLVM_ENABLE_ZLIB=ON \
+ -DLLVM_ENABLE_TERMINFO=OFF \
+ -DLLVM_ENABLE_THREADS=OFF \
+ $LLVM_SRC
+fi
+cd ${LLVM_BUILD}
+ninja LLVMSymbolize LLVMObject LLVMBinaryFormat LLVMDebugInfoDWARF LLVMSupport LLVMDebugInfoPDB LLVMMC LLVMDemangle
+
+cd ${BUILD_DIR}
+rm -rf ${SYMBOLIZER_BUILD}
+mkdir ${SYMBOLIZER_BUILD}
+cd ${SYMBOLIZER_BUILD}
+
+echo "Compiling..."
+SYMBOLIZER_FLAGS="$LLVM_FLAGS -I${LLVM_SRC}/include -I${LLVM_BUILD}/include -std=c++11"
+$CXX $SYMBOLIZER_FLAGS ${SRC_DIR}/sanitizer_symbolize.cc ${SRC_DIR}/sanitizer_wrappers.cc -c
+$AR rc symbolizer.a sanitizer_symbolize.o sanitizer_wrappers.o
+
+SYMBOLIZER_API_LIST=__sanitizer_symbolize_code,__sanitizer_symbolize_data,__sanitizer_symbolize_flush,__sanitizer_symbolize_demangle
+
+# Merge all the object files together and copy the resulting library back.
+$SCRIPT_DIR/ar_to_bc.sh $LIBCXX_BUILD/lib/libc++.a \
+ $LIBCXX_BUILD/lib/libc++abi.a \
+ $LLVM_BUILD/lib/libLLVMSymbolize.a \
+ $LLVM_BUILD/lib/libLLVMObject.a \
+ $LLVM_BUILD/lib/libLLVMBinaryFormat.a \
+ $LLVM_BUILD/lib/libLLVMDebugInfoDWARF.a \
+ $LLVM_BUILD/lib/libLLVMSupport.a \
+ $LLVM_BUILD/lib/libLLVMDebugInfoPDB.a \
+ $LLVM_BUILD/lib/libLLVMDemangle.a \
+ $LLVM_BUILD/lib/libLLVMMC.a \
+ $ZLIB_BUILD/libz.a \
+ symbolizer.a \
+ all.bc
+
+echo "Optimizing..."
+$OPT -internalize -internalize-public-api-list=${SYMBOLIZER_API_LIST} all.bc -o opt.bc
+$CC $FLAGS -fno-lto -c opt.bc -o symbolizer.o
+
+echo "Checking undefined symbols..."
+nm -f posix -g symbolizer.o | cut -f 1,2 -d \ | LC_COLLATE=C sort -u > undefined.new
+(diff -u $SCRIPT_DIR/global_symbols.txt undefined.new | grep -E "^\+[^+]") && \
+ (echo "Failed: unexpected symbols"; exit 1)
+
+arch() {
+ objdump -f $1 | grep -m1 -Po "(?<=file format ).*$"
+}
+
+SYMBOLIZER_FORMAT=$(arch symbolizer.o)
+echo "Injecting $SYMBOLIZER_FORMAT symbolizer..."
+for A in $TARGET_DIR/libclang_rt.*san*.a; do
+ A_FORMAT=$(arch $A)
+ if [[ "$A_FORMAT" != "$SYMBOLIZER_FORMAT" ]] ; then
+ continue
+ fi
+ (nm -u $A 2>/dev/null | grep -E "__sanitizer_symbolize_code" >/dev/null) || continue
+ echo "$A"
+ $AR rcs $A symbolizer.o
+done
+
+echo "Success!"
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/symbolizer/scripts/build_symbolizer.sh
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:executable
## -0,0 +1 ##
+*
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/weak_symbols.txt
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/weak_symbols.txt (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/weak_symbols.txt (revision 351984)
@@ -0,0 +1,8 @@
+___sanitizer_free_hook
+___sanitizer_malloc_hook
+___sanitizer_report_error_summary
+___sanitizer_sandbox_on_notify
+___sanitizer_symbolize_code
+___sanitizer_symbolize_data
+___sanitizer_symbolize_demangle
+___sanitizer_symbolize_flush
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/sanitizer_common/weak_symbols.txt
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/cpu_model.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/cpu_model.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/cpu_model.c (revision 351984)
@@ -0,0 +1,689 @@
+//===-- cpu_model.c - Support for __cpu_model builtin ------------*- C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is based on LLVM's lib/Support/Host.cpp.
+// It implements the operating system Host concept and builtin
+// __cpu_model for the compiler_rt library, for x86 only.
+//
+//===----------------------------------------------------------------------===//
+
+#if (defined(__i386__) || defined(_M_IX86) || defined(__x86_64__) || \
+ defined(_M_X64)) && \
+ (defined(__GNUC__) || defined(__clang__) || defined(_MSC_VER))
+
+#include <assert.h>
+
+#define bool int
+#define true 1
+#define false 0
+
+#ifdef _MSC_VER
+#include <intrin.h>
+#endif
+
+#ifndef __has_attribute
+#define __has_attribute(attr) 0
+#endif
+
+enum VendorSignatures {
+ SIG_INTEL = 0x756e6547, // Genu
+ SIG_AMD = 0x68747541, // Auth
+};
+
+enum ProcessorVendors {
+ VENDOR_INTEL = 1,
+ VENDOR_AMD,
+ VENDOR_OTHER,
+ VENDOR_MAX
+};
+
+enum ProcessorTypes {
+ INTEL_BONNELL = 1,
+ INTEL_CORE2,
+ INTEL_COREI7,
+ AMDFAM10H,
+ AMDFAM15H,
+ INTEL_SILVERMONT,
+ INTEL_KNL,
+ AMD_BTVER1,
+ AMD_BTVER2,
+ AMDFAM17H,
+ INTEL_KNM,
+ INTEL_GOLDMONT,
+ INTEL_GOLDMONT_PLUS,
+ INTEL_TREMONT,
+ CPU_TYPE_MAX
+};
+
+enum ProcessorSubtypes {
+ INTEL_COREI7_NEHALEM = 1,
+ INTEL_COREI7_WESTMERE,
+ INTEL_COREI7_SANDYBRIDGE,
+ AMDFAM10H_BARCELONA,
+ AMDFAM10H_SHANGHAI,
+ AMDFAM10H_ISTANBUL,
+ AMDFAM15H_BDVER1,
+ AMDFAM15H_BDVER2,
+ AMDFAM15H_BDVER3,
+ AMDFAM15H_BDVER4,
+ AMDFAM17H_ZNVER1,
+ INTEL_COREI7_IVYBRIDGE,
+ INTEL_COREI7_HASWELL,
+ INTEL_COREI7_BROADWELL,
+ INTEL_COREI7_SKYLAKE,
+ INTEL_COREI7_SKYLAKE_AVX512,
+ INTEL_COREI7_CANNONLAKE,
+ INTEL_COREI7_ICELAKE_CLIENT,
+ INTEL_COREI7_ICELAKE_SERVER,
+ AMDFAM17H_ZNVER2,
+ INTEL_COREI7_CASCADELAKE,
+ CPU_SUBTYPE_MAX
+};
+
+enum ProcessorFeatures {
+ FEATURE_CMOV = 0,
+ FEATURE_MMX,
+ FEATURE_POPCNT,
+ FEATURE_SSE,
+ FEATURE_SSE2,
+ FEATURE_SSE3,
+ FEATURE_SSSE3,
+ FEATURE_SSE4_1,
+ FEATURE_SSE4_2,
+ FEATURE_AVX,
+ FEATURE_AVX2,
+ FEATURE_SSE4_A,
+ FEATURE_FMA4,
+ FEATURE_XOP,
+ FEATURE_FMA,
+ FEATURE_AVX512F,
+ FEATURE_BMI,
+ FEATURE_BMI2,
+ FEATURE_AES,
+ FEATURE_PCLMUL,
+ FEATURE_AVX512VL,
+ FEATURE_AVX512BW,
+ FEATURE_AVX512DQ,
+ FEATURE_AVX512CD,
+ FEATURE_AVX512ER,
+ FEATURE_AVX512PF,
+ FEATURE_AVX512VBMI,
+ FEATURE_AVX512IFMA,
+ FEATURE_AVX5124VNNIW,
+ FEATURE_AVX5124FMAPS,
+ FEATURE_AVX512VPOPCNTDQ,
+ FEATURE_AVX512VBMI2,
+ FEATURE_GFNI,
+ FEATURE_VPCLMULQDQ,
+ FEATURE_AVX512VNNI,
+ FEATURE_AVX512BITALG
+};
+
+// The check below for i386 was copied from clang's cpuid.h (__get_cpuid_max).
+// The check was motivated by bug reports of OpenSSL crashing on CPUs without
+// CPUID support. Consequently, for i386, the presence of CPUID is checked
+// first via the corresponding eflags bit.
+static bool isCpuIdSupported() {
+#if defined(__GNUC__) || defined(__clang__)
+#if defined(__i386__)
+ int __cpuid_supported;
+ __asm__(" pushfl\n"
+ " popl %%eax\n"
+ " movl %%eax,%%ecx\n"
+ " xorl $0x00200000,%%eax\n"
+ " pushl %%eax\n"
+ " popfl\n"
+ " pushfl\n"
+ " popl %%eax\n"
+ " movl $0,%0\n"
+ " cmpl %%eax,%%ecx\n"
+ " je 1f\n"
+ " movl $1,%0\n"
+ "1:"
+ : "=r"(__cpuid_supported)
+ :
+ : "eax", "ecx");
+ if (!__cpuid_supported)
+ return false;
+#endif
+ return true;
+#endif
+ return true;
+}
+
+// This code is copied from lib/Support/Host.cpp.
+// Changes to either file should be mirrored in the other.
+
+/// getX86CpuIDAndInfo - Execute the specified cpuid and return the 4 values in
+/// the specified arguments. If we can't run cpuid on the host, return true.
+static bool getX86CpuIDAndInfo(unsigned value, unsigned *rEAX, unsigned *rEBX,
+ unsigned *rECX, unsigned *rEDX) {
+#if defined(__GNUC__) || defined(__clang__)
+#if defined(__x86_64__)
+ // gcc doesn't know cpuid would clobber ebx/rbx. Preserve it manually.
+ // FIXME: should we save this for Clang?
+ __asm__("movq\t%%rbx, %%rsi\n\t"
+ "cpuid\n\t"
+ "xchgq\t%%rbx, %%rsi\n\t"
+ : "=a"(*rEAX), "=S"(*rEBX), "=c"(*rECX), "=d"(*rEDX)
+ : "a"(value));
+ return false;
+#elif defined(__i386__)
+ __asm__("movl\t%%ebx, %%esi\n\t"
+ "cpuid\n\t"
+ "xchgl\t%%ebx, %%esi\n\t"
+ : "=a"(*rEAX), "=S"(*rEBX), "=c"(*rECX), "=d"(*rEDX)
+ : "a"(value));
+ return false;
+#else
+ return true;
+#endif
+#elif defined(_MSC_VER)
+ // The MSVC intrinsic is portable across x86 and x64.
+ int registers[4];
+ __cpuid(registers, value);
+ *rEAX = registers[0];
+ *rEBX = registers[1];
+ *rECX = registers[2];
+ *rEDX = registers[3];
+ return false;
+#else
+ return true;
+#endif
+}
+
+/// getX86CpuIDAndInfoEx - Execute the specified cpuid with subleaf and return
+/// the 4 values in the specified arguments. If we can't run cpuid on the host,
+/// return true.
+static bool getX86CpuIDAndInfoEx(unsigned value, unsigned subleaf,
+ unsigned *rEAX, unsigned *rEBX, unsigned *rECX,
+ unsigned *rEDX) {
+#if defined(__GNUC__) || defined(__clang__)
+#if defined(__x86_64__)
+ // gcc doesn't know cpuid would clobber ebx/rbx. Preserve it manually.
+ // FIXME: should we save this for Clang?
+ __asm__("movq\t%%rbx, %%rsi\n\t"
+ "cpuid\n\t"
+ "xchgq\t%%rbx, %%rsi\n\t"
+ : "=a"(*rEAX), "=S"(*rEBX), "=c"(*rECX), "=d"(*rEDX)
+ : "a"(value), "c"(subleaf));
+ return false;
+#elif defined(__i386__)
+ __asm__("movl\t%%ebx, %%esi\n\t"
+ "cpuid\n\t"
+ "xchgl\t%%ebx, %%esi\n\t"
+ : "=a"(*rEAX), "=S"(*rEBX), "=c"(*rECX), "=d"(*rEDX)
+ : "a"(value), "c"(subleaf));
+ return false;
+#else
+ return true;
+#endif
+#elif defined(_MSC_VER)
+ int registers[4];
+ __cpuidex(registers, value, subleaf);
+ *rEAX = registers[0];
+ *rEBX = registers[1];
+ *rECX = registers[2];
+ *rEDX = registers[3];
+ return false;
+#else
+ return true;
+#endif
+}
+
+// Read control register 0 (XCR0). Used to detect features such as AVX.
+static bool getX86XCR0(unsigned *rEAX, unsigned *rEDX) {
+#if defined(__GNUC__) || defined(__clang__)
+ // Check xgetbv; this uses a .byte sequence instead of the instruction
+ // directly because older assemblers do not include support for xgetbv and
+ // there is no easy way to conditionally compile based on the assembler used.
+ __asm__(".byte 0x0f, 0x01, 0xd0" : "=a"(*rEAX), "=d"(*rEDX) : "c"(0));
+ return false;
+#elif defined(_MSC_FULL_VER) && defined(_XCR_XFEATURE_ENABLED_MASK)
+ unsigned long long Result = _xgetbv(_XCR_XFEATURE_ENABLED_MASK);
+ *rEAX = Result;
+ *rEDX = Result >> 32;
+ return false;
+#else
+ return true;
+#endif
+}
+
+static void detectX86FamilyModel(unsigned EAX, unsigned *Family,
+ unsigned *Model) {
+ *Family = (EAX >> 8) & 0xf; // Bits 8 - 11
+ *Model = (EAX >> 4) & 0xf; // Bits 4 - 7
+ if (*Family == 6 || *Family == 0xf) {
+ if (*Family == 0xf)
+ // Examine extended family ID if family ID is F.
+ *Family += (EAX >> 20) & 0xff; // Bits 20 - 27
+ // Examine extended model ID if family ID is 6 or F.
+ *Model += ((EAX >> 16) & 0xf) << 4; // Bits 16 - 19
+ }
+}
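+// Worked example: a Skylake desktop CPU reports EAX = 0x000506e3 in leaf 1:
+// base family 6, base model 0xe, extended model 5. Since the family is 6, the
+// extended model is folded in, giving Model = 0x5e and Family = 6, which the
+// Intel switch below maps to INTEL_COREI7 / INTEL_COREI7_SKYLAKE.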
+
+static void getIntelProcessorTypeAndSubtype(unsigned Family, unsigned Model,
+ unsigned Brand_id,
+ unsigned Features,
+ unsigned Features2, unsigned *Type,
+ unsigned *Subtype) {
+ if (Brand_id != 0)
+ return;
+ switch (Family) {
+ case 6:
+ switch (Model) {
+ case 0x0f: // Intel Core 2 Duo processor, Intel Core 2 Duo mobile
+ // processor, Intel Core 2 Quad processor, Intel Core 2 Quad
+ // mobile processor, Intel Core 2 Extreme processor, Intel
+ // Pentium Dual-Core processor, Intel Xeon processor, model
+ // 0Fh. All processors are manufactured using the 65 nm process.
+ case 0x16: // Intel Celeron processor model 16h. All processors are
+ // manufactured using the 65 nm process
+ case 0x17: // Intel Core 2 Extreme processor, Intel Xeon processor, model
+ // 17h. All processors are manufactured using the 45 nm process.
+ //
+ // 45nm: Penryn , Wolfdale, Yorkfield (XE)
+ case 0x1d: // Intel Xeon processor MP. All processors are manufactured using
+ // the 45 nm process.
+ *Type = INTEL_CORE2; // "penryn"
+ break;
+ case 0x1a: // Intel Core i7 processor and Intel Xeon processor. All
+ // processors are manufactured using the 45 nm process.
+ case 0x1e: // Intel(R) Core(TM) i7 CPU 870 @ 2.93GHz.
+ // As found in a Summer 2010 model iMac.
+ case 0x1f:
+ case 0x2e: // Nehalem EX
+ *Type = INTEL_COREI7; // "nehalem"
+ *Subtype = INTEL_COREI7_NEHALEM;
+ break;
+ case 0x25: // Intel Core i7, laptop version.
+ case 0x2c: // Intel Core i7 processor and Intel Xeon processor. All
+ // processors are manufactured using the 32 nm process.
+ case 0x2f: // Westmere EX
+ *Type = INTEL_COREI7; // "westmere"
+ *Subtype = INTEL_COREI7_WESTMERE;
+ break;
+ case 0x2a: // Intel Core i7 processor. All processors are manufactured
+ // using the 32 nm process.
+ case 0x2d:
+ *Type = INTEL_COREI7; //"sandybridge"
+ *Subtype = INTEL_COREI7_SANDYBRIDGE;
+ break;
+ case 0x3a:
+ case 0x3e: // Ivy Bridge EP
+ *Type = INTEL_COREI7; // "ivybridge"
+ *Subtype = INTEL_COREI7_IVYBRIDGE;
+ break;
+
+ // Haswell:
+ case 0x3c:
+ case 0x3f:
+ case 0x45:
+ case 0x46:
+ *Type = INTEL_COREI7; // "haswell"
+ *Subtype = INTEL_COREI7_HASWELL;
+ break;
+
+ // Broadwell:
+ case 0x3d:
+ case 0x47:
+ case 0x4f:
+ case 0x56:
+ *Type = INTEL_COREI7; // "broadwell"
+ *Subtype = INTEL_COREI7_BROADWELL;
+ break;
+
+ // Skylake:
+ case 0x4e: // Skylake mobile
+ case 0x5e: // Skylake desktop
+ case 0x8e: // Kaby Lake mobile
+ case 0x9e: // Kaby Lake desktop
+ *Type = INTEL_COREI7; // "skylake"
+ *Subtype = INTEL_COREI7_SKYLAKE;
+ break;
+
+ // Skylake Xeon:
+ case 0x55:
+ *Type = INTEL_COREI7;
+ if (Features2 & (1 << (FEATURE_AVX512VNNI - 32)))
+ *Subtype = INTEL_COREI7_CASCADELAKE; // "cascadelake"
+ else
+ *Subtype = INTEL_COREI7_SKYLAKE_AVX512; // "skylake-avx512"
+ break;
+
+ // Cannonlake:
+ case 0x66:
+ *Type = INTEL_COREI7;
+ *Subtype = INTEL_COREI7_CANNONLAKE; // "cannonlake"
+ break;
+
+ // Icelake:
+ case 0x7d:
+ case 0x7e:
+ *Type = INTEL_COREI7;
+ *Subtype = INTEL_COREI7_ICELAKE_CLIENT; // "icelake-client"
+ break;
+
+ // Icelake Xeon:
+ case 0x6a:
+ case 0x6c:
+ *Type = INTEL_COREI7;
+ *Subtype = INTEL_COREI7_ICELAKE_SERVER; // "icelake-server"
+ break;
+
+ case 0x1c: // Most 45 nm Intel Atom processors
+ case 0x26: // 45 nm Atom Lincroft
+ case 0x27: // 32 nm Atom Medfield
+ case 0x35: // 32 nm Atom Midview
+ case 0x36: // 32 nm Atom Midview
+ *Type = INTEL_BONNELL;
+ break; // "bonnell"
+
+ // Atom Silvermont codes from the Intel software optimization guide.
+ case 0x37:
+ case 0x4a:
+ case 0x4d:
+ case 0x5a:
+ case 0x5d:
+ case 0x4c: // really airmont
+ *Type = INTEL_SILVERMONT;
+ break; // "silvermont"
+ // Goldmont:
+ case 0x5c: // Apollo Lake
+ case 0x5f: // Denverton
+ *Type = INTEL_GOLDMONT;
+ break; // "goldmont"
+ case 0x7a:
+ *Type = INTEL_GOLDMONT_PLUS;
+ break;
+ case 0x86:
+ *Type = INTEL_TREMONT;
+ break;
+
+ case 0x57:
+ *Type = INTEL_KNL; // knl
+ break;
+
+ case 0x85:
+ *Type = INTEL_KNM; // knm
+ break;
+
+    default: // Unknown family 6 CPU.
+      break;
+    }
+    break;
+ default:
+ break; // Unknown.
+ }
+}
+
+static void getAMDProcessorTypeAndSubtype(unsigned Family, unsigned Model,
+ unsigned Features, unsigned Features2,
+ unsigned *Type, unsigned *Subtype) {
+ // FIXME: this poorly matches the generated SubtargetFeatureKV table. There
+ // appears to be no way to generate the wide variety of AMD-specific targets
+ // from the information returned from CPUID.
+ switch (Family) {
+ case 16:
+ *Type = AMDFAM10H; // "amdfam10"
+ switch (Model) {
+ case 2:
+ *Subtype = AMDFAM10H_BARCELONA;
+ break;
+ case 4:
+ *Subtype = AMDFAM10H_SHANGHAI;
+ break;
+ case 8:
+ *Subtype = AMDFAM10H_ISTANBUL;
+ break;
+ }
+ break;
+ case 20:
+ *Type = AMD_BTVER1;
+ break; // "btver1";
+ case 21:
+ *Type = AMDFAM15H;
+ if (Model >= 0x60 && Model <= 0x7f) {
+ *Subtype = AMDFAM15H_BDVER4;
+ break; // "bdver4"; 60h-7Fh: Excavator
+ }
+ if (Model >= 0x30 && Model <= 0x3f) {
+ *Subtype = AMDFAM15H_BDVER3;
+ break; // "bdver3"; 30h-3Fh: Steamroller
+ }
+ if ((Model >= 0x10 && Model <= 0x1f) || Model == 0x02) {
+ *Subtype = AMDFAM15H_BDVER2;
+ break; // "bdver2"; 02h, 10h-1Fh: Piledriver
+ }
+ if (Model <= 0x0f) {
+ *Subtype = AMDFAM15H_BDVER1;
+ break; // "bdver1"; 00h-0Fh: Bulldozer
+ }
+ break;
+ case 22:
+ *Type = AMD_BTVER2;
+ break; // "btver2"
+ case 23:
+ *Type = AMDFAM17H;
+ if (Model >= 0x30 && Model <= 0x3f) {
+ *Subtype = AMDFAM17H_ZNVER2;
+ break; // "znver2"; 30h-3fh: Zen2
+ }
+ if (Model <= 0x0f) {
+ *Subtype = AMDFAM17H_ZNVER1;
+ break; // "znver1"; 00h-0Fh: Zen1
+ }
+ break;
+ default:
+ break; // "generic"
+ }
+}
+
+static void getAvailableFeatures(unsigned ECX, unsigned EDX, unsigned MaxLeaf,
+ unsigned *FeaturesOut,
+ unsigned *Features2Out) {
+ unsigned Features = 0;
+ unsigned Features2 = 0;
+ unsigned EAX, EBX;
+
+#define setFeature(F) \
+ do { \
+ if (F < 32) \
+ Features |= 1U << (F & 0x1f); \
+ else if (F < 64) \
+ Features2 |= 1U << ((F - 32) & 0x1f); \
+ } while (0)
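+// For example, setFeature(FEATURE_SSE2) (value 4) sets bit 4 of Features,
+// while setFeature(FEATURE_GFNI) (value 32) sets bit 0 of Features2; the
+// Features2 half matches the "1 << (FEATURE_* - 32)" tests used on Features2
+// elsewhere in this file.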
+
+ if ((EDX >> 15) & 1)
+ setFeature(FEATURE_CMOV);
+ if ((EDX >> 23) & 1)
+ setFeature(FEATURE_MMX);
+ if ((EDX >> 25) & 1)
+ setFeature(FEATURE_SSE);
+ if ((EDX >> 26) & 1)
+ setFeature(FEATURE_SSE2);
+
+ if ((ECX >> 0) & 1)
+ setFeature(FEATURE_SSE3);
+ if ((ECX >> 1) & 1)
+ setFeature(FEATURE_PCLMUL);
+ if ((ECX >> 9) & 1)
+ setFeature(FEATURE_SSSE3);
+ if ((ECX >> 12) & 1)
+ setFeature(FEATURE_FMA);
+ if ((ECX >> 19) & 1)
+ setFeature(FEATURE_SSE4_1);
+ if ((ECX >> 20) & 1)
+ setFeature(FEATURE_SSE4_2);
+ if ((ECX >> 23) & 1)
+ setFeature(FEATURE_POPCNT);
+ if ((ECX >> 25) & 1)
+ setFeature(FEATURE_AES);
+
+ // If CPUID indicates support for XSAVE, XRESTORE and AVX, and XGETBV
+ // indicates that the AVX registers will be saved and restored on context
+ // switch, then we have full AVX support.
+ const unsigned AVXBits = (1 << 27) | (1 << 28);
+ bool HasAVX = ((ECX & AVXBits) == AVXBits) && !getX86XCR0(&EAX, &EDX) &&
+ ((EAX & 0x6) == 0x6);
+ bool HasAVX512Save = HasAVX && ((EAX & 0xe0) == 0xe0);
+
+ if (HasAVX)
+ setFeature(FEATURE_AVX);
+
+ bool HasLeaf7 =
+ MaxLeaf >= 0x7 && !getX86CpuIDAndInfoEx(0x7, 0x0, &EAX, &EBX, &ECX, &EDX);
+
+ if (HasLeaf7 && ((EBX >> 3) & 1))
+ setFeature(FEATURE_BMI);
+ if (HasLeaf7 && ((EBX >> 5) & 1) && HasAVX)
+ setFeature(FEATURE_AVX2);
+ if (HasLeaf7 && ((EBX >> 8) & 1))
+ setFeature(FEATURE_BMI2);
+ if (HasLeaf7 && ((EBX >> 16) & 1) && HasAVX512Save)
+ setFeature(FEATURE_AVX512F);
+ if (HasLeaf7 && ((EBX >> 17) & 1) && HasAVX512Save)
+ setFeature(FEATURE_AVX512DQ);
+ if (HasLeaf7 && ((EBX >> 21) & 1) && HasAVX512Save)
+ setFeature(FEATURE_AVX512IFMA);
+ if (HasLeaf7 && ((EBX >> 26) & 1) && HasAVX512Save)
+ setFeature(FEATURE_AVX512PF);
+ if (HasLeaf7 && ((EBX >> 27) & 1) && HasAVX512Save)
+ setFeature(FEATURE_AVX512ER);
+ if (HasLeaf7 && ((EBX >> 28) & 1) && HasAVX512Save)
+ setFeature(FEATURE_AVX512CD);
+ if (HasLeaf7 && ((EBX >> 30) & 1) && HasAVX512Save)
+ setFeature(FEATURE_AVX512BW);
+ if (HasLeaf7 && ((EBX >> 31) & 1) && HasAVX512Save)
+ setFeature(FEATURE_AVX512VL);
+
+ if (HasLeaf7 && ((ECX >> 1) & 1) && HasAVX512Save)
+ setFeature(FEATURE_AVX512VBMI);
+ if (HasLeaf7 && ((ECX >> 6) & 1) && HasAVX512Save)
+ setFeature(FEATURE_AVX512VBMI2);
+ if (HasLeaf7 && ((ECX >> 8) & 1))
+ setFeature(FEATURE_GFNI);
+ if (HasLeaf7 && ((ECX >> 10) & 1) && HasAVX)
+ setFeature(FEATURE_VPCLMULQDQ);
+ if (HasLeaf7 && ((ECX >> 11) & 1) && HasAVX512Save)
+ setFeature(FEATURE_AVX512VNNI);
+ if (HasLeaf7 && ((ECX >> 12) & 1) && HasAVX512Save)
+ setFeature(FEATURE_AVX512BITALG);
+ if (HasLeaf7 && ((ECX >> 14) & 1) && HasAVX512Save)
+ setFeature(FEATURE_AVX512VPOPCNTDQ);
+
+ if (HasLeaf7 && ((EDX >> 2) & 1) && HasAVX512Save)
+ setFeature(FEATURE_AVX5124VNNIW);
+ if (HasLeaf7 && ((EDX >> 3) & 1) && HasAVX512Save)
+ setFeature(FEATURE_AVX5124FMAPS);
+
+ unsigned MaxExtLevel;
+ getX86CpuIDAndInfo(0x80000000, &MaxExtLevel, &EBX, &ECX, &EDX);
+
+ bool HasExtLeaf1 = MaxExtLevel >= 0x80000001 &&
+ !getX86CpuIDAndInfo(0x80000001, &EAX, &EBX, &ECX, &EDX);
+ if (HasExtLeaf1 && ((ECX >> 6) & 1))
+ setFeature(FEATURE_SSE4_A);
+ if (HasExtLeaf1 && ((ECX >> 11) & 1))
+ setFeature(FEATURE_XOP);
+ if (HasExtLeaf1 && ((ECX >> 16) & 1))
+ setFeature(FEATURE_FMA4);
+
+ *FeaturesOut = Features;
+ *Features2Out = Features2;
+#undef setFeature
+}
+
+#if defined(HAVE_INIT_PRIORITY)
+#define CONSTRUCTOR_ATTRIBUTE __attribute__((__constructor__(101)))
+#elif __has_attribute(__constructor__)
+#define CONSTRUCTOR_ATTRIBUTE __attribute__((__constructor__))
+#else
+// FIXME: For MSVC, we should make a function pointer global in .CRT$X?? so that
+// this runs during initialization.
+#define CONSTRUCTOR_ATTRIBUTE
+#endif
+
+#ifndef _WIN32
+__attribute__((visibility("hidden")))
+#endif
+int __cpu_indicator_init(void) CONSTRUCTOR_ATTRIBUTE;
+
+#ifndef _WIN32
+__attribute__((visibility("hidden")))
+#endif
+struct __processor_model {
+ unsigned int __cpu_vendor;
+ unsigned int __cpu_type;
+ unsigned int __cpu_subtype;
+ unsigned int __cpu_features[1];
+} __cpu_model = {0, 0, 0, {0}};
+
+#ifndef _WIN32
+__attribute__((visibility("hidden")))
+#endif
+unsigned int __cpu_features2;
+
+// A constructor function that sets __cpu_model and __cpu_features2 to the
+// right values. This needs to run only once. This constructor is given the
+// highest priority and it should run before constructors without the priority
+// set. However, it still runs after ifunc initializers and needs to be called
+// explicitly there.
+
+int CONSTRUCTOR_ATTRIBUTE __cpu_indicator_init(void) {
+ unsigned EAX, EBX, ECX, EDX;
+ unsigned MaxLeaf = 5;
+ unsigned Vendor;
+ unsigned Model, Family, Brand_id;
+ unsigned Features = 0;
+ unsigned Features2 = 0;
+
+ // This function needs to run just once.
+ if (__cpu_model.__cpu_vendor)
+ return 0;
+
+ if (!isCpuIdSupported())
+ return -1;
+
+  // Assume the cpuid instruction is present. Run at level 0 to get the vendor id.
+ if (getX86CpuIDAndInfo(0, &MaxLeaf, &Vendor, &ECX, &EDX) || MaxLeaf < 1) {
+ __cpu_model.__cpu_vendor = VENDOR_OTHER;
+ return -1;
+ }
+ getX86CpuIDAndInfo(1, &EAX, &EBX, &ECX, &EDX);
+ detectX86FamilyModel(EAX, &Family, &Model);
+ Brand_id = EBX & 0xff;
+
+ // Find available features.
+ getAvailableFeatures(ECX, EDX, MaxLeaf, &Features, &Features2);
+ __cpu_model.__cpu_features[0] = Features;
+ __cpu_features2 = Features2;
+
+ if (Vendor == SIG_INTEL) {
+ // Get CPU type.
+ getIntelProcessorTypeAndSubtype(Family, Model, Brand_id, Features,
+ Features2, &(__cpu_model.__cpu_type),
+ &(__cpu_model.__cpu_subtype));
+ __cpu_model.__cpu_vendor = VENDOR_INTEL;
+ } else if (Vendor == SIG_AMD) {
+ // Get CPU type.
+ getAMDProcessorTypeAndSubtype(Family, Model, Features, Features2,
+ &(__cpu_model.__cpu_type),
+ &(__cpu_model.__cpu_subtype));
+ __cpu_model.__cpu_vendor = VENDOR_AMD;
+ } else
+ __cpu_model.__cpu_vendor = VENDOR_OTHER;
+
+ assert(__cpu_model.__cpu_vendor < VENDOR_MAX);
+ assert(__cpu_model.__cpu_type < CPU_TYPE_MAX);
+ assert(__cpu_model.__cpu_subtype < CPU_SUBTYPE_MAX);
+
+ return 0;
+}
+
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/cpu_model.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/aarch64/chkstk.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/aarch64/chkstk.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/aarch64/chkstk.S (revision 351984)
@@ -0,0 +1,35 @@
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+#include "../assembly.h"
+
+// __chkstk routine
+// This routine is windows specific.
+// http://msdn.microsoft.com/en-us/library/ms648426.aspx
+
+// This clobbers registers x16 and x17.
+// Does not modify any memory or the stack pointer.
+
+// mov x15, #256 // Number of bytes of stack, in units of 16 bytes
+// bl __chkstk
+// sub sp, sp, x15, lsl #4
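+//
+// Editor's note, a rough C equivalent of the probe loop below (sketch only;
+// current_sp() is a hypothetical stand-in for reading sp):
+//
+//   void __chkstk(unsigned long units /* x15: 16-byte units */) {
+//     long bytes = units << 4;              // lsl x16, x15, #4
+//     char *probe = current_sp();           // mov x17, sp
+//     do {
+//       probe -= PAGE_SIZE;                 // walk down one page at a time
+//       bytes -= PAGE_SIZE;
+//       (void)*(volatile char *)probe;      // touch the page so guard pages
+//     } while (bytes > 0);                  //   are committed in order
+//   }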
+
+#ifdef __aarch64__
+
+#define PAGE_SIZE 4096
+
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__chkstk)
+ lsl x16, x15, #4
+ mov x17, sp
+1:
+ sub x17, x17, #PAGE_SIZE
+ subs x16, x16, #PAGE_SIZE
+ ldr xzr, [x17]
+ b.gt 1b
+
+ ret
+END_COMPILERRT_FUNCTION(__chkstk)
+
+#endif // __aarch64__
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/aarch64/chkstk.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/absvdi2.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/absvdi2.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/absvdi2.c (revision 351984)
@@ -0,0 +1,25 @@
+//===-- absvdi2.c - Implement __absvdi2 -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __absvdi2 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+// Returns: absolute value
+
+// Effects: aborts if abs(x) < 0
+
+COMPILER_RT_ABI di_int __absvdi2(di_int a) {
+ const int N = (int)(sizeof(di_int) * CHAR_BIT);
+ if (a == ((di_int)1 << (N - 1)))
+ compilerrt_abort();
+ const di_int t = a >> (N - 1);
+ return (a ^ t) - t;
+}
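+
+// Editor's note on the sign-fold trick above (a worked example, not vendor
+// code): the arithmetic shift makes t == 0 when a >= 0 and t == -1 (all one
+// bits) when a < 0, so (a ^ t) - t is a - 0 == a in the first case and
+// ~a + 1 == -a in the second. For a = -5: t = -1, a ^ t = 4, 4 - (-1) = 5.
+// The one value with no positive counterpart, (di_int)1 << (N - 1), is
+// caught by the abort check first. __absvsi2 and __absvti2 below use the
+// same pattern at other widths.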
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/absvdi2.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/absvsi2.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/absvsi2.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/absvsi2.c (revision 351984)
@@ -0,0 +1,25 @@
+//===-- absvsi2.c - Implement __absvsi2 -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __absvsi2 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+// Returns: absolute value
+
+// Effects: aborts if abs(x) < 0
+
+COMPILER_RT_ABI si_int __absvsi2(si_int a) {
+ const int N = (int)(sizeof(si_int) * CHAR_BIT);
+ if (a == (1 << (N - 1)))
+ compilerrt_abort();
+ const si_int t = a >> (N - 1);
+ return (a ^ t) - t;
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/absvsi2.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/absvti2.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/absvti2.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/absvti2.c (revision 351984)
@@ -0,0 +1,29 @@
+//===-- absvti2.c - Implement __absvti2 -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __absvti2 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+#ifdef CRT_HAS_128BIT
+
+// Returns: absolute value
+
+// Effects: aborts if abs(x) < 0
+
+COMPILER_RT_ABI ti_int __absvti2(ti_int a) {
+ const int N = (int)(sizeof(ti_int) * CHAR_BIT);
+ if (a == ((ti_int)1 << (N - 1)))
+ compilerrt_abort();
+ const ti_int s = a >> (N - 1);
+ return (a ^ s) - s;
+}
+
+#endif // CRT_HAS_128BIT
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/absvti2.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/adddf3.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/adddf3.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/adddf3.c (revision 351984)
@@ -0,0 +1,25 @@
+//===-- lib/adddf3.c - Double-precision addition ------------------*- C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements double-precision soft-float addition with the IEEE-754
+// default rounding (to nearest, ties to even).
+//
+//===----------------------------------------------------------------------===//
+
+#define DOUBLE_PRECISION
+#include "fp_add_impl.inc"
+
+COMPILER_RT_ABI double __adddf3(double a, double b) { return __addXf3__(a, b); }
+
+#if defined(__ARM_EABI__)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+AEABI_RTABI double __aeabi_dadd(double a, double b) { return __adddf3(a, b); }
+#else
+COMPILER_RT_ALIAS(__adddf3, __aeabi_dadd)
+#endif
+#endif
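+
+// Editor's note (illustrative, not vendor code): on soft-float targets the
+// compiler lowers ordinary C arithmetic to these helpers, so
+//
+//   double f(double x, double y) { return x + y; }
+//
+// becomes, in effect, a call to __adddf3 -- or to __aeabi_dadd under the ARM
+// EABI naming wired up above.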
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/adddf3.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/addsf3.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/addsf3.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/addsf3.c (revision 351984)
@@ -0,0 +1,25 @@
+//===-- lib/addsf3.c - Single-precision addition ------------------*- C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements single-precision soft-float addition with the IEEE-754
+// default rounding (to nearest, ties to even).
+//
+//===----------------------------------------------------------------------===//
+
+#define SINGLE_PRECISION
+#include "fp_add_impl.inc"
+
+COMPILER_RT_ABI float __addsf3(float a, float b) { return __addXf3__(a, b); }
+
+#if defined(__ARM_EABI__)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+AEABI_RTABI float __aeabi_fadd(float a, float b) { return __addsf3(a, b); }
+#else
+COMPILER_RT_ALIAS(__addsf3, __aeabi_fadd)
+#endif
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/addsf3.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/addtf3.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/addtf3.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/addtf3.c (revision 351984)
@@ -0,0 +1,24 @@
+//===-- lib/addtf3.c - Quad-precision addition --------------------*- C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements quad-precision soft-float addition with the IEEE-754
+// default rounding (to nearest, ties to even).
+//
+//===----------------------------------------------------------------------===//
+
+#define QUAD_PRECISION
+#include "fp_lib.h"
+
+#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
+#include "fp_add_impl.inc"
+
+COMPILER_RT_ABI long double __addtf3(long double a, long double b) {
+ return __addXf3__(a, b);
+}
+
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/addtf3.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/addvdi3.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/addvdi3.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/addvdi3.c (revision 351984)
@@ -0,0 +1,29 @@
+//===-- addvdi3.c - Implement __addvdi3 -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __addvdi3 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+// Returns: a + b
+
+// Effects: aborts if a + b overflows
+
+COMPILER_RT_ABI di_int __addvdi3(di_int a, di_int b) {
+ di_int s = (du_int)a + (du_int)b;
+ if (b >= 0) {
+ if (s < a)
+ compilerrt_abort();
+ } else {
+ if (s >= a)
+ compilerrt_abort();
+ }
+ return s;
+}
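+
+// Editor's note on the overflow test above (reasoning, not vendor code): the
+// sum is formed with unsigned types, where wraparound is well defined. When
+// b >= 0, a + b overflows exactly when the wrapped result compares below a;
+// when b < 0, it overflows exactly when the result does not compare below a.
+// With 8-bit operands, for example, 100 + 50 wraps to -106, and -106 < 100
+// trips the first branch.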
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/addvdi3.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/addvsi3.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/addvsi3.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/addvsi3.c (revision 351984)
@@ -0,0 +1,29 @@
+//===-- addvsi3.c - Implement __addvsi3 -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __addvsi3 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+// Returns: a + b
+
+// Effects: aborts if a + b overflows
+
+COMPILER_RT_ABI si_int __addvsi3(si_int a, si_int b) {
+ si_int s = (su_int)a + (su_int)b;
+ if (b >= 0) {
+ if (s < a)
+ compilerrt_abort();
+ } else {
+ if (s >= a)
+ compilerrt_abort();
+ }
+ return s;
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/addvsi3.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/addvti3.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/addvti3.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/addvti3.c (revision 351984)
@@ -0,0 +1,33 @@
+//===-- addvti3.c - Implement __addvti3 -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __addvti3 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+#ifdef CRT_HAS_128BIT
+
+// Returns: a + b
+
+// Effects: aborts if a + b overflows
+
+COMPILER_RT_ABI ti_int __addvti3(ti_int a, ti_int b) {
+ ti_int s = (tu_int)a + (tu_int)b;
+ if (b >= 0) {
+ if (s < a)
+ compilerrt_abort();
+ } else {
+ if (s >= a)
+ compilerrt_abort();
+ }
+ return s;
+}
+
+#endif // CRT_HAS_128BIT
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/addvti3.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/apple_versioning.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/apple_versioning.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/apple_versioning.c (revision 351984)
@@ -0,0 +1,339 @@
+//===-- apple_versioning.c - Adds versioning symbols for ld ---------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#if __APPLE__
+#include <Availability.h>
+
+#if __IPHONE_OS_VERSION_MIN_REQUIRED
+#define NOT_HERE_BEFORE_10_6(sym)
+#define NOT_HERE_IN_10_8_AND_EARLIER(sym) \
+ extern const char sym##_tmp61 __asm("$ld$hide$os6.1$_" #sym); \
+ __attribute__((visibility("default"))) const char sym##_tmp61 = 0; \
+ extern const char sym##_tmp60 __asm("$ld$hide$os6.0$_" #sym); \
+ __attribute__((visibility("default"))) const char sym##_tmp60 = 0; \
+ extern const char sym##_tmp51 __asm("$ld$hide$os5.1$_" #sym); \
+ __attribute__((visibility("default"))) const char sym##_tmp51 = 0; \
+ extern const char sym##_tmp50 __asm("$ld$hide$os5.0$_" #sym); \
+ __attribute__((visibility("default"))) const char sym##_tmp50 = 0;
+#else
+#define NOT_HERE_BEFORE_10_6(sym) \
+ extern const char sym##_tmp4 __asm("$ld$hide$os10.4$_" #sym); \
+ __attribute__((visibility("default"))) const char sym##_tmp4 = 0; \
+ extern const char sym##_tmp5 __asm("$ld$hide$os10.5$_" #sym); \
+ __attribute__((visibility("default"))) const char sym##_tmp5 = 0;
+#define NOT_HERE_IN_10_8_AND_EARLIER(sym) \
+ extern const char sym##_tmp8 __asm("$ld$hide$os10.8$_" #sym); \
+ __attribute__((visibility("default"))) const char sym##_tmp8 = 0; \
+ extern const char sym##_tmp7 __asm("$ld$hide$os10.7$_" #sym); \
+ __attribute__((visibility("default"))) const char sym##_tmp7 = 0; \
+ extern const char sym##_tmp6 __asm("$ld$hide$os10.6$_" #sym); \
+ __attribute__((visibility("default"))) const char sym##_tmp6 = 0;
+#endif
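+
+// Editor's note: for reference, NOT_HERE_BEFORE_10_6(__absvdi2) in the macOS
+// branch above expands to roughly
+//
+//   extern const char __absvdi2_tmp4 __asm("$ld$hide$os10.4$___absvdi2");
+//   __attribute__((visibility("default"))) const char __absvdi2_tmp4 = 0;
+//   extern const char __absvdi2_tmp5 __asm("$ld$hide$os10.5$___absvdi2");
+//   __attribute__((visibility("default"))) const char __absvdi2_tmp5 = 0;
+//
+// The "$ld$hide$os<version>$<symbol>" names tell the Darwin static linker to
+// treat <symbol> as absent when targeting that OS version, steering old
+// deployment targets to the libgcc_s.dylib copy instead.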
+
+// Symbols that are in libSystem.dylib in 10.6 and later,
+// but in libgcc_s.dylib in earlier versions
+
+NOT_HERE_BEFORE_10_6(__absvdi2)
+NOT_HERE_BEFORE_10_6(__absvsi2)
+NOT_HERE_BEFORE_10_6(__absvti2)
+NOT_HERE_BEFORE_10_6(__addvdi3)
+NOT_HERE_BEFORE_10_6(__addvsi3)
+NOT_HERE_BEFORE_10_6(__addvti3)
+NOT_HERE_BEFORE_10_6(__ashldi3)
+NOT_HERE_BEFORE_10_6(__ashlti3)
+NOT_HERE_BEFORE_10_6(__ashrdi3)
+NOT_HERE_BEFORE_10_6(__ashrti3)
+NOT_HERE_BEFORE_10_6(__clear_cache)
+NOT_HERE_BEFORE_10_6(__clzdi2)
+NOT_HERE_BEFORE_10_6(__clzsi2)
+NOT_HERE_BEFORE_10_6(__clzti2)
+NOT_HERE_BEFORE_10_6(__cmpdi2)
+NOT_HERE_BEFORE_10_6(__cmpti2)
+NOT_HERE_BEFORE_10_6(__ctzdi2)
+NOT_HERE_BEFORE_10_6(__ctzsi2)
+NOT_HERE_BEFORE_10_6(__ctzti2)
+NOT_HERE_BEFORE_10_6(__divdc3)
+NOT_HERE_BEFORE_10_6(__divdi3)
+NOT_HERE_BEFORE_10_6(__divsc3)
+NOT_HERE_BEFORE_10_6(__divtc3)
+NOT_HERE_BEFORE_10_6(__divti3)
+NOT_HERE_BEFORE_10_6(__divxc3)
+NOT_HERE_BEFORE_10_6(__enable_execute_stack)
+NOT_HERE_BEFORE_10_6(__ffsdi2)
+NOT_HERE_BEFORE_10_6(__ffsti2)
+NOT_HERE_BEFORE_10_6(__fixdfdi)
+NOT_HERE_BEFORE_10_6(__fixdfti)
+NOT_HERE_BEFORE_10_6(__fixsfdi)
+NOT_HERE_BEFORE_10_6(__fixsfti)
+NOT_HERE_BEFORE_10_6(__fixtfdi)
+NOT_HERE_BEFORE_10_6(__fixunsdfdi)
+NOT_HERE_BEFORE_10_6(__fixunsdfsi)
+NOT_HERE_BEFORE_10_6(__fixunsdfti)
+NOT_HERE_BEFORE_10_6(__fixunssfdi)
+NOT_HERE_BEFORE_10_6(__fixunssfsi)
+NOT_HERE_BEFORE_10_6(__fixunssfti)
+NOT_HERE_BEFORE_10_6(__fixunstfdi)
+NOT_HERE_BEFORE_10_6(__fixunsxfdi)
+NOT_HERE_BEFORE_10_6(__fixunsxfsi)
+NOT_HERE_BEFORE_10_6(__fixunsxfti)
+NOT_HERE_BEFORE_10_6(__fixxfdi)
+NOT_HERE_BEFORE_10_6(__fixxfti)
+NOT_HERE_BEFORE_10_6(__floatdidf)
+NOT_HERE_BEFORE_10_6(__floatdisf)
+NOT_HERE_BEFORE_10_6(__floatditf)
+NOT_HERE_BEFORE_10_6(__floatdixf)
+NOT_HERE_BEFORE_10_6(__floattidf)
+NOT_HERE_BEFORE_10_6(__floattisf)
+NOT_HERE_BEFORE_10_6(__floattixf)
+NOT_HERE_BEFORE_10_6(__floatundidf)
+NOT_HERE_BEFORE_10_6(__floatundisf)
+NOT_HERE_BEFORE_10_6(__floatunditf)
+NOT_HERE_BEFORE_10_6(__floatundixf)
+NOT_HERE_BEFORE_10_6(__floatuntidf)
+NOT_HERE_BEFORE_10_6(__floatuntisf)
+NOT_HERE_BEFORE_10_6(__floatuntixf)
+NOT_HERE_BEFORE_10_6(__gcc_personality_v0)
+NOT_HERE_BEFORE_10_6(__lshrdi3)
+NOT_HERE_BEFORE_10_6(__lshrti3)
+NOT_HERE_BEFORE_10_6(__moddi3)
+NOT_HERE_BEFORE_10_6(__modti3)
+NOT_HERE_BEFORE_10_6(__muldc3)
+NOT_HERE_BEFORE_10_6(__muldi3)
+NOT_HERE_BEFORE_10_6(__mulsc3)
+NOT_HERE_BEFORE_10_6(__multc3)
+NOT_HERE_BEFORE_10_6(__multi3)
+NOT_HERE_BEFORE_10_6(__mulvdi3)
+NOT_HERE_BEFORE_10_6(__mulvsi3)
+NOT_HERE_BEFORE_10_6(__mulvti3)
+NOT_HERE_BEFORE_10_6(__mulxc3)
+NOT_HERE_BEFORE_10_6(__negdi2)
+NOT_HERE_BEFORE_10_6(__negti2)
+NOT_HERE_BEFORE_10_6(__negvdi2)
+NOT_HERE_BEFORE_10_6(__negvsi2)
+NOT_HERE_BEFORE_10_6(__negvti2)
+NOT_HERE_BEFORE_10_6(__paritydi2)
+NOT_HERE_BEFORE_10_6(__paritysi2)
+NOT_HERE_BEFORE_10_6(__parityti2)
+NOT_HERE_BEFORE_10_6(__popcountdi2)
+NOT_HERE_BEFORE_10_6(__popcountsi2)
+NOT_HERE_BEFORE_10_6(__popcountti2)
+NOT_HERE_BEFORE_10_6(__powidf2)
+NOT_HERE_BEFORE_10_6(__powisf2)
+NOT_HERE_BEFORE_10_6(__powitf2)
+NOT_HERE_BEFORE_10_6(__powixf2)
+NOT_HERE_BEFORE_10_6(__subvdi3)
+NOT_HERE_BEFORE_10_6(__subvsi3)
+NOT_HERE_BEFORE_10_6(__subvti3)
+NOT_HERE_BEFORE_10_6(__ucmpdi2)
+NOT_HERE_BEFORE_10_6(__ucmpti2)
+NOT_HERE_BEFORE_10_6(__udivdi3)
+NOT_HERE_BEFORE_10_6(__udivmoddi4)
+NOT_HERE_BEFORE_10_6(__udivmodti4)
+NOT_HERE_BEFORE_10_6(__udivti3)
+NOT_HERE_BEFORE_10_6(__umoddi3)
+NOT_HERE_BEFORE_10_6(__umodti3)
+
+#if __ppc__
+NOT_HERE_BEFORE_10_6(__gcc_qadd)
+NOT_HERE_BEFORE_10_6(__gcc_qdiv)
+NOT_HERE_BEFORE_10_6(__gcc_qmul)
+NOT_HERE_BEFORE_10_6(__gcc_qsub)
+NOT_HERE_BEFORE_10_6(__trampoline_setup)
+#endif // __ppc__
+
+NOT_HERE_IN_10_8_AND_EARLIER(__atomic_compare_exchange)
+NOT_HERE_IN_10_8_AND_EARLIER(__atomic_compare_exchange_1)
+NOT_HERE_IN_10_8_AND_EARLIER(__atomic_compare_exchange_2)
+NOT_HERE_IN_10_8_AND_EARLIER(__atomic_compare_exchange_4)
+NOT_HERE_IN_10_8_AND_EARLIER(__atomic_compare_exchange_8)
+
+NOT_HERE_IN_10_8_AND_EARLIER(__atomic_exchange)
+NOT_HERE_IN_10_8_AND_EARLIER(__atomic_exchange_1)
+NOT_HERE_IN_10_8_AND_EARLIER(__atomic_exchange_2)
+NOT_HERE_IN_10_8_AND_EARLIER(__atomic_exchange_4)
+NOT_HERE_IN_10_8_AND_EARLIER(__atomic_exchange_8)
+
+NOT_HERE_IN_10_8_AND_EARLIER(__atomic_fetch_add_1)
+NOT_HERE_IN_10_8_AND_EARLIER(__atomic_fetch_add_2)
+NOT_HERE_IN_10_8_AND_EARLIER(__atomic_fetch_add_4)
+NOT_HERE_IN_10_8_AND_EARLIER(__atomic_fetch_add_8)
+
+NOT_HERE_IN_10_8_AND_EARLIER(__atomic_fetch_and_1)
+NOT_HERE_IN_10_8_AND_EARLIER(__atomic_fetch_and_2)
+NOT_HERE_IN_10_8_AND_EARLIER(__atomic_fetch_and_4)
+NOT_HERE_IN_10_8_AND_EARLIER(__atomic_fetch_and_8)
+
+NOT_HERE_IN_10_8_AND_EARLIER(__atomic_fetch_or_1)
+NOT_HERE_IN_10_8_AND_EARLIER(__atomic_fetch_or_2)
+NOT_HERE_IN_10_8_AND_EARLIER(__atomic_fetch_or_4)
+NOT_HERE_IN_10_8_AND_EARLIER(__atomic_fetch_or_8)
+
+NOT_HERE_IN_10_8_AND_EARLIER(__atomic_fetch_sub_1)
+NOT_HERE_IN_10_8_AND_EARLIER(__atomic_fetch_sub_2)
+NOT_HERE_IN_10_8_AND_EARLIER(__atomic_fetch_sub_4)
+NOT_HERE_IN_10_8_AND_EARLIER(__atomic_fetch_sub_8)
+
+NOT_HERE_IN_10_8_AND_EARLIER(__atomic_fetch_xor_1)
+NOT_HERE_IN_10_8_AND_EARLIER(__atomic_fetch_xor_2)
+NOT_HERE_IN_10_8_AND_EARLIER(__atomic_fetch_xor_4)
+NOT_HERE_IN_10_8_AND_EARLIER(__atomic_fetch_xor_8)
+
+NOT_HERE_IN_10_8_AND_EARLIER(__atomic_load)
+NOT_HERE_IN_10_8_AND_EARLIER(__atomic_load_1)
+NOT_HERE_IN_10_8_AND_EARLIER(__atomic_load_2)
+NOT_HERE_IN_10_8_AND_EARLIER(__atomic_load_4)
+NOT_HERE_IN_10_8_AND_EARLIER(__atomic_load_8)
+
+NOT_HERE_IN_10_8_AND_EARLIER(__atomic_store)
+NOT_HERE_IN_10_8_AND_EARLIER(__atomic_store_1)
+NOT_HERE_IN_10_8_AND_EARLIER(__atomic_store_2)
+NOT_HERE_IN_10_8_AND_EARLIER(__atomic_store_4)
+NOT_HERE_IN_10_8_AND_EARLIER(__atomic_store_8)
+
+#if __arm__ && __DYNAMIC__
+#define NOT_HERE_UNTIL_AFTER_4_3(sym) \
+ extern const char sym##_tmp1 __asm("$ld$hide$os3.0$_" #sym); \
+ __attribute__((visibility("default"))) const char sym##_tmp1 = 0; \
+ extern const char sym##_tmp2 __asm("$ld$hide$os3.1$_" #sym); \
+ __attribute__((visibility("default"))) const char sym##_tmp2 = 0; \
+ extern const char sym##_tmp3 __asm("$ld$hide$os3.2$_" #sym); \
+ __attribute__((visibility("default"))) const char sym##_tmp3 = 0; \
+ extern const char sym##_tmp4 __asm("$ld$hide$os4.0$_" #sym); \
+ __attribute__((visibility("default"))) const char sym##_tmp4 = 0; \
+ extern const char sym##_tmp5 __asm("$ld$hide$os4.1$_" #sym); \
+ __attribute__((visibility("default"))) const char sym##_tmp5 = 0; \
+ extern const char sym##_tmp6 __asm("$ld$hide$os4.2$_" #sym); \
+ __attribute__((visibility("default"))) const char sym##_tmp6 = 0; \
+ extern const char sym##_tmp7 __asm("$ld$hide$os4.3$_" #sym); \
+ __attribute__((visibility("default"))) const char sym##_tmp7 = 0;
+
+NOT_HERE_UNTIL_AFTER_4_3(__absvdi2)
+NOT_HERE_UNTIL_AFTER_4_3(__absvsi2)
+NOT_HERE_UNTIL_AFTER_4_3(__adddf3)
+NOT_HERE_UNTIL_AFTER_4_3(__adddf3vfp)
+NOT_HERE_UNTIL_AFTER_4_3(__addsf3)
+NOT_HERE_UNTIL_AFTER_4_3(__addsf3vfp)
+NOT_HERE_UNTIL_AFTER_4_3(__addvdi3)
+NOT_HERE_UNTIL_AFTER_4_3(__addvsi3)
+NOT_HERE_UNTIL_AFTER_4_3(__ashldi3)
+NOT_HERE_UNTIL_AFTER_4_3(__ashrdi3)
+NOT_HERE_UNTIL_AFTER_4_3(__bswapdi2)
+NOT_HERE_UNTIL_AFTER_4_3(__bswapsi2)
+NOT_HERE_UNTIL_AFTER_4_3(__clzdi2)
+NOT_HERE_UNTIL_AFTER_4_3(__clzsi2)
+NOT_HERE_UNTIL_AFTER_4_3(__cmpdi2)
+NOT_HERE_UNTIL_AFTER_4_3(__ctzdi2)
+NOT_HERE_UNTIL_AFTER_4_3(__ctzsi2)
+NOT_HERE_UNTIL_AFTER_4_3(__divdc3)
+NOT_HERE_UNTIL_AFTER_4_3(__divdf3)
+NOT_HERE_UNTIL_AFTER_4_3(__divdf3vfp)
+NOT_HERE_UNTIL_AFTER_4_3(__divdi3)
+NOT_HERE_UNTIL_AFTER_4_3(__divsc3)
+NOT_HERE_UNTIL_AFTER_4_3(__divsf3)
+NOT_HERE_UNTIL_AFTER_4_3(__divsf3vfp)
+NOT_HERE_UNTIL_AFTER_4_3(__divsi3)
+NOT_HERE_UNTIL_AFTER_4_3(__eqdf2)
+NOT_HERE_UNTIL_AFTER_4_3(__eqdf2vfp)
+NOT_HERE_UNTIL_AFTER_4_3(__eqsf2)
+NOT_HERE_UNTIL_AFTER_4_3(__eqsf2vfp)
+NOT_HERE_UNTIL_AFTER_4_3(__extendsfdf2)
+NOT_HERE_UNTIL_AFTER_4_3(__extendsfdf2vfp)
+NOT_HERE_UNTIL_AFTER_4_3(__ffsdi2)
+NOT_HERE_UNTIL_AFTER_4_3(__fixdfdi)
+NOT_HERE_UNTIL_AFTER_4_3(__fixdfsi)
+NOT_HERE_UNTIL_AFTER_4_3(__fixdfsivfp)
+NOT_HERE_UNTIL_AFTER_4_3(__fixsfdi)
+NOT_HERE_UNTIL_AFTER_4_3(__fixsfsi)
+NOT_HERE_UNTIL_AFTER_4_3(__fixsfsivfp)
+NOT_HERE_UNTIL_AFTER_4_3(__fixunsdfdi)
+NOT_HERE_UNTIL_AFTER_4_3(__fixunsdfsi)
+NOT_HERE_UNTIL_AFTER_4_3(__fixunsdfsivfp)
+NOT_HERE_UNTIL_AFTER_4_3(__fixunssfdi)
+NOT_HERE_UNTIL_AFTER_4_3(__fixunssfsi)
+NOT_HERE_UNTIL_AFTER_4_3(__fixunssfsivfp)
+NOT_HERE_UNTIL_AFTER_4_3(__floatdidf)
+NOT_HERE_UNTIL_AFTER_4_3(__floatdisf)
+NOT_HERE_UNTIL_AFTER_4_3(__floatsidf)
+NOT_HERE_UNTIL_AFTER_4_3(__floatsidfvfp)
+NOT_HERE_UNTIL_AFTER_4_3(__floatsisf)
+NOT_HERE_UNTIL_AFTER_4_3(__floatsisfvfp)
+NOT_HERE_UNTIL_AFTER_4_3(__floatundidf)
+NOT_HERE_UNTIL_AFTER_4_3(__floatundisf)
+NOT_HERE_UNTIL_AFTER_4_3(__floatunsidf)
+NOT_HERE_UNTIL_AFTER_4_3(__floatunsisf)
+NOT_HERE_UNTIL_AFTER_4_3(__floatunssidfvfp)
+NOT_HERE_UNTIL_AFTER_4_3(__floatunssisfvfp)
+NOT_HERE_UNTIL_AFTER_4_3(__gedf2)
+NOT_HERE_UNTIL_AFTER_4_3(__gedf2vfp)
+NOT_HERE_UNTIL_AFTER_4_3(__gesf2)
+NOT_HERE_UNTIL_AFTER_4_3(__gesf2vfp)
+NOT_HERE_UNTIL_AFTER_4_3(__gtdf2)
+NOT_HERE_UNTIL_AFTER_4_3(__gtdf2vfp)
+NOT_HERE_UNTIL_AFTER_4_3(__gtsf2)
+NOT_HERE_UNTIL_AFTER_4_3(__gtsf2vfp)
+NOT_HERE_UNTIL_AFTER_4_3(__ledf2)
+NOT_HERE_UNTIL_AFTER_4_3(__ledf2vfp)
+NOT_HERE_UNTIL_AFTER_4_3(__lesf2)
+NOT_HERE_UNTIL_AFTER_4_3(__lesf2vfp)
+NOT_HERE_UNTIL_AFTER_4_3(__lshrdi3)
+NOT_HERE_UNTIL_AFTER_4_3(__ltdf2)
+NOT_HERE_UNTIL_AFTER_4_3(__ltdf2vfp)
+NOT_HERE_UNTIL_AFTER_4_3(__ltsf2)
+NOT_HERE_UNTIL_AFTER_4_3(__ltsf2vfp)
+NOT_HERE_UNTIL_AFTER_4_3(__moddi3)
+NOT_HERE_UNTIL_AFTER_4_3(__modsi3)
+NOT_HERE_UNTIL_AFTER_4_3(__muldc3)
+NOT_HERE_UNTIL_AFTER_4_3(__muldf3)
+NOT_HERE_UNTIL_AFTER_4_3(__muldf3vfp)
+NOT_HERE_UNTIL_AFTER_4_3(__muldi3)
+NOT_HERE_UNTIL_AFTER_4_3(__mulsc3)
+NOT_HERE_UNTIL_AFTER_4_3(__mulsf3)
+NOT_HERE_UNTIL_AFTER_4_3(__mulsf3vfp)
+NOT_HERE_UNTIL_AFTER_4_3(__mulvdi3)
+NOT_HERE_UNTIL_AFTER_4_3(__mulvsi3)
+NOT_HERE_UNTIL_AFTER_4_3(__nedf2)
+NOT_HERE_UNTIL_AFTER_4_3(__nedf2vfp)
+NOT_HERE_UNTIL_AFTER_4_3(__negdi2)
+NOT_HERE_UNTIL_AFTER_4_3(__negvdi2)
+NOT_HERE_UNTIL_AFTER_4_3(__negvsi2)
+NOT_HERE_UNTIL_AFTER_4_3(__nesf2)
+NOT_HERE_UNTIL_AFTER_4_3(__nesf2vfp)
+NOT_HERE_UNTIL_AFTER_4_3(__paritydi2)
+NOT_HERE_UNTIL_AFTER_4_3(__paritysi2)
+NOT_HERE_UNTIL_AFTER_4_3(__popcountdi2)
+NOT_HERE_UNTIL_AFTER_4_3(__popcountsi2)
+NOT_HERE_UNTIL_AFTER_4_3(__powidf2)
+NOT_HERE_UNTIL_AFTER_4_3(__powisf2)
+NOT_HERE_UNTIL_AFTER_4_3(__subdf3)
+NOT_HERE_UNTIL_AFTER_4_3(__subdf3vfp)
+NOT_HERE_UNTIL_AFTER_4_3(__subsf3)
+NOT_HERE_UNTIL_AFTER_4_3(__subsf3vfp)
+NOT_HERE_UNTIL_AFTER_4_3(__subvdi3)
+NOT_HERE_UNTIL_AFTER_4_3(__subvsi3)
+NOT_HERE_UNTIL_AFTER_4_3(__truncdfsf2)
+NOT_HERE_UNTIL_AFTER_4_3(__truncdfsf2vfp)
+NOT_HERE_UNTIL_AFTER_4_3(__ucmpdi2)
+NOT_HERE_UNTIL_AFTER_4_3(__udivdi3)
+NOT_HERE_UNTIL_AFTER_4_3(__udivmoddi4)
+NOT_HERE_UNTIL_AFTER_4_3(__udivsi3)
+NOT_HERE_UNTIL_AFTER_4_3(__umoddi3)
+NOT_HERE_UNTIL_AFTER_4_3(__umodsi3)
+NOT_HERE_UNTIL_AFTER_4_3(__unorddf2)
+NOT_HERE_UNTIL_AFTER_4_3(__unorddf2vfp)
+NOT_HERE_UNTIL_AFTER_4_3(__unordsf2)
+NOT_HERE_UNTIL_AFTER_4_3(__unordsf2vfp)
+
+NOT_HERE_UNTIL_AFTER_4_3(__divmodsi4)
+NOT_HERE_UNTIL_AFTER_4_3(__udivmodsi4)
+#endif // __arm__ && __DYNAMIC__
+
+#else // !__APPLE__
+
+extern int avoid_empty_file;
+
+#endif // !__APPLE__
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/apple_versioning.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/adddf3vfp.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/adddf3vfp.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/adddf3vfp.S (revision 351984)
@@ -0,0 +1,31 @@
+//===-- adddf3vfp.S - Implement adddf3vfp ---------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+// double __adddf3vfp(double a, double b) { return a + b; }
+//
+// Adds two double precision floating point numbers using the Darwin
+// calling convention where double arguments are passed in GPR pairs
+
+ .syntax unified
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__adddf3vfp)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+ vadd.f64 d0, d0, d1
+#else
+ vmov d6, r0, r1 // move first param from r0/r1 pair into d6
+ vmov d7, r2, r3 // move second param from r2/r3 pair into d7
+ vadd.f64 d6, d6, d7
+ vmov r0, r1, d6 // move result back to r0/r1 pair
+#endif
+ bx lr
+END_COMPILERRT_FUNCTION(__adddf3vfp)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/adddf3vfp.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/addsf3.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/addsf3.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/addsf3.S (revision 351984)
@@ -0,0 +1,276 @@
+//===-- addsf3.S - Adds two single precision floating point numbers ------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the __addsf3 (single precision floating point number
+// addition with the IEEE-754 default rounding, to nearest, ties to even)
+// function for the ARM Thumb1 ISA.
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+#define significandBits 23
+#define typeWidth 32
+
+ .syntax unified
+ .text
+ .thumb
+ .p2align 2
+
+DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_fadd, __addsf3)
+
+DEFINE_COMPILERRT_THUMB_FUNCTION(__addsf3)
+ push {r4, r5, r6, r7, lr}
+ // Get the absolute value of a and b.
+ lsls r2, r0, #1
+ lsls r3, r1, #1
+ lsrs r2, r2, #1 // aAbs
+ beq LOCAL_LABEL(a_zero_nan_inf)
+ lsrs r3, r3, #1 // bAbs
+ beq LOCAL_LABEL(zero_nan_inf)
+
+ // Detect if a or b is infinity or NaN.
+ lsrs r6, r2, #(significandBits)
+ lsrs r7, r3, #(significandBits)
+ cmp r6, #0xFF
+ beq LOCAL_LABEL(zero_nan_inf)
+ cmp r7, #0xFF
+ beq LOCAL_LABEL(zero_nan_inf)
+
+ // Swap Rep and Abs so that a and aAbs have the larger absolute value.
+ cmp r2, r3
+ bhs LOCAL_LABEL(no_swap)
+ movs r4, r0
+ movs r5, r2
+ movs r0, r1
+ movs r2, r3
+ movs r1, r4
+ movs r3, r5
+LOCAL_LABEL(no_swap):
+
+ // Get the significands and shift them to give us round, guard and sticky.
+ lsls r4, r0, #(typeWidth - significandBits)
+ lsrs r4, r4, #(typeWidth - significandBits - 3) // aSignificand << 3
+ lsls r5, r1, #(typeWidth - significandBits)
+ lsrs r5, r5, #(typeWidth - significandBits - 3) // bSignificand << 3
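+ // (Editor's note: shifting the significands up by 3 makes room for the
+ // guard/round/sticky bits; the alignment code below fills them with what
+ // it shifts out, so the final rounding step can implement
+ // round-to-nearest, ties-to-even.)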
+
+ // Get the implicitBit.
+ movs r6, #1
+ lsls r6, r6, #(significandBits + 3)
+
+ // Get aExponent and set implicit bit if necessary.
+ lsrs r2, r2, #(significandBits)
+ beq LOCAL_LABEL(a_done_implicit_bit)
+ orrs r4, r6
+LOCAL_LABEL(a_done_implicit_bit):
+
+ // Get bExponent and set implicit bit if necessary.
+ lsrs r3, r3, #(significandBits)
+ beq LOCAL_LABEL(b_done_implicit_bit)
+ orrs r5, r6
+LOCAL_LABEL(b_done_implicit_bit):
+
+ // Get the difference in exponents.
+ subs r6, r2, r3
+ beq LOCAL_LABEL(done_align)
+
+ // If b is denormal, then a must be normal as align > 0, and we only need to
+ // right shift bSignificand by (align - 1) bits.
+ cmp r3, #0
+ bne 1f
+ subs r6, r6, #1
+1:
+
+ // No longer needs bExponent. r3 is dead here.
+ // Set sticky bits of b: sticky = bSignificand << (typeWidth - align).
+ movs r3, #(typeWidth)
+ subs r3, r3, r6
+ movs r7, r5
+ lsls r7, r3
+ beq 1f
+ movs r7, #1
+1:
+
+ // bSignificand = bSignificand >> align | sticky;
+ lsrs r5, r6
+ orrs r5, r7
+ bne LOCAL_LABEL(done_align)
+ movs r5, #1 // sticky; b is known to be non-zero.
+
+LOCAL_LABEL(done_align):
+ // isSubtraction = (aRep ^ bRep) >> 31;
+ movs r7, r0
+ eors r7, r1
+ lsrs r7, #31
+ bne LOCAL_LABEL(do_subtraction)
+
+ // Same sign, do Addition.
+
+ // aSignificand += bSignificand;
+ adds r4, r4, r5
+
+ // Check carry bit.
+ movs r6, #1
+ lsls r6, r6, #(significandBits + 3 + 1)
+ movs r7, r4
+ ands r7, r6
+ beq LOCAL_LABEL(form_result)
+ // If the addition carried up, we need to right-shift the result and
+ // adjust the exponent.
+ movs r7, r4
+ movs r6, #1
+ ands r7, r6 // sticky = aSignificand & 1;
+ lsrs r4, #1
+ orrs r4, r7 // result Significand
+ adds r2, #1 // result Exponent
+ // If we have overflowed the type, return +/- infinity.
+ cmp r2, 0xFF
+ beq LOCAL_LABEL(ret_inf)
+
+LOCAL_LABEL(form_result):
+ // Shift the sign, exponent and significand into place.
+ lsrs r0, #(typeWidth - 1)
+ lsls r0, #(typeWidth - 1) // Get Sign.
+ lsls r2, #(significandBits)
+ orrs r0, r2
+ movs r1, r4
+ lsls r4, #(typeWidth - significandBits - 3)
+ lsrs r4, #(typeWidth - significandBits)
+ orrs r0, r4
+
+ // Final rounding. The result may overflow to infinity, but that is the
+ // correct result in that case.
+ // roundGuardSticky = aSignificand & 0x7;
+ movs r2, #0x7
+ ands r1, r2
+ // if (roundGuardSticky > 0x4) result++;
+
+ cmp r1, #0x4
+ blt LOCAL_LABEL(done_round)
+ beq 1f
+ adds r0, #1
+ pop {r4, r5, r6, r7, pc}
+1:
+
+ // if (roundGuardSticky == 0x4) result += result & 1;
+ movs r1, r0
+ lsrs r1, #1
+ bcc LOCAL_LABEL(done_round)
+ adds r0, r0, #1
+LOCAL_LABEL(done_round):
+ pop {r4, r5, r6, r7, pc}
+
+LOCAL_LABEL(do_subtraction):
+ subs r4, r4, r5 // aSignificand -= bSignificand;
+ beq LOCAL_LABEL(ret_zero)
+ movs r6, r4
+ cmp r2, 0
+ beq LOCAL_LABEL(form_result) // if a's exp is 0, no need to normalize.
+ // If partial cancellation occurred, we need to left-shift the result
+ // and adjust the exponent:
+ lsrs r6, r6, #(significandBits + 3)
+ bne LOCAL_LABEL(form_result)
+
+ push {r0, r1, r2, r3}
+ movs r0, r4
+ bl SYMBOL_NAME(__clzsi2)
+ movs r5, r0
+ pop {r0, r1, r2, r3}
+ // shift = rep_clz(aSignificand) - rep_clz(implicitBit << 3);
+ subs r5, r5, #(typeWidth - significandBits - 3 - 1)
+ // aSignificand <<= shift; aExponent -= shift;
+ lsls r4, r5
+ subs r2, r2, r5
+ bgt LOCAL_LABEL(form_result)
+
+ // Do normalization if aExponent <= 0.
+ movs r6, #1
+ subs r6, r6, r2 // 1 - aExponent;
+ movs r2, #0 // aExponent = 0;
+ movs r3, #(typeWidth) // bExponent is dead.
+ subs r3, r3, r6
+ movs r7, r4
+ lsls r7, r3 // stickyBit = (bool)(aSignificand << (typeWidth - align))
+ beq 1f
+ movs r7, #1
+1:
+ lsrs r4, r6 // aSignificand >> shift
+ orrs r4, r7
+ b LOCAL_LABEL(form_result)
+
+LOCAL_LABEL(ret_zero):
+ movs r0, #0
+ pop {r4, r5, r6, r7, pc}
+
+
+LOCAL_LABEL(a_zero_nan_inf):
+ lsrs r3, r3, #1
+
+LOCAL_LABEL(zero_nan_inf):
+ // Here r2 has aAbs, r3 has bAbs
+ movs r4, #0xFF
+ lsls r4, r4, #(significandBits) // Make +inf.
+
+ cmp r2, r4
+ bhi LOCAL_LABEL(a_is_nan)
+ cmp r3, r4
+ bhi LOCAL_LABEL(b_is_nan)
+
+ cmp r2, r4
+ bne LOCAL_LABEL(a_is_rational)
+ // aAbs is INF.
+ eors r1, r0 // aRep ^ bRep.
+ movs r6, #1
+ lsls r6, r6, #(typeWidth - 1) // get sign mask.
+ cmp r1, r6 // if they only differ on sign bit, it's -INF + INF
+ beq LOCAL_LABEL(a_is_nan)
+ pop {r4, r5, r6, r7, pc}
+
+LOCAL_LABEL(a_is_rational):
+ cmp r3, r4
+ bne LOCAL_LABEL(b_is_rational)
+ movs r0, r1
+ pop {r4, r5, r6, r7, pc}
+
+LOCAL_LABEL(b_is_rational):
+ // Either a, b, or both are zero.
+ adds r4, r2, r3
+ beq LOCAL_LABEL(both_zero)
+ cmp r2, #0 // is aAbs 0 ?
+ beq LOCAL_LABEL(ret_b)
+ pop {r4, r5, r6, r7, pc}
+
+LOCAL_LABEL(both_zero):
+ ands r0, r1 // +0 + -0 = +0
+ pop {r4, r5, r6, r7, pc}
+
+LOCAL_LABEL(ret_b):
+ movs r0, r1
+
+LOCAL_LABEL(ret):
+ pop {r4, r5, r6, r7, pc}
+
+LOCAL_LABEL(b_is_nan):
+ movs r0, r1
+LOCAL_LABEL(a_is_nan):
+ movs r1, #1
+ lsls r1, r1, #(significandBits -1) // r1 is quiet bit.
+ orrs r0, r1
+ pop {r4, r5, r6, r7, pc}
+
+LOCAL_LABEL(ret_inf):
+ movs r4, #0xFF
+ lsls r4, r4, #(significandBits)
+ orrs r0, r4
+ lsrs r0, r0, #(significandBits)
+ lsls r0, r0, #(significandBits)
+ pop {r4, r5, r6, r7, pc}
+
+
+END_COMPILERRT_FUNCTION(__addsf3)
+
+NO_EXEC_STACK_DIRECTIVE
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/addsf3.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/addsf3vfp.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/addsf3vfp.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/addsf3vfp.S (revision 351984)
@@ -0,0 +1,32 @@
+//===-- addsf3vfp.S - Implement addsf3vfp ---------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+//
+// extern float __addsf3vfp(float a, float b);
+//
+// Adds two single precision floating point numbers using the Darwin
+// calling convention where single arguments are passed in GPRs
+//
+ .syntax unified
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__addsf3vfp)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+ vadd.f32 s0, s0, s1
+#else
+ vmov s14, r0 // move first param from r0 into float register
+ vmov s15, r1 // move second param from r1 into float register
+ vadd.f32 s14, s14, s15
+ vmov r0, s14 // move result back to r0
+#endif
+ bx lr
+END_COMPILERRT_FUNCTION(__addsf3vfp)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/addsf3vfp.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_cdcmp.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_cdcmp.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_cdcmp.S (revision 351984)
@@ -0,0 +1,144 @@
+//===-- aeabi_cdcmp.S - EABI cdcmp* implementation ------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+#if __BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__
+#error big endian support not implemented
+#endif
+
+#define APSR_Z (1 << 30)
+#define APSR_C (1 << 29)
+
+// void __aeabi_cdcmpeq(double a, double b) {
+// if (isnan(a) || isnan(b)) {
+// Z = 0; C = 1;
+// } else {
+// __aeabi_cdcmple(a, b);
+// }
+// }
+
+ .syntax unified
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__aeabi_cdcmpeq)
+ push {r0-r3, lr}
+ bl __aeabi_cdcmpeq_check_nan
+ cmp r0, #1
+#if defined(USE_THUMB_1)
+ beq 1f
+ // NaN has been ruled out, so __aeabi_cdcmple can't trap
+ mov r0, sp
+ ldm r0, {r0-r3}
+ bl __aeabi_cdcmple
+ pop {r0-r3, pc}
+1:
+ // Z = 0, C = 1
+ movs r0, #0xF
+ lsls r0, r0, #31
+ pop {r0-r3, pc}
+#else
+ pop {r0-r3, lr}
+
+ // NaN has been ruled out, so __aeabi_cdcmple can't trap
+ // Use "it ne" + unconditional branch to guarantee a supported relocation if
+ // __aeabi_cdcmple is in a different section for some builds.
+ IT(ne)
+ bne __aeabi_cdcmple
+
+#if defined(USE_THUMB_2)
+ mov ip, #APSR_C
+ msr APSR_nzcvq, ip
+#else
+ msr APSR_nzcvq, #APSR_C
+#endif
+ JMP(lr)
+#endif
+END_COMPILERRT_FUNCTION(__aeabi_cdcmpeq)
+
+
+// void __aeabi_cdcmple(double a, double b) {
+// if (__aeabi_dcmplt(a, b)) {
+// Z = 0; C = 0;
+// } else if (__aeabi_dcmpeq(a, b)) {
+// Z = 1; C = 1;
+// } else {
+// Z = 0; C = 1;
+// }
+// }
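+//
+// Editor's note (usage sketch, not vendor code): the result is returned in
+// the APSR flags rather than in a register, so a caller typically does:
+//
+//   bl __aeabi_cdcmple
+//   beq 1f          // Z=1, C=1  =>  a == b
+//   bcc 2f          // Z=0, C=0  =>  a <  b
+//                   // fall through: Z=0, C=1  =>  a > b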
+
+ .syntax unified
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__aeabi_cdcmple)
+ // Per the RTABI, this function must preserve r0-r11.
+ // Save lr in the same instruction for compactness
+ push {r0-r3, lr}
+
+ bl __aeabi_dcmplt
+ cmp r0, #1
+#if defined(USE_THUMB_1)
+ bne 1f
+ // Z = 0, C = 0
+ movs r0, #1
+ lsls r0, r0, #1
+ pop {r0-r3, pc}
+1:
+ mov r0, sp
+ ldm r0, {r0-r3}
+ bl __aeabi_dcmpeq
+ cmp r0, #1
+ bne 2f
+ // Z = 1, C = 1
+ movs r0, #2
+ lsls r0, r0, #31
+ pop {r0-r3, pc}
+2:
+ // Z = 0, C = 1
+ movs r0, #0xF
+ lsls r0, r0, #31
+ pop {r0-r3, pc}
+#else
+ ITT(eq)
+ moveq ip, #0
+ beq 1f
+
+ ldm sp, {r0-r3}
+ bl __aeabi_dcmpeq
+ cmp r0, #1
+ ITE(eq)
+ moveq ip, #(APSR_C | APSR_Z)
+ movne ip, #(APSR_C)
+
+1:
+ msr APSR_nzcvq, ip
+ pop {r0-r3}
+ POP_PC()
+#endif
+END_COMPILERRT_FUNCTION(__aeabi_cdcmple)
+
+// int __aeabi_cdrcmple(double a, double b) {
+// return __aeabi_cdcmple(b, a);
+// }
+
+ .syntax unified
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__aeabi_cdrcmple)
+ // Swap r0 and r2
+ mov ip, r0
+ mov r0, r2
+ mov r2, ip
+
+ // Swap r1 and r3
+ mov ip, r1
+ mov r1, r3
+ mov r3, ip
+
+ b __aeabi_cdcmple
+END_COMPILERRT_FUNCTION(__aeabi_cdrcmple)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_cdcmp.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_cdcmpeq_check_nan.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_cdcmpeq_check_nan.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_cdcmpeq_check_nan.c (revision 351984)
@@ -0,0 +1,15 @@
+//===-- lib/arm/aeabi_cdcmpeq_helper.c - Helper for cdcmpeq ---------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../int_lib.h"
+#include <stdint.h>
+
+AEABI_RTABI __attribute__((visibility("hidden"))) int
+__aeabi_cdcmpeq_check_nan(double a, double b) {
+ return __builtin_isnan(a) || __builtin_isnan(b);
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_cdcmpeq_check_nan.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_cfcmp.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_cfcmp.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_cfcmp.S (revision 351984)
@@ -0,0 +1,139 @@
+//===-- aeabi_cfcmp.S - EABI cfcmp* implementation ------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+#if __BYTE_ORDER__ != __ORDER_LITTLE_ENDIAN__
+#error big endian support not implemented
+#endif
+
+#define APSR_Z (1 << 30)
+#define APSR_C (1 << 29)
+
+// void __aeabi_cfcmpeq(float a, float b) {
+// if (isnan(a) || isnan(b)) {
+// Z = 0; C = 1;
+// } else {
+// __aeabi_cfcmple(a, b);
+// }
+// }
+
+ .syntax unified
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__aeabi_cfcmpeq)
+ push {r0-r3, lr}
+ bl __aeabi_cfcmpeq_check_nan
+ cmp r0, #1
+#if defined(USE_THUMB_1)
+ beq 1f
+ // NaN has been ruled out, so __aeabi_cfcmple can't trap
+ mov r0, sp
+ ldm r0, {r0-r3}
+ bl __aeabi_cfcmple
+ pop {r0-r3, pc}
+1:
+ // Z = 0, C = 1
+ movs r0, #0xF
+ lsls r0, r0, #31
+ pop {r0-r3, pc}
+#else
+ pop {r0-r3, lr}
+
+ // NaN has been ruled out, so __aeabi_cfcmple can't trap
+ // Use "it ne" + unconditional branch to guarantee a supported relocation if
+ // __aeabi_cfcmple is in a different section for some builds.
+ IT(ne)
+ bne __aeabi_cfcmple
+
+#if defined(USE_THUMB_2)
+ mov ip, #APSR_C
+ msr APSR_nzcvq, ip
+#else
+ msr APSR_nzcvq, #APSR_C
+#endif
+ JMP(lr)
+#endif
+END_COMPILERRT_FUNCTION(__aeabi_cfcmpeq)
+
+
+// void __aeabi_cfcmple(float a, float b) {
+// if (__aeabi_fcmplt(a, b)) {
+// Z = 0; C = 0;
+// } else if (__aeabi_fcmpeq(a, b)) {
+// Z = 1; C = 1;
+// } else {
+// Z = 0; C = 1;
+// }
+// }
+
+ .syntax unified
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__aeabi_cfcmple)
+ // Per the RTABI, this function must preserve r0-r11.
+ // Save lr in the same instruction for compactness
+ push {r0-r3, lr}
+
+ bl __aeabi_fcmplt
+ cmp r0, #1
+#if defined(USE_THUMB_1)
+ bne 1f
+ // Z = 0, C = 0
+ movs r0, #1
+ lsls r0, r0, #1
+ pop {r0-r3, pc}
+1:
+ mov r0, sp
+ ldm r0, {r0-r3}
+ bl __aeabi_fcmpeq
+ cmp r0, #1
+ bne 2f
+ // Z = 1, C = 1
+ movs r0, #2
+ lsls r0, r0, #31
+ pop {r0-r3, pc}
+2:
+ // Z = 0, C = 1
+ movs r0, #0xF
+ lsls r0, r0, #31
+ pop {r0-r3, pc}
+#else
+ ITT(eq)
+ moveq ip, #0
+ beq 1f
+
+ ldm sp, {r0-r3}
+ bl __aeabi_fcmpeq
+ cmp r0, #1
+ ITE(eq)
+ moveq ip, #(APSR_C | APSR_Z)
+ movne ip, #(APSR_C)
+
+1:
+ msr APSR_nzcvq, ip
+ pop {r0-r3}
+ POP_PC()
+#endif
+END_COMPILERRT_FUNCTION(__aeabi_cfcmple)
+
+// int __aeabi_cfrcmple(float a, float b) {
+// return __aeabi_cfcmple(b, a);
+// }
+
+ .syntax unified
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__aeabi_cfrcmple)
+ // Swap r0 and r1
+ mov ip, r0
+ mov r0, r1
+ mov r1, ip
+
+ b __aeabi_cfcmple
+END_COMPILERRT_FUNCTION(__aeabi_cfrcmple)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_cfcmp.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_cfcmpeq_check_nan.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_cfcmpeq_check_nan.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_cfcmpeq_check_nan.c (revision 351984)
@@ -0,0 +1,15 @@
+//===-- lib/arm/aeabi_cfcmpeq_helper.c - Helper for cfcmpeq ---------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../int_lib.h"
+#include <stdint.h>
+
+AEABI_RTABI __attribute__((visibility("hidden"))) int
+__aeabi_cfcmpeq_check_nan(float a, float b) {
+ return __builtin_isnan(a) || __builtin_isnan(b);
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_cfcmpeq_check_nan.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_dcmp.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_dcmp.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_dcmp.S (revision 351984)
@@ -0,0 +1,51 @@
+//===-- aeabi_dcmp.S - EABI dcmp* implementation ---------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+// int __aeabi_dcmp{eq,lt,le,ge,gt}(double a, double b) {
+// int result = __{eq,lt,le,ge,gt}df2(a, b);
+// if (result {==,<,<=,>=,>} 0) {
+// return 1;
+// } else {
+// return 0;
+// }
+// }
+
+#if defined(COMPILER_RT_ARMHF_TARGET)
+# define CONVERT_DCMP_ARGS_TO_DF2_ARGS \
+ vmov d0, r0, r1 SEPARATOR \
+ vmov d1, r2, r3
+#else
+# define CONVERT_DCMP_ARGS_TO_DF2_ARGS
+#endif
+
+#define DEFINE_AEABI_DCMP(cond) \
+ .syntax unified SEPARATOR \
+ .p2align 2 SEPARATOR \
+DEFINE_COMPILERRT_FUNCTION(__aeabi_dcmp ## cond) \
+ push { r4, lr } SEPARATOR \
+ CONVERT_DCMP_ARGS_TO_DF2_ARGS SEPARATOR \
+ bl SYMBOL_NAME(__ ## cond ## df2) SEPARATOR \
+ cmp r0, #0 SEPARATOR \
+ b ## cond 1f SEPARATOR \
+ movs r0, #0 SEPARATOR \
+ pop { r4, pc } SEPARATOR \
+1: SEPARATOR \
+ movs r0, #1 SEPARATOR \
+ pop { r4, pc } SEPARATOR \
+END_COMPILERRT_FUNCTION(__aeabi_dcmp ## cond)
+
+DEFINE_AEABI_DCMP(eq)
+DEFINE_AEABI_DCMP(lt)
+DEFINE_AEABI_DCMP(le)
+DEFINE_AEABI_DCMP(ge)
+DEFINE_AEABI_DCMP(gt)
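+
+// Editor's note: each DEFINE_AEABI_DCMP(cond) above stamps out one small
+// wrapper. DEFINE_AEABI_DCMP(lt), for example, defines __aeabi_dcmplt, which
+// calls __ltdf2, compares the tri-state result with zero, and uses "blt" to
+// map "negative" to the AEABI boolean 1 and everything else to 0.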
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_dcmp.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_div0.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_div0.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_div0.c (revision 351984)
@@ -0,0 +1,40 @@
+//===-- aeabi_div0.c - ARM Runtime ABI support routines for compiler-rt ---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the division by zero helper routines as specified by the
+// Run-time ABI for the ARM Architecture.
+//
+//===----------------------------------------------------------------------===//
+
+// RTABI 4.3.2 - Division by zero
+//
+// The *div0 functions:
+// - Return the value passed to them as a parameter
+// - Or, return a fixed value defined by the execution environment (such as 0)
+// - Or, raise a signal (often SIGFPE) or throw an exception, and do not return
+//
+// An application may provide its own implementations of the *div0 functions
+// to obtain a particular behaviour from the *div and *divmod functions
+// called out of line.
+
+#include "../int_lib.h"
+
+// provide an unused declaration to pacify pedantic compilation
+extern unsigned char declaration;
+
+#if defined(__ARM_EABI__)
+AEABI_RTABI int __attribute__((weak)) __attribute__((visibility("hidden")))
+__aeabi_idiv0(int return_value) {
+ return return_value;
+}
+
+AEABI_RTABI long long __attribute__((weak))
+__attribute__((visibility("hidden"))) __aeabi_ldiv0(long long return_value) {
+ return return_value;
+}
+#endif
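+
+// Editor's note (illustrative sketch, not vendor code): because the
+// definitions above are weak, a program can install its own policy, e.g.
+//
+//   #include <signal.h>
+//   int __aeabi_idiv0(int return_value) {
+//     raise(SIGFPE);              // turn division by zero into a signal
+//     return return_value;
+//   }
+//
+// and the out-of-line *div and *divmod helpers will call it whenever the
+// denominator is zero.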
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_div0.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_drsub.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_drsub.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_drsub.c (revision 351984)
@@ -0,0 +1,14 @@
+//===-- lib/arm/aeabi_drsub.c - Double-precision subtraction --------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#define DOUBLE_PRECISION
+#include "../fp_lib.h"
+
+AEABI_RTABI fp_t __aeabi_dsub(fp_t, fp_t);
+
+AEABI_RTABI fp_t __aeabi_drsub(fp_t a, fp_t b) { return __aeabi_dsub(b, a); }
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_drsub.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_fcmp.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_fcmp.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_fcmp.S (revision 351984)
@@ -0,0 +1,51 @@
+//===-- aeabi_fcmp.S - EABI fcmp* implementation ---------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+// int __aeabi_fcmp{eq,lt,le,ge,gt}(float a, float b) {
+// int result = __{eq,lt,le,ge,gt}sf2(a, b);
+// if (result {==,<,<=,>=,>} 0) {
+// return 1;
+// } else {
+// return 0;
+// }
+// }
+
+#if defined(COMPILER_RT_ARMHF_TARGET)
+# define CONVERT_FCMP_ARGS_TO_SF2_ARGS \
+ vmov s0, r0 SEPARATOR \
+ vmov s1, r1
+#else
+# define CONVERT_FCMP_ARGS_TO_SF2_ARGS
+#endif
+
+#define DEFINE_AEABI_FCMP(cond) \
+ .syntax unified SEPARATOR \
+ .p2align 2 SEPARATOR \
+DEFINE_COMPILERRT_FUNCTION(__aeabi_fcmp ## cond) \
+ push { r4, lr } SEPARATOR \
+ CONVERT_FCMP_ARGS_TO_SF2_ARGS SEPARATOR \
+ bl SYMBOL_NAME(__ ## cond ## sf2) SEPARATOR \
+ cmp r0, #0 SEPARATOR \
+ b ## cond 1f SEPARATOR \
+ movs r0, #0 SEPARATOR \
+ pop { r4, pc } SEPARATOR \
+1: SEPARATOR \
+ movs r0, #1 SEPARATOR \
+ pop { r4, pc } SEPARATOR \
+END_COMPILERRT_FUNCTION(__aeabi_fcmp ## cond)
+
+DEFINE_AEABI_FCMP(eq)
+DEFINE_AEABI_FCMP(lt)
+DEFINE_AEABI_FCMP(le)
+DEFINE_AEABI_FCMP(ge)
+DEFINE_AEABI_FCMP(gt)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_fcmp.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
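Note: the DEFINE_AEABI_FCMP expansion above is equivalent to the following C sketch. The three-way helper (__eqsf2 and friends) is the compiler-rt routine the stub calls; the wrapper name below is illustrative, not the exported symbol.

    extern int __eqsf2(float a, float b);   // three-way result: <0, 0, >0

    // Model of DEFINE_AEABI_FCMP(eq): branch on the three-way result and
    // collapse it to the AEABI boolean contract (1 if the condition holds).
    int aeabi_fcmpeq_model(float a, float b) {
      return __eqsf2(a, b) == 0 ? 1 : 0;
    }

The lt/le/ge/gt instances differ only in the helper called and the comparison against zero.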
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_frsub.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_frsub.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_frsub.c (revision 351984)
@@ -0,0 +1,14 @@
+//===-- lib/arm/aeabi_frsub.c - Single-precision subtraction --------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#define SINGLE_PRECISION
+#include "../fp_lib.h"
+
+AEABI_RTABI fp_t __aeabi_fsub(fp_t, fp_t);
+
+AEABI_RTABI fp_t __aeabi_frsub(fp_t a, fp_t b) { return __aeabi_fsub(b, a); }
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_frsub.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_idivmod.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_idivmod.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_idivmod.S (revision 351984)
@@ -0,0 +1,50 @@
+//===-- aeabi_idivmod.S - EABI idivmod implementation ---------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+// struct { int quot; int rem; } __aeabi_idivmod(int numerator, int denominator) {
+// int rem, quot;
+// quot = __divmodsi4(numerator, denominator, &rem);
+// return {quot, rem};
+// }
+
+#if defined(__MINGW32__)
+#define __aeabi_idivmod __rt_sdiv
+#endif
+
+ .syntax unified
+ .text
+ DEFINE_CODE_STATE
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__aeabi_idivmod)
+#if defined(USE_THUMB_1)
+ push {r0, r1, lr}
+ bl SYMBOL_NAME(__divsi3)
+ pop {r1, r2, r3} // now r0 = quot, r1 = num, r2 = denom
+ muls r2, r0, r2 // r2 = quot * denom
+ subs r1, r1, r2
+ JMP (r3)
+#else // defined(USE_THUMB_1)
+ push { lr }
+ sub sp, sp, #4
+ mov r2, sp
+#if defined(__MINGW32__)
+ mov r3, r0
+ mov r0, r1
+ mov r1, r3
+#endif
+ bl SYMBOL_NAME(__divmodsi4)
+ ldr r1, [sp]
+ add sp, sp, #4
+ pop { pc }
+#endif // defined(USE_THUMB_1)
+END_COMPILERRT_FUNCTION(__aeabi_idivmod)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_idivmod.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
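Note: AAPCS returns a two-word struct in r0/r1, which is why the routine can leave the quotient in r0 and load the remainder into r1 before returning. A minimal C model of the contract (the struct name is invented for illustration):

    typedef struct { int quot; int rem; } idivmod_ret;  // hypothetical name

    idivmod_ret aeabi_idivmod_model(int num, int denom) {
      idivmod_ret r;
      r.quot = num / denom;           // what __divsi3/__divmodsi4 compute
      r.rem  = num - r.quot * denom;  // the muls/subs pair in the Thumb-1 path
      return r;                       // r0 = quot, r1 = rem under AAPCS
    }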
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_ldivmod.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_ldivmod.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_ldivmod.S (revision 351984)
@@ -0,0 +1,45 @@
+//===-- aeabi_ldivmod.S - EABI ldivmod implementation ---------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+// struct { int64_t quot; int64_t rem; }
+// __aeabi_ldivmod(int64_t numerator, int64_t denominator) {
+// int64_t rem, quot;
+// quot = __divmoddi4(numerator, denominator, &rem);
+// return {quot, rem};
+// }
+
+#if defined(__MINGW32__)
+#define __aeabi_ldivmod __rt_sdiv64
+#endif
+
+ .syntax unified
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__aeabi_ldivmod)
+ push {r6, lr}
+ sub sp, sp, #16
+ add r6, sp, #8
+ str r6, [sp]
+#if defined(__MINGW32__)
+ movs r6, r0
+ movs r0, r2
+ movs r2, r6
+ movs r6, r1
+ movs r1, r3
+ movs r3, r6
+#endif
+ bl SYMBOL_NAME(__divmoddi4)
+ ldr r2, [sp, #8]
+ ldr r3, [sp, #12]
+ add sp, sp, #16
+ pop {r6, pc}
+END_COMPILERRT_FUNCTION(__aeabi_ldivmod)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_ldivmod.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_memcmp.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_memcmp.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_memcmp.S (revision 351984)
@@ -0,0 +1,29 @@
+//===-- aeabi_memcmp.S - EABI memcmp implementation -----------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+// int __aeabi_memcmp(const void *dest, const void *src, size_t n) { return memcmp(dest, src, n); }
+
+ .syntax unified
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__aeabi_memcmp)
+#ifdef USE_THUMB_1
+ push {r7, lr}
+ bl memcmp
+ pop {r7, pc}
+#else
+ b memcmp
+#endif
+END_COMPILERRT_FUNCTION(__aeabi_memcmp)
+
+DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_memcmp4, __aeabi_memcmp)
+DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_memcmp8, __aeabi_memcmp)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_memcmp.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_memcpy.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_memcpy.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_memcpy.S (revision 351984)
@@ -0,0 +1,29 @@
+//===-- aeabi_memcpy.S - EABI memcpy implementation -----------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+// void __aeabi_memcpy(void *dest, const void *src, size_t n) { memcpy(dest, src, n); }
+
+ .syntax unified
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__aeabi_memcpy)
+#ifdef USE_THUMB_1
+ push {r7, lr}
+ bl memcpy
+ pop {r7, pc}
+#else
+ b memcpy
+#endif
+END_COMPILERRT_FUNCTION(__aeabi_memcpy)
+
+DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_memcpy4, __aeabi_memcpy)
+DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_memcpy8, __aeabi_memcpy)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_memcpy.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_memmove.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_memmove.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_memmove.S (revision 351984)
@@ -0,0 +1,28 @@
+//===-- aeabi_memmove.S - EABI memmove implementation --------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===---------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+// void __aeabi_memmove(void *dest, const void *src, size_t n) { memmove(dest, src, n); }
+
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__aeabi_memmove)
+#ifdef USE_THUMB_1
+ push {r7, lr}
+ bl memmove
+ pop {r7, pc}
+#else
+ b memmove
+#endif
+END_COMPILERRT_FUNCTION(__aeabi_memmove)
+
+DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_memmove4, __aeabi_memmove)
+DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_memmove8, __aeabi_memmove)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_memmove.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_memset.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_memset.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_memset.S (revision 351984)
@@ -0,0 +1,49 @@
+//===-- aeabi_memset.S - EABI memset implementation -----------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+// void __aeabi_memset(void *dest, size_t n, int c) { memset(dest, c, n); }
+// void __aeabi_memclr(void *dest, size_t n) { __aeabi_memset(dest, n, 0); }
+
+ .syntax unified
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__aeabi_memset)
+ mov r3, r1
+ mov r1, r2
+ mov r2, r3
+#ifdef USE_THUMB_1
+ push {r7, lr}
+ bl memset
+ pop {r7, pc}
+#else
+ b memset
+#endif
+END_COMPILERRT_FUNCTION(__aeabi_memset)
+
+DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_memset4, __aeabi_memset)
+DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_memset8, __aeabi_memset)
+
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__aeabi_memclr)
+ mov r2, r1
+ movs r1, #0
+#ifdef USE_THUMB_1
+ push {r7, lr}
+ bl memset
+ pop {r7, pc}
+#else
+ b memset
+#endif
+END_COMPILERRT_FUNCTION(__aeabi_memclr)
+
+DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_memclr4, __aeabi_memclr)
+DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_memclr8, __aeabi_memclr)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_memset.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
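Note: AEABI passes the byte count second and the fill value third, the reverse of ISO C memset; the three mov instructions at the top of __aeabi_memset exist only to swap those operands. A minimal C model (illustrative only):

    #include <string.h>

    void aeabi_memset_model(void *dest, size_t n, int c) {
      memset(dest, c, n);   // swap n and c, as the register shuffle does
    }

    void aeabi_memclr_model(void *dest, size_t n) {
      memset(dest, 0, n);   // __aeabi_memclr is memset with a zero fill byte
    }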
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_uidivmod.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_uidivmod.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_uidivmod.S (revision 351984)
@@ -0,0 +1,57 @@
+//===-- aeabi_uidivmod.S - EABI uidivmod implementation -------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+// struct { unsigned quot; unsigned rem; }
+// __aeabi_uidivmod(unsigned numerator, unsigned denominator) {
+// unsigned rem, quot;
+// quot = __udivmodsi4(numerator, denominator, &rem);
+// return {quot, rem};
+// }
+
+#if defined(__MINGW32__)
+#define __aeabi_uidivmod __rt_udiv
+#endif
+
+ .syntax unified
+ .text
+ DEFINE_CODE_STATE
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__aeabi_uidivmod)
+#if defined(USE_THUMB_1)
+ cmp r0, r1
+ bcc LOCAL_LABEL(case_denom_larger)
+ push {r0, r1, lr}
+ bl SYMBOL_NAME(__aeabi_uidiv)
+ pop {r1, r2, r3}
+ muls r2, r0, r2 // r2 = quot * denom
+ subs r1, r1, r2
+ JMP (r3)
+LOCAL_LABEL(case_denom_larger):
+ movs r1, r0
+ movs r0, #0
+ JMP (lr)
+#else // defined(USE_THUMB_1)
+ push { lr }
+ sub sp, sp, #4
+ mov r2, sp
+#if defined(__MINGW32__)
+ mov r3, r0
+ mov r0, r1
+ mov r1, r3
+#endif
+ bl SYMBOL_NAME(__udivmodsi4)
+ ldr r1, [sp]
+ add sp, sp, #4
+ pop { pc }
+#endif
+END_COMPILERRT_FUNCTION(__aeabi_uidivmod)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_uidivmod.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_uldivmod.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_uldivmod.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_uldivmod.S (revision 351984)
@@ -0,0 +1,45 @@
+//===-- aeabi_uldivmod.S - EABI uldivmod implementation -------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+// struct { uint64_t quot; uint64_t rem; }
+// __aeabi_uldivmod(uint64_t numerator, uint64_t denominator) {
+// uint64_t rem, quot;
+// quot = __udivmoddi4(numerator, denominator, &rem);
+// return {quot, rem};
+// }
+
+#if defined(__MINGW32__)
+#define __aeabi_uldivmod __rt_udiv64
+#endif
+
+ .syntax unified
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__aeabi_uldivmod)
+ push {r6, lr}
+ sub sp, sp, #16
+ add r6, sp, #8
+ str r6, [sp]
+#if defined(__MINGW32__)
+ movs r6, r0
+ movs r0, r2
+ movs r2, r6
+ movs r6, r1
+ movs r1, r3
+ movs r3, r6
+#endif
+ bl SYMBOL_NAME(__udivmoddi4)
+ ldr r2, [sp, #8]
+ ldr r3, [sp, #12]
+ add sp, sp, #16
+ pop {r6, pc}
+END_COMPILERRT_FUNCTION(__aeabi_uldivmod)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/aeabi_uldivmod.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/bswapdi2.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/bswapdi2.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/bswapdi2.S (revision 351984)
@@ -0,0 +1,43 @@
+//===------- bswapdi2 - Implement bswapdi2 --------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+ .syntax unified
+ .text
+ DEFINE_CODE_STATE
+
+//
+// extern uint64_t __bswapdi2(uint64_t);
+//
+// Reverse all the bytes in a 64-bit integer.
+//
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__bswapdi2)
+#if __ARM_ARCH < 6
+ // Cores before ARMv6 do not have the "rev" instruction.
+ // r2 = rev(r0)
+ eor r2, r0, r0, ror #16
+ bic r2, r2, #0xff0000
+ mov r2, r2, lsr #8
+ eor r2, r2, r0, ror #8
+ // r0 = rev(r1)
+ eor r0, r1, r1, ror #16
+ bic r0, r0, #0xff0000
+ mov r0, r0, lsr #8
+ eor r0, r0, r1, ror #8
+#else
+ rev r2, r0 // r2 = rev(r0)
+ rev r0, r1 // r0 = rev(r1)
+#endif
+ mov r1, r2 // r1 = r2 = rev(r0)
+ JMP(lr)
+END_COMPILERRT_FUNCTION(__bswapdi2)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/bswapdi2.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/bswapsi2.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/bswapsi2.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/bswapsi2.S (revision 351984)
@@ -0,0 +1,35 @@
+//===------- bswapsi2 - Implement bswapsi2 --------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+ .syntax unified
+ .text
+ DEFINE_CODE_STATE
+
+//
+// extern uint32_t __bswapsi2(uint32_t);
+//
+// Reverse all the bytes in a 32-bit integer.
+//
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__bswapsi2)
+#if __ARM_ARCH < 6
+ // Cores before ARMv6 do not have the "rev" instruction.
+ eor r1, r0, r0, ror #16
+ bic r1, r1, #0xff0000
+ mov r1, r1, lsr #8
+ eor r0, r1, r0, ror #8
+#else
+ rev r0, r0
+#endif
+ JMP(lr)
+END_COMPILERRT_FUNCTION(__bswapsi2)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/bswapsi2.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
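Note: the pre-ARMv6 path is the classic four-instruction byte reverse. The same steps in C, with a rotate helper added for illustration (spot check: 0x12345678 maps to 0x78563412):

    #include <stdint.h>

    static uint32_t ror32(uint32_t x, unsigned n) {  // helper, not in the source
      return (x >> n) | (x << (32 - n));             // valid for 0 < n < 32
    }

    uint32_t bswapsi2_model(uint32_t x) {
      uint32_t t = x ^ ror32(x, 16);   // eor r1, r0, r0, ror #16
      t &= ~UINT32_C(0x00ff0000);      // bic r1, r1, #0xff0000
      t >>= 8;                         // mov r1, r1, lsr #8
      return t ^ ror32(x, 8);          // eor r0, r1, r0, ror #8
    }

__bswapdi2 above simply applies this to each 32-bit half and swaps the two words.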
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/chkstk.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/chkstk.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/chkstk.S (revision 351984)
@@ -0,0 +1,35 @@
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+#include "../assembly.h"
+
+// __chkstk routine
+// This routine is Windows-specific.
+// http://msdn.microsoft.com/en-us/library/ms648426.aspx
+
+// This clobbers r12 and the condition codes, and uses r5 and r6 as
+// temporaries by backing them up and restoring them afterwards.
+// It does not modify any memory or the stack pointer.
+
+// movw r4, #256 // Number of bytes of stack, in units of 4 bytes
+// bl __chkstk
+// sub.w sp, sp, r4
+
+#define PAGE_SIZE 4096
+
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__chkstk)
+ lsl r4, r4, #2
+ mov r12, sp
+ push {r5, r6}
+ mov r5, r4
+1:
+ sub r12, r12, #PAGE_SIZE
+ subs r5, r5, #PAGE_SIZE
+ ldr r6, [r12]
+ bgt 1b
+
+ pop {r5, r6}
+ bx lr
+END_COMPILERRT_FUNCTION(__chkstk)
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/chkstk.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
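Note: the loop probes one word per page while walking down from the stack pointer, so the OS guard-page mechanism can commit stack as needed. A C rendering of the control flow under the convention documented above (r4 = size in 4-byte units; illustrative only, since the real routine works purely in registers apart from the probe loads):

    #define PAGE_SIZE 4096

    void chkstk_model(volatile char *sp, unsigned r4_words) {
      long remaining = (long)r4_words * 4;   // lsl r4, r4, #2
      volatile char *p = sp;                 // mov r12, sp
      do {
        p -= PAGE_SIZE;                      // sub r12, r12, #PAGE_SIZE
        remaining -= PAGE_SIZE;              // subs r5, r5, #PAGE_SIZE
        (void)*p;                            // ldr r6, [r12] -- the probe
      } while (remaining > 0);               // bgt 1b
    }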
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/clzdi2.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/clzdi2.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/clzdi2.S (revision 351984)
@@ -0,0 +1,86 @@
+//===-- clzdi2.S - Implement __clzdi2 -------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements count leading zeros for 64-bit arguments.
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+ .syntax unified
+ .text
+ DEFINE_CODE_STATE
+
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__clzdi2)
+#ifdef __ARM_FEATURE_CLZ
+#ifdef __ARMEB__
+ cmp r0, 0
+ itee ne
+ clzne r0, r0
+ clzeq r0, r1
+ addeq r0, r0, 32
+#else
+ cmp r1, 0
+ itee ne
+ clzne r0, r1
+ clzeq r0, r0
+ addeq r0, r0, 32
+#endif
+ JMP(lr)
+#else
+ // Assumption: n != 0
+
+ // r0: n
+ // r1: upper half of n, overwritten after the check;
+ //     thereafter, (count of leading zeros in n) + 1
+ // r2: scratch register for shifted r0
+#ifdef __ARMEB__
+ cmp r0, 0
+ moveq r0, r1
+#else
+ cmp r1, 0
+ movne r0, r1
+#endif
+ movne r1, 1
+ moveq r1, 33
+
+ // Basic block:
+ // if ((r0 >> SHIFT) == 0)
+ // r1 += SHIFT;
+ // else
+ // r0 >>= SHIFT;
+ // for descending powers of two as SHIFT.
+#define BLOCK(shift) \
+ lsrs r2, r0, shift; \
+ movne r0, r2; \
+ addeq r1, shift \
+
+ BLOCK(16)
+ BLOCK(8)
+ BLOCK(4)
+ BLOCK(2)
+
+ // The basic block invariants at this point are (r0 >> 2) == 0 and
+ // r0 != 0. This means 1 <= r0 <= 3 and 0 <= (r0 >> 1) <= 1.
+ //
+ // r0 | (r0 >> 1) == 0 | (r0 >> 1) == 1 | -(r0 >> 1) | 1 - (r0 >> 1)
+ // ---+----------------+----------------+------------+--------------
+ // 1 | 1 | 0 | 0 | 1
+ // 2 | 0 | 1 | -1 | 0
+ // 3 | 0 | 1 | -1 | 0
+ //
+ // r1's initial value of 1 compensates for the 1 in the last column.
+ sub r0, r1, r0, lsr #1
+
+ JMP(lr)
+#endif // __ARM_FEATURE_CLZ
+END_COMPILERRT_FUNCTION(__clzdi2)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/clzdi2.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/clzsi2.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/clzsi2.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/clzsi2.S (revision 351984)
@@ -0,0 +1,66 @@
+//===-- clzsi2.S - Implement __clzsi2 -------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements count leading zeros for 32-bit arguments.
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+ .syntax unified
+ .text
+ DEFINE_CODE_STATE
+
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__clzsi2)
+#ifdef __ARM_FEATURE_CLZ
+ clz r0, r0
+ JMP(lr)
+#else
+ // Assumption: n != 0
+
+ // r0: n
+ // r1: count of leading zeros in n + 1
+ // r2: scratch register for shifted r0
+ mov r1, 1
+
+ // Basic block:
+ // if ((r0 >> SHIFT) == 0)
+ // r1 += SHIFT;
+ // else
+ // r0 >>= SHIFT;
+ // for descending powers of two as SHIFT.
+
+#define BLOCK(shift) \
+ lsrs r2, r0, shift; \
+ movne r0, r2; \
+ addeq r1, shift \
+
+ BLOCK(16)
+ BLOCK(8)
+ BLOCK(4)
+ BLOCK(2)
+
+ // The basic block invariants at this point are (r0 >> 2) == 0 and
+ // r0 != 0. This means 1 <= r0 <= 3 and 0 <= (r0 >> 1) <= 1.
+ //
+ // r0 | (r0 >> 1) == 0 | (r0 >> 1) == 1 | -(r0 >> 1) | 1 - (r0 >> 1)
+ // ---+----------------+----------------+------------+--------------
+ // 1 | 1 | 0 | 0 | 1
+ // 2 | 0 | 1 | -1 | 0
+ // 3 | 0 | 1 | -1 | 0
+ //
+ // r1's initial value of 1 compensates for the 1 in the last column.
+ sub r0, r1, r0, lsr #1
+
+ JMP(lr)
+#endif // __ARM_FEATURE_CLZ
+END_COMPILERRT_FUNCTION(__clzsi2)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/clzsi2.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
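Note: the BLOCK macro chain is a binary search for the leading one bit. Expanded into C (assuming a nonzero argument, as the code itself does):

    unsigned clzsi2_model(unsigned x) {  // precondition: x != 0
      unsigned n = 1;                    // r1's initial value
      if ((x >> 16) == 0) n += 16; else x >>= 16;  // BLOCK(16)
      if ((x >> 8)  == 0) n += 8;  else x >>= 8;   // BLOCK(8)
      if ((x >> 4)  == 0) n += 4;  else x >>= 4;   // BLOCK(4)
      if ((x >> 2)  == 0) n += 2;  else x >>= 2;   // BLOCK(2)
      // Now 1 <= x <= 3, so (x >> 1) is 0 or 1, matching the table above.
      return n - (x >> 1);               // sub r0, r1, r0, lsr #1
    }

Spot checks: x = 1 gives 31, x = 0x80000000 gives 0.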
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/comparesf2.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/comparesf2.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/comparesf2.S (revision 351984)
@@ -0,0 +1,261 @@
+//===-- comparesf2.S - Implement single-precision soft-float comparisons --===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the following soft-float comparison routines:
+//
+// __eqsf2 __gesf2 __unordsf2
+// __lesf2 __gtsf2
+// __ltsf2
+// __nesf2
+//
+// The semantics of the routines grouped in each column are identical, so there
+// is a single implementation for each, with multiple names.
+//
+// The routines behave as follows:
+//
+// __lesf2(a,b) returns -1 if a < b
+// 0 if a == b
+// 1 if a > b
+// 1 if either a or b is NaN
+//
+// __gesf2(a,b) returns -1 if a < b
+// 0 if a == b
+// 1 if a > b
+// -1 if either a or b is NaN
+//
+// __unordsf2(a,b) returns 0 if both a and b are numbers
+// 1 if either a or b is NaN
+//
+// Note that __lesf2() and __gesf2() are identical except in their handling of
+// NaN values.
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+ .syntax unified
+ .text
+ DEFINE_CODE_STATE
+
+ .macro COMPARESF2_FUNCTION_BODY handle_nan:req
+#if defined(COMPILER_RT_ARMHF_TARGET)
+ vmov r0, s0
+ vmov r1, s1
+#endif
+ // Make copies of a and b with the sign bit shifted off the top. These will
+ // be used to detect zeros and NaNs.
+#if defined(USE_THUMB_1)
+ push {r6, lr}
+ lsls r2, r0, #1
+ lsls r3, r1, #1
+#else
+ mov r2, r0, lsl #1
+ mov r3, r1, lsl #1
+#endif
+
+ // We do the comparison in three stages (ignoring NaN values for the time
+ // being). First, we orr the absolute values of a and b; this sets the Z
+ // flag if both a and b are zero (of either sign). The shift of r3 doesn't
+ // affect this at all, but it *does* make sure that the C flag is clear for
+ // the subsequent operations.
+#if defined(USE_THUMB_1)
+ lsrs r6, r3, #1
+ orrs r6, r2
+#else
+ orrs r12, r2, r3, lsr #1
+#endif
+ // Next, we check if a and b have the same or different signs. If they have
+ // opposite signs, this eor will set the N flag.
+#if defined(USE_THUMB_1)
+ beq 1f
+ movs r6, r0
+ eors r6, r1
+1:
+#else
+ it ne
+ eorsne r12, r0, r1
+#endif
+
+ // If a and b are equal (either both zeros or bit identical; again, we're
+ // ignoring NaNs for now), this subtract will zero out r0. If they have the
+ // same sign, the flags are updated as they would be for a comparison of the
+ // absolute values of a and b.
+#if defined(USE_THUMB_1)
+ bmi 1f
+ subs r0, r2, r3
+1:
+#else
+ it pl
+ subspl r0, r2, r3
+#endif
+
+ // If a is smaller in magnitude than b and both have the same sign, place
+ // the negation of the sign of b in r0. Thus, if both are negative and
+ // a > b, this sets r0 to 0; if both are positive and a < b, this sets
+ // r0 to -1.
+ //
+ // This is also done if a and b have opposite signs and are not both zero,
+ // because in that case the subtract was not performed and the C flag is
+ // still clear from the shift argument in orrs; if a is positive and b
+ // negative, this places 0 in r0; if a is negative and b positive, -1 is
+ // placed in r0.
+#if defined(USE_THUMB_1)
+ bhs 1f
+ // Here if a and b have the same sign and absA < absB, the result is thus
+ // b < 0 ? 1 : -1. The same holds if a and b have opposite signs (ignoring NaN).
+ movs r0, #1
+ lsrs r1, #31
+ bne LOCAL_LABEL(CHECK_NAN\@)
+ negs r0, r0
+ b LOCAL_LABEL(CHECK_NAN\@)
+1:
+#else
+ it lo
+ mvnlo r0, r1, asr #31
+#endif
+
+ // If a is greater in magnitude than b and both have the same sign, place
+ // the sign of b in r0. Thus, if both are negative and a < b, -1 is placed
+ // in r0, which is the desired result. Conversely, if both are positive
+ // and a > b, zero is placed in r0.
+#if defined(USE_THUMB_1)
+ bls 1f
+ // Here both have the same sign and absA > absB.
+ movs r0, #1
+ lsrs r1, #31
+ beq LOCAL_LABEL(CHECK_NAN\@)
+ negs r0, r0
+1:
+#else
+ it hi
+ movhi r0, r1, asr #31
+#endif
+
+ // If you've been keeping track, at this point r0 contains -1 if a < b and
+ // 0 if a >= b. All that remains to be done is to set it to 1 if a > b.
+ // If a == b, then the Z flag is set, so we can get the correct final value
+ // into r0 by simply or'ing with 1 if Z is clear.
+ // For Thumb-1, r0 contains -1 if a < b, 0 if a > b and 0 if a == b.
+#if !defined(USE_THUMB_1)
+ it ne
+ orrne r0, r0, #1
+#endif
+
+ // Finally, we need to deal with NaNs. If either argument is NaN, replace
+ // the value in r0 with 1.
+#if defined(USE_THUMB_1)
+LOCAL_LABEL(CHECK_NAN\@):
+ movs r6, #0xff
+ lsls r6, #24
+ cmp r2, r6
+ bhi 1f
+ cmp r3, r6
+1:
+ bls 2f
+ \handle_nan
+2:
+ pop {r6, pc}
+#else
+ cmp r2, #0xff000000
+ ite ls
+ cmpls r3, #0xff000000
+ \handle_nan
+ JMP(lr)
+#endif
+ .endm
+
+@ int __eqsf2(float a, float b)
+
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__eqsf2)
+
+ .macro __eqsf2_handle_nan
+#if defined(USE_THUMB_1)
+ movs r0, #1
+#else
+ movhi r0, #1
+#endif
+ .endm
+
+COMPARESF2_FUNCTION_BODY __eqsf2_handle_nan
+
+END_COMPILERRT_FUNCTION(__eqsf2)
+
+DEFINE_COMPILERRT_FUNCTION_ALIAS(__lesf2, __eqsf2)
+DEFINE_COMPILERRT_FUNCTION_ALIAS(__ltsf2, __eqsf2)
+DEFINE_COMPILERRT_FUNCTION_ALIAS(__nesf2, __eqsf2)
+
+#if defined(__ELF__)
+// Alias for libgcc compatibility
+DEFINE_COMPILERRT_FUNCTION_ALIAS(__cmpsf2, __lesf2)
+#endif
+
+@ int __gtsf2(float a, float b)
+
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__gtsf2)
+
+ .macro __gtsf2_handle_nan
+#if defined(USE_THUMB_1)
+ movs r0, #1
+ negs r0, r0
+#else
+ movhi r0, #-1
+#endif
+ .endm
+
+COMPARESF2_FUNCTION_BODY __gtsf2_handle_nan
+
+END_COMPILERRT_FUNCTION(__gtsf2)
+
+DEFINE_COMPILERRT_FUNCTION_ALIAS(__gesf2, __gtsf2)
+
+@ int __unordsf2(float a, float b)
+
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__unordsf2)
+
+#if defined(COMPILER_RT_ARMHF_TARGET)
+ vmov r0, s0
+ vmov r1, s1
+#endif
+ // Return 1 for NaN values, 0 otherwise.
+ lsls r2, r0, #1
+ lsls r3, r1, #1
+ movs r0, #0
+#if defined(USE_THUMB_1)
+ movs r1, #0xff
+ lsls r1, #24
+ cmp r2, r1
+ bhi 1f
+ cmp r3, r1
+1:
+ bls 2f
+ movs r0, #1
+2:
+#else
+ cmp r2, #0xff000000
+ ite ls
+ cmpls r3, #0xff000000
+ movhi r0, #1
+#endif
+ JMP(lr)
+END_COMPILERRT_FUNCTION(__unordsf2)
+
+#if defined(COMPILER_RT_ARMHF_TARGET)
+DEFINE_COMPILERRT_FUNCTION(__aeabi_fcmpun)
+ vmov s0, r0
+ vmov s1, r1
+ b SYMBOL_NAME(__unordsf2)
+END_COMPILERRT_FUNCTION(__aeabi_fcmpun)
+#else
+DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_fcmpun, __unordsf2)
+#endif
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/comparesf2.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
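Note: a usage sketch of the three-way results, showing why the NaN conventions documented in the header give IEEE-correct (false-on-NaN) ordered comparisons; the flt_* names are illustrative, not part of the library:

    extern int __lesf2(float a, float b);    // -1/0/1; returns 1 on NaN
    extern int __gesf2(float a, float b);    // -1/0/1; returns -1 on NaN
    extern int __unordsf2(float a, float b); // nonzero iff either is NaN

    int flt_lt(float a, float b) { return __lesf2(a, b) < 0; }   // NaN -> 0
    int flt_le(float a, float b) { return __lesf2(a, b) <= 0; }  // NaN -> 0
    int flt_gt(float a, float b) { return __gesf2(a, b) > 0; }   // NaN -> 0
    int flt_ge(float a, float b) { return __gesf2(a, b) >= 0; }  // NaN -> 0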
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/divdf3vfp.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/divdf3vfp.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/divdf3vfp.S (revision 351984)
@@ -0,0 +1,32 @@
+//===-- divdf3vfp.S - Implement divdf3vfp ---------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+//
+// extern double __divdf3vfp(double a, double b);
+//
+// Divides two double precision floating point numbers using the Darwin
+// calling convention where double arguments are passed in GPR pairs.
+//
+ .syntax unified
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__divdf3vfp)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+ vdiv.f64 d0, d0, d1
+#else
+ vmov d6, r0, r1 // move first param from r0/r1 pair into d6
+ vmov d7, r2, r3 // move second param from r2/r3 pair into d7
+ vdiv.f64 d5, d6, d7
+ vmov r0, r1, d5 // move result back to r0/r1 pair
+#endif
+ bx lr
+END_COMPILERRT_FUNCTION(__divdf3vfp)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/divdf3vfp.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/divmodsi4.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/divmodsi4.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/divmodsi4.S (revision 351984)
@@ -0,0 +1,70 @@
+//===-- divmodsi4.S - 32-bit signed integer divide and modulus ------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the __divmodsi4 (32-bit signed integer divide and
+// modulus) function for the ARM architecture. A naive digit-by-digit
+// computation is employed for simplicity.
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+#define ESTABLISH_FRAME \
+ push {r4-r7, lr} ;\
+ add r7, sp, #12
+#define CLEAR_FRAME_AND_RETURN \
+ pop {r4-r7, pc}
+
+ .syntax unified
+ .text
+ DEFINE_CODE_STATE
+
+@ int __divmodsi4(int dividend, int divisor, int *remainder)
+@ Calculate the quotient and remainder of the (signed) division. The return
+@ value is the quotient; the remainder is stored through the remainder pointer.
+
+ .p2align 3
+DEFINE_COMPILERRT_FUNCTION(__divmodsi4)
+#if __ARM_ARCH_EXT_IDIV__
+ tst r1, r1
+ beq LOCAL_LABEL(divzero)
+ mov r3, r0
+ sdiv r0, r3, r1
+ mls r1, r0, r1, r3
+ str r1, [r2]
+ bx lr
+LOCAL_LABEL(divzero):
+ mov r0, #0
+ bx lr
+#else
+ ESTABLISH_FRAME
+// Set aside the sign of the quotient and modulus, and the address for the
+// modulus.
+ eor r4, r0, r1
+ mov r5, r0
+ mov r6, r2
+// Take the absolute value of a and b via abs(x) = (x^(x >> 31)) - (x >> 31).
+ eor ip, r0, r0, asr #31
+ eor lr, r1, r1, asr #31
+ sub r0, ip, r0, asr #31
+ sub r1, lr, r1, asr #31
+// Unsigned divmod:
+ bl SYMBOL_NAME(__udivmodsi4)
+// Apply the sign of quotient and modulus
+ ldr r1, [r6]
+ eor r0, r0, r4, asr #31
+ eor r1, r1, r5, asr #31
+ sub r0, r0, r4, asr #31
+ sub r1, r1, r5, asr #31
+ str r1, [r6]
+ CLEAR_FRAME_AND_RETURN
+#endif
+END_COMPILERRT_FUNCTION(__divmodsi4)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/divmodsi4.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
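Note: the two bit tricks here, the branch-free absolute value and the conditional sign flip, expand to the following C model of the non-IDIV path (assumes arithmetic right shift of negative ints, as on ARM; names are illustrative):

    static unsigned abs_model(int x) {       // (x ^ (x >> 31)) - (x >> 31)
      unsigned m = (unsigned)(x >> 31);      // asr #31: 0 or all-ones
      return ((unsigned)x ^ m) - m;          // wrap-safe, INT_MIN included
    }

    static int apply_sign(unsigned v, int s) {  // same trick, flips sign back
      unsigned m = (unsigned)(s >> 31);
      return (int)((v ^ m) - m);
    }

    int divmodsi4_model(int a, int b, int *rem) {
      unsigned q = abs_model(a) / abs_model(b);  // what __udivmodsi4 computes
      unsigned r = abs_model(a) % abs_model(b);
      *rem = apply_sign(r, a);      // remainder takes the dividend's sign
      return apply_sign(q, a ^ b);  // quotient sign is the eor of the signs
    }

__divsi3 (later in this diff) wraps the same absolute-value and sign-fix sequences around __udivsi3.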
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/divsf3vfp.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/divsf3vfp.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/divsf3vfp.S (revision 351984)
@@ -0,0 +1,32 @@
+//===-- divsf3vfp.S - Implement divsf3vfp ---------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+//
+// extern float __divsf3vfp(float a, float b);
+//
+// Divides two single precision floating point numbers using the Darwin
+// calling convention where single arguments are passed like 32-bit ints.
+//
+ .syntax unified
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__divsf3vfp)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+ vdiv.f32 s0, s0, s1
+#else
+ vmov s14, r0 // move first param from r0 into float register
+ vmov s15, r1 // move second param from r1 into float register
+ vdiv.f32 s13, s14, s15
+ vmov r0, s13 // move result back to r0
+#endif
+ bx lr
+END_COMPILERRT_FUNCTION(__divsf3vfp)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/divsf3vfp.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/divsi3.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/divsi3.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/divsi3.S (revision 351984)
@@ -0,0 +1,81 @@
+//===-- divsi3.S - 32-bit signed integer divide ---------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the __divsi3 (32-bit signed integer divide) function
+// for the ARM architecture as a wrapper around the unsigned routine.
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+#define ESTABLISH_FRAME \
+ push {r4, r7, lr} ;\
+ add r7, sp, #4
+#define CLEAR_FRAME_AND_RETURN \
+ pop {r4, r7, pc}
+
+ .syntax unified
+ .text
+ DEFINE_CODE_STATE
+
+ .p2align 3
+// OK, APCS and AAPCS agree on 32-bit args, so it's safe to use the same routine.
+DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_idiv, __divsi3)
+
+@ int __divsi3(int dividend, int divisor)
+@ Calculate and return the quotient of the (signed) division.
+
+DEFINE_COMPILERRT_FUNCTION(__divsi3)
+#if __ARM_ARCH_EXT_IDIV__
+ tst r1,r1
+ beq LOCAL_LABEL(divzero)
+ sdiv r0, r0, r1
+ bx lr
+LOCAL_LABEL(divzero):
+ mov r0,#0
+ bx lr
+#else
+ESTABLISH_FRAME
+// Set aside the sign of the quotient.
+# if defined(USE_THUMB_1)
+ movs r4, r0
+ eors r4, r1
+# else
+ eor r4, r0, r1
+# endif
+// Take absolute value of a and b via abs(x) = (x^(x >> 31)) - (x >> 31).
+# if defined(USE_THUMB_1)
+ asrs r2, r0, #31
+ asrs r3, r1, #31
+ eors r0, r2
+ eors r1, r3
+ subs r0, r0, r2
+ subs r1, r1, r3
+# else
+ eor r2, r0, r0, asr #31
+ eor r3, r1, r1, asr #31
+ sub r0, r2, r0, asr #31
+ sub r1, r3, r1, asr #31
+# endif
+// abs(a) / abs(b)
+ bl SYMBOL_NAME(__udivsi3)
+// Apply sign of quotient to result and return.
+# if defined(USE_THUMB_1)
+ asrs r4, #31
+ eors r0, r4
+ subs r0, r0, r4
+# else
+ eor r0, r0, r4, asr #31
+ sub r0, r0, r4, asr #31
+# endif
+ CLEAR_FRAME_AND_RETURN
+#endif
+END_COMPILERRT_FUNCTION(__divsi3)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/divsi3.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/eqdf2vfp.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/eqdf2vfp.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/eqdf2vfp.S (revision 351984)
@@ -0,0 +1,35 @@
+//===-- eqdf2vfp.S - Implement eqdf2vfp -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+// extern int __eqdf2vfp(double a, double b);
+//
+// Returns one iff a == b and neither is NaN.
+// Uses Darwin calling convention where double precision arguments are passed
+// in GPR pairs.
+
+ .syntax unified
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__eqdf2vfp)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+ vcmp.f64 d0, d1
+#else
+ vmov d6, r0, r1 // load r0/r1 pair in double register
+ vmov d7, r2, r3 // load r2/r3 pair in double register
+ vcmp.f64 d6, d7
+#endif
+ vmrs apsr_nzcv, fpscr
+ ITE(eq)
+ moveq r0, #1 // set result register to 1 if equal
+ movne r0, #0
+ bx lr
+END_COMPILERRT_FUNCTION(__eqdf2vfp)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/eqdf2vfp.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/eqsf2vfp.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/eqsf2vfp.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/eqsf2vfp.S (revision 351984)
@@ -0,0 +1,36 @@
+//===-- eqsf2vfp.S - Implement eqsf2vfp -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+//
+// extern int __eqsf2vfp(float a, float b);
+//
+// Returns one iff a == b and neither is NaN.
+// Uses Darwin calling convention where single precision arguments are passed
+// like 32-bit ints
+//
+ .syntax unified
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__eqsf2vfp)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+ vcmp.f32 s0, s1
+#else
+ vmov s14, r0 // move from GPR 0 to float register
+ vmov s15, r1 // move from GPR 1 to float register
+ vcmp.f32 s14, s15
+#endif
+ vmrs apsr_nzcv, fpscr
+ ITE(eq)
+ moveq r0, #1 // set result register to 1 if equal
+ movne r0, #0
+ bx lr
+END_COMPILERRT_FUNCTION(__eqsf2vfp)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/eqsf2vfp.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/extendsfdf2vfp.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/extendsfdf2vfp.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/extendsfdf2vfp.S (revision 351984)
@@ -0,0 +1,32 @@
+//===-- extendsfdf2vfp.S - Implement extendsfdf2vfp -----------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+//
+// extern double __extendsfdf2vfp(float a);
+//
+// Converts single precision float to double precision result.
+// Uses Darwin calling convention where a single precision parameter is
+// passed in a GPR and a double precision result is returned in the R0/R1 pair.
+//
+ .syntax unified
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__extendsfdf2vfp)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+ vcvt.f64.f32 d0, s0
+#else
+ vmov s15, r0 // load float register from R0
+ vcvt.f64.f32 d7, s15 // convert single to double
+ vmov r0, r1, d7 // return result in r0/r1 pair
+#endif
+ bx lr
+END_COMPILERRT_FUNCTION(__extendsfdf2vfp)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/extendsfdf2vfp.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/fixdfsivfp.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/fixdfsivfp.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/fixdfsivfp.S (revision 351984)
@@ -0,0 +1,33 @@
+//===-- fixdfsivfp.S - Implement fixdfsivfp -------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+//
+// extern int __fixdfsivfp(double a);
+//
+// Converts double precision float to a 32-bit int rounding towards zero.
+// Uses Darwin calling convention where a double precision parameter is
+// passed in GPR register pair.
+//
+ .syntax unified
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__fixdfsivfp)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+ vcvt.s32.f64 s0, d0
+ vmov r0, s0
+#else
+ vmov d7, r0, r1 // load double register from R0/R1
+ vcvt.s32.f64 s15, d7 // convert double to 32-bit int into s15
+ vmov r0, s15 // move s15 to result register
+#endif
+ bx lr
+END_COMPILERRT_FUNCTION(__fixdfsivfp)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/fixdfsivfp.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/fixsfsivfp.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/fixsfsivfp.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/fixsfsivfp.S (revision 351984)
@@ -0,0 +1,33 @@
+//===-- fixsfsivfp.S - Implement fixsfsivfp -------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+//
+// extern int __fixsfsivfp(float a);
+//
+// Converts single precision float to a 32-bit int rounding towards zero.
+// Uses Darwin calling convention where a single precision parameter is
+// passed in a GPR.
+//
+ .syntax unified
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__fixsfsivfp)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+ vcvt.s32.f32 s0, s0
+ vmov r0, s0
+#else
+ vmov s15, r0 // load float register from R0
+ vcvt.s32.f32 s15, s15 // convert single to 32-bit int into s15
+ vmov r0, s15 // move s15 to result register
+#endif
+ bx lr
+END_COMPILERRT_FUNCTION(__fixsfsivfp)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/fixsfsivfp.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/fixunsdfsivfp.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/fixunsdfsivfp.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/fixunsdfsivfp.S (revision 351984)
@@ -0,0 +1,34 @@
+//===-- fixunsdfsivfp.S - Implement fixunsdfsivfp -------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+//
+// extern unsigned int __fixunsdfsivfp(double a);
+//
+// Converts double precision float to a 32-bit unsigned int rounding towards
+// zero. All negative values become zero.
+// Uses Darwin calling convention where a double precision parameter is
+// passed in GPR register pair.
+//
+ .syntax unified
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__fixunsdfsivfp)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+ vcvt.u32.f64 s0, d0
+ vmov r0, s0
+#else
+ vmov d7, r0, r1 // load double register from R0/R1
+ vcvt.u32.f64 s15, d7 // convert double to 32-bit int into s15
+ vmov r0, s15 // move s15 to result register
+#endif
+ bx lr
+END_COMPILERRT_FUNCTION(__fixunsdfsivfp)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/fixunsdfsivfp.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
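Note: the documented behavior, truncation toward zero with all negative inputs mapped to zero, is what the vcvt.u32 conversion provides. Illustrative expectations derived from the header comment (not test code from this import):

    #include <assert.h>

    extern unsigned int __fixunsdfsivfp(double a);

    void fixunsdfsivfp_examples(void) {    // expectations per the comment above
      assert(__fixunsdfsivfp(3.9)  == 3);  // truncates toward zero
      assert(__fixunsdfsivfp(-3.9) == 0);  // negative values become zero
    }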
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/fixunssfsivfp.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/fixunssfsivfp.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/fixunssfsivfp.S (revision 351984)
@@ -0,0 +1,34 @@
+//===-- fixunssfsivfp.S - Implement fixunssfsivfp -------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+//
+// extern unsigned int __fixunssfsivfp(float a);
+//
+// Converts single precision float to a 32-bit unsigned int rounding towards
+// zero. All negative values become zero.
+// Uses Darwin calling convention where a single precision parameter is
+// passed in a GPR.
+//
+ .syntax unified
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__fixunssfsivfp)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+ vcvt.u32.f32 s0, s0
+ vmov r0, s0
+#else
+ vmov s15, r0 // load float register from R0
+ vcvt.u32.f32 s15, s15 // convert single to 32-bit unsigned into s15
+ vmov r0, s15 // move s15 to result register
+#endif
+ bx lr
+END_COMPILERRT_FUNCTION(__fixunssfsivfp)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/fixunssfsivfp.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/floatsidfvfp.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/floatsidfvfp.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/floatsidfvfp.S (revision 351984)
@@ -0,0 +1,33 @@
+//===-- floatsidfvfp.S - Implement floatsidfvfp ---------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+//
+// extern double __floatsidfvfp(int a);
+//
+// Converts a 32-bit int to a double precision float.
+// Uses Darwin calling convention where a double precision result is
+// returned in a GPR register pair.
+//
+ .syntax unified
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__floatsidfvfp)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+ vmov s0, r0
+ vcvt.f64.s32 d0, s0
+#else
+ vmov s15, r0 // move int to float register s15
+ vcvt.f64.s32 d7, s15 // convert 32-bit int in s15 to double in d7
+ vmov r0, r1, d7 // move d7 to result register pair r0/r1
+#endif
+ bx lr
+END_COMPILERRT_FUNCTION(__floatsidfvfp)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/floatsidfvfp.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/floatsisfvfp.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/floatsisfvfp.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/floatsisfvfp.S (revision 351984)
@@ -0,0 +1,33 @@
+//===-- floatsisfvfp.S - Implement floatsisfvfp ---------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+//
+// extern float __floatsisfvfp(int a);
+//
+// Converts a 32-bit int to a single precision float.
+// Uses Darwin calling convention where a single precision result is
+// returned in a GPR.
+//
+ .syntax unified
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__floatsisfvfp)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+ vmov s0, r0
+ vcvt.f32.s32 s0, s0
+#else
+ vmov s15, r0 // move int to float register s15
+ vcvt.f32.s32 s15, s15 // convert 32-bit int in s15 to float in s15
+ vmov r0, s15 // move s15 to result register
+#endif
+ bx lr
+END_COMPILERRT_FUNCTION(__floatsisfvfp)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/floatsisfvfp.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/floatunssidfvfp.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/floatunssidfvfp.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/floatunssidfvfp.S (revision 351984)
@@ -0,0 +1,33 @@
+//===-- floatunssidfvfp.S - Implement floatunssidfvfp ---------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+//
+// extern double __floatunssidfvfp(unsigned int a);
+//
+// Converts a 32-bit unsigned int to a double precision float.
+// Uses Darwin calling convention where a double precision result is
+// returned in a GPR register pair.
+//
+ .syntax unified
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__floatunssidfvfp)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+ vmov s0, r0
+ vcvt.f64.u32 d0, s0
+#else
+ vmov s15, r0 // move int to float register s15
+ vcvt.f64.u32 d7, s15 // convert 32-bit int in s15 to double in d7
+ vmov r0, r1, d7 // move d7 to result register pair r0/r1
+#endif
+ bx lr
+END_COMPILERRT_FUNCTION(__floatunssidfvfp)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/floatunssidfvfp.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/floatunssisfvfp.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/floatunssisfvfp.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/floatunssisfvfp.S (revision 351984)
@@ -0,0 +1,33 @@
+//===-- floatunssisfvfp.S - Implement floatunssisfvfp ---------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+//
+// extern float __floatunssisfvfp(unsigned int a);
+//
+// Converts a 32-bit unsigned int to a single precision float.
+// Uses Darwin calling convention where a single precision result is
+// returned in a GPR.
+//
+ .syntax unified
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__floatunssisfvfp)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+ vmov s0, r0
+ vcvt.f32.u32 s0, s0
+#else
+ vmov s15, r0 // move int to float register s15
+ vcvt.f32.u32 s15, s15 // convert 32-bit int in s15 to float in s15
+ vmov r0, s15 // move s15 to result register
+#endif
+ bx lr
+END_COMPILERRT_FUNCTION(__floatunssisfvfp)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/floatunssisfvfp.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/gedf2vfp.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/gedf2vfp.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/gedf2vfp.S (revision 351984)
@@ -0,0 +1,36 @@
+//===-- gedf2vfp.S - Implement gedf2vfp -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+//
+// extern int __gedf2vfp(double a, double b);
+//
+// Returns one iff a >= b and neither is NaN.
+// Uses Darwin calling convention where double precision arguments are passed
+// in GPR pairs.
+//
+ .syntax unified
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__gedf2vfp)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+ vcmp.f64 d0, d1
+#else
+ vmov d6, r0, r1 // load r0/r1 pair in double register
+ vmov d7, r2, r3 // load r2/r3 pair in double register
+ vcmp.f64 d6, d7
+#endif
+ vmrs apsr_nzcv, fpscr
+ ITE(ge)
+ movge r0, #1 // set result register to 1 if greater than or equal
+ movlt r0, #0
+ bx lr
+END_COMPILERRT_FUNCTION(__gedf2vfp)
+
+NO_EXEC_STACK_DIRECTIVE
+
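The vcmp/vmrs pair copies the FP comparison flags into APSR, where an unordered result (either operand NaN) leaves the GE condition false; the gt/le/lt/ne siblings below follow the same pattern with their respective conditions. In C terms the result is simply (a sketch, not the vendor implementation):

    // IEEE >= is false whenever either operand is NaN, which is
    // exactly the GE condition after vmrs apsr_nzcv, fpscr.
    int gedf2vfp_model(double a, double b) {
        return (a >= b) ? 1 : 0;
    }
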
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/gedf2vfp.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/gesf2vfp.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/gesf2vfp.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/gesf2vfp.S (revision 351984)
@@ -0,0 +1,36 @@
+//===-- gesf2vfp.S - Implement gesf2vfp -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+//
+// extern int __gesf2vfp(float a, float b);
+//
+// Returns one iff a >= b and neither is NaN.
+// Uses Darwin calling convention where single precision arguments are passed
+// like 32-bit ints.
+//
+ .syntax unified
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__gesf2vfp)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+ vcmp.f32 s0, s1
+#else
+ vmov s14, r0 // move from GPR 0 to float register
+ vmov s15, r1 // move from GPR 1 to float register
+ vcmp.f32 s14, s15
+#endif
+ vmrs apsr_nzcv, fpscr
+ ITE(ge)
+ movge r0, #1 // set result register to 1 if greater than or equal
+ movlt r0, #0
+ bx lr
+END_COMPILERRT_FUNCTION(__gesf2vfp)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/gesf2vfp.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/gtdf2vfp.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/gtdf2vfp.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/gtdf2vfp.S (revision 351984)
@@ -0,0 +1,36 @@
+//===-- gtdf2vfp.S - Implement gtdf2vfp -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+//
+// extern int __gtdf2vfp(double a, double b);
+//
+// Returns one iff a > b and neither is NaN.
+// Uses Darwin calling convention where double precision arguments are passed
+// in GPR pairs.
+//
+ .syntax unified
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__gtdf2vfp)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+ vcmp.f64 d0, d1
+#else
+ vmov d6, r0, r1 // load r0/r1 pair in double register
+ vmov d7, r2, r3 // load r2/r3 pair in double register
+ vcmp.f64 d6, d7
+#endif
+ vmrs apsr_nzcv, fpscr
+ ITE(gt)
+ movgt r0, #1 // set result register to 1 if greater than
+ movle r0, #0
+ bx lr
+END_COMPILERRT_FUNCTION(__gtdf2vfp)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/gtdf2vfp.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/gtsf2vfp.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/gtsf2vfp.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/gtsf2vfp.S (revision 351984)
@@ -0,0 +1,36 @@
+//===-- gtsf2vfp.S - Implement gtsf2vfp -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+//
+// extern int __gtsf2vfp(float a, float b);
+//
+// Returns one iff a > b and neither is NaN.
+// Uses Darwin calling convention where single precision arguments are passed
+// like 32-bit ints.
+//
+ .syntax unified
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__gtsf2vfp)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+ vcmp.f32 s0, s1
+#else
+ vmov s14, r0 // move from GPR 0 to float register
+ vmov s15, r1 // move from GPR 1 to float register
+ vcmp.f32 s14, s15
+#endif
+ vmrs apsr_nzcv, fpscr
+ ITE(gt)
+ movgt r0, #1 // set result register to 1 if greater than
+ movle r0, #0
+ bx lr
+END_COMPILERRT_FUNCTION(__gtsf2vfp)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/gtsf2vfp.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/ledf2vfp.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/ledf2vfp.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/ledf2vfp.S (revision 351984)
@@ -0,0 +1,36 @@
+//===-- ledf2vfp.S - Implement ledf2vfp -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+//
+// extern int __ledf2vfp(double a, double b);
+//
+// Returns one iff a <= b and neither is NaN.
+// Uses Darwin calling convention where double precision arguments are passed
+// in GPR pairs.
+//
+ .syntax unified
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__ledf2vfp)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+ vcmp.f64 d0, d1
+#else
+ vmov d6, r0, r1 // load r0/r1 pair in double register
+ vmov d7, r2, r3 // load r2/r3 pair in double register
+ vcmp.f64 d6, d7
+#endif
+ vmrs apsr_nzcv, fpscr
+ ITE(ls)
+ movls r0, #1 // set result register to 1 if less than or equal
+ movhi r0, #0
+ bx lr
+END_COMPILERRT_FUNCTION(__ledf2vfp)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/ledf2vfp.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/lesf2vfp.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/lesf2vfp.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/lesf2vfp.S (revision 351984)
@@ -0,0 +1,36 @@
+//===-- lesf2vfp.S - Implement lesf2vfp -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+//
+// extern int __lesf2vfp(float a, float b);
+//
+// Returns one iff a <= b and neither is NaN.
+// Uses Darwin calling convention where single precision arguments are passed
+// like 32-bit ints.
+//
+ .syntax unified
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__lesf2vfp)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+ vcmp.f32 s0, s1
+#else
+ vmov s14, r0 // move from GPR 0 to float register
+ vmov s15, r1 // move from GPR 1 to float register
+ vcmp.f32 s14, s15
+#endif
+ vmrs apsr_nzcv, fpscr
+ ITE(ls)
+ movls r0, #1 // set result register to 1 if less than or equal
+ movhi r0, #0
+ bx lr
+END_COMPILERRT_FUNCTION(__lesf2vfp)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/lesf2vfp.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/ltdf2vfp.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/ltdf2vfp.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/ltdf2vfp.S (revision 351984)
@@ -0,0 +1,36 @@
+//===-- ltdf2vfp.S - Implement ltdf2vfp -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+//
+// extern int __ltdf2vfp(double a, double b);
+//
+// Returns one iff a < b and neither is NaN.
+// Uses Darwin calling convention where double precision arguments are passed
+// in GPR pairs.
+//
+ .syntax unified
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__ltdf2vfp)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+ vcmp.f64 d0, d1
+#else
+ vmov d6, r0, r1 // load r0/r1 pair in double register
+ vmov d7, r2, r3 // load r2/r3 pair in double register
+ vcmp.f64 d6, d7
+#endif
+ vmrs apsr_nzcv, fpscr
+ ITE(mi)
+ movmi r0, #1 // set result register to 1 if less than
+ movpl r0, #0
+ bx lr
+END_COMPILERRT_FUNCTION(__ltdf2vfp)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/ltdf2vfp.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/ltsf2vfp.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/ltsf2vfp.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/ltsf2vfp.S (revision 351984)
@@ -0,0 +1,36 @@
+//===-- ltsf2vfp.S - Implement ltsf2vfp -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+//
+// extern int __ltsf2vfp(float a, float b);
+//
+// Returns one iff a < b and neither is NaN.
+// Uses Darwin calling convention where single precision arguments are passed
+// like 32-bit ints.
+//
+ .syntax unified
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__ltsf2vfp)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+ vcmp.f32 s0, s1
+#else
+ vmov s14, r0 // move from GPR 0 to float register
+ vmov s15, r1 // move from GPR 1 to float register
+ vcmp.f32 s14, s15
+#endif
+ vmrs apsr_nzcv, fpscr
+ ITE(mi)
+ movmi r0, #1 // set result register to 1 if less than
+ movpl r0, #0
+ bx lr
+END_COMPILERRT_FUNCTION(__ltsf2vfp)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/ltsf2vfp.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/modsi3.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/modsi3.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/modsi3.S (revision 351984)
@@ -0,0 +1,59 @@
+//===-- modsi3.S - 32-bit signed integer modulus --------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the __modsi3 (32-bit signed integer modulus) function
+// for the ARM architecture as a wrapper around the unsigned routine.
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+#define ESTABLISH_FRAME \
+ push {r4, r7, lr} ;\
+ add r7, sp, #4
+#define CLEAR_FRAME_AND_RETURN \
+ pop {r4, r7, pc}
+
+ .syntax unified
+ .text
+ DEFINE_CODE_STATE
+
+@ int __modsi3(int dividend, int divisor)
+@ Calculate and return the remainder of the (signed) division.
+
+ .p2align 3
+DEFINE_COMPILERRT_FUNCTION(__modsi3)
+#if __ARM_ARCH_EXT_IDIV__
+ tst r1, r1
+ beq LOCAL_LABEL(divzero)
+ sdiv r2, r0, r1
+ mls r0, r2, r1, r0
+ bx lr
+LOCAL_LABEL(divzero):
+ mov r0, #0
+ bx lr
+#else
+ ESTABLISH_FRAME
+ // Set aside the sign of the dividend.
+ mov r4, r0
+ // Take absolute value of a and b via abs(x) = (x^(x >> 31)) - (x >> 31).
+ eor r2, r0, r0, asr #31
+ eor r3, r1, r1, asr #31
+ sub r0, r2, r0, asr #31
+ sub r1, r3, r1, asr #31
+ // abs(a) % abs(b)
+ bl SYMBOL_NAME(__umodsi3)
+ // Apply sign of dividend to result and return.
+ eor r0, r0, r4, asr #31
+ sub r0, r0, r4, asr #31
+ CLEAR_FRAME_AND_RETURN
+#endif
+END_COMPILERRT_FUNCTION(__modsi3)
+
+NO_EXEC_STACK_DIRECTIVE
+
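The soft path above leans on the branchless identity abs(x) = (x ^ (x >> 31)) - (x >> 31), and the closing eor/sub pair applies the same trick in reverse so the remainder takes the sign of the dividend, matching C's % semantics. A C sketch of the identity (assuming the usual arithmetic right shift on negative ints, as the asr-based assembly does):

    #include <stdint.h>

    // s is 0 for non-negative x and all-ones (-1) for negative x,
    // so (x ^ s) - s is x when s == 0 and ~x + 1 == -x when s == -1.
    int32_t abs_branchless(int32_t x) {
        int32_t s = x >> 31;
        return (x ^ s) - s;
    }
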
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/modsi3.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/muldf3vfp.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/muldf3vfp.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/muldf3vfp.S (revision 351984)
@@ -0,0 +1,32 @@
+//===-- muldf3vfp.S - Implement muldf3vfp ---------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+//
+// extern double __muldf3vfp(double a, double b);
+//
+// Multiplies two double precision floating point numbers using the Darwin
+// calling convention where double arguments are passed in GPR pairs.
+//
+ .syntax unified
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__muldf3vfp)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+ vmul.f64 d0, d0, d1
+#else
+ vmov d6, r0, r1 // move first param from r0/r1 pair into d6
+ vmov d7, r2, r3 // move second param from r2/r3 pair into d7
+ vmul.f64 d6, d6, d7
+ vmov r0, r1, d6 // move result back to r0/r1 pair
+#endif
+ bx lr
+END_COMPILERRT_FUNCTION(__muldf3vfp)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/muldf3vfp.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/mulsf3vfp.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/mulsf3vfp.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/mulsf3vfp.S (revision 351984)
@@ -0,0 +1,32 @@
+//===-- mulsf3vfp.S - Implement mulsf3vfp ---------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+//
+// extern float __mulsf3vfp(float a, float b);
+//
+// Multiplies two single precision floating point numbers using the Darwin
+// calling convention where single arguments are passed like 32-bit ints.
+//
+ .syntax unified
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__mulsf3vfp)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+ vmul.f32 s0, s0, s1
+#else
+ vmov s14, r0 // move first param from r0 into float register
+ vmov s15, r1 // move second param from r1 into float register
+ vmul.f32 s13, s14, s15
+ vmov r0, s13 // move result back to r0
+#endif
+ bx lr
+END_COMPILERRT_FUNCTION(__mulsf3vfp)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/mulsf3vfp.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/nedf2vfp.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/nedf2vfp.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/nedf2vfp.S (revision 351984)
@@ -0,0 +1,35 @@
+//===-- nedf2vfp.S - Implement nedf2vfp -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+// extern int __nedf2vfp(double a, double b);
+//
+// Returns one iff a != b and neither is NaN.
+// Uses Darwin calling convention where double precision arguments are passed
+// in GPR pairs.
+
+ .syntax unified
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__nedf2vfp)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+ vcmp.f64 d0, d1
+#else
+ vmov d6, r0, r1 // load r0/r1 pair in double register
+ vmov d7, r2, r3 // load r2/r3 pair in double register
+ vcmp.f64 d6, d7
+#endif
+ vmrs apsr_nzcv, fpscr
+ ITE(ne)
+ movne r0, #1 // set result register to 1 if unequal
+ moveq r0, #0
+ bx lr
+END_COMPILERRT_FUNCTION(__nedf2vfp)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/nedf2vfp.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/negdf2vfp.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/negdf2vfp.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/negdf2vfp.S (revision 351984)
@@ -0,0 +1,29 @@
+//===-- negdf2vfp.S - Implement negdf2vfp ---------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+//
+// extern double __negdf2vfp(double a);
+//
+// Returns the negation of a double precision floating point number using the
+// Darwin calling convention where double arguments are passed in GPR pairs.
+//
+ .syntax unified
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__negdf2vfp)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+ vneg.f64 d0, d0
+#else
+ eor r1, r1, #-2147483648 // flip sign bit on double in r0/r1 pair
+#endif
+ bx lr
+END_COMPILERRT_FUNCTION(__negdf2vfp)
+
+NO_EXEC_STACK_DIRECTIVE
+
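The eor flips bit 31 of r1, which in the little-endian r0/r1 pairing is the high word of the double and therefore holds the IEEE-754 sign bit. The same bit manipulation in C (the union is for exposition only):

    #include <stdint.h>

    // Negation as a pure bit flip: toggling the sign bit negates any
    // double, including infinities and NaNs, without touching the rest.
    double negdf2_model(double a) {
        union { double d; uint32_t w[2]; } u = { .d = a };
        u.w[1] ^= 0x80000000u;  // w[1] corresponds to r1
        return u.d;
    }
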
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/negdf2vfp.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/negsf2vfp.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/negsf2vfp.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/negsf2vfp.S (revision 351984)
@@ -0,0 +1,29 @@
+//===-- negsf2vfp.S - Implement negsf2vfp ---------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+//
+// extern float __negsf2vfp(float a);
+//
+// Returns the negation of a single precision floating point number using the
+// Darwin calling convention where single arguments are passed like 32-bit ints.
+//
+ .syntax unified
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__negsf2vfp)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+ vneg.f32 s0, s0
+#else
+ eor r0, r0, #-2147483648 // flip sign bit on float in r0
+#endif
+ bx lr
+END_COMPILERRT_FUNCTION(__negsf2vfp)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/negsf2vfp.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/nesf2vfp.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/nesf2vfp.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/nesf2vfp.S (revision 351984)
@@ -0,0 +1,36 @@
+//===-- nesf2vfp.S - Implement nesf2vfp -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+//
+// extern int __nesf2vfp(float a, float b);
+//
+// Returns one iff a != b and neither is NaN.
+// Uses Darwin calling convention where single precision arguments are passed
+// like 32-bit ints.
+//
+ .syntax unified
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__nesf2vfp)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+ vcmp.f32 s0, s1
+#else
+ vmov s14, r0 // move from GPR 0 to float register
+ vmov s15, r1 // move from GPR 1 to float register
+ vcmp.f32 s14, s15
+#endif
+ vmrs apsr_nzcv, fpscr
+ ITE(ne)
+ movne r0, #1 // set result register to 1 if unequal
+ moveq r0, #0
+ bx lr
+END_COMPILERRT_FUNCTION(__nesf2vfp)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/nesf2vfp.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/restore_vfp_d8_d15_regs.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/restore_vfp_d8_d15_regs.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/restore_vfp_d8_d15_regs.S (revision 351984)
@@ -0,0 +1,34 @@
+//===-- save_restore_regs.S - Implement save/restore* ---------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+//
+// When compiling C++ functions that need to handle thrown exceptions, the
+// compiler is required to save all registers and call __Unwind_SjLj_Register
+// in the function prolog. But when compiling for thumb1, there are
+// no instructions to access the floating point registers, so the
+// compiler needs to add a call to the helper function __save_vfp_d8_d15_regs
+// written in ARM to save the float registers. In the epilog, the compiler
+// must also add a call to __restore_vfp_d8_d15_regs to restore those registers.
+//
+
+ .text
+ .syntax unified
+
+//
+// Restore registers d8-d15 from stack
+//
+ .p2align 2
+DEFINE_COMPILERRT_PRIVATE_FUNCTION(__restore_vfp_d8_d15_regs)
+ vldmia sp!, {d8-d15} // pop registers d8-d15 off stack
+ bx lr // return to epilog
+END_COMPILERRT_FUNCTION(__restore_vfp_d8_d15_regs)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/restore_vfp_d8_d15_regs.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/save_vfp_d8_d15_regs.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/save_vfp_d8_d15_regs.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/save_vfp_d8_d15_regs.S (revision 351984)
@@ -0,0 +1,34 @@
+//===-- save_restore_regs.S - Implement save/restore* ---------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+//
+// When compiling C++ functions that need to handle thrown exceptions, the
+// compiler is required to save all registers and call __Unwind_SjLj_Register
+// in the function prolog. But when compiling for thumb1, there are
+// no instructions to access the floating point registers, so the
+// compiler needs to add a call to the helper function __save_vfp_d8_d15_regs
+// written in ARM to save the float registers. In the epilog, the compiler
+// must also add a call to __restore_vfp_d8_d15_regs to restore those registers.
+//
+
+ .text
+ .syntax unified
+
+//
+// Save registers d8-d15 onto stack
+//
+ .p2align 2
+DEFINE_COMPILERRT_PRIVATE_FUNCTION(__save_vfp_d8_d15_regs)
+ vstmdb sp!, {d8-d15} // push registers d8-d15 onto stack
+ bx lr // return to prolog
+END_COMPILERRT_FUNCTION(__save_vfp_d8_d15_regs)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/save_vfp_d8_d15_regs.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/softfloat-alias.list
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/softfloat-alias.list (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/softfloat-alias.list (revision 351984)
@@ -0,0 +1,21 @@
+#
+# These are soft float functions which can be
+# aliased to the *vfp functions on arm processors
+# that support floating point instructions.
+#
+___adddf3vfp ___adddf3
+___addsf3vfp ___addsf3
+___divdf3vfp ___divdf3
+___divsf3vfp ___divsf3
+___extendsfdf2vfp ___extendsfdf2
+___fixdfsivfp ___fixdfsi
+___fixsfsivfp ___fixsfsi
+___floatsidfvfp ___floatsidf
+___floatsisfvfp ___floatsisf
+___muldf3vfp ___muldf3
+___mulsf3vfp ___mulsf3
+___subdf3vfp ___subdf3
+___subsf3vfp ___subsf3
+___truncdfsf2vfp ___truncdfsf2
+___floatunssidfvfp ___floatunsidf
+___floatunssisfvfp ___floatunsisf
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/subdf3vfp.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/subdf3vfp.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/subdf3vfp.S (revision 351984)
@@ -0,0 +1,32 @@
+//===-- subdf3vfp.S - Implement subdf3vfp ---------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+//
+// extern double __subdf3vfp(double a, double b);
+//
+// Returns the difference between two double precision floating point numbers
+// using the Darwin calling convention where double arguments are passed in
+// GPR pairs.
+//
+ .syntax unified
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__subdf3vfp)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+ vsub.f64 d0, d0, d1
+#else
+ vmov d6, r0, r1 // move first param from r0/r1 pair into d6
+ vmov d7, r2, r3 // move second param from r2/r3 pair into d7
+ vsub.f64 d6, d6, d7
+ vmov r0, r1, d6 // move result back to r0/r1 pair
+#endif
+ bx lr
+END_COMPILERRT_FUNCTION(__subdf3vfp)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/subdf3vfp.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/subsf3vfp.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/subsf3vfp.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/subsf3vfp.S (revision 351984)
@@ -0,0 +1,33 @@
+//===-- subsf3vfp.S - Implement subsf3vfp ---------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+//
+// extern float __subsf3vfp(float a, float b);
+//
+// Returns the difference between two single precision floating point numbers
+// using the Darwin calling convention where single arguments are passed
+// like 32-bit ints.
+//
+ .syntax unified
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__subsf3vfp)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+ vsub.f32 s0, s0, s1
+#else
+ vmov s14, r0 // move first param from r0 into float register
+ vmov s15, r1 // move second param from r1 into float register
+ vsub.f32 s14, s14, s15
+ vmov r0, s14 // move result back to r0
+#endif
+ bx lr
+END_COMPILERRT_FUNCTION(__subsf3vfp)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/subsf3vfp.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/switch16.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/switch16.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/switch16.S (revision 351984)
@@ -0,0 +1,45 @@
+//===-- switch.S - Implement switch* --------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+//
+// When compiling switch statements in thumb mode, the compiler
+// can use these __switch* helper functions. The compiler emits a blx to
+// the __switch* function followed by a table of displacements for each
+// case statement. On entry, R0 is the index into the table. The __switch*
+// function uses the return address in lr to find the start of the table.
+// The first entry in the table is the count of the entries in the table.
+// It then uses R0 to index into the table and get the displacement of the
+// address to jump to. If R0 is greater than the size of the table, it jumps
+// to the last entry in the table. Each displacement in the table is actually
+// the distance from lr to the label, thus making the tables PIC.
+
+
+ .text
+ .syntax unified
+
+//
+// The table contains signed 2-byte sized elements which are 1/2 the distance
+// from lr to the target label.
+//
+ .p2align 2
+DEFINE_COMPILERRT_PRIVATE_FUNCTION(__switch16)
+ ldrh ip, [lr, #-1] // get first 16-bit word in table
+ cmp r0, ip // compare with index
+ add r0, lr, r0, lsl #1 // compute address of element in table
+ add ip, lr, ip, lsl #1 // compute address of last element in table
+ ite lo
+ ldrshlo r0, [r0, #1] // load 16-bit element if r0 is in range
+ ldrshhs r0, [ip, #1] // load 16-bit element if r0 out of range
+ add ip, lr, r0, lsl #1 // compute label = lr + element*2
+ bx ip // jump to computed label
+END_COMPILERRT_FUNCTION(__switch16)
+
+NO_EXEC_STACK_DIRECTIVE
+
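The same dispatch scheme backs __switch32, __switch8, and __switchu8 below, varying only the element width, signedness, and scaling. As a hedged C model of the 16-bit variant (names are illustrative; the -1/+1 offsets in the assembly exist because lr arrives with the Thumb bit set):

    #include <stdint.h>

    // lr points just past the blx, where the halfword table begins.
    // table[0] holds the case count; out-of-range indexes are clamped
    // to the default entry that follows the last case. Displacements
    // are half the distance from lr to the target label (PIC).
    uint32_t switch16_target(uint32_t lr, uint32_t index) {
        const int16_t *table = (const int16_t *)(lr & ~1u); // strip Thumb bit
        uint16_t count = (uint16_t)table[0];
        uint32_t slot = (index < count) ? index : count;
        int32_t disp = table[1 + slot];
        return lr + (uint32_t)(disp << 1); // lr keeps the Thumb bit for bx
    }
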
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/switch16.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/switch32.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/switch32.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/switch32.S (revision 351984)
@@ -0,0 +1,45 @@
+//===-- switch.S - Implement switch* --------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+//
+// When compiling switch statements in thumb mode, the compiler
+// can use these __switch* helper functions. The compiler emits a blx to
+// the __switch* function followed by a table of displacements for each
+// case statement. On entry, R0 is the index into the table. The __switch*
+// function uses the return address in lr to find the start of the table.
+// The first entry in the table is the count of the entries in the table.
+// It then uses R0 to index into the table and get the displacement of the
+// address to jump to. If R0 is greater than the size of the table, it jumps
+// to the last entry in the table. Each displacement in the table is actually
+// the distance from lr to the label, thus making the tables PIC.
+
+
+ .text
+ .syntax unified
+
+//
+// The table contains signed 4-byte sized elements which are the distance
+// from lr to the target label.
+//
+ .p2align 2
+DEFINE_COMPILERRT_PRIVATE_FUNCTION(__switch32)
+ ldr ip, [lr, #-1] // get first 32-bit word in table
+ cmp r0, ip // compare with index
+ add r0, lr, r0, lsl #2 // compute address of element in table
+ add ip, lr, ip, lsl #2 // compute address of last element in table
+ ite lo
+ ldrlo r0, [r0, #3] // load 32-bit element if r0 is in range
+ ldrhs r0, [ip, #3] // load 32-bit element if r0 out of range
+ add ip, lr, r0 // compute label = lr + element
+ bx ip // jump to computed label
+END_COMPILERRT_FUNCTION(__switch32)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/switch32.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/switch8.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/switch8.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/switch8.S (revision 351984)
@@ -0,0 +1,43 @@
+//===-- switch.S - Implement switch* --------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+//
+// When compiling switch statements in thumb mode, the compiler
+// can use these __switch* helper functions. The compiler emits a blx to
+// the __switch* function followed by a table of displacements for each
+// case statement. On entry, R0 is the index into the table. The __switch*
+// function uses the return address in lr to find the start of the table.
+// The first entry in the table is the count of the entries in the table.
+// It then uses R0 to index into the table and get the displacement of the
+// address to jump to. If R0 is greater than the size of the table, it jumps
+// to the last entry in the table. Each displacement in the table is actually
+// the distance from lr to the label, thus making the tables PIC.
+
+
+ .text
+ .syntax unified
+
+//
+// The table contains signed byte sized elements which are 1/2 the distance
+// from lr to the target label.
+//
+ .p2align 2
+DEFINE_COMPILERRT_PRIVATE_FUNCTION(__switch8)
+ ldrb ip, [lr, #-1] // get first byte in table
+ cmp r0, ip // signed compare with index
+ ite lo
+ ldrsblo r0, [lr, r0] // get indexed byte out of table
+ ldrsbhs r0, [lr, ip] // if out of range, use last entry in table
+ add ip, lr, r0, lsl #1 // compute label = lr + element*2
+ bx ip // jump to computed label
+END_COMPILERRT_FUNCTION(__switch8)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/switch8.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/switchu8.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/switchu8.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/switchu8.S (revision 351984)
@@ -0,0 +1,43 @@
+//===-- switch.S - Implement switch* --------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+//
+// When compiling switch statements in thumb mode, the compiler
+// can use these __switch* helper functions. The compiler emits a blx to
+// the __switch* function followed by a table of displacements for each
+// case statement. On entry, R0 is the index into the table. The __switch*
+// function uses the return address in lr to find the start of the table.
+// The first entry in the table is the count of the entries in the table.
+// It then uses R0 to index into the table and get the displacement of the
+// address to jump to. If R0 is greater than the size of the table, it jumps
+// to the last entry in the table. Each displacement in the table is actually
+// the distance from lr to the label, thus making the tables PIC.
+
+
+ .text
+ .syntax unified
+
+//
+// The table contains unsigned byte sized elements which are 1/2 the distance
+// from lr to the target label.
+//
+ .p2align 2
+DEFINE_COMPILERRT_PRIVATE_FUNCTION(__switchu8)
+ ldrb ip, [lr, #-1] // get first byte in table
+ cmp r0, ip // compare with index
+ ite lo
+ ldrblo r0, [lr, r0] // get indexed byte out of table
+ ldrbhs r0, [lr, ip] // if out of range, use last entry in table
+ add ip, lr, r0, lsl #1 // compute label = lr + element*2
+ bx ip // jump to computed label
+END_COMPILERRT_FUNCTION(__switchu8)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/switchu8.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync-ops.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync-ops.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync-ops.h (revision 351984)
@@ -0,0 +1,61 @@
+//===-- sync-ops.h - Shared macros for __sync_fetch_and_* operations -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements outline macros for the __sync_fetch_and_*
+// operations. Different instantiations will generate appropriate assembly for
+// ARM and Thumb-2 versions of the functions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+#define SYNC_OP_4(op) \
+ .p2align 2; \
+ .thumb; \
+ .syntax unified; \
+ DEFINE_COMPILERRT_THUMB_FUNCTION(__sync_fetch_and_##op) \
+ dmb; \
+ mov r12, r0; \
+ LOCAL_LABEL(tryatomic_##op) : ldrex r0, [r12]; \
+ op(r2, r0, r1); \
+ strex r3, r2, [r12]; \
+ cmp r3, #0; \
+ bne LOCAL_LABEL(tryatomic_##op); \
+ dmb; \
+ bx lr
+
+#define SYNC_OP_8(op) \
+ .p2align 2; \
+ .thumb; \
+ .syntax unified; \
+ DEFINE_COMPILERRT_THUMB_FUNCTION(__sync_fetch_and_##op) \
+ push {r4, r5, r6, lr}; \
+ dmb; \
+ mov r12, r0; \
+ LOCAL_LABEL(tryatomic_##op) : ldrexd r0, r1, [r12]; \
+ op(r4, r5, r0, r1, r2, r3); \
+ strexd r6, r4, r5, [r12]; \
+ cmp r6, #0; \
+ bne LOCAL_LABEL(tryatomic_##op); \
+ dmb; \
+ pop { r4, r5, r6, pc }
+
+#define MINMAX_4(rD, rN, rM, cmp_kind) \
+ cmp rN, rM; \
+ mov rD, rM; \
+ it cmp_kind; \
+ mov##cmp_kind rD, rN
+
+#define MINMAX_8(rD_LO, rD_HI, rN_LO, rN_HI, rM_LO, rM_HI, cmp_kind) \
+ cmp rN_LO, rM_LO; \
+ sbcs rN_HI, rM_HI; \
+ mov rD_LO, rM_LO; \
+ mov rD_HI, rM_HI; \
+ itt cmp_kind; \
+ mov##cmp_kind rD_LO, rN_LO; \
+ mov##cmp_kind rD_HI, rN_HI
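Each SYNC_OP_4 instantiation is a load-exclusive/store-exclusive retry loop bracketed by barriers. The shape is easier to see in C; this sketch uses Clang's ARM exclusive-access builtins and hard-codes an add where the op() parameter is spliced in (illustrative, not the vendor implementation):

    #include <stdint.h>

    uint32_t sync_fetch_and_add_4_model(uint32_t *p, uint32_t val) {
        uint32_t old;
        __builtin_arm_dmb(11);                       // dmb ish
        do {
            old = __builtin_arm_ldrex(p);            // ldrex r0, [r12]
        } while (__builtin_arm_strex(old + val, p)); // strex failed: retry
        __builtin_arm_dmb(11);
        return old;                                  // returns the old value
    }
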
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync-ops.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_add_4.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_add_4.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_add_4.S (revision 351984)
@@ -0,0 +1,22 @@
+//===-- sync_fetch_and_add_4.S - ------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the __sync_fetch_and_add_4 function for the ARM
+// architecture.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sync-ops.h"
+
+// "adds" is 2 bytes shorter than "add".
+#define add_4(rD, rN, rM) add rD, rN, rM
+
+SYNC_OP_4(add_4)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_add_4.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_add_8.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_add_8.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_add_8.S (revision 351984)
@@ -0,0 +1,25 @@
+//===-- sync_fetch_and_add_8.S - ------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the __sync_fetch_and_add_8 function for the ARM
+// architecture.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sync-ops.h"
+
+#if __ARM_ARCH_PROFILE != 'M'
+#define add_8(rD_LO, rD_HI, rN_LO, rN_HI, rM_LO, rM_HI) \
+ adds rD_LO, rN_LO, rM_LO ; \
+ adc rD_HI, rN_HI, rM_HI
+
+SYNC_OP_8(add_8)
+#endif
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_add_8.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_and_4.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_and_4.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_and_4.S (revision 351984)
@@ -0,0 +1,21 @@
+//===-- sync_fetch_and_and_4.S - ------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the __sync_fetch_and_and_4 function for the ARM
+// architecture.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sync-ops.h"
+
+#define and_4(rD, rN, rM) and rD, rN, rM
+
+SYNC_OP_4(and_4)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_and_4.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_and_8.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_and_8.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_and_8.S (revision 351984)
@@ -0,0 +1,25 @@
+//===-- sync_fetch_and_and_8.S - ------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the __sync_fetch_and_and_8 function for the ARM
+// architecture.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sync-ops.h"
+
+#if __ARM_ARCH_PROFILE != 'M'
+#define and_8(rD_LO, rD_HI, rN_LO, rN_HI, rM_LO, rM_HI) \
+ and rD_LO, rN_LO, rM_LO ; \
+ and rD_HI, rN_HI, rM_HI
+
+SYNC_OP_8(and_8)
+#endif
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_and_8.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_max_4.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_max_4.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_max_4.S (revision 351984)
@@ -0,0 +1,21 @@
+//===-- sync_fetch_and_max_4.S - ------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the __sync_fetch_and_max_4 function for the ARM
+// architecture.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sync-ops.h"
+
+#define max_4(rD, rN, rM) MINMAX_4(rD, rN, rM, gt)
+
+SYNC_OP_4(max_4)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_max_4.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
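MINMAX_4 expands to a compare followed by a conditional move selected by the condition-code argument, so gt here gives the signed maximum (the umax/umin variants further below pass hi/lo instead). The non-atomic core behaves like this illustrative C; SYNC_OP_4 then wraps it in the ldrex/strex loop so the whole routine is an atomic fetch-and-max returning the old value:

    // Core of max_4: keep n when n > m (signed compare), otherwise take m.
    static int model_max_4_core(int n, int m) {
      return n > m ? n : m;
    }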
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_max_8.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_max_8.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_max_8.S (revision 351984)
@@ -0,0 +1,23 @@
+//===-- sync_fetch_and_max_8.S - ------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the __sync_fetch_and_max_8 function for the ARM
+// architecture.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sync-ops.h"
+
+#if __ARM_ARCH_PROFILE != 'M'
+#define max_8(rD_LO, rD_HI, rN_LO, rN_HI, rM_LO, rM_HI) MINMAX_8(rD_LO, rD_HI, rN_LO, rN_HI, rM_LO, rM_HI, gt)
+
+SYNC_OP_8(max_8)
+#endif
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_max_8.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_min_4.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_min_4.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_min_4.S (revision 351984)
@@ -0,0 +1,21 @@
+//===-- sync_fetch_and_min_4.S - ------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the __sync_fetch_and_min_4 function for the ARM
+// architecture.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sync-ops.h"
+
+#define min_4(rD, rN, rM) MINMAX_4(rD, rN, rM, lt)
+
+SYNC_OP_4(min_4)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_min_4.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_min_8.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_min_8.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_min_8.S (revision 351984)
@@ -0,0 +1,23 @@
+//===-- sync_fetch_and_min_8.S - ------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the __sync_fetch_and_min_8 function for the ARM
+// architecture.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sync-ops.h"
+
+#if __ARM_ARCH_PROFILE != 'M'
+#define min_8(rD_LO, rD_HI, rN_LO, rN_HI, rM_LO, rM_HI) MINMAX_8(rD_LO, rD_HI, rN_LO, rN_HI, rM_LO, rM_HI, lt)
+
+SYNC_OP_8(min_8)
+#endif
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_min_8.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_nand_4.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_nand_4.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_nand_4.S (revision 351984)
@@ -0,0 +1,21 @@
+//===-- sync_fetch_and_nand_4.S - -----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the __sync_fetch_and_nand_4 function for the ARM
+// architecture.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sync-ops.h"
+
+#define nand_4(rD, rN, rM) bic rD, rN, rM
+
+SYNC_OP_4(nand_4)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_nand_4.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
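Note that nand_4 expands to bic, which computes rN & ~rM; GCC has specified the __sync_fetch_and_nand builtin as ~(rN & rM) since GCC 4.4, and the bic form matches the pre-4.4 semantics. The two differ, as this illustrative C shows:

    // What the macro computes vs. the post-GCC-4.4 builtin specification:
    static unsigned bic_form(unsigned n, unsigned m)  { return n & ~m; }   // bic rD, rN, rM
    static unsigned nand_form(unsigned n, unsigned m) { return ~(n & m); } // GCC >= 4.4 nand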
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_nand_8.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_nand_8.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_nand_8.S (revision 351984)
@@ -0,0 +1,25 @@
+//===-- sync_fetch_and_nand_8.S - ------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the __sync_fetch_and_nand_8 function for the ARM
+// architecture.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sync-ops.h"
+
+#if __ARM_ARCH_PROFILE != 'M'
+#define nand_8(rD_LO, rD_HI, rN_LO, rN_HI, rM_LO, rM_HI) \
+ bic rD_LO, rN_LO, rM_LO ; \
+ bic rD_HI, rN_HI, rM_HI
+
+SYNC_OP_8(nand_8)
+#endif
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_nand_8.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_or_4.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_or_4.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_or_4.S (revision 351984)
@@ -0,0 +1,21 @@
+//===-- sync_fetch_and_or_4.S - -------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the __sync_fetch_and_or_4 function for the ARM
+// architecture.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sync-ops.h"
+
+#define or_4(rD, rN, rM) orr rD, rN, rM
+
+SYNC_OP_4(or_4)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_or_4.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_or_8.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_or_8.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_or_8.S (revision 351984)
@@ -0,0 +1,25 @@
+//===-- sync_fetch_and_or_8.S - -------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the __sync_fetch_and_or_8 function for the ARM
+// architecture.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sync-ops.h"
+
+#if __ARM_ARCH_PROFILE != 'M'
+#define or_8(rD_LO, rD_HI, rN_LO, rN_HI, rM_LO, rM_HI) \
+ orr rD_LO, rN_LO, rM_LO ; \
+ orr rD_HI, rN_HI, rM_HI
+
+SYNC_OP_8(or_8)
+#endif
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_or_8.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_sub_4.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_sub_4.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_sub_4.S (revision 351984)
@@ -0,0 +1,22 @@
+//===-- sync_fetch_and_sub_4.S - ------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the __sync_fetch_and_sub_4 function for the ARM
+// architecture.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sync-ops.h"
+
+// "subs" is 2 bytes shorter than "sub".
+#define sub_4(rD, rN, rM) sub rD, rN, rM
+
+SYNC_OP_4(sub_4)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_sub_4.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_sub_8.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_sub_8.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_sub_8.S (revision 351984)
@@ -0,0 +1,25 @@
+//===-- sync_fetch_and_sub_8.S - ------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the __sync_fetch_and_sub_8 function for the ARM
+// architecture.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sync-ops.h"
+
+#if __ARM_ARCH_PROFILE != 'M'
+#define sub_8(rD_LO, rD_HI, rN_LO, rN_HI, rM_LO, rM_HI) \
+ subs rD_LO, rN_LO, rM_LO ; \
+ sbc rD_HI, rN_HI, rM_HI
+
+SYNC_OP_8(sub_8)
+#endif
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_sub_8.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_umax_4.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_umax_4.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_umax_4.S (revision 351984)
@@ -0,0 +1,21 @@
+//===-- sync_fetch_and_umax_4.S - ------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the __sync_fetch_and_umax_4 function for the ARM
+// architecture.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sync-ops.h"
+
+#define umax_4(rD, rN, rM) MINMAX_4(rD, rN, rM, hi)
+
+SYNC_OP_4(umax_4)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_umax_4.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_umax_8.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_umax_8.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_umax_8.S (revision 351984)
@@ -0,0 +1,23 @@
+//===-- sync_fetch_and_umax_8.S - ------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the __sync_fetch_and_umax_8 function for the ARM
+// architecture.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sync-ops.h"
+
+#if __ARM_ARCH_PROFILE != 'M'
+#define umax_8(rD_LO, rD_HI, rN_LO, rN_HI, rM_LO, rM_HI) MINMAX_8(rD_LO, rD_HI, rN_LO, rN_HI, rM_LO, rM_HI, hi)
+
+SYNC_OP_8(umax_8)
+#endif
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_umax_8.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_umin_4.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_umin_4.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_umin_4.S (revision 351984)
@@ -0,0 +1,21 @@
+//===-- sync_fetch_and_umin_4.S - ------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the __sync_fetch_and_umin_4 function for the ARM
+// architecture.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sync-ops.h"
+
+#define umin_4(rD, rN, rM) MINMAX_4(rD, rN, rM, lo)
+
+SYNC_OP_4(umin_4)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_umin_4.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_umin_8.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_umin_8.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_umin_8.S (revision 351984)
@@ -0,0 +1,23 @@
+//===-- sync_fetch_and_umin_8.S - ------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the __sync_fetch_and_umin_8 function for the ARM
+// architecture.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sync-ops.h"
+
+#if __ARM_ARCH_PROFILE != 'M'
+#define umin_8(rD_LO, rD_HI, rN_LO, rN_HI, rM_LO, rM_HI) MINMAX_8(rD_LO, rD_HI, rN_LO, rN_HI, rM_LO, rM_HI, lo)
+
+SYNC_OP_8(umin_8)
+#endif
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_umin_8.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_xor_4.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_xor_4.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_xor_4.S (revision 351984)
@@ -0,0 +1,21 @@
+//===-- sync_fetch_and_xor_4.S - ------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the __sync_fetch_and_xor_4 function for the ARM
+// architecture.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sync-ops.h"
+
+#define xor_4(rD, rN, rM) eor rD, rN, rM
+
+SYNC_OP_4(xor_4)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_xor_4.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_xor_8.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_xor_8.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_xor_8.S (revision 351984)
@@ -0,0 +1,25 @@
+//===-- sync_fetch_and_xor_8.S - ------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the __sync_fetch_and_xor_8 function for the ARM
+// architecture.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sync-ops.h"
+
+#if __ARM_ARCH_PROFILE != 'M'
+#define xor_8(rD_LO, rD_HI, rN_LO, rN_HI, rM_LO, rM_HI) \
+ eor rD_LO, rN_LO, rM_LO ; \
+ eor rD_HI, rN_HI, rM_HI
+
+SYNC_OP_8(xor_8)
+#endif
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_fetch_and_xor_8.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_synchronize.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_synchronize.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_synchronize.S (revision 351984)
@@ -0,0 +1,35 @@
+//===-- sync_synchronize - Implement memory barrier -----------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+// When compiling a use of the GCC built-in __sync_synchronize() in Thumb-1
+// mode, the compiler may emit a call to __sync_synchronize.
+// On Darwin the implementation jumps to an OS-supplied function named
+// OSMemoryBarrier.
+
+ .text
+ .syntax unified
+
+#if __APPLE__
+
+ .p2align 2
+DEFINE_COMPILERRT_PRIVATE_FUNCTION(__sync_synchronize)
+ stmfd sp!, {r7, lr}
+ add r7, sp, #0
+ bl _OSMemoryBarrier
+ ldmfd sp!, {r7, pc}
+END_COMPILERRT_FUNCTION(__sync_synchronize)
+
+ // tell linker it can break up file at label boundaries
+ .subsections_via_symbols
+
+#endif
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/sync_synchronize.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
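On non-Darwin targets this file emits nothing and the compiler expands the barrier inline; the Darwin path above just frames a call into the OS. A sketch of how the builtin this routine backs is typically used, assuming the usual full-barrier reading:

    // Sketch: __sync_synchronize() is a full memory barrier, so a consumer
    // that observes ready != 0 also observes data == 42.
    static int data, ready;

    static void producer(void) {
      data = 42;
      __sync_synchronize(); // on Darwin/Thumb-1 this may become a call to the routine above
      ready = 1;
    }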
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/truncdfsf2vfp.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/truncdfsf2vfp.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/truncdfsf2vfp.S (revision 351984)
@@ -0,0 +1,32 @@
+//===-- truncdfsf2vfp.S - Implement truncdfsf2vfp -------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+//
+// extern float __truncdfsf2vfp(double a);
+//
+// Converts a double precision value to a single precision result.
+// Uses the Darwin calling convention, where a double precision parameter is
+// passed in an R0/R1 pair and a single precision result is returned in R0.
+//
+ .syntax unified
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__truncdfsf2vfp)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+ vcvt.f32.f64 s0, d0
+#else
+ vmov d7, r0, r1 // load double from r0/r1 pair
+ vcvt.f32.f64 s15, d7 // convert double to single (truncate precision)
+ vmov r0, s15 // return result in r0
+#endif
+ bx lr
+END_COMPILERRT_FUNCTION(__truncdfsf2vfp)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/truncdfsf2vfp.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/udivmodsi4.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/udivmodsi4.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/udivmodsi4.S (revision 351984)
@@ -0,0 +1,178 @@
+//===-- udivmodsi4.S - 32-bit unsigned integer divide and modulus ---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the __udivmodsi4 (32-bit unsigned integer divide and
+// modulus) function for the ARM 32-bit architecture.
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+ .syntax unified
+ .text
+ DEFINE_CODE_STATE
+
+@ unsigned int __udivmodsi4(unsigned int dividend, unsigned int divisor,
+@ unsigned int *remainder)
+@ Calculate the quotient and remainder of the (unsigned) division. The return
+@ value is the quotient, the remainder is placed in the variable.
+
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__udivmodsi4)
+#if __ARM_ARCH_EXT_IDIV__
+ tst r1, r1
+ beq LOCAL_LABEL(divby0)
+ mov r3, r0
+ udiv r0, r3, r1
+ mls r1, r0, r1, r3
+ str r1, [r2]
+ bx lr
+#else
+ cmp r1, #1
+ bcc LOCAL_LABEL(divby0)
+ beq LOCAL_LABEL(divby1)
+ cmp r0, r1
+ bcc LOCAL_LABEL(quotient0)
+
+ // Implement division using binary long division algorithm.
+ //
+ // r0 is the numerator, r1 the denominator.
+ //
+ // The code before JMP computes the correct shift I, so that
+ // r0 and (r1 << I) have the highest bit set in the same position.
+ // At the time of JMP, ip := .Ldiv0block - 12 * I.
+ // This depends on the fixed instruction size of block.
+// For ARM mode, this is 12 bytes; for THUMB mode, 14 bytes.
+ //
+ // block(shift) implements the test-and-update-quotient core.
+ // It assumes (r0 << shift) can be computed without overflow and
+ // that (r0 << shift) < 2 * r1. The quotient is stored in r3.
+
+# ifdef __ARM_FEATURE_CLZ
+ clz ip, r0
+ clz r3, r1
+ // r0 >= r1 implies clz(r0) <= clz(r1), so ip <= r3.
+ sub r3, r3, ip
+# if defined(USE_THUMB_2)
+ adr ip, LOCAL_LABEL(div0block) + 1
+ sub ip, ip, r3, lsl #1
+# else
+ adr ip, LOCAL_LABEL(div0block)
+# endif
+ sub ip, ip, r3, lsl #2
+ sub ip, ip, r3, lsl #3
+ mov r3, #0
+ bx ip
+# else
+# if defined(USE_THUMB_2)
+# error THUMB mode requires CLZ or UDIV
+# endif
+ str r4, [sp, #-8]!
+
+ mov r4, r0
+ adr ip, LOCAL_LABEL(div0block)
+
+ lsr r3, r4, #16
+ cmp r3, r1
+ movhs r4, r3
+ subhs ip, ip, #(16 * 12)
+
+ lsr r3, r4, #8
+ cmp r3, r1
+ movhs r4, r3
+ subhs ip, ip, #(8 * 12)
+
+ lsr r3, r4, #4
+ cmp r3, r1
+ movhs r4, r3
+ subhs ip, #(4 * 12)
+
+ lsr r3, r4, #2
+ cmp r3, r1
+ movhs r4, r3
+ subhs ip, ip, #(2 * 12)
+
+ // Last block, no need to update r3 or r4.
+ cmp r1, r4, lsr #1
+ subls ip, ip, #(1 * 12)
+
+ ldr r4, [sp], #8 // restore r4, we are done with it.
+ mov r3, #0
+
+ JMP(ip)
+# endif
+
+#define IMM #
+
+#define block(shift) \
+ cmp r0, r1, lsl IMM shift; \
+ ITT(hs); \
+ WIDE(addhs) r3, r3, IMM (1 << shift); \
+ WIDE(subhs) r0, r0, r1, lsl IMM shift
+
+ block(31)
+ block(30)
+ block(29)
+ block(28)
+ block(27)
+ block(26)
+ block(25)
+ block(24)
+ block(23)
+ block(22)
+ block(21)
+ block(20)
+ block(19)
+ block(18)
+ block(17)
+ block(16)
+ block(15)
+ block(14)
+ block(13)
+ block(12)
+ block(11)
+ block(10)
+ block(9)
+ block(8)
+ block(7)
+ block(6)
+ block(5)
+ block(4)
+ block(3)
+ block(2)
+ block(1)
+LOCAL_LABEL(div0block):
+ block(0)
+
+ str r0, [r2]
+ mov r0, r3
+ JMP(lr)
+
+LOCAL_LABEL(quotient0):
+ str r0, [r2]
+ mov r0, #0
+ JMP(lr)
+
+LOCAL_LABEL(divby1):
+ mov r3, #0
+ str r3, [r2]
+ JMP(lr)
+#endif // __ARM_ARCH_EXT_IDIV__
+
+LOCAL_LABEL(divby0):
+ mov r0, #0
+#ifdef __ARM_EABI__
+ b __aeabi_idiv0
+#else
+ JMP(lr)
+#endif
+
+END_COMPILERRT_FUNCTION(__udivmodsi4)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/udivmodsi4.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
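The 31 unrolled block() copies implement the shift-and-subtract scheme described in the comment block: the entry code computes the bit-alignment distance I between numerator and denominator and jumps to div0block - 12 * I, so exactly the useful blocks execute. A compact C model of the same algorithm (illustrative; assumes n >= d > 0, which the earlier divby0/divby1/quotient0 branches guarantee):

    // Illustrative C model of the unrolled binary long division. The assembly
    // derives `shift` with clz (or the stepped comparisons) and jumps into the
    // block chain; here it is an explicit loop.
    static unsigned model_udivmodsi4(unsigned n, unsigned d, unsigned *rem) {
      unsigned q = 0;
      int shift = __builtin_clz(d) - __builtin_clz(n); // align the top set bits
      for (; shift >= 0; --shift) {
        if (n >= (d << shift)) { // one block(shift): test ...
          n -= d << shift;       // ... update the remainder ...
          q |= 1u << shift;      // ... and set this quotient bit
        }
      }
      *rem = n;
      return q;
    }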
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/udivsi3.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/udivsi3.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/udivsi3.S (revision 351984)
@@ -0,0 +1,261 @@
+//===-- udivsi3.S - 32-bit unsigned integer divide ------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the __udivsi3 (32-bit unsigned integer divide)
+// function for the ARM 32-bit architecture.
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+ .syntax unified
+ .text
+
+DEFINE_CODE_STATE
+
+ .p2align 2
+DEFINE_AEABI_FUNCTION_ALIAS(__aeabi_uidiv, __udivsi3)
+
+@ unsigned int __udivsi3(unsigned int dividend, unsigned int divisor)
+@ Calculate and return the quotient of the (unsigned) division.
+
+DEFINE_COMPILERRT_FUNCTION(__udivsi3)
+#if __ARM_ARCH_EXT_IDIV__
+ tst r1, r1
+ beq LOCAL_LABEL(divby0)
+ udiv r0, r0, r1
+ bx lr
+
+LOCAL_LABEL(divby0):
+ mov r0, #0
+# ifdef __ARM_EABI__
+ b __aeabi_idiv0
+# else
+ JMP(lr)
+# endif
+
+#else // ! __ARM_ARCH_EXT_IDIV__
+ cmp r1, #1
+ bcc LOCAL_LABEL(divby0)
+#if defined(USE_THUMB_1)
+ bne LOCAL_LABEL(num_neq_denom)
+ JMP(lr)
+LOCAL_LABEL(num_neq_denom):
+#else
+ IT(eq)
+ JMPc(lr, eq)
+#endif
+ cmp r0, r1
+#if defined(USE_THUMB_1)
+ bhs LOCAL_LABEL(num_ge_denom)
+ movs r0, #0
+ JMP(lr)
+LOCAL_LABEL(num_ge_denom):
+#else
+ ITT(cc)
+ movcc r0, #0
+ JMPc(lr, cc)
+#endif
+
+ // Implement division using binary long division algorithm.
+ //
+ // r0 is the numerator, r1 the denominator.
+ //
+ // The code before JMP computes the correct shift I, so that
+ // r0 and (r1 << I) have the highest bit set in the same position.
+ // At the time of JMP, ip := .Ldiv0block - 12 * I.
+ // This depends on the fixed instruction size of block.
+// For ARM mode, this is 12 bytes; for THUMB mode, 14 bytes.
+ //
+ // block(shift) implements the test-and-update-quotient core.
+ // It assumes (r0 << shift) can be computed without overflow and
+ // that (r0 << shift) < 2 * r1. The quotient is stored in r3.
+
+# if defined(__ARM_FEATURE_CLZ)
+ clz ip, r0
+ clz r3, r1
+ // r0 >= r1 implies clz(r0) <= clz(r1), so ip <= r3.
+ sub r3, r3, ip
+# if defined(USE_THUMB_2)
+ adr ip, LOCAL_LABEL(div0block) + 1
+ sub ip, ip, r3, lsl #1
+# else
+ adr ip, LOCAL_LABEL(div0block)
+# endif
+ sub ip, ip, r3, lsl #2
+ sub ip, ip, r3, lsl #3
+ mov r3, #0
+ bx ip
+# else // No CLZ Feature
+# if defined(USE_THUMB_2)
+# error THUMB mode requires CLZ or UDIV
+# endif
+# if defined(USE_THUMB_1)
+# define BLOCK_SIZE 10
+# else
+# define BLOCK_SIZE 12
+# endif
+
+ mov r2, r0
+# if defined(USE_THUMB_1)
+ mov ip, r0
+ adr r0, LOCAL_LABEL(div0block)
+ adds r0, #1
+# else
+ adr ip, LOCAL_LABEL(div0block)
+# endif
+ lsrs r3, r2, #16
+ cmp r3, r1
+# if defined(USE_THUMB_1)
+ blo LOCAL_LABEL(skip_16)
+ movs r2, r3
+ subs r0, r0, #(16 * BLOCK_SIZE)
+LOCAL_LABEL(skip_16):
+# else
+ movhs r2, r3
+ subhs ip, ip, #(16 * BLOCK_SIZE)
+# endif
+
+ lsrs r3, r2, #8
+ cmp r3, r1
+# if defined(USE_THUMB_1)
+ blo LOCAL_LABEL(skip_8)
+ movs r2, r3
+ subs r0, r0, #(8 * BLOCK_SIZE)
+LOCAL_LABEL(skip_8):
+# else
+ movhs r2, r3
+ subhs ip, ip, #(8 * BLOCK_SIZE)
+# endif
+
+ lsrs r3, r2, #4
+ cmp r3, r1
+# if defined(USE_THUMB_1)
+ blo LOCAL_LABEL(skip_4)
+ movs r2, r3
+ subs r0, r0, #(4 * BLOCK_SIZE)
+LOCAL_LABEL(skip_4):
+# else
+ movhs r2, r3
+ subhs ip, #(4 * BLOCK_SIZE)
+# endif
+
+ lsrs r3, r2, #2
+ cmp r3, r1
+# if defined(USE_THUMB_1)
+ blo LOCAL_LABEL(skip_2)
+ movs r2, r3
+ subs r0, r0, #(2 * BLOCK_SIZE)
+LOCAL_LABEL(skip_2):
+# else
+ movhs r2, r3
+ subhs ip, ip, #(2 * BLOCK_SIZE)
+# endif
+
+ // Last block, no need to update r2 or r3.
+# if defined(USE_THUMB_1)
+ lsrs r3, r2, #1
+ cmp r3, r1
+ blo LOCAL_LABEL(skip_1)
+ subs r0, r0, #(1 * BLOCK_SIZE)
+LOCAL_LABEL(skip_1):
+ movs r2, r0
+ mov r0, ip
+ movs r3, #0
+ JMP (r2)
+
+# else
+ cmp r1, r2, lsr #1
+ subls ip, ip, #(1 * BLOCK_SIZE)
+
+ movs r3, #0
+
+ JMP(ip)
+# endif
+# endif // __ARM_FEATURE_CLZ
+
+
+#define IMM #
+ // due to the range limit of branch in Thumb1, we have to place the
+ // block closer
+LOCAL_LABEL(divby0):
+ movs r0, #0
+# if defined(__ARM_EABI__)
+ push {r7, lr}
+ bl __aeabi_idiv0 // due to relocation limit, can't use b.
+ pop {r7, pc}
+# else
+ JMP(lr)
+# endif
+
+
+#if defined(USE_THUMB_1)
+#define block(shift) \
+ lsls r2, r1, IMM shift; \
+ cmp r0, r2; \
+ blo LOCAL_LABEL(block_skip_##shift); \
+ subs r0, r0, r2; \
+ LOCAL_LABEL(block_skip_##shift) :; \
+ adcs r3, r3 // same as ((r3 << 1) | Carry). Carry is set if r0 >= r2.
+
+ // TODO: if the current location counter is not word aligned, we don't
+ // need the .p2align and nop
+ // Label div0block must be word-aligned. First align block 31
+ .p2align 2
+ nop // Padding to align div0block as 31 blocks = 310 bytes
+
+#else
+#define block(shift) \
+ cmp r0, r1, lsl IMM shift; \
+ ITT(hs); \
+ WIDE(addhs) r3, r3, IMM (1 << shift); \
+ WIDE(subhs) r0, r0, r1, lsl IMM shift
+#endif
+
+ block(31)
+ block(30)
+ block(29)
+ block(28)
+ block(27)
+ block(26)
+ block(25)
+ block(24)
+ block(23)
+ block(22)
+ block(21)
+ block(20)
+ block(19)
+ block(18)
+ block(17)
+ block(16)
+ block(15)
+ block(14)
+ block(13)
+ block(12)
+ block(11)
+ block(10)
+ block(9)
+ block(8)
+ block(7)
+ block(6)
+ block(5)
+ block(4)
+ block(3)
+ block(2)
+ block(1)
+LOCAL_LABEL(div0block):
+ block(0)
+
+ mov r0, r3
+ JMP(lr)
+#endif // __ARM_ARCH_EXT_IDIV__
+
+END_COMPILERRT_FUNCTION(__udivsi3)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/udivsi3.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
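The Thumb-1 path cannot use the conditional addhs/subhs pair, so its block() branches around the subtraction and then folds the comparison's carry into the quotient with adcs r3, r3, i.e. q = (q << 1) | carry, building the quotient MSB-first. One step of that, modeled in illustrative C:

    // One Thumb-1 block(shift) step: cmp sets the carry flag when
    // n >= (d << shift); "adcs r3, r3" then shifts the quotient left by one
    // and appends that carry as the new low bit.
    static unsigned model_block_step(unsigned *n, unsigned d, unsigned shift,
                                     unsigned q) {
      unsigned t = d << shift;
      unsigned carry = (*n >= t) ? 1u : 0u; // carry after cmp (and after subs)
      if (carry)
        *n -= t;
      return (q << 1) | carry;
    }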
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/umodsi3.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/umodsi3.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/umodsi3.S (revision 351984)
@@ -0,0 +1,156 @@
+//===-- umodsi3.S - 32-bit unsigned integer modulus -----------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the __umodsi3 (32-bit unsigned integer modulus)
+// function for the ARM 32-bit architecture.
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+ .syntax unified
+ .text
+ DEFINE_CODE_STATE
+
+@ unsigned int __umodsi3(unsigned int dividend, unsigned int divisor)
+@ Calculate and return the remainder of the (unsigned) division.
+
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__umodsi3)
+#if __ARM_ARCH_EXT_IDIV__
+ tst r1, r1
+ beq LOCAL_LABEL(divby0)
+ udiv r2, r0, r1
+ mls r0, r2, r1, r0
+ bx lr
+#else
+ cmp r1, #1
+ bcc LOCAL_LABEL(divby0)
+ ITT(eq)
+ moveq r0, #0
+ JMPc(lr, eq)
+ cmp r0, r1
+ IT(cc)
+ JMPc(lr, cc)
+
+ // Implement division using binary long division algorithm.
+ //
+ // r0 is the numerator, r1 the denominator.
+ //
+ // The code before JMP computes the correct shift I, so that
+ // r0 and (r1 << I) have the highest bit set in the same position.
+ // At the time of JMP, ip := .Ldiv0block - 8 * I.
+ // This depends on the fixed instruction size of block.
+// For ARM mode, this is 8 bytes; for THUMB mode, 10 bytes.
+ //
+ // block(shift) implements the test-and-update-quotient core.
+ // It assumes (r0 << shift) can be computed without overflow and
+ // that (r0 << shift) < 2 * r1. The quotient is stored in r3.
+
+# ifdef __ARM_FEATURE_CLZ
+ clz ip, r0
+ clz r3, r1
+ // r0 >= r1 implies clz(r0) <= clz(r1), so ip <= r3.
+ sub r3, r3, ip
+# if defined(USE_THUMB_2)
+ adr ip, LOCAL_LABEL(div0block) + 1
+ sub ip, ip, r3, lsl #1
+# else
+ adr ip, LOCAL_LABEL(div0block)
+# endif
+ sub ip, ip, r3, lsl #3
+ bx ip
+# else
+# if defined(USE_THUMB_2)
+# error THUMB mode requires CLZ or UDIV
+# endif
+ mov r2, r0
+ adr ip, LOCAL_LABEL(div0block)
+
+ lsr r3, r2, #16
+ cmp r3, r1
+ movhs r2, r3
+ subhs ip, ip, #(16 * 8)
+
+ lsr r3, r2, #8
+ cmp r3, r1
+ movhs r2, r3
+ subhs ip, ip, #(8 * 8)
+
+ lsr r3, r2, #4
+ cmp r3, r1
+ movhs r2, r3
+ subhs ip, #(4 * 8)
+
+ lsr r3, r2, #2
+ cmp r3, r1
+ movhs r2, r3
+ subhs ip, ip, #(2 * 8)
+
+ // Last block, no need to update r2 or r3.
+ cmp r1, r2, lsr #1
+ subls ip, ip, #(1 * 8)
+
+ JMP(ip)
+# endif
+
+#define IMM #
+
+#define block(shift) \
+ cmp r0, r1, lsl IMM shift; \
+ IT(hs); \
+ WIDE(subhs) r0, r0, r1, lsl IMM shift
+
+ block(31)
+ block(30)
+ block(29)
+ block(28)
+ block(27)
+ block(26)
+ block(25)
+ block(24)
+ block(23)
+ block(22)
+ block(21)
+ block(20)
+ block(19)
+ block(18)
+ block(17)
+ block(16)
+ block(15)
+ block(14)
+ block(13)
+ block(12)
+ block(11)
+ block(10)
+ block(9)
+ block(8)
+ block(7)
+ block(6)
+ block(5)
+ block(4)
+ block(3)
+ block(2)
+ block(1)
+LOCAL_LABEL(div0block):
+ block(0)
+ JMP(lr)
+#endif // __ARM_ARCH_EXT_IDIV__
+
+LOCAL_LABEL(divby0):
+ mov r0, #0
+#ifdef __ARM_EABI__
+ b __aeabi_idiv0
+#else
+ JMP(lr)
+#endif
+
+END_COMPILERRT_FUNCTION(__umodsi3)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/umodsi3.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
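__umodsi3 runs the same block chain but never materializes a quotient, so each ARM-mode block is just a compare plus conditional subtract, 8 bytes rather than 12, which is why the ip adjustments above use multiples of 8. In illustrative C (again assuming n >= d > 0 after the early-out branches):

    // Remainder-only long division: conditionally subtract at each shift,
    // no quotient register needed.
    static unsigned model_umodsi3(unsigned n, unsigned d) {
      for (int shift = __builtin_clz(d) - __builtin_clz(n); shift >= 0; --shift)
        if (n >= (d << shift))
          n -= d << shift;
      return n; // the remainder stays in r0
    }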
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/unorddf2vfp.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/unorddf2vfp.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/unorddf2vfp.S (revision 351984)
@@ -0,0 +1,36 @@
+//===-- unorddf2vfp.S - Implement unorddf2vfp ------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+//
+// extern int __unorddf2vfp(double a, double b);
+//
+// Returns one iff a or b is NaN.
+// Uses the Darwin calling convention, where double precision arguments are
+// passed in GPR pairs.
+//
+ .syntax unified
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__unorddf2vfp)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+ vcmp.f64 d0, d1
+#else
+ vmov d6, r0, r1 // load r0/r1 pair in double register
+ vmov d7, r2, r3 // load r2/r3 pair in double register
+ vcmp.f64 d6, d7
+#endif
+ vmrs apsr_nzcv, fpscr
+ ITE(vs)
+ movvs r0, #1 // set result register to 1 if "overflow" (any NaNs)
+ movvc r0, #0
+ bx lr
+END_COMPILERRT_FUNCTION(__unorddf2vfp)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/unorddf2vfp.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
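vcmp.f64 reports an unordered comparison, which happens exactly when at least one operand is NaN, through the V ("overflow") flag copied out of FPSCR, hence the movvs/movvc pair. The whole routine is equivalent to this illustrative C:

    // Nonzero iff either operand is NaN; NaN is the only value for which
    // x != x holds, which mirrors the "unordered" result of vcmp.
    static int model_unorddf2vfp(double a, double b) {
      return (a != a) || (b != b);
    }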
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/unordsf2vfp.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/unordsf2vfp.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/unordsf2vfp.S (revision 351984)
@@ -0,0 +1,36 @@
+//===-- unordsf2vfp.S - Implement unordsf2vfp -----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+//
+// extern int __unordsf2vfp(float a, float b);
+//
+// Returns one iff a or b is NaN.
+// Uses the Darwin calling convention, where single precision arguments are
+// passed like 32-bit ints.
+//
+ .syntax unified
+ .p2align 2
+DEFINE_COMPILERRT_FUNCTION(__unordsf2vfp)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+ vcmp.f32 s0, s1
+#else
+ vmov s14, r0 // move from GPR 0 to float register
+ vmov s15, r1 // move from GPR 1 to float register
+ vcmp.f32 s14, s15
+#endif
+ vmrs apsr_nzcv, fpscr
+ ITE(vs)
+ movvs r0, #1 // set result register to 1 if "overflow" (any NaNs)
+ movvc r0, #0
+ bx lr
+END_COMPILERRT_FUNCTION(__unordsf2vfp)
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/arm/unordsf2vfp.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ashldi3.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ashldi3.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ashldi3.c (revision 351984)
@@ -0,0 +1,38 @@
+//===-- ashldi3.c - Implement __ashldi3 -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __ashldi3 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+// Returns: a << b
+
+// Precondition: 0 <= b < bits_in_dword
+
+COMPILER_RT_ABI di_int __ashldi3(di_int a, si_int b) {
+ const int bits_in_word = (int)(sizeof(si_int) * CHAR_BIT);
+ dwords input;
+ dwords result;
+ input.all = a;
+ if (b & bits_in_word) /* bits_in_word <= b < bits_in_dword */ {
+ result.s.low = 0;
+ result.s.high = input.s.low << (b - bits_in_word);
+ } else /* 0 <= b < bits_in_word */ {
+ if (b == 0)
+ return a;
+ result.s.low = input.s.low << b;
+ result.s.high = (input.s.high << b) | (input.s.low >> (bits_in_word - b));
+ }
+ return result.all;
+}
+
+#if defined(__ARM_EABI__)
+COMPILER_RT_ALIAS(__ashldi3, __aeabi_llsl)
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ashldi3.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
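The b & bits_in_word test distinguishes the two comment-annotated cases: for b >= 32 the result lives entirely in the high word, while for 0 < b < 32 the high word must also pick up the bits shifted out of the low word. A couple of illustrative checks (the routine is declared here with standard types, on the assumption that di_int is long long and si_int is a 32-bit int on these targets):

    #include <assert.h>

    extern long long __ashldi3(long long a, int b); // provided by compiler-rt

    static void ashldi3_examples(void) {
      assert(__ashldi3(1LL, 40) == (1LL << 40));            // b >= 32: high word only
      assert(__ashldi3(0x80000001LL, 8) == 0x8000000100LL); // b < 32: bit 31 crosses into the high word
    }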
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ashlti3.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ashlti3.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ashlti3.c (revision 351984)
@@ -0,0 +1,38 @@
+//===-- ashlti3.c - Implement __ashlti3 -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __ashlti3 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+#ifdef CRT_HAS_128BIT
+
+// Returns: a << b
+
+// Precondition: 0 <= b < bits_in_tword
+
+COMPILER_RT_ABI ti_int __ashlti3(ti_int a, si_int b) {
+ const int bits_in_dword = (int)(sizeof(di_int) * CHAR_BIT);
+ twords input;
+ twords result;
+ input.all = a;
+ if (b & bits_in_dword) /* bits_in_dword <= b < bits_in_tword */ {
+ result.s.low = 0;
+ result.s.high = input.s.low << (b - bits_in_dword);
+ } else /* 0 <= b < bits_in_dword */ {
+ if (b == 0)
+ return a;
+ result.s.low = input.s.low << b;
+ result.s.high = (input.s.high << b) | (input.s.low >> (bits_in_dword - b));
+ }
+ return result.all;
+}
+
+#endif // CRT_HAS_128BIT
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ashlti3.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ashrdi3.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ashrdi3.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ashrdi3.c (revision 351984)
@@ -0,0 +1,39 @@
+//===-- ashrdi3.c - Implement __ashrdi3 -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __ashrdi3 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+// Returns: arithmetic a >> b
+
+// Precondition: 0 <= b < bits_in_dword
+
+COMPILER_RT_ABI di_int __ashrdi3(di_int a, si_int b) {
+ const int bits_in_word = (int)(sizeof(si_int) * CHAR_BIT);
+ dwords input;
+ dwords result;
+ input.all = a;
+ if (b & bits_in_word) /* bits_in_word <= b < bits_in_dword */ {
+ // result.s.high = input.s.high < 0 ? -1 : 0
+ result.s.high = input.s.high >> (bits_in_word - 1);
+ result.s.low = input.s.high >> (b - bits_in_word);
+ } else /* 0 <= b < bits_in_word */ {
+ if (b == 0)
+ return a;
+ result.s.high = input.s.high >> b;
+ result.s.low = (input.s.high << (bits_in_word - b)) | (input.s.low >> b);
+ }
+ return result.all;
+}
+
+#if defined(__ARM_EABI__)
+COMPILER_RT_ALIAS(__ashrdi3, __aeabi_lasr)
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ashrdi3.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
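The b >= 32 branch fills the high word from the sign (input.s.high >> 31 is all-ones for negative inputs, the commented -1-or-0) and arithmetically shifts the old high word into the low word. Illustrative checks, with the same standard-type declaration caveat as above:

    #include <assert.h>

    extern long long __ashrdi3(long long a, int b); // provided by compiler-rt

    static void ashrdi3_examples(void) {
      assert(__ashrdi3(-8LL, 1) == -4LL);  // b < 32: sign preserved across the shift
      assert(__ashrdi3(-1LL, 40) == -1LL); // b >= 32: high word is pure sign fill
    }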
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ashrti3.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ashrti3.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ashrti3.c (revision 351984)
@@ -0,0 +1,39 @@
+//===-- ashrti3.c - Implement __ashrti3 -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __ashrti3 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+#ifdef CRT_HAS_128BIT
+
+// Returns: arithmetic a >> b
+
+// Precondition: 0 <= b < bits_in_tword
+
+COMPILER_RT_ABI ti_int __ashrti3(ti_int a, si_int b) {
+ const int bits_in_dword = (int)(sizeof(di_int) * CHAR_BIT);
+ twords input;
+ twords result;
+ input.all = a;
+ if (b & bits_in_dword) /* bits_in_dword <= b < bits_in_tword */ {
+ // result.s.high = input.s.high < 0 ? -1 : 0
+ result.s.high = input.s.high >> (bits_in_dword - 1);
+ result.s.low = input.s.high >> (b - bits_in_dword);
+ } else /* 0 <= b < bits_in_dword */ {
+ if (b == 0)
+ return a;
+ result.s.high = input.s.high >> b;
+ result.s.low = (input.s.high << (bits_in_dword - b)) | (input.s.low >> b);
+ }
+ return result.all;
+}
+
+#endif // CRT_HAS_128BIT
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ashrti3.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/assembly.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/assembly.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/assembly.h (revision 351984)
@@ -0,0 +1,200 @@
+//===-- assembly.h - compiler-rt assembler support macros -----------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines macros for use in compiler-rt assembler source.
+// This file is not part of the interface of this library.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef COMPILERRT_ASSEMBLY_H
+#define COMPILERRT_ASSEMBLY_H
+
+#if defined(__POWERPC__) || defined(__powerpc__) || defined(__ppc__)
+#define SEPARATOR @
+#else
+#define SEPARATOR ;
+#endif
+
+#if defined(__APPLE__)
+#define HIDDEN(name) .private_extern name
+#define LOCAL_LABEL(name) L_##name
+// tell linker it can break up file at label boundaries
+#define FILE_LEVEL_DIRECTIVE .subsections_via_symbols
+#define SYMBOL_IS_FUNC(name)
+#define CONST_SECTION .const
+
+#define NO_EXEC_STACK_DIRECTIVE
+
+#elif defined(__ELF__)
+
+#define HIDDEN(name) .hidden name
+#define LOCAL_LABEL(name) .L_##name
+#define FILE_LEVEL_DIRECTIVE
+#if defined(__arm__)
+#define SYMBOL_IS_FUNC(name) .type name,%function
+#else
+#define SYMBOL_IS_FUNC(name) .type name,@function
+#endif
+#define CONST_SECTION .section .rodata
+
+#if defined(__GNU__) || defined(__FreeBSD__) || defined(__Fuchsia__) || \
+ defined(__linux__)
+#define NO_EXEC_STACK_DIRECTIVE .section .note.GNU-stack,"",%progbits
+#else
+#define NO_EXEC_STACK_DIRECTIVE
+#endif
+
+#else // !__APPLE__ && !__ELF__
+
+#define HIDDEN(name)
+#define LOCAL_LABEL(name) .L ## name
+#define FILE_LEVEL_DIRECTIVE
+#define SYMBOL_IS_FUNC(name) \
+ .def name SEPARATOR \
+ .scl 2 SEPARATOR \
+ .type 32 SEPARATOR \
+ .endef
+#define CONST_SECTION .section .rdata,"rd"
+
+#define NO_EXEC_STACK_DIRECTIVE
+
+#endif
+
+#if defined(__arm__)
+
+// Determine actual [ARM][THUMB[1][2]] ISA using compiler predefined macros:
+// - for '-mthumb -march=armv6' compiler defines '__thumb__'
+// - for '-mthumb -march=armv7' compiler defines '__thumb__' and '__thumb2__'
+#if defined(__thumb2__) || defined(__thumb__)
+#define DEFINE_CODE_STATE .thumb SEPARATOR
+#define DECLARE_FUNC_ENCODING .thumb_func SEPARATOR
+#if defined(__thumb2__)
+#define USE_THUMB_2
+#define IT(cond) it cond
+#define ITT(cond) itt cond
+#define ITE(cond) ite cond
+#else
+#define USE_THUMB_1
+#define IT(cond)
+#define ITT(cond)
+#define ITE(cond)
+#endif // defined(__thumb2__)
+#else // !defined(__thumb2__) && !defined(__thumb__)
+#define DEFINE_CODE_STATE .arm SEPARATOR
+#define DECLARE_FUNC_ENCODING
+#define IT(cond)
+#define ITT(cond)
+#define ITE(cond)
+#endif
+
+#if defined(USE_THUMB_1) && defined(USE_THUMB_2)
+#error "USE_THUMB_1 and USE_THUMB_2 can't be defined together."
+#endif
+
+#if defined(__ARM_ARCH_4T__) || __ARM_ARCH >= 5
+#define ARM_HAS_BX
+#endif
+#if !defined(__ARM_FEATURE_CLZ) && !defined(USE_THUMB_1) && \
+ (__ARM_ARCH >= 6 || (__ARM_ARCH == 5 && !defined(__ARM_ARCH_5__)))
+#define __ARM_FEATURE_CLZ
+#endif
+
+#ifdef ARM_HAS_BX
+#define JMP(r) bx r
+#define JMPc(r, c) bx##c r
+#else
+#define JMP(r) mov pc, r
+#define JMPc(r, c) mov##c pc, r
+#endif
+
+// pop {pc} can't switch Thumb mode on ARMv4T
+#if __ARM_ARCH >= 5
+#define POP_PC() pop {pc}
+#else
+#define POP_PC() \
+ pop {ip}; \
+ JMP(ip)
+#endif
+
+#if defined(USE_THUMB_2)
+#define WIDE(op) op.w
+#else
+#define WIDE(op) op
+#endif
+#else // !defined(__arm__)
+#define DECLARE_FUNC_ENCODING
+#define DEFINE_CODE_STATE
+#endif
+
+#define GLUE2(a, b) a##b
+#define GLUE(a, b) GLUE2(a, b)
+#define SYMBOL_NAME(name) GLUE(__USER_LABEL_PREFIX__, name)
+
+#ifdef VISIBILITY_HIDDEN
+#define DECLARE_SYMBOL_VISIBILITY(name) \
+ HIDDEN(SYMBOL_NAME(name)) SEPARATOR
+#else
+#define DECLARE_SYMBOL_VISIBILITY(name)
+#endif
+
+#define DEFINE_COMPILERRT_FUNCTION(name) \
+ DEFINE_CODE_STATE \
+ FILE_LEVEL_DIRECTIVE SEPARATOR \
+ .globl SYMBOL_NAME(name) SEPARATOR \
+ SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR \
+ DECLARE_SYMBOL_VISIBILITY(name) \
+ DECLARE_FUNC_ENCODING \
+ SYMBOL_NAME(name):
+
+#define DEFINE_COMPILERRT_THUMB_FUNCTION(name) \
+ DEFINE_CODE_STATE \
+ FILE_LEVEL_DIRECTIVE SEPARATOR \
+ .globl SYMBOL_NAME(name) SEPARATOR \
+ SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR \
+ DECLARE_SYMBOL_VISIBILITY(name) SEPARATOR \
+ .thumb_func SEPARATOR \
+ SYMBOL_NAME(name):
+
+#define DEFINE_COMPILERRT_PRIVATE_FUNCTION(name) \
+ DEFINE_CODE_STATE \
+ FILE_LEVEL_DIRECTIVE SEPARATOR \
+ .globl SYMBOL_NAME(name) SEPARATOR \
+ SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR \
+ HIDDEN(SYMBOL_NAME(name)) SEPARATOR \
+ DECLARE_FUNC_ENCODING \
+ SYMBOL_NAME(name):
+
+#define DEFINE_COMPILERRT_PRIVATE_FUNCTION_UNMANGLED(name) \
+ DEFINE_CODE_STATE \
+ .globl name SEPARATOR \
+ SYMBOL_IS_FUNC(name) SEPARATOR \
+ HIDDEN(name) SEPARATOR \
+ DECLARE_FUNC_ENCODING \
+ name:
+
+#define DEFINE_COMPILERRT_FUNCTION_ALIAS(name, target) \
+ .globl SYMBOL_NAME(name) SEPARATOR \
+ SYMBOL_IS_FUNC(SYMBOL_NAME(name)) SEPARATOR \
+ DECLARE_SYMBOL_VISIBILITY(SYMBOL_NAME(name)) SEPARATOR \
+ .set SYMBOL_NAME(name), SYMBOL_NAME(target) SEPARATOR
+
+#if defined(__ARM_EABI__)
+#define DEFINE_AEABI_FUNCTION_ALIAS(aeabi_name, name) \
+ DEFINE_COMPILERRT_FUNCTION_ALIAS(aeabi_name, name)
+#else
+#define DEFINE_AEABI_FUNCTION_ALIAS(aeabi_name, name)
+#endif
+
+#ifdef __ELF__
+#define END_COMPILERRT_FUNCTION(name) \
+ .size SYMBOL_NAME(name), . - SYMBOL_NAME(name)
+#else
+#define END_COMPILERRT_FUNCTION(name)
+#endif
+
+#endif // COMPILERRT_ASSEMBLY_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/assembly.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
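Editorial note: the SYMBOL_NAME()/GLUE() macros above rely on two-level token pasting so that __USER_LABEL_PREFIX__ is macro-expanded before the ## operator glues it onto the symbol. A minimal, self-contained C sketch of that idiom follows; PREFIX is a hypothetical stand-in for __USER_LABEL_PREFIX__ and everything here is illustrative rather than part of the library.

// Minimal sketch of the two-level token-pasting idiom behind SYMBOL_NAME().
#include <stdio.h>

#define PREFIX my_
#define GLUE2(a, b) a##b        // pastes tokens without expanding them first
#define GLUE(a, b) GLUE2(a, b)  // the extra level forces expansion of a and b
#define NAME(name) GLUE(PREFIX, name)

int NAME(func)(void) { return 42; } // expands to: int my_func(void)

int main(void) {
  printf("%d\n", my_func()); // prints 42
  return 0;
}

Had NAME() used GLUE2 directly, PREFIX would have been pasted literally as "PREFIX", which is why the indirection exists.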
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/atomic.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/atomic.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/atomic.c (revision 351984)
@@ -0,0 +1,342 @@
+//===-- atomic.c - Implement support functions for atomic operations.------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// atomic.c defines a set of functions for performing atomic accesses on
+// arbitrary-sized memory locations. This design uses locks that should
+// be fast in the uncontended case, for two reasons:
+//
+// 1) This code must work with C programs that do not link to anything
+// (including pthreads) and so it should not depend on any pthread
+// functions.
+// 2) Atomic operations, rather than explicit mutexes, are most commonly used
+//    in code where contended operations are rare.
+//
+// To avoid needing a per-object lock, this code allocates an array of
+// locks and hashes the object pointers to find the one that it should use.
+// For operations that must be atomic on two locations, the lower lock is
+// always acquired first, to avoid deadlock.
+//
+//===----------------------------------------------------------------------===//
+
+#include <stdint.h>
+#include <string.h>
+
+#include "assembly.h"
+
+// Clang objects if you redefine a builtin. This little hack allows us to
+// define a function with the same name as an intrinsic.
+#pragma redefine_extname __atomic_load_c SYMBOL_NAME(__atomic_load)
+#pragma redefine_extname __atomic_store_c SYMBOL_NAME(__atomic_store)
+#pragma redefine_extname __atomic_exchange_c SYMBOL_NAME(__atomic_exchange)
+#pragma redefine_extname __atomic_compare_exchange_c SYMBOL_NAME( \
+ __atomic_compare_exchange)
+
+/// Number of locks. This allocates one page on 32-bit platforms, two on
+/// 64-bit. This can be specified externally if a different trade-off between
+/// memory usage and contention probability is required for a given platform.
+#ifndef SPINLOCK_COUNT
+#define SPINLOCK_COUNT (1 << 10)
+#endif
+static const long SPINLOCK_MASK = SPINLOCK_COUNT - 1;
+
+////////////////////////////////////////////////////////////////////////////////
+// Platform-specific lock implementation. Falls back to spinlocks if none is
+// defined. Each platform should define the Lock type, and corresponding
+// lock() and unlock() functions.
+////////////////////////////////////////////////////////////////////////////////
+#ifdef __FreeBSD__
+#include <errno.h>
+#include <machine/atomic.h>
+#include <sys/types.h>
+#include <sys/umtx.h>
+typedef struct _usem Lock;
+__inline static void unlock(Lock *l) {
+ __c11_atomic_store((_Atomic(uint32_t) *)&l->_count, 1, __ATOMIC_RELEASE);
+ __c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
+ if (l->_has_waiters)
+ _umtx_op(l, UMTX_OP_SEM_WAKE, 1, 0, 0);
+}
+__inline static void lock(Lock *l) {
+ uint32_t old = 1;
+ while (!__c11_atomic_compare_exchange_weak((_Atomic(uint32_t) *)&l->_count,
+ &old, 0, __ATOMIC_ACQUIRE,
+ __ATOMIC_RELAXED)) {
+ _umtx_op(l, UMTX_OP_SEM_WAIT, 0, 0, 0);
+ old = 1;
+ }
+}
+/// locks for atomic operations
+static Lock locks[SPINLOCK_COUNT] = {[0 ... SPINLOCK_COUNT - 1] = {0, 1, 0}};
+
+#elif defined(__APPLE__)
+#include <libkern/OSAtomic.h>
+typedef OSSpinLock Lock;
+__inline static void unlock(Lock *l) { OSSpinLockUnlock(l); }
+/// Locks a lock. In the current implementation, this is potentially
+/// unbounded in the contended case.
+__inline static void lock(Lock *l) { OSSpinLockLock(l); }
+static Lock locks[SPINLOCK_COUNT]; // initialized to OS_SPINLOCK_INIT which is 0
+
+#else
+typedef _Atomic(uintptr_t) Lock;
+/// Unlock a lock. This is a release operation.
+__inline static void unlock(Lock *l) {
+ __c11_atomic_store(l, 0, __ATOMIC_RELEASE);
+}
+/// Locks a lock. In the current implementation, this is potentially
+/// unbounded in the contended case.
+__inline static void lock(Lock *l) {
+ uintptr_t old = 0;
+ while (!__c11_atomic_compare_exchange_weak(l, &old, 1, __ATOMIC_ACQUIRE,
+ __ATOMIC_RELAXED))
+ old = 0;
+}
+/// locks for atomic operations
+static Lock locks[SPINLOCK_COUNT];
+#endif
+
+/// Returns a lock to use for a given pointer.
+static __inline Lock *lock_for_pointer(void *ptr) {
+ intptr_t hash = (intptr_t)ptr;
+ // Disregard the lowest 4 bits. We want all values that may be part of the
+ // same memory operation to hash to the same value and therefore use the same
+ // lock.
+ hash >>= 4;
+ // Use the next bits as the basis for the hash
+ intptr_t low = hash & SPINLOCK_MASK;
+ // Now use the high(er) set of bits to perturb the hash, so that we don't
+ // get collisions from atomic fields in a single object
+ hash >>= 16;
+ hash ^= low;
+ // Return a pointer to the word to use
+ return locks + (hash & SPINLOCK_MASK);
+}
+
+/// Macros for determining whether a size is lock-free. Clang cannot yet
+/// codegen __atomic_is_lock_free(16), so for now we assume 16-byte values are
+/// not lock free.
+#define IS_LOCK_FREE_1 __c11_atomic_is_lock_free(1)
+#define IS_LOCK_FREE_2 __c11_atomic_is_lock_free(2)
+#define IS_LOCK_FREE_4 __c11_atomic_is_lock_free(4)
+#define IS_LOCK_FREE_8 __c11_atomic_is_lock_free(8)
+#define IS_LOCK_FREE_16 0
+
+/// Macro that calls the compiler-generated lock-free versions of functions
+/// when they exist.
+#define LOCK_FREE_CASES() \
+ do { \
+ switch (size) { \
+ case 1: \
+ if (IS_LOCK_FREE_1) { \
+ LOCK_FREE_ACTION(uint8_t); \
+ } \
+ break; \
+ case 2: \
+ if (IS_LOCK_FREE_2) { \
+ LOCK_FREE_ACTION(uint16_t); \
+ } \
+ break; \
+ case 4: \
+ if (IS_LOCK_FREE_4) { \
+ LOCK_FREE_ACTION(uint32_t); \
+ } \
+ break; \
+ case 8: \
+ if (IS_LOCK_FREE_8) { \
+ LOCK_FREE_ACTION(uint64_t); \
+ } \
+ break; \
+ case 16: \
+ if (IS_LOCK_FREE_16) { \
+ /* FIXME: __uint128_t isn't available on 32 bit platforms. \
+ LOCK_FREE_ACTION(__uint128_t);*/ \
+ } \
+ break; \
+ } \
+ } while (0)
+
+/// An atomic load operation. This is atomic with respect to the source
+/// pointer only.
+void __atomic_load_c(int size, void *src, void *dest, int model) {
+#define LOCK_FREE_ACTION(type) \
+ *((type *)dest) = __c11_atomic_load((_Atomic(type) *)src, model); \
+ return;
+ LOCK_FREE_CASES();
+#undef LOCK_FREE_ACTION
+ Lock *l = lock_for_pointer(src);
+ lock(l);
+ memcpy(dest, src, size);
+ unlock(l);
+}
+
+/// An atomic store operation. This is atomic with respect to the destination
+/// pointer only.
+void __atomic_store_c(int size, void *dest, void *src, int model) {
+#define LOCK_FREE_ACTION(type) \
+ __c11_atomic_store((_Atomic(type) *)dest, *(type *)src, model); \
+ return;
+ LOCK_FREE_CASES();
+#undef LOCK_FREE_ACTION
+ Lock *l = lock_for_pointer(dest);
+ lock(l);
+ memcpy(dest, src, size);
+ unlock(l);
+}
+
+/// Atomic compare and exchange operation. If the value at *ptr is identical
+/// to the value at *expected, then this copies value at *desired to *ptr. If
+/// they are not, then this stores the current value from *ptr in *expected.
+///
+/// This function returns 1 if the exchange takes place or 0 if it fails.
+int __atomic_compare_exchange_c(int size, void *ptr, void *expected,
+ void *desired, int success, int failure) {
+#define LOCK_FREE_ACTION(type) \
+ return __c11_atomic_compare_exchange_strong( \
+ (_Atomic(type) *)ptr, (type *)expected, *(type *)desired, success, \
+ failure)
+ LOCK_FREE_CASES();
+#undef LOCK_FREE_ACTION
+ Lock *l = lock_for_pointer(ptr);
+ lock(l);
+ if (memcmp(ptr, expected, size) == 0) {
+ memcpy(ptr, desired, size);
+ unlock(l);
+ return 1;
+ }
+ memcpy(expected, ptr, size);
+ unlock(l);
+ return 0;
+}
+
+/// Performs an atomic exchange operation between two pointers. This is atomic
+/// with respect to the target address.
+void __atomic_exchange_c(int size, void *ptr, void *val, void *old, int model) {
+#define LOCK_FREE_ACTION(type) \
+ *(type *)old = \
+ __c11_atomic_exchange((_Atomic(type) *)ptr, *(type *)val, model); \
+ return;
+ LOCK_FREE_CASES();
+#undef LOCK_FREE_ACTION
+ Lock *l = lock_for_pointer(ptr);
+ lock(l);
+ memcpy(old, ptr, size);
+ memcpy(ptr, val, size);
+ unlock(l);
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Where the size is known at compile time, the compiler may emit calls to
+// specialised versions of the above functions.
+////////////////////////////////////////////////////////////////////////////////
+#ifdef __SIZEOF_INT128__
+#define OPTIMISED_CASES \
+ OPTIMISED_CASE(1, IS_LOCK_FREE_1, uint8_t) \
+ OPTIMISED_CASE(2, IS_LOCK_FREE_2, uint16_t) \
+ OPTIMISED_CASE(4, IS_LOCK_FREE_4, uint32_t) \
+ OPTIMISED_CASE(8, IS_LOCK_FREE_8, uint64_t) \
+ OPTIMISED_CASE(16, IS_LOCK_FREE_16, __uint128_t)
+#else
+#define OPTIMISED_CASES \
+ OPTIMISED_CASE(1, IS_LOCK_FREE_1, uint8_t) \
+ OPTIMISED_CASE(2, IS_LOCK_FREE_2, uint16_t) \
+ OPTIMISED_CASE(4, IS_LOCK_FREE_4, uint32_t) \
+ OPTIMISED_CASE(8, IS_LOCK_FREE_8, uint64_t)
+#endif
+
+#define OPTIMISED_CASE(n, lockfree, type) \
+ type __atomic_load_##n(type *src, int model) { \
+ if (lockfree) \
+ return __c11_atomic_load((_Atomic(type) *)src, model); \
+ Lock *l = lock_for_pointer(src); \
+ lock(l); \
+ type val = *src; \
+ unlock(l); \
+ return val; \
+ }
+OPTIMISED_CASES
+#undef OPTIMISED_CASE
+
+#define OPTIMISED_CASE(n, lockfree, type) \
+ void __atomic_store_##n(type *dest, type val, int model) { \
+ if (lockfree) { \
+ __c11_atomic_store((_Atomic(type) *)dest, val, model); \
+ return; \
+ } \
+ Lock *l = lock_for_pointer(dest); \
+ lock(l); \
+ *dest = val; \
+ unlock(l); \
+ return; \
+ }
+OPTIMISED_CASES
+#undef OPTIMISED_CASE
+
+#define OPTIMISED_CASE(n, lockfree, type) \
+ type __atomic_exchange_##n(type *dest, type val, int model) { \
+ if (lockfree) \
+ return __c11_atomic_exchange((_Atomic(type) *)dest, val, model); \
+ Lock *l = lock_for_pointer(dest); \
+ lock(l); \
+ type tmp = *dest; \
+ *dest = val; \
+ unlock(l); \
+ return tmp; \
+ }
+OPTIMISED_CASES
+#undef OPTIMISED_CASE
+
+#define OPTIMISED_CASE(n, lockfree, type) \
+ int __atomic_compare_exchange_##n(type *ptr, type *expected, type desired, \
+ int success, int failure) { \
+ if (lockfree) \
+ return __c11_atomic_compare_exchange_strong( \
+ (_Atomic(type) *)ptr, expected, desired, success, failure); \
+ Lock *l = lock_for_pointer(ptr); \
+ lock(l); \
+ if (*ptr == *expected) { \
+ *ptr = desired; \
+ unlock(l); \
+ return 1; \
+ } \
+ *expected = *ptr; \
+ unlock(l); \
+ return 0; \
+ }
+OPTIMISED_CASES
+#undef OPTIMISED_CASE
+
+////////////////////////////////////////////////////////////////////////////////
+// Atomic read-modify-write operations for integers of various sizes.
+////////////////////////////////////////////////////////////////////////////////
+#define ATOMIC_RMW(n, lockfree, type, opname, op) \
+ type __atomic_fetch_##opname##_##n(type *ptr, type val, int model) { \
+ if (lockfree) \
+ return __c11_atomic_fetch_##opname((_Atomic(type) *)ptr, val, model); \
+ Lock *l = lock_for_pointer(ptr); \
+ lock(l); \
+ type tmp = *ptr; \
+ *ptr = tmp op val; \
+ unlock(l); \
+ return tmp; \
+ }
+
+#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, add, +)
+OPTIMISED_CASES
+#undef OPTIMISED_CASE
+#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, sub, -)
+OPTIMISED_CASES
+#undef OPTIMISED_CASE
+#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, and, &)
+OPTIMISED_CASES
+#undef OPTIMISED_CASE
+#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, or, |)
+OPTIMISED_CASES
+#undef OPTIMISED_CASE
+#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, xor, ^)
+OPTIMISED_CASES
+#undef OPTIMISED_CASE
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/atomic.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
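Editorial note: when an object is too large (or too misaligned) to be handled lock-free, the compiler lowers the generic __atomic builtins to calls into the __atomic_load/__atomic_store entry points defined in atomic.c above. A hedged sketch of the caller's side, assuming Clang's usual lowering (the exact libcall strategy is compiler- and target-dependent):

// Illustrative only: a struct too large for lock-free atomics. The builtins
// below are typically lowered to library calls such as the __atomic_load and
// __atomic_store implemented in atomic.c.
#include <stdio.h>

typedef struct { long v[8]; } big_t; // 64 bytes: not lock-free on common targets

big_t shared;

int main(void) {
  big_t in = {{1, 2, 3, 4, 5, 6, 7, 8}}, out;
  __atomic_store(&shared, &in, __ATOMIC_SEQ_CST); // libcall: __atomic_store
  __atomic_load(&shared, &out, __ATOMIC_SEQ_CST); // libcall: __atomic_load
  printf("%ld\n", out.v[7]); // prints 8
  return 0;
}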
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/atomic_flag_clear.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/atomic_flag_clear.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/atomic_flag_clear.c (revision 351984)
@@ -0,0 +1,25 @@
+//===-- atomic_flag_clear.c -----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements atomic_flag_clear from C11's stdatomic.h.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef __has_include
+#define __has_include(inc) 0
+#endif
+
+#if __has_include(<stdatomic.h>)
+
+#include <stdatomic.h>
+#undef atomic_flag_clear
+void atomic_flag_clear(volatile atomic_flag *object) {
+ __c11_atomic_store(&(object)->_Value, 0, __ATOMIC_SEQ_CST);
+}
+
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/atomic_flag_clear.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/atomic_flag_clear_explicit.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/atomic_flag_clear_explicit.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/atomic_flag_clear_explicit.c (revision 351984)
@@ -0,0 +1,26 @@
+//===-- atomic_flag_clear_explicit.c --------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements atomic_flag_clear_explicit from C11's stdatomic.h.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef __has_include
+#define __has_include(inc) 0
+#endif
+
+#if __has_include(<stdatomic.h>)
+
+#include <stdatomic.h>
+#undef atomic_flag_clear_explicit
+void atomic_flag_clear_explicit(volatile atomic_flag *object,
+ memory_order order) {
+ __c11_atomic_store(&(object)->_Value, 0, order);
+}
+
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/atomic_flag_clear_explicit.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/atomic_flag_test_and_set.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/atomic_flag_test_and_set.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/atomic_flag_test_and_set.c (revision 351984)
@@ -0,0 +1,25 @@
+//===-- atomic_flag_test_and_set.c ----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements atomic_flag_test_and_set from C11's stdatomic.h.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef __has_include
+#define __has_include(inc) 0
+#endif
+
+#if __has_include(<stdatomic.h>)
+
+#include <stdatomic.h>
+#undef atomic_flag_test_and_set
+_Bool atomic_flag_test_and_set(volatile atomic_flag *object) {
+ return __c11_atomic_exchange(&(object)->_Value, 1, __ATOMIC_SEQ_CST);
+}
+
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/atomic_flag_test_and_set.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
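Editorial note: atomic_flag_test_and_set and atomic_flag_clear (the two files above) are exactly the pair needed to build a spinlock. A minimal single-threaded sketch, illustrative only:

// atomic_flag used as a spinlock, exercising the test-and-set/clear pair.
#include <stdatomic.h>
#include <stdio.h>

static atomic_flag guard = ATOMIC_FLAG_INIT;
static int counter;

static void locked_increment(void) {
  while (atomic_flag_test_and_set(&guard)) // spin until the flag was clear
    ;                                      // (acquire the "lock")
  ++counter;                               // critical section
  atomic_flag_clear(&guard);               // release the "lock"
}

int main(void) {
  locked_increment();
  printf("%d\n", counter); // prints 1
  return 0;
}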
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/atomic_flag_test_and_set_explicit.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/atomic_flag_test_and_set_explicit.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/atomic_flag_test_and_set_explicit.c (revision 351984)
@@ -0,0 +1,26 @@
+//===-- atomic_flag_test_and_set_explicit.c -------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements atomic_flag_test_and_set_explicit from C11's stdatomic.h.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef __has_include
+#define __has_include(inc) 0
+#endif
+
+#if __has_include(<stdatomic.h>)
+
+#include <stdatomic.h>
+#undef atomic_flag_test_and_set_explicit
+_Bool atomic_flag_test_and_set_explicit(volatile atomic_flag *object,
+ memory_order order) {
+ return __c11_atomic_exchange(&(object)->_Value, 1, order);
+}
+
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/atomic_flag_test_and_set_explicit.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/atomic_signal_fence.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/atomic_signal_fence.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/atomic_signal_fence.c (revision 351984)
@@ -0,0 +1,25 @@
+//===-- atomic_signal_fence.c ---------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements atomic_signal_fence from C11's stdatomic.h.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef __has_include
+#define __has_include(inc) 0
+#endif
+
+#if __has_include(<stdatomic.h>)
+
+#include <stdatomic.h>
+#undef atomic_signal_fence
+void atomic_signal_fence(memory_order order) {
+ __c11_atomic_signal_fence(order);
+}
+
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/atomic_signal_fence.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/atomic_thread_fence.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/atomic_thread_fence.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/atomic_thread_fence.c (revision 351984)
@@ -0,0 +1,25 @@
+//===-- atomic_thread_fence.c ---------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements atomic_thread_fence from C11's stdatomic.h.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef __has_include
+#define __has_include(inc) 0
+#endif
+
+#if __has_include(<stdatomic.h>)
+
+#include <stdatomic.h>
+#undef atomic_thread_fence
+void atomic_thread_fence(memory_order order) {
+ __c11_atomic_thread_fence(order);
+}
+
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/atomic_thread_fence.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
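Editorial note: atomic_thread_fence provides a standalone ordering point, letting relaxed accesses synchronize without attaching the ordering to any one operation. A sketch of the classic release/acquire pairing built from relaxed accesses plus fences; the single-threaded main() only shows the API shape:

// Release/acquire via standalone fences over relaxed atomic accesses.
#include <stdatomic.h>

atomic_int data, ready;

void producer(void) {
  atomic_store_explicit(&data, 42, memory_order_relaxed);
  atomic_thread_fence(memory_order_release); // orders the store above...
  atomic_store_explicit(&ready, 1, memory_order_relaxed);
}

int consumer(void) {
  if (atomic_load_explicit(&ready, memory_order_relaxed)) {
    atomic_thread_fence(memory_order_acquire); // ...before the load below
    return atomic_load_explicit(&data, memory_order_relaxed); // sees 42
  }
  return -1;
}

int main(void) {
  producer();
  return consumer() == 42 ? 0 : 1;
}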
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/bswapdi2.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/bswapdi2.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/bswapdi2.c (revision 351984)
@@ -0,0 +1,25 @@
+//===-- bswapdi2.c - Implement __bswapdi2 ---------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __bswapdi2 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+COMPILER_RT_ABI uint64_t __bswapdi2(uint64_t u) {
+ return (
+ (((u)&0xff00000000000000ULL) >> 56) |
+ (((u)&0x00ff000000000000ULL) >> 40) |
+ (((u)&0x0000ff0000000000ULL) >> 24) |
+ (((u)&0x000000ff00000000ULL) >> 8) |
+ (((u)&0x00000000ff000000ULL) << 8) |
+ (((u)&0x0000000000ff0000ULL) << 24) |
+ (((u)&0x000000000000ff00ULL) << 40) |
+ (((u)&0x00000000000000ffULL) << 56));
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/bswapdi2.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/bswapsi2.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/bswapsi2.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/bswapsi2.c (revision 351984)
@@ -0,0 +1,20 @@
+//===-- bswapsi2.c - Implement __bswapsi2 ---------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __bswapsi2 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+COMPILER_RT_ABI uint32_t __bswapsi2(uint32_t u) {
+ return ((((u)&0xff000000) >> 24) |
+ (((u)&0x00ff0000) >> 8) |
+ (((u)&0x0000ff00) << 8) |
+ (((u)&0x000000ff) << 24));
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/bswapsi2.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
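Editorial note: the byte swap in __bswapsi2 above isolates each byte with a mask and shifts it to its mirrored position. A quick self-contained check of the same mask-and-shift scheme, written as a plain C function rather than the library entry point:

// Verifies the mask-and-shift byte swap used by __bswapsi2.
#include <assert.h>
#include <stdint.h>

static uint32_t bswap32(uint32_t u) {
  return ((u & 0xff000000u) >> 24) | ((u & 0x00ff0000u) >> 8) |
         ((u & 0x0000ff00u) << 8)  | ((u & 0x000000ffu) << 24);
}

int main(void) {
  assert(bswap32(0x11223344u) == 0x44332211u);
  return 0;
}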
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/clear_cache.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/clear_cache.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/clear_cache.c (revision 351984)
@@ -0,0 +1,184 @@
+//===-- clear_cache.c - Implement __clear_cache ---------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+#include <assert.h>
+#include <stddef.h>
+
+#if __APPLE__
+#include <libkern/OSCacheControl.h>
+#endif
+
+#if defined(_WIN32)
+// Forward declare Win32 APIs since the GCC mode driver does not handle the
+// newer SDKs as well as needed.
+uint32_t FlushInstructionCache(uintptr_t hProcess, void *lpBaseAddress,
+ uintptr_t dwSize);
+uintptr_t GetCurrentProcess(void);
+#endif
+
+#if defined(__FreeBSD__) && defined(__arm__)
+#include <machine/sysarch.h>
+#include <sys/types.h>
+#endif
+
+#if defined(__NetBSD__) && defined(__arm__)
+#include <machine/sysarch.h>
+#endif
+
+#if defined(__OpenBSD__) && defined(__mips__)
+#include <machine/sysarch.h>
+#include <sys/types.h>
+#endif
+
+#if defined(__linux__) && defined(__mips__)
+#include <sys/cachectl.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+#if defined(__ANDROID__) && defined(__LP64__)
+// clear_mips_cache - Invalidates instruction cache for Mips.
+static void clear_mips_cache(const void *Addr, size_t Size) {
+ __asm__ volatile(
+ ".set push\n"
+ ".set noreorder\n"
+ ".set noat\n"
+ "beq %[Size], $zero, 20f\n" // If size == 0, branch around.
+ "nop\n"
+ "daddu %[Size], %[Addr], %[Size]\n" // Calculate end address + 1
+ "rdhwr $v0, $1\n" // Get step size for SYNCI.
+ // $1 is $HW_SYNCI_Step
+ "beq $v0, $zero, 20f\n" // If no caches require
+ // synchronization, branch
+ // around.
+ "nop\n"
+ "10:\n"
+ "synci 0(%[Addr])\n" // Synchronize all caches around
+ // address.
+ "daddu %[Addr], %[Addr], $v0\n" // Add step size.
+ "sltu $at, %[Addr], %[Size]\n" // Compare current with end
+ // address.
+ "bne $at, $zero, 10b\n" // Branch if more to do.
+ "nop\n"
+ "sync\n" // Clear memory hazards.
+ "20:\n"
+ "bal 30f\n"
+ "nop\n"
+ "30:\n"
+ "daddiu $ra, $ra, 12\n" // $ra has a value of $pc here.
+ // Add offset of 12 to point to the
+ // instruction after the last nop.
+ //
+ "jr.hb $ra\n" // Return, clearing instruction
+ // hazards.
+ "nop\n"
+ ".set pop\n"
+ : [ Addr ] "+r"(Addr), [ Size ] "+r"(Size)::"at", "ra", "v0", "memory");
+}
+#endif
+#endif
+
+// The compiler generates calls to __clear_cache() when creating
+// trampoline functions on the stack for use with nested functions.
+// It is expected to invalidate the instruction cache for the
+// specified range.
+
+void __clear_cache(void *start, void *end) {
+#if __i386__ || __x86_64__ || defined(_M_IX86) || defined(_M_X64)
+// Intel processors have a unified instruction and data cache
+// so there is nothing to do
+#elif defined(_WIN32) && (defined(__arm__) || defined(__aarch64__))
+ FlushInstructionCache(GetCurrentProcess(), start, end - start);
+#elif defined(__arm__) && !defined(__APPLE__)
+#if defined(__FreeBSD__) || defined(__NetBSD__)
+ struct arm_sync_icache_args arg;
+
+ arg.addr = (uintptr_t)start;
+ arg.len = (uintptr_t)end - (uintptr_t)start;
+
+ sysarch(ARM_SYNC_ICACHE, &arg);
+#elif defined(__linux__)
+// We used to include asm/unistd.h for the __ARM_NR_cacheflush define, but
+// it also brought many other unused defines, as well as a dependency on
+// kernel headers to be installed.
+//
+// This value is stable at least since Linux 3.13 and should remain so for
+// compatibility reasons, warranting its re-definition here.
+#define __ARM_NR_cacheflush 0x0f0002
+ register int start_reg __asm("r0") = (int)(intptr_t)start;
+ const register int end_reg __asm("r1") = (int)(intptr_t)end;
+ const register int flags __asm("r2") = 0;
+ const register int syscall_nr __asm("r7") = __ARM_NR_cacheflush;
+ __asm __volatile("svc 0x0"
+ : "=r"(start_reg)
+ : "r"(syscall_nr), "r"(start_reg), "r"(end_reg), "r"(flags));
+ assert(start_reg == 0 && "Cache flush syscall failed.");
+#else
+ compilerrt_abort();
+#endif
+#elif defined(__linux__) && defined(__mips__)
+ const uintptr_t start_int = (uintptr_t)start;
+ const uintptr_t end_int = (uintptr_t)end;
+#if defined(__ANDROID__) && defined(__LP64__)
+ // Call synci implementation for short address range.
+ const uintptr_t address_range_limit = 256;
+ if ((end_int - start_int) <= address_range_limit) {
+ clear_mips_cache(start, (end_int - start_int));
+ } else {
+ syscall(__NR_cacheflush, start, (end_int - start_int), BCACHE);
+ }
+#else
+ syscall(__NR_cacheflush, start, (end_int - start_int), BCACHE);
+#endif
+#elif defined(__mips__) && defined(__OpenBSD__)
+ cacheflush(start, (uintptr_t)end - (uintptr_t)start, BCACHE);
+#elif defined(__aarch64__) && !defined(__APPLE__)
+ uint64_t xstart = (uint64_t)(uintptr_t)start;
+ uint64_t xend = (uint64_t)(uintptr_t)end;
+ uint64_t addr;
+
+ // Get Cache Type Info
+ uint64_t ctr_el0;
+ __asm __volatile("mrs %0, ctr_el0" : "=r"(ctr_el0));
+
+ // dc & ic instructions must use 64-bit registers, so we don't use
+ // uintptr_t in case this runs in an ILP32 environment.
+ const size_t dcache_line_size = 4 << ((ctr_el0 >> 16) & 15);
+ for (addr = xstart & ~(dcache_line_size - 1); addr < xend;
+ addr += dcache_line_size)
+ __asm __volatile("dc cvau, %0" ::"r"(addr));
+ __asm __volatile("dsb ish");
+
+ const size_t icache_line_size = 4 << ((ctr_el0 >> 0) & 15);
+ for (addr = xstart & ~(icache_line_size - 1); addr < xend;
+ addr += icache_line_size)
+ __asm __volatile("ic ivau, %0" ::"r"(addr));
+ __asm __volatile("isb sy");
+#elif defined(__powerpc64__)
+ const size_t line_size = 32;
+ const size_t len = (uintptr_t)end - (uintptr_t)start;
+
+ const uintptr_t mask = ~(line_size - 1);
+ const uintptr_t start_line = ((uintptr_t)start) & mask;
+ const uintptr_t end_line = ((uintptr_t)start + len + line_size - 1) & mask;
+
+ for (uintptr_t line = start_line; line < end_line; line += line_size)
+ __asm__ volatile("dcbf 0, %0" : : "r"(line));
+ __asm__ volatile("sync");
+
+ for (uintptr_t line = start_line; line < end_line; line += line_size)
+ __asm__ volatile("icbi 0, %0" : : "r"(line));
+ __asm__ volatile("isync");
+#else
+#if __APPLE__
+ // On Darwin, sys_icache_invalidate() provides this functionality
+ sys_icache_invalidate(start, end - start);
+#else
+ compilerrt_abort();
+#endif
+#endif
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/clear_cache.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
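Editorial note: __clear_cache exists for the write-code-then-run-it case, such as trampolines or a small JIT. On x86 it is a no-op because the instruction and data caches are coherent; on ARM/AArch64 the icache lines must be invalidated first. A hedged sketch for x86-64 only; the mmap flags and machine-code bytes are illustrative, and W^X policies on some systems may forbid a writable+executable mapping like this:

// Copy machine code into an executable buffer, flush, then call it.
#include <string.h>
#include <sys/mman.h>

typedef int (*fn_t)(void);

int main(void) {
  // x86-64: mov eax, 7; ret
  static const unsigned char code[] = {0xb8, 0x07, 0x00, 0x00, 0x00, 0xc3};
  void *buf = mmap(0, 4096, PROT_READ | PROT_WRITE | PROT_EXEC,
                   MAP_PRIVATE | MAP_ANON, -1, 0);
  if (buf == MAP_FAILED)
    return 1;
  memcpy(buf, code, sizeof(code));
  __builtin___clear_cache((char *)buf, (char *)buf + sizeof(code));
  fn_t fn = (fn_t)buf;
  return fn() == 7 ? 0 : 1; // exits 0 if the generated code ran
}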
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/clzdi2.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/clzdi2.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/clzdi2.c (revision 351984)
@@ -0,0 +1,35 @@
+//===-- clzdi2.c - Implement __clzdi2 -------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __clzdi2 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+// Returns: the number of leading 0-bits
+
+#if !defined(__clang__) && \
+ ((defined(__sparc__) && defined(__arch64__)) || defined(__mips64) || \
+ (defined(__riscv) && __SIZEOF_POINTER__ >= 8))
+// On 64-bit architectures with neither a native clz instruction nor a native
+// ctz instruction, gcc resolves __builtin_clz to __clzdi2 rather than
+// __clzsi2, leading to infinite recursion.
+#define __builtin_clz(a) __clzsi2(a)
+extern si_int __clzsi2(si_int);
+#endif
+
+// Precondition: a != 0
+
+COMPILER_RT_ABI si_int __clzdi2(di_int a) {
+ dwords x;
+ x.all = a;
+ const si_int f = -(x.s.high == 0);
+ return __builtin_clz((x.s.high & ~f) | (x.s.low & f)) +
+ (f & ((si_int)(sizeof(si_int) * CHAR_BIT)));
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/clzdi2.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
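Editorial note: __clzdi2 above avoids a branch by turning the test x.s.high == 0 into an all-ones/all-zeros mask f, then using f both to select which word to scan and to decide whether to add a word's worth of bits. Spelled out on plain uint32_t halves (illustrative, not the library code):

// The branchless word-select used by __clzdi2.
#include <assert.h>
#include <stdint.h>

static int clz64(uint64_t a) {
  uint32_t hi = (uint32_t)(a >> 32), lo = (uint32_t)a;
  int32_t f = -(hi == 0);               // 0 if hi != 0; -1 (all ones) if hi == 0
  uint32_t word = (hi & ~f) | (lo & f); // picks hi when nonzero, else lo
  return __builtin_clz(word) + (f & 32); // +32 only when we fell back to lo
}

int main(void) {
  assert(clz64(1) == 63);              // high word empty: 32 + clz(lo)
  assert(clz64(0x100000000ull) == 31); // high word used directly
  return 0;
}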
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/clzsi2.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/clzsi2.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/clzsi2.c (revision 351984)
@@ -0,0 +1,48 @@
+//===-- clzsi2.c - Implement __clzsi2 -------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __clzsi2 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+// Returns: the number of leading 0-bits
+
+// Precondition: a != 0
+
+COMPILER_RT_ABI si_int __clzsi2(si_int a) {
+ su_int x = (su_int)a;
+ si_int t = ((x & 0xFFFF0000) == 0) << 4; // if (x is small) t = 16 else 0
+ x >>= 16 - t; // x = [0 - 0xFFFF]
+ su_int r = t; // r = [0, 16]
+ // return r + clz(x)
+ t = ((x & 0xFF00) == 0) << 3;
+ x >>= 8 - t; // x = [0 - 0xFF]
+ r += t; // r = [0, 8, 16, 24]
+ // return r + clz(x)
+ t = ((x & 0xF0) == 0) << 2;
+ x >>= 4 - t; // x = [0 - 0xF]
+ r += t; // r = [0, 4, 8, 12, 16, 20, 24, 28]
+ // return r + clz(x)
+ t = ((x & 0xC) == 0) << 1;
+ x >>= 2 - t; // x = [0 - 3]
+ r += t; // r = [0 - 30] and is even
+ // return r + clz(x)
+ // switch (x)
+ // {
+ // case 0:
+ // return r + 2;
+ // case 1:
+ // return r + 1;
+ // case 2:
+ // case 3:
+ // return r;
+ // }
+ return r + ((2 - x) & -((x & 2) == 0));
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/clzsi2.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
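Editorial note: __clzsi2 is a branch-free binary search. Each step asks whether the top half of the remaining window is zero, folds the answer into a shift amount and a running count, and halves the window. A small test harness cross-checking that structure against a naive bit-at-a-time loop (assumes a != 0, matching the stated precondition):

// Cross-check of the branch-free binary search against a naive loop.
#include <assert.h>
#include <stdint.h>

static int clz32_naive(uint32_t x) {
  int n = 0;
  while (!(x & 0x80000000u)) { // count zeros from the top, one bit at a time
    ++n;
    x <<= 1;
  }
  return n;
}

static int clz32_branchfree(uint32_t x) {
  int t = ((x & 0xFFFF0000u) == 0) << 4; // 16 if the top half is empty
  x >>= 16 - t;
  int r = t;
  t = ((x & 0xFF00) == 0) << 3; x >>= 8 - t; r += t;
  t = ((x & 0xF0) == 0) << 2;   x >>= 4 - t; r += t;
  t = ((x & 0xC) == 0) << 1;    x >>= 2 - t; r += t;
  return r + ((2 - (int)x) & -((x & 2) == 0)); // final 2-bit case table
}

int main(void) {
  for (uint32_t x = 1; x < 100000; ++x)
    assert(clz32_naive(x) == clz32_branchfree(x));
  return 0;
}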
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/clzti2.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/clzti2.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/clzti2.c (revision 351984)
@@ -0,0 +1,29 @@
+//===-- clzti2.c - Implement __clzti2 -------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __clzti2 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+#ifdef CRT_HAS_128BIT
+
+// Returns: the number of leading 0-bits
+
+// Precondition: a != 0
+
+COMPILER_RT_ABI si_int __clzti2(ti_int a) {
+ twords x;
+ x.all = a;
+ const di_int f = -(x.s.high == 0);
+ return __builtin_clzll((x.s.high & ~f) | (x.s.low & f)) +
+ ((si_int)f & ((si_int)(sizeof(di_int) * CHAR_BIT)));
+}
+
+#endif // CRT_HAS_128BIT
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/clzti2.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/cmpdi2.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/cmpdi2.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/cmpdi2.c (revision 351984)
@@ -0,0 +1,42 @@
+//===-- cmpdi2.c - Implement __cmpdi2 -------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __cmpdi2 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+// Returns: if (a < b) returns 0
+// if (a == b) returns 1
+// if (a > b) returns 2
+
+COMPILER_RT_ABI si_int __cmpdi2(di_int a, di_int b) {
+ dwords x;
+ x.all = a;
+ dwords y;
+ y.all = b;
+ if (x.s.high < y.s.high)
+ return 0;
+ if (x.s.high > y.s.high)
+ return 2;
+ if (x.s.low < y.s.low)
+ return 0;
+ if (x.s.low > y.s.low)
+ return 2;
+ return 1;
+}
+
+#ifdef __ARM_EABI__
+// Returns: if (a < b) returns -1
+// if (a == b) returns 0
+// if (a > b) returns 1
+COMPILER_RT_ABI si_int __aeabi_lcmp(di_int a, di_int b) {
+ return __cmpdi2(a, b) - 1;
+}
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/cmpdi2.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
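Editorial note: __cmpdi2 returns {0, 1, 2} rather than the usual {-1, 0, 1}; the __aeabi_lcmp alias above recovers the signed convention by simply subtracting one. A small demo of the two conventions; cmp64 is a hypothetical stand-in for __cmpdi2:

// The {0,1,2} convention and the AEABI {-1,0,1} convention side by side.
#include <assert.h>
#include <stdint.h>

static int cmp64(int64_t a, int64_t b) {
  if (a < b) return 0;
  if (a > b) return 2;
  return 1;
}

int main(void) {
  assert(cmp64(-5, 3) == 0);      // a < b
  assert(cmp64(3, 3) == 1);       // a == b
  assert(cmp64(3, -5) == 2);      // a > b
  assert(cmp64(-5, 3) - 1 == -1); // AEABI lcmp convention
  return 0;
}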
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/cmpti2.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/cmpti2.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/cmpti2.c (revision 351984)
@@ -0,0 +1,37 @@
+//===-- cmpti2.c - Implement __cmpti2 -------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __cmpti2 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+#ifdef CRT_HAS_128BIT
+
+// Returns: if (a < b) returns 0
+// if (a == b) returns 1
+// if (a > b) returns 2
+
+COMPILER_RT_ABI si_int __cmpti2(ti_int a, ti_int b) {
+ twords x;
+ x.all = a;
+ twords y;
+ y.all = b;
+ if (x.s.high < y.s.high)
+ return 0;
+ if (x.s.high > y.s.high)
+ return 2;
+ if (x.s.low < y.s.low)
+ return 0;
+ if (x.s.low > y.s.low)
+ return 2;
+ return 1;
+}
+
+#endif // CRT_HAS_128BIT
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/cmpti2.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/comparedf2.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/comparedf2.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/comparedf2.c (revision 351984)
@@ -0,0 +1,151 @@
+//===-- lib/comparedf2.c - Double-precision comparisons -----------*- C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the following soft-float comparison routines:
+//
+// __eqdf2 __gedf2 __unorddf2
+// __ledf2 __gtdf2
+// __ltdf2
+// __nedf2
+//
+// The semantics of the routines grouped in each column are identical, so there
+// is a single implementation for each, and wrappers to provide the other names.
+//
+// The main routines behave as follows:
+//
+// __ledf2(a,b) returns -1 if a < b
+// 0 if a == b
+// 1 if a > b
+// 1 if either a or b is NaN
+//
+// __gedf2(a,b) returns -1 if a < b
+// 0 if a == b
+// 1 if a > b
+// -1 if either a or b is NaN
+//
+// __unorddf2(a,b) returns 0 if both a and b are numbers
+// 1 if either a or b is NaN
+//
+// Note that __ledf2( ) and __gedf2( ) are identical except in their handling of
+// NaN values.
+//
+//===----------------------------------------------------------------------===//
+
+#define DOUBLE_PRECISION
+#include "fp_lib.h"
+
+enum LE_RESULT { LE_LESS = -1, LE_EQUAL = 0, LE_GREATER = 1, LE_UNORDERED = 1 };
+
+COMPILER_RT_ABI enum LE_RESULT __ledf2(fp_t a, fp_t b) {
+
+ const srep_t aInt = toRep(a);
+ const srep_t bInt = toRep(b);
+ const rep_t aAbs = aInt & absMask;
+ const rep_t bAbs = bInt & absMask;
+
+ // If either a or b is NaN, they are unordered.
+ if (aAbs > infRep || bAbs > infRep)
+ return LE_UNORDERED;
+
+ // If a and b are both zeros, they are equal.
+ if ((aAbs | bAbs) == 0)
+ return LE_EQUAL;
+
+ // If at least one of a and b is positive, we get the same result comparing
+ // a and b as signed integers as we would with a floating-point compare.
+ if ((aInt & bInt) >= 0) {
+ if (aInt < bInt)
+ return LE_LESS;
+ else if (aInt == bInt)
+ return LE_EQUAL;
+ else
+ return LE_GREATER;
+ }
+
+ // Otherwise, both are negative, so we need to flip the sense of the
+ // comparison to get the correct result. (This assumes a twos- or ones-
+ // complement integer representation; if integers are represented in a
+ // sign-magnitude representation, then this flip is incorrect).
+ else {
+ if (aInt > bInt)
+ return LE_LESS;
+ else if (aInt == bInt)
+ return LE_EQUAL;
+ else
+ return LE_GREATER;
+ }
+}
+
+#if defined(__ELF__)
+// Alias for libgcc compatibility
+COMPILER_RT_ALIAS(__ledf2, __cmpdf2)
+#endif
+COMPILER_RT_ALIAS(__ledf2, __eqdf2)
+COMPILER_RT_ALIAS(__ledf2, __ltdf2)
+COMPILER_RT_ALIAS(__ledf2, __nedf2)
+
+enum GE_RESULT {
+ GE_LESS = -1,
+ GE_EQUAL = 0,
+ GE_GREATER = 1,
+ GE_UNORDERED = -1 // Note: different from LE_UNORDERED
+};
+
+COMPILER_RT_ABI enum GE_RESULT __gedf2(fp_t a, fp_t b) {
+
+ const srep_t aInt = toRep(a);
+ const srep_t bInt = toRep(b);
+ const rep_t aAbs = aInt & absMask;
+ const rep_t bAbs = bInt & absMask;
+
+ if (aAbs > infRep || bAbs > infRep)
+ return GE_UNORDERED;
+ if ((aAbs | bAbs) == 0)
+ return GE_EQUAL;
+ if ((aInt & bInt) >= 0) {
+ if (aInt < bInt)
+ return GE_LESS;
+ else if (aInt == bInt)
+ return GE_EQUAL;
+ else
+ return GE_GREATER;
+ } else {
+ if (aInt > bInt)
+ return GE_LESS;
+ else if (aInt == bInt)
+ return GE_EQUAL;
+ else
+ return GE_GREATER;
+ }
+}
+
+COMPILER_RT_ALIAS(__gedf2, __gtdf2)
+
+COMPILER_RT_ABI int
+__unorddf2(fp_t a, fp_t b) {
+ const rep_t aAbs = toRep(a) & absMask;
+ const rep_t bAbs = toRep(b) & absMask;
+ return aAbs > infRep || bAbs > infRep;
+}
+
+#if defined(__ARM_EABI__)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+AEABI_RTABI int __aeabi_dcmpun(fp_t a, fp_t b) { return __unorddf2(a, b); }
+#else
+COMPILER_RT_ALIAS(__unorddf2, __aeabi_dcmpun)
+#endif
+#endif
+
+#if defined(_WIN32) && !defined(__MINGW32__)
+// The alias mechanism doesn't work on Windows except for MinGW, so emit
+// wrapper functions.
+int __eqdf2(fp_t a, fp_t b) { return __ledf2(a, b); }
+int __ltdf2(fp_t a, fp_t b) { return __ledf2(a, b); }
+int __nedf2(fp_t a, fp_t b) { return __ledf2(a, b); }
+int __gtdf2(fp_t a, fp_t b) { return __gedf2(a, b); }
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/comparedf2.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
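Editorial note: the comparison routines above exploit the layout of IEEE-754: once NaNs and zeros are handled, the bit patterns of two doubles, read as signed integers, order the same way as the values when at least one operand is non-negative, and in the opposite way when both are negative. A small C check of that trick, assuming double is IEEE-754 binary64 (this harness deliberately excludes NaNs and signed zeros, which the real routines handle first):

// Checks the bit-pattern comparison trick from __ledf2 on a few values.
#include <assert.h>
#include <stdint.h>
#include <string.h>

static int64_t to_rep(double d) {
  int64_t r;
  memcpy(&r, &d, sizeof r); // well-defined type pun
  return r;
}

static int less_than(double a, double b) { // NaNs and zeros excluded here
  int64_t ai = to_rep(a), bi = to_rep(b);
  if ((ai & bi) >= 0)  // at least one operand is non-negative:
    return ai < bi;    // integer order == floating-point order
  return ai > bi;      // both negative: the sense of the comparison flips
}

int main(void) {
  assert(less_than(1.0, 2.0));
  assert(less_than(-2.0, -1.0)); // exercises the flipped branch
  assert(!less_than(2.0, 1.0));
  return 0;
}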
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/comparesf2.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/comparesf2.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/comparesf2.c (revision 351984)
@@ -0,0 +1,151 @@
+//===-- lib/comparesf2.c - Single-precision comparisons -----------*- C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the following soft-float comparison routines:
+//
+// __eqsf2 __gesf2 __unordsf2
+// __lesf2 __gtsf2
+// __ltsf2
+// __nesf2
+//
+// The semantics of the routines grouped in each column are identical, so there
+// is a single implementation for each, and wrappers to provide the other names.
+//
+// The main routines behave as follows:
+//
+// __lesf2(a,b) returns -1 if a < b
+// 0 if a == b
+// 1 if a > b
+// 1 if either a or b is NaN
+//
+// __gesf2(a,b) returns -1 if a < b
+// 0 if a == b
+// 1 if a > b
+// -1 if either a or b is NaN
+//
+// __unordsf2(a,b) returns 0 if both a and b are numbers
+// 1 if either a or b is NaN
+//
+// Note that __lesf2( ) and __gesf2( ) are identical except in their handling of
+// NaN values.
+//
+//===----------------------------------------------------------------------===//
+
+#define SINGLE_PRECISION
+#include "fp_lib.h"
+
+enum LE_RESULT { LE_LESS = -1, LE_EQUAL = 0, LE_GREATER = 1, LE_UNORDERED = 1 };
+
+COMPILER_RT_ABI enum LE_RESULT __lesf2(fp_t a, fp_t b) {
+
+ const srep_t aInt = toRep(a);
+ const srep_t bInt = toRep(b);
+ const rep_t aAbs = aInt & absMask;
+ const rep_t bAbs = bInt & absMask;
+
+ // If either a or b is NaN, they are unordered.
+ if (aAbs > infRep || bAbs > infRep)
+ return LE_UNORDERED;
+
+ // If a and b are both zeros, they are equal.
+ if ((aAbs | bAbs) == 0)
+ return LE_EQUAL;
+
+ // If at least one of a and b is positive, we get the same result comparing
+// a and b as signed integers as we would with a floating-point compare.
+ if ((aInt & bInt) >= 0) {
+ if (aInt < bInt)
+ return LE_LESS;
+ else if (aInt == bInt)
+ return LE_EQUAL;
+ else
+ return LE_GREATER;
+ }
+
+ // Otherwise, both are negative, so we need to flip the sense of the
+ // comparison to get the correct result. (This assumes a twos- or ones-
+ // complement integer representation; if integers are represented in a
+ // sign-magnitude representation, then this flip is incorrect).
+ else {
+ if (aInt > bInt)
+ return LE_LESS;
+ else if (aInt == bInt)
+ return LE_EQUAL;
+ else
+ return LE_GREATER;
+ }
+}
+
+#if defined(__ELF__)
+// Alias for libgcc compatibility
+COMPILER_RT_ALIAS(__lesf2, __cmpsf2)
+#endif
+COMPILER_RT_ALIAS(__lesf2, __eqsf2)
+COMPILER_RT_ALIAS(__lesf2, __ltsf2)
+COMPILER_RT_ALIAS(__lesf2, __nesf2)
+
+enum GE_RESULT {
+ GE_LESS = -1,
+ GE_EQUAL = 0,
+ GE_GREATER = 1,
+ GE_UNORDERED = -1 // Note: different from LE_UNORDERED
+};
+
+COMPILER_RT_ABI enum GE_RESULT __gesf2(fp_t a, fp_t b) {
+
+ const srep_t aInt = toRep(a);
+ const srep_t bInt = toRep(b);
+ const rep_t aAbs = aInt & absMask;
+ const rep_t bAbs = bInt & absMask;
+
+ if (aAbs > infRep || bAbs > infRep)
+ return GE_UNORDERED;
+ if ((aAbs | bAbs) == 0)
+ return GE_EQUAL;
+ if ((aInt & bInt) >= 0) {
+ if (aInt < bInt)
+ return GE_LESS;
+ else if (aInt == bInt)
+ return GE_EQUAL;
+ else
+ return GE_GREATER;
+ } else {
+ if (aInt > bInt)
+ return GE_LESS;
+ else if (aInt == bInt)
+ return GE_EQUAL;
+ else
+ return GE_GREATER;
+ }
+}
+
+COMPILER_RT_ALIAS(__gesf2, __gtsf2)
+
+COMPILER_RT_ABI int
+__unordsf2(fp_t a, fp_t b) {
+ const rep_t aAbs = toRep(a) & absMask;
+ const rep_t bAbs = toRep(b) & absMask;
+ return aAbs > infRep || bAbs > infRep;
+}
+
+#if defined(__ARM_EABI__)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+AEABI_RTABI int __aeabi_fcmpun(fp_t a, fp_t b) { return __unordsf2(a, b); }
+#else
+COMPILER_RT_ALIAS(__unordsf2, __aeabi_fcmpun)
+#endif
+#endif
+
+#if defined(_WIN32) && !defined(__MINGW32__)
+// The alias mechanism doesn't work on Windows except for MinGW, so emit
+// wrapper functions.
+int __eqsf2(fp_t a, fp_t b) { return __lesf2(a, b); }
+int __ltsf2(fp_t a, fp_t b) { return __lesf2(a, b); }
+int __nesf2(fp_t a, fp_t b) { return __lesf2(a, b); }
+int __gtsf2(fp_t a, fp_t b) { return __gesf2(a, b); }
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/comparesf2.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/comparetf2.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/comparetf2.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/comparetf2.c (revision 351984)
@@ -0,0 +1,134 @@
+//===-- lib/comparetf2.c - Quad-precision comparisons -------------*- C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the following soft-float comparison routines:
+//
+// __eqtf2 __getf2 __unordtf2
+// __letf2 __gttf2
+// __lttf2
+// __netf2
+//
+// The semantics of the routines grouped in each column are identical, so there
+// is a single implementation for each, and wrappers to provide the other names.
+//
+// The main routines behave as follows:
+//
+// __letf2(a,b) returns -1 if a < b
+// 0 if a == b
+// 1 if a > b
+// 1 if either a or b is NaN
+//
+// __getf2(a,b) returns -1 if a < b
+// 0 if a == b
+// 1 if a > b
+// -1 if either a or b is NaN
+//
+// __unordtf2(a,b) returns 0 if both a and b are numbers
+// 1 if either a or b is NaN
+//
+// Note that __letf2( ) and __getf2( ) are identical except in their handling of
+// NaN values.
+//
+//===----------------------------------------------------------------------===//
+
+#define QUAD_PRECISION
+#include "fp_lib.h"
+
+#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
+enum LE_RESULT { LE_LESS = -1, LE_EQUAL = 0, LE_GREATER = 1, LE_UNORDERED = 1 };
+
+COMPILER_RT_ABI enum LE_RESULT __letf2(fp_t a, fp_t b) {
+
+ const srep_t aInt = toRep(a);
+ const srep_t bInt = toRep(b);
+ const rep_t aAbs = aInt & absMask;
+ const rep_t bAbs = bInt & absMask;
+
+ // If either a or b is NaN, they are unordered.
+ if (aAbs > infRep || bAbs > infRep)
+ return LE_UNORDERED;
+
+ // If a and b are both zeros, they are equal.
+ if ((aAbs | bAbs) == 0)
+ return LE_EQUAL;
+
+ // If at least one of a and b is positive, we get the same result comparing
+ // a and b as signed integers as we would with a floating-point compare.
+ if ((aInt & bInt) >= 0) {
+ if (aInt < bInt)
+ return LE_LESS;
+ else if (aInt == bInt)
+ return LE_EQUAL;
+ else
+ return LE_GREATER;
+ } else {
+ // Otherwise, both are negative, so we need to flip the sense of the
+ // comparison to get the correct result. (This assumes a twos- or ones-
+ // complement integer representation; if integers are represented in a
+ // sign-magnitude representation, then this flip is incorrect).
+ if (aInt > bInt)
+ return LE_LESS;
+ else if (aInt == bInt)
+ return LE_EQUAL;
+ else
+ return LE_GREATER;
+ }
+}
+
+#if defined(__ELF__)
+// Alias for libgcc compatibility
+COMPILER_RT_ALIAS(__letf2, __cmptf2)
+#endif
+COMPILER_RT_ALIAS(__letf2, __eqtf2)
+COMPILER_RT_ALIAS(__letf2, __lttf2)
+COMPILER_RT_ALIAS(__letf2, __netf2)
+
+enum GE_RESULT {
+ GE_LESS = -1,
+ GE_EQUAL = 0,
+ GE_GREATER = 1,
+ GE_UNORDERED = -1 // Note: different from LE_UNORDERED
+};
+
+COMPILER_RT_ABI enum GE_RESULT __getf2(fp_t a, fp_t b) {
+
+ const srep_t aInt = toRep(a);
+ const srep_t bInt = toRep(b);
+ const rep_t aAbs = aInt & absMask;
+ const rep_t bAbs = bInt & absMask;
+
+ if (aAbs > infRep || bAbs > infRep)
+ return GE_UNORDERED;
+ if ((aAbs | bAbs) == 0)
+ return GE_EQUAL;
+ if ((aInt & bInt) >= 0) {
+ if (aInt < bInt)
+ return GE_LESS;
+ else if (aInt == bInt)
+ return GE_EQUAL;
+ else
+ return GE_GREATER;
+ } else {
+ if (aInt > bInt)
+ return GE_LESS;
+ else if (aInt == bInt)
+ return GE_EQUAL;
+ else
+ return GE_GREATER;
+ }
+}
+
+COMPILER_RT_ALIAS(__getf2, __gttf2)
+
+COMPILER_RT_ABI int __unordtf2(fp_t a, fp_t b) {
+ const rep_t aAbs = toRep(a) & absMask;
+ const rep_t bAbs = toRep(b) & absMask;
+ return aAbs > infRep || bAbs > infRep;
+}
+
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/comparetf2.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
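
A note on the libgcc comparison convention used above: the compiler lowers a relational operator on a soft-float type to a call followed by an integer compare against zero, and the NaN return values are chosen so the lowered test comes out false for unordered operands. The sketch below models the documented __letf2/__getf2 contract with plain doubles; it is illustrative only and does not call the builtins themselves.

    #include <math.h>
    #include <stdio.h>

    /* Reference model of the __letf2/__getf2 contract documented above,
       written against plain doubles purely for illustration. */
    static int model_le(double a, double b) {
      if (isnan(a) || isnan(b)) return 1;  /* LE_UNORDERED */
      if (a < b) return -1;                /* LE_LESS */
      if (a == b) return 0;                /* LE_EQUAL */
      return 1;                            /* LE_GREATER */
    }

    static int model_ge(double a, double b) {
      if (isnan(a) || isnan(b)) return -1; /* GE_UNORDERED */
      if (a < b) return -1;
      if (a == b) return 0;
      return 1;
    }

    int main(void) {
      /* a <= b lowers to __letf2(a, b) <= 0; with a NaN operand the routine
         returns 1, so the lowered test is false, matching IEEE unordered
         semantics. Likewise a >= b lowers to __getf2(a, b) >= 0. */
      printf("%d %d\n", model_le(NAN, 1.0) <= 0, model_ge(NAN, 1.0) >= 0);
      /* prints: 0 0 */
      return 0;
    }
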
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ctzdi2.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ctzdi2.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ctzdi2.c (revision 351984)
@@ -0,0 +1,35 @@
+//===-- ctzdi2.c - Implement __ctzdi2 -------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __ctzdi2 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+// Returns: the number of trailing 0-bits
+
+#if !defined(__clang__) && \
+ ((defined(__sparc__) && defined(__arch64__)) || defined(__mips64) || \
+ (defined(__riscv) && __SIZEOF_POINTER__ >= 8))
+// On 64-bit architectures with neither a native clz instruction nor a native
+// ctz instruction, gcc resolves __builtin_ctz to __ctzdi2 rather than
+// __ctzsi2, leading to infinite recursion.
+#define __builtin_ctz(a) __ctzsi2(a)
+extern si_int __ctzsi2(si_int);
+#endif
+
+// Precondition: a != 0
+
+COMPILER_RT_ABI si_int __ctzdi2(di_int a) {
+ dwords x;
+ x.all = a;
+ const si_int f = -(x.s.low == 0);
+ return __builtin_ctz((x.s.high & f) | (x.s.low & ~f)) +
+ (f & ((si_int)(sizeof(si_int) * CHAR_BIT)));
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ctzdi2.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
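
The word-selection trick above deserves a gloss: f = -(x.s.low == 0) is an all-ones mask exactly when the low word is zero, so the OR picks the high word in that case and the low word otherwise, and f & 32 adds the word offset, all without a branch. A standalone sketch, assuming 32-bit unsigned int and a GCC/Clang-style __builtin_ctz:

    #include <stdio.h>

    /* Branchless 64-bit ctz from two 32-bit halves, mirroring __ctzdi2.
       Precondition, as in the builtin: v != 0. */
    static int ctz64(unsigned long long v) {
      unsigned lo = (unsigned)v;
      unsigned hi = (unsigned)(v >> 32);
      unsigned f = -(lo == 0);               /* 0xFFFFFFFF iff lo == 0 */
      return __builtin_ctz((hi & f) | (lo & ~f)) + (int)(f & 32);
    }

    int main(void) {
      printf("%d\n", ctz64(0x100000000ULL)); /* 32: low word is zero */
      printf("%d\n", ctz64(8ULL));           /* 3 */
      return 0;
    }
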
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ctzsi2.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ctzsi2.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ctzsi2.c (revision 351984)
@@ -0,0 +1,53 @@
+//===-- ctzsi2.c - Implement __ctzsi2 -------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __ctzsi2 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+// Returns: the number of trailing 0-bits
+
+// Precondition: a != 0
+
+COMPILER_RT_ABI si_int __ctzsi2(si_int a) {
+ su_int x = (su_int)a;
+ si_int t = ((x & 0x0000FFFF) == 0)
+ << 4; // if (x has no small bits) t = 16 else 0
+ x >>= t; // x = [0 - 0xFFFF] + higher garbage bits
+ su_int r = t; // r = [0, 16]
+ // return r + ctz(x)
+ t = ((x & 0x00FF) == 0) << 3;
+ x >>= t; // x = [0 - 0xFF] + higher garbage bits
+ r += t; // r = [0, 8, 16, 24]
+ // return r + ctz(x)
+ t = ((x & 0x0F) == 0) << 2;
+ x >>= t; // x = [0 - 0xF] + higher garbage bits
+ r += t; // r = [0, 4, 8, 12, 16, 20, 24, 28]
+ // return r + ctz(x)
+ t = ((x & 0x3) == 0) << 1;
+ x >>= t;
+ x &= 3; // x = [0 - 3]
+ r += t; // r = [0 - 30] and is even
+ // return r + ctz(x)
+
+ // The branch-less return statement below is equivalent
+ // to the following switch statement:
+ // switch (x)
+ // {
+ // case 0:
+ // return r + 2;
+ // case 2:
+ // return r + 1;
+ // case 1:
+ // case 3:
+ // return r;
+ // }
+ return r + ((2 - (x >> 1)) & -((x & 1) == 0));
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ctzsi2.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
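
The halving cascade above (test 16 bits, then 8, 4, 2) computes the same answer as the obvious loop. A naive reference, useful for spot-checking the branchless version, might look like this (a sketch only, with the same a != 0 precondition):

    #include <stdio.h>

    /* Peel trailing zero bits one at a time -- O(n) but obviously correct. */
    static int ctz_ref(unsigned x) {
      int n = 0;
      while ((x & 1u) == 0) {
        x >>= 1;
        n++;
      }
      return n;
    }

    int main(void) {
      unsigned tests[] = {1u, 2u, 0x80u, 0x00010000u, 0x80000000u};
      for (unsigned i = 0; i < sizeof tests / sizeof tests[0]; i++)
        printf("ctz(0x%08x) = %d\n", tests[i], ctz_ref(tests[i]));
      /* 0, 1, 7, 16, 31 -- the binary search above lands on the same values. */
      return 0;
    }
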
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ctzti2.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ctzti2.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ctzti2.c (revision 351984)
@@ -0,0 +1,29 @@
+//===-- ctzti2.c - Implement __ctzti2 -------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __ctzti2 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+#ifdef CRT_HAS_128BIT
+
+// Returns: the number of trailing 0-bits
+
+// Precondition: a != 0
+
+COMPILER_RT_ABI si_int __ctzti2(ti_int a) {
+ twords x;
+ x.all = a;
+ const di_int f = -(x.s.low == 0);
+ return __builtin_ctzll((x.s.high & f) | (x.s.low & ~f)) +
+ ((si_int)f & ((si_int)(sizeof(di_int) * CHAR_BIT)));
+}
+
+#endif // CRT_HAS_128BIT
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ctzti2.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/divdc3.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/divdc3.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/divdc3.c (revision 351984)
@@ -0,0 +1,53 @@
+//===-- divdc3.c - Implement __divdc3 -------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __divdc3 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#define DOUBLE_PRECISION
+#include "fp_lib.h"
+#include "int_lib.h"
+#include "int_math.h"
+
+// Returns: the quotient of (a + ib) / (c + id)
+
+COMPILER_RT_ABI Dcomplex __divdc3(double __a, double __b, double __c,
+ double __d) {
+ int __ilogbw = 0;
+ double __logbw = __compiler_rt_logb(crt_fmax(crt_fabs(__c), crt_fabs(__d)));
+ if (crt_isfinite(__logbw)) {
+ __ilogbw = (int)__logbw;
+ __c = crt_scalbn(__c, -__ilogbw);
+ __d = crt_scalbn(__d, -__ilogbw);
+ }
+ double __denom = __c * __c + __d * __d;
+ Dcomplex z;
+ COMPLEX_REAL(z) = crt_scalbn((__a * __c + __b * __d) / __denom, -__ilogbw);
+ COMPLEX_IMAGINARY(z) =
+ crt_scalbn((__b * __c - __a * __d) / __denom, -__ilogbw);
+ if (crt_isnan(COMPLEX_REAL(z)) && crt_isnan(COMPLEX_IMAGINARY(z))) {
+ if ((__denom == 0.0) && (!crt_isnan(__a) || !crt_isnan(__b))) {
+ COMPLEX_REAL(z) = crt_copysign(CRT_INFINITY, __c) * __a;
+ COMPLEX_IMAGINARY(z) = crt_copysign(CRT_INFINITY, __c) * __b;
+ } else if ((crt_isinf(__a) || crt_isinf(__b)) && crt_isfinite(__c) &&
+ crt_isfinite(__d)) {
+ __a = crt_copysign(crt_isinf(__a) ? 1.0 : 0.0, __a);
+ __b = crt_copysign(crt_isinf(__b) ? 1.0 : 0.0, __b);
+ COMPLEX_REAL(z) = CRT_INFINITY * (__a * __c + __b * __d);
+ COMPLEX_IMAGINARY(z) = CRT_INFINITY * (__b * __c - __a * __d);
+ } else if (crt_isinf(__logbw) && __logbw > 0.0 && crt_isfinite(__a) &&
+ crt_isfinite(__b)) {
+ __c = crt_copysign(crt_isinf(__c) ? 1.0 : 0.0, __c);
+ __d = crt_copysign(crt_isinf(__d) ? 1.0 : 0.0, __d);
+ COMPLEX_REAL(z) = 0.0 * (__a * __c + __b * __d);
+ COMPLEX_IMAGINARY(z) = 0.0 * (__b * __c - __a * __d);
+ }
+ }
+ return z;
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/divdc3.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
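
Callers never invoke __divdc3 directly; C compilers typically lower _Complex double division to it (at least with fast-math off), so the logb/scalbn scaling above runs behind an ordinary division expression. A minimal sketch:

    #include <complex.h>
    #include <stdio.h>

    int main(void) {
      /* (1+2i)/(3+4i) = (1+2i)(3-4i)/25 = (11+2i)/25 */
      double complex z = (1.0 + 2.0 * I) / (3.0 + 4.0 * I);
      printf("%g + %gi\n", creal(z), cimag(z)); /* 0.44 + 0.08i */
      return 0;
    }
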
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/divdf3.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/divdf3.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/divdf3.c (revision 351984)
@@ -0,0 +1,210 @@
+//===-- lib/divdf3.c - Double-precision division ------------------*- C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements double-precision soft-float division
+// with the IEEE-754 default rounding (to nearest, ties to even).
+//
+// For simplicity, this implementation currently flushes denormals to zero.
+// It should be a fairly straightforward exercise to implement gradual
+// underflow with correct rounding.
+//
+//===----------------------------------------------------------------------===//
+
+#define DOUBLE_PRECISION
+#include "fp_lib.h"
+
+COMPILER_RT_ABI fp_t __divdf3(fp_t a, fp_t b) {
+
+ const unsigned int aExponent = toRep(a) >> significandBits & maxExponent;
+ const unsigned int bExponent = toRep(b) >> significandBits & maxExponent;
+ const rep_t quotientSign = (toRep(a) ^ toRep(b)) & signBit;
+
+ rep_t aSignificand = toRep(a) & significandMask;
+ rep_t bSignificand = toRep(b) & significandMask;
+ int scale = 0;
+
+ // Detect if a or b is zero, denormal, infinity, or NaN.
+ if (aExponent - 1U >= maxExponent - 1U ||
+ bExponent - 1U >= maxExponent - 1U) {
+
+ const rep_t aAbs = toRep(a) & absMask;
+ const rep_t bAbs = toRep(b) & absMask;
+
+ // NaN / anything = qNaN
+ if (aAbs > infRep)
+ return fromRep(toRep(a) | quietBit);
+ // anything / NaN = qNaN
+ if (bAbs > infRep)
+ return fromRep(toRep(b) | quietBit);
+
+ if (aAbs == infRep) {
+ // infinity / infinity = NaN
+ if (bAbs == infRep)
+ return fromRep(qnanRep);
+ // infinity / anything else = +/- infinity
+ else
+ return fromRep(aAbs | quotientSign);
+ }
+
+ // anything else / infinity = +/- 0
+ if (bAbs == infRep)
+ return fromRep(quotientSign);
+
+ if (!aAbs) {
+ // zero / zero = NaN
+ if (!bAbs)
+ return fromRep(qnanRep);
+ // zero / anything else = +/- zero
+ else
+ return fromRep(quotientSign);
+ }
+ // anything else / zero = +/- infinity
+ if (!bAbs)
+ return fromRep(infRep | quotientSign);
+
+ // One or both of a or b is denormal. The other (if applicable) is a
+ // normal number. Renormalize one or both of a and b, and set scale to
+ // include the necessary exponent adjustment.
+ if (aAbs < implicitBit)
+ scale += normalize(&aSignificand);
+ if (bAbs < implicitBit)
+ scale -= normalize(&bSignificand);
+ }
+
+ // Set the implicit significand bit. If we fell through from the
+ // denormal path it was already set by normalize( ), but setting it twice
+ // won't hurt anything.
+ aSignificand |= implicitBit;
+ bSignificand |= implicitBit;
+ int quotientExponent = aExponent - bExponent + scale;
+
+ // Align the significand of b as a Q31 fixed-point number in the range
+ // [1, 2.0) and get a Q32 approximate reciprocal using a small minimax
+ // polynomial approximation: reciprocal = 3/4 + 1/sqrt(2) - b/2. This
+ // is accurate to about 3.5 binary digits.
+ const uint32_t q31b = bSignificand >> 21;
+ uint32_t recip32 = UINT32_C(0x7504f333) - q31b;
+ // 0x7504F333 / 2^32 + 1 = 3/4 + 1/sqrt(2)
+
+ // Now refine the reciprocal estimate using a Newton-Raphson iteration:
+ //
+ // x1 = x0 * (2 - x0 * b)
+ //
+ // This doubles the number of correct binary digits in the approximation
+ // with each iteration.
+ uint32_t correction32;
+ correction32 = -((uint64_t)recip32 * q31b >> 32);
+ recip32 = (uint64_t)recip32 * correction32 >> 31;
+ correction32 = -((uint64_t)recip32 * q31b >> 32);
+ recip32 = (uint64_t)recip32 * correction32 >> 31;
+ correction32 = -((uint64_t)recip32 * q31b >> 32);
+ recip32 = (uint64_t)recip32 * correction32 >> 31;
+
+ // The reciprocal may have overflowed to zero if the upper half of b is
+ // exactly 1.0. This would sabotage the full-width final stage of the
+ // computation that follows, so we adjust the reciprocal down by one bit.
+ recip32--;
+
+ // We need to perform one more iteration to get us to 56 binary digits.
+ // The last iteration needs to happen with extra precision.
+ const uint32_t q63blo = bSignificand << 11;
+ uint64_t correction, reciprocal;
+ correction = -((uint64_t)recip32 * q31b + ((uint64_t)recip32 * q63blo >> 32));
+ uint32_t cHi = correction >> 32;
+ uint32_t cLo = correction;
+ reciprocal = (uint64_t)recip32 * cHi + ((uint64_t)recip32 * cLo >> 32);
+
+ // Adjust the final 64-bit reciprocal estimate downward to ensure that it is
+ // strictly smaller than the infinitely precise exact reciprocal. Because
+ // the computation of the Newton-Raphson step is truncating at every step,
+ // this adjustment is small; most of the work is already done.
+ reciprocal -= 2;
+
+ // The numerical reciprocal is accurate to within 2^-56, lies in the
+ // interval [0.5, 1.0), and is strictly smaller than the true reciprocal
+ // of b. Multiplying a by this reciprocal thus gives a numerical q = a/b
+ // in Q53 with the following properties:
+ //
+ // 1. q < a/b
+ // 2. q is in the interval [0.5, 2.0)
+ // 3. The error in q is bounded away from 2^-53 (actually, we have a
+ // couple of bits to spare, but this is all we need).
+
+ // We need a 64 x 64 multiply high to compute q, which isn't a basic
+ // operation in C, so we need to be a little bit fussy.
+ rep_t quotient, quotientLo;
+ wideMultiply(aSignificand << 2, reciprocal, &quotient, &quotientLo);
+
+ // Two cases: quotient is in [0.5, 1.0) or quotient is in [1.0, 2.0).
+ // In either case, we are going to compute a residual of the form
+ //
+ // r = a - q*b
+ //
+ // We know from the construction of q that r satisfies:
+ //
+ // 0 <= r < ulp(q)*b
+ //
+ // If r is greater than 1/2 ulp(q)*b, then q rounds up. Otherwise, we
+ // already have the correct result. The exact halfway case cannot occur.
+ // We also take this time to right shift quotient if it falls in the [1,2)
+ // range and adjust the exponent accordingly.
+ rep_t residual;
+ if (quotient < (implicitBit << 1)) {
+ residual = (aSignificand << 53) - quotient * bSignificand;
+ quotientExponent--;
+ } else {
+ quotient >>= 1;
+ residual = (aSignificand << 52) - quotient * bSignificand;
+ }
+
+ const int writtenExponent = quotientExponent + exponentBias;
+
+ if (writtenExponent >= maxExponent) {
+ // If we have overflowed the exponent, return infinity.
+ return fromRep(infRep | quotientSign);
+ }
+
+ else if (writtenExponent < 1) {
+ if (writtenExponent == 0) {
+ // Check whether the rounded result is normal.
+ const bool round = (residual << 1) > bSignificand;
+ // Clear the implicit bit.
+ rep_t absResult = quotient & significandMask;
+ // Round.
+ absResult += round;
+ if (absResult & ~significandMask) {
+ // The rounded result is normal; return it.
+ return fromRep(absResult | quotientSign);
+ }
+ }
+ // Flush denormals to zero. In the future, it would be nice to add
+ // code to round them correctly.
+ return fromRep(quotientSign);
+ }
+
+ else {
+ const bool round = (residual << 1) > bSignificand;
+ // Clear the implicit bit.
+ rep_t absResult = quotient & significandMask;
+ // Insert the exponent.
+ absResult |= (rep_t)writtenExponent << significandBits;
+ // Round.
+ absResult += round;
+ // Insert the sign and return.
+ const double result = fromRep(absResult | quotientSign);
+ return result;
+ }
+}
+
+#if defined(__ARM_EABI__)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+AEABI_RTABI fp_t __aeabi_ddiv(fp_t a, fp_t b) { return __divdf3(a, b); }
+#else
+COMPILER_RT_ALIAS(__divdf3, __aeabi_ddiv)
+#endif
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/divdf3.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
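
The Newton-Raphson recurrence x1 = x0 * (2 - x0 * b) is easier to watch in ordinary floating point than in the Q31/Q32 fixed-point form above. A sketch of the convergence behaviour only; the minimax seed 3/4 + 1/sqrt(2) - b/2 and the quadratic error decay are the same, just the arithmetic differs:

    #include <stdio.h>

    int main(void) {
      double b = 1.6180339887;                        /* any b in [1, 2) */
      double x = 0.75 + 0.7071067811865475 - b / 2.0; /* seed: ~3.5 good bits */
      for (int i = 0; i < 5; i++) {
        x = x * (2.0 - x * b); /* each step roughly doubles the correct digits */
        printf("step %d: x = %.17g, error = %.3g\n", i, x, x - 1.0 / b);
      }
      return 0;
    }
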
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/divdi3.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/divdi3.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/divdi3.c (revision 351984)
@@ -0,0 +1,25 @@
+//===-- divdi3.c - Implement __divdi3 -------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __divdi3 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+// Returns: a / b
+
+COMPILER_RT_ABI di_int __divdi3(di_int a, di_int b) {
+ const int bits_in_dword_m1 = (int)(sizeof(di_int) * CHAR_BIT) - 1;
+ di_int s_a = a >> bits_in_dword_m1; // s_a = a < 0 ? -1 : 0
+ di_int s_b = b >> bits_in_dword_m1; // s_b = b < 0 ? -1 : 0
+ a = (a ^ s_a) - s_a; // negate if s_a == -1
+ b = (b ^ s_b) - s_b; // negate if s_b == -1
+ s_a ^= s_b; // sign of quotient
+ return (__udivmoddi4(a, b, (du_int *)0) ^ s_a) - s_a; // negate if s_a == -1
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/divdi3.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
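
The XOR/subtract lines above are a branchless conditional negate: with s equal to 0 or -1 (all ones), (a ^ s) - s is a when s == 0 and ~a + 1 == -a when s == -1 on two's-complement machines. A sketch at 32 bits; like the builtin, it relies on arithmetic right shift of negatives and leaves INT_MIN undefined:

    #include <stdio.h>

    static int cond_abs(int a) {
      int s = a >> 31;    /* -1 if a < 0, else 0 (arithmetic shift assumed) */
      return (a ^ s) - s; /* negate exactly when s == -1 */
    }

    int main(void) {
      printf("%d %d %d\n", cond_abs(-7), cond_abs(7), cond_abs(0)); /* 7 7 0 */
      return 0;
    }
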
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/divmoddi4.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/divmoddi4.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/divmoddi4.c (revision 351984)
@@ -0,0 +1,21 @@
+//===-- divmoddi4.c - Implement __divmoddi4 -------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __divmoddi4 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+// Returns: a / b, *rem = a % b
+
+COMPILER_RT_ABI di_int __divmoddi4(di_int a, di_int b, di_int *rem) {
+ di_int d = __divdi3(a, b);
+ *rem = a - (d * b);
+ return d;
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/divmoddi4.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
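
The remainder recovered as a - d*b pairs with C's truncating division, so a == d*b + *rem always holds and the remainder takes the sign of the dividend. Illustrated with the native operators, which follow the same rule:

    #include <stdio.h>

    int main(void) {
      long long a = -7, b = 3;
      long long d = a / b;       /* -2: truncated toward zero */
      long long rem = a - d * b; /* -1: same sign as the dividend */
      printf("%lld == %lld * %lld + %lld\n", a, d, b, rem); /* -7 == -2*3 + -1 */
      return 0;
    }
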
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/divmodsi4.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/divmodsi4.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/divmodsi4.c (revision 351984)
@@ -0,0 +1,22 @@
+//===-- divmodsi4.c - Implement __divmodsi4 ------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __divmodsi4 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+// Returns: a / b, *rem = a % b
+
+COMPILER_RT_ABI si_int __divmodsi4(si_int a, si_int b, si_int *rem) {
+ si_int d = __divsi3(a, b);
+ *rem = a - (d * b);
+ return d;
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/divmodsi4.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/divsc3.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/divsc3.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/divsc3.c (revision 351984)
@@ -0,0 +1,53 @@
+//===-- divsc3.c - Implement __divsc3 -------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __divsc3 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#define SINGLE_PRECISION
+#include "fp_lib.h"
+#include "int_lib.h"
+#include "int_math.h"
+
+// Returns: the quotient of (a + ib) / (c + id)
+
+COMPILER_RT_ABI Fcomplex __divsc3(float __a, float __b, float __c, float __d) {
+ int __ilogbw = 0;
+ float __logbw =
+ __compiler_rt_logbf(crt_fmaxf(crt_fabsf(__c), crt_fabsf(__d)));
+ if (crt_isfinite(__logbw)) {
+ __ilogbw = (int)__logbw;
+ __c = crt_scalbnf(__c, -__ilogbw);
+ __d = crt_scalbnf(__d, -__ilogbw);
+ }
+ float __denom = __c * __c + __d * __d;
+ Fcomplex z;
+ COMPLEX_REAL(z) = crt_scalbnf((__a * __c + __b * __d) / __denom, -__ilogbw);
+ COMPLEX_IMAGINARY(z) =
+ crt_scalbnf((__b * __c - __a * __d) / __denom, -__ilogbw);
+ if (crt_isnan(COMPLEX_REAL(z)) && crt_isnan(COMPLEX_IMAGINARY(z))) {
+ if ((__denom == 0) && (!crt_isnan(__a) || !crt_isnan(__b))) {
+ COMPLEX_REAL(z) = crt_copysignf(CRT_INFINITY, __c) * __a;
+ COMPLEX_IMAGINARY(z) = crt_copysignf(CRT_INFINITY, __c) * __b;
+ } else if ((crt_isinf(__a) || crt_isinf(__b)) && crt_isfinite(__c) &&
+ crt_isfinite(__d)) {
+ __a = crt_copysignf(crt_isinf(__a) ? 1 : 0, __a);
+ __b = crt_copysignf(crt_isinf(__b) ? 1 : 0, __b);
+ COMPLEX_REAL(z) = CRT_INFINITY * (__a * __c + __b * __d);
+ COMPLEX_IMAGINARY(z) = CRT_INFINITY * (__b * __c - __a * __d);
+ } else if (crt_isinf(__logbw) && __logbw > 0 && crt_isfinite(__a) &&
+ crt_isfinite(__b)) {
+ __c = crt_copysignf(crt_isinf(__c) ? 1 : 0, __c);
+ __d = crt_copysignf(crt_isinf(__d) ? 1 : 0, __d);
+ COMPLEX_REAL(z) = 0 * (__a * __c + __b * __d);
+ COMPLEX_IMAGINARY(z) = 0 * (__b * __c - __a * __d);
+ }
+ }
+ return z;
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/divsc3.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/divsf3.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/divsf3.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/divsf3.c (revision 351984)
@@ -0,0 +1,194 @@
+//===-- lib/divsf3.c - Single-precision division ------------------*- C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements single-precision soft-float division
+// with the IEEE-754 default rounding (to nearest, ties to even).
+//
+// For simplicity, this implementation currently flushes denormals to zero.
+// It should be a fairly straightforward exercise to implement gradual
+// underflow with correct rounding.
+//
+//===----------------------------------------------------------------------===//
+
+#define SINGLE_PRECISION
+#include "fp_lib.h"
+
+COMPILER_RT_ABI fp_t __divsf3(fp_t a, fp_t b) {
+
+ const unsigned int aExponent = toRep(a) >> significandBits & maxExponent;
+ const unsigned int bExponent = toRep(b) >> significandBits & maxExponent;
+ const rep_t quotientSign = (toRep(a) ^ toRep(b)) & signBit;
+
+ rep_t aSignificand = toRep(a) & significandMask;
+ rep_t bSignificand = toRep(b) & significandMask;
+ int scale = 0;
+
+ // Detect if a or b is zero, denormal, infinity, or NaN.
+ if (aExponent - 1U >= maxExponent - 1U ||
+ bExponent - 1U >= maxExponent - 1U) {
+
+ const rep_t aAbs = toRep(a) & absMask;
+ const rep_t bAbs = toRep(b) & absMask;
+
+ // NaN / anything = qNaN
+ if (aAbs > infRep)
+ return fromRep(toRep(a) | quietBit);
+ // anything / NaN = qNaN
+ if (bAbs > infRep)
+ return fromRep(toRep(b) | quietBit);
+
+ if (aAbs == infRep) {
+ // infinity / infinity = NaN
+ if (bAbs == infRep)
+ return fromRep(qnanRep);
+ // infinity / anything else = +/- infinity
+ else
+ return fromRep(aAbs | quotientSign);
+ }
+
+ // anything else / infinity = +/- 0
+ if (bAbs == infRep)
+ return fromRep(quotientSign);
+
+ if (!aAbs) {
+ // zero / zero = NaN
+ if (!bAbs)
+ return fromRep(qnanRep);
+ // zero / anything else = +/- zero
+ else
+ return fromRep(quotientSign);
+ }
+ // anything else / zero = +/- infinity
+ if (!bAbs)
+ return fromRep(infRep | quotientSign);
+
+ // One or both of a or b is denormal. The other (if applicable) is a
+ // normal number. Renormalize one or both of a and b, and set scale to
+ // include the necessary exponent adjustment.
+ if (aAbs < implicitBit)
+ scale += normalize(&aSignificand);
+ if (bAbs < implicitBit)
+ scale -= normalize(&bSignificand);
+ }
+
+ // Set the implicit significand bit. If we fell through from the
+ // denormal path it was already set by normalize( ), but setting it twice
+ // won't hurt anything.
+ aSignificand |= implicitBit;
+ bSignificand |= implicitBit;
+ int quotientExponent = aExponent - bExponent + scale;
+
+ // Align the significand of b as a Q31 fixed-point number in the range
+ // [1, 2.0) and get a Q32 approximate reciprocal using a small minimax
+ // polynomial approximation: reciprocal = 3/4 + 1/sqrt(2) - b/2. This
+ // is accurate to about 3.5 binary digits.
+ uint32_t q31b = bSignificand << 8;
+ uint32_t reciprocal = UINT32_C(0x7504f333) - q31b;
+ // 0x7504F333 / 2^32 + 1 = 3/4 + 1/sqrt(2)
+
+ // Now refine the reciprocal estimate using a Newton-Raphson iteration:
+ //
+ // x1 = x0 * (2 - x0 * b)
+ //
+ // This doubles the number of correct binary digits in the approximation
+ // with each iteration.
+ uint32_t correction;
+ correction = -((uint64_t)reciprocal * q31b >> 32);
+ reciprocal = (uint64_t)reciprocal * correction >> 31;
+ correction = -((uint64_t)reciprocal * q31b >> 32);
+ reciprocal = (uint64_t)reciprocal * correction >> 31;
+ correction = -((uint64_t)reciprocal * q31b >> 32);
+ reciprocal = (uint64_t)reciprocal * correction >> 31;
+
+ // Adjust the final 32-bit reciprocal estimate downward to ensure that it is
+ // strictly smaller than the infinitely precise exact reciprocal. Because
+ // the computation of the Newton-Raphson step is truncating at every step,
+ // this adjustment is small; most of the work is already done.
+ reciprocal -= 2;
+
+ // The numerical reciprocal is accurate to within 2^-28, lies in the
+ // interval [0x1.000000eep-1, 0x1.fffffffcp-1], and is strictly smaller
+ // than the true reciprocal of b. Multiplying a by this reciprocal thus
+ // gives a numerical q = a/b in Q24 with the following properties:
+ //
+ // 1. q < a/b
+ // 2. q is in the interval [0x1.000000eep-1, 0x1.fffffffcp0)
+ //   3. The error in q is at most 2^-24 + 2^-27 -- the 2^-24 term comes
+ //      from the fact that we truncate the product, and the 2^-27 term
+ //      is the error in the reciprocal of b scaled by the maximum
+ //      possible value of a.  As a consequence of this error bound,
+ //      either q or nextafter(q) is the correctly rounded result.
+ rep_t quotient = (uint64_t)reciprocal * (aSignificand << 1) >> 32;
+
+ // Two cases: quotient is in [0.5, 1.0) or quotient is in [1.0, 2.0).
+ // In either case, we are going to compute a residual of the form
+ //
+ // r = a - q*b
+ //
+ // We know from the construction of q that r satisfies:
+ //
+ // 0 <= r < ulp(q)*b
+ //
+ // If r is greater than 1/2 ulp(q)*b, then q rounds up. Otherwise, we
+ // already have the correct result. The exact halfway case cannot occur.
+ // We also take this time to right shift quotient if it falls in the [1,2)
+ // range and adjust the exponent accordingly.
+ rep_t residual;
+ if (quotient < (implicitBit << 1)) {
+ residual = (aSignificand << 24) - quotient * bSignificand;
+ quotientExponent--;
+ } else {
+ quotient >>= 1;
+ residual = (aSignificand << 23) - quotient * bSignificand;
+ }
+
+ const int writtenExponent = quotientExponent + exponentBias;
+
+ if (writtenExponent >= maxExponent) {
+ // If we have overflowed the exponent, return infinity.
+ return fromRep(infRep | quotientSign);
+ }
+
+ else if (writtenExponent < 1) {
+ if (writtenExponent == 0) {
+ // Check whether the rounded result is normal.
+ const bool round = (residual << 1) > bSignificand;
+ // Clear the implicit bit.
+ rep_t absResult = quotient & significandMask;
+ // Round.
+ absResult += round;
+ if (absResult & ~significandMask) {
+ // The rounded result is normal; return it.
+ return fromRep(absResult | quotientSign);
+ }
+ }
+ // Flush denormals to zero. In the future, it would be nice to add
+ // code to round them correctly.
+ return fromRep(quotientSign);
+ }
+
+ else {
+ const bool round = (residual << 1) > bSignificand;
+ // Clear the implicit bit.
+ rep_t absResult = quotient & significandMask;
+ // Insert the exponent.
+ absResult |= (rep_t)writtenExponent << significandBits;
+ // Round.
+ absResult += round;
+ // Insert the sign and return.
+ return fromRep(absResult | quotientSign);
+ }
+}
+
+#if defined(__ARM_EABI__)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+AEABI_RTABI fp_t __aeabi_fdiv(fp_t a, fp_t b) { return __divsf3(a, b); }
+#else
+COMPILER_RT_ALIAS(__divsf3, __aeabi_fdiv)
+#endif
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/divsf3.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
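
The rounding decision shared by these division routines -- round the truncated quotient up exactly when 2*r > b, with residual r = a - q*b -- can be checked in miniature with small integers standing in for the fixed-point significands (the builtins additionally know the exact halfway case cannot occur):

    #include <stdio.h>

    int main(void) {
      unsigned a = 29, b = 8;             /* true quotient: 3.625 */
      unsigned q = a / b;                 /* 3, truncated */
      unsigned r = a - q * b;             /* residual: 5 */
      unsigned rounded = q + (2 * r > b); /* 2*5 > 8, so round up to 4 */
      printf("%u / %u rounds to %u\n", a, b, rounded);
      return 0;
    }
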
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/divsi3.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/divsi3.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/divsi3.c (revision 351984)
@@ -0,0 +1,35 @@
+//===-- divsi3.c - Implement __divsi3 -------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __divsi3 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+// Returns: a / b
+
+COMPILER_RT_ABI si_int __divsi3(si_int a, si_int b) {
+ const int bits_in_word_m1 = (int)(sizeof(si_int) * CHAR_BIT) - 1;
+ si_int s_a = a >> bits_in_word_m1; // s_a = a < 0 ? -1 : 0
+ si_int s_b = b >> bits_in_word_m1; // s_b = b < 0 ? -1 : 0
+ a = (a ^ s_a) - s_a; // negate if s_a == -1
+ b = (b ^ s_b) - s_b; // negate if s_b == -1
+ s_a ^= s_b; // sign of quotient
+ //
+ // On CPUs without unsigned hardware division support,
+ // this calls __udivsi3 (notice the cast to su_int).
+ // On CPUs with unsigned hardware division support,
+ // this uses the unsigned division instruction.
+ //
+ return ((su_int)a / (su_int)b ^ s_a) - s_a; // negate if s_a == -1
+}
+
+#if defined(__ARM_EABI__)
+COMPILER_RT_ALIAS(__divsi3, __aeabi_idiv)
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/divsi3.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/divtc3.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/divtc3.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/divtc3.c (revision 351984)
@@ -0,0 +1,54 @@
+//===-- divtc3.c - Implement __divtc3 -------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __divtc3 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#define QUAD_PRECISION
+#include "fp_lib.h"
+#include "int_lib.h"
+#include "int_math.h"
+
+// Returns: the quotient of (a + ib) / (c + id)
+
+COMPILER_RT_ABI Lcomplex __divtc3(long double __a, long double __b,
+ long double __c, long double __d) {
+ int __ilogbw = 0;
+ long double __logbw =
+ __compiler_rt_logbl(crt_fmaxl(crt_fabsl(__c), crt_fabsl(__d)));
+ if (crt_isfinite(__logbw)) {
+ __ilogbw = (int)__logbw;
+ __c = crt_scalbnl(__c, -__ilogbw);
+ __d = crt_scalbnl(__d, -__ilogbw);
+ }
+ long double __denom = __c * __c + __d * __d;
+ Lcomplex z;
+ COMPLEX_REAL(z) = crt_scalbnl((__a * __c + __b * __d) / __denom, -__ilogbw);
+ COMPLEX_IMAGINARY(z) =
+ crt_scalbnl((__b * __c - __a * __d) / __denom, -__ilogbw);
+ if (crt_isnan(COMPLEX_REAL(z)) && crt_isnan(COMPLEX_IMAGINARY(z))) {
+ if ((__denom == 0.0) && (!crt_isnan(__a) || !crt_isnan(__b))) {
+ COMPLEX_REAL(z) = crt_copysignl(CRT_INFINITY, __c) * __a;
+ COMPLEX_IMAGINARY(z) = crt_copysignl(CRT_INFINITY, __c) * __b;
+ } else if ((crt_isinf(__a) || crt_isinf(__b)) && crt_isfinite(__c) &&
+ crt_isfinite(__d)) {
+ __a = crt_copysignl(crt_isinf(__a) ? 1.0 : 0.0, __a);
+ __b = crt_copysignl(crt_isinf(__b) ? 1.0 : 0.0, __b);
+ COMPLEX_REAL(z) = CRT_INFINITY * (__a * __c + __b * __d);
+ COMPLEX_IMAGINARY(z) = CRT_INFINITY * (__b * __c - __a * __d);
+ } else if (crt_isinf(__logbw) && __logbw > 0.0 && crt_isfinite(__a) &&
+ crt_isfinite(__b)) {
+ __c = crt_copysignl(crt_isinf(__c) ? 1.0 : 0.0, __c);
+ __d = crt_copysignl(crt_isinf(__d) ? 1.0 : 0.0, __d);
+ COMPLEX_REAL(z) = 0.0 * (__a * __c + __b * __d);
+ COMPLEX_IMAGINARY(z) = 0.0 * (__b * __c - __a * __d);
+ }
+ }
+ return z;
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/divtc3.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/divtf3.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/divtf3.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/divtf3.c (revision 351984)
@@ -0,0 +1,221 @@
+//===-- lib/divtf3.c - Quad-precision division --------------------*- C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements quad-precision soft-float division
+// with the IEEE-754 default rounding (to nearest, ties to even).
+//
+// For simplicity, this implementation currently flushes denormals to zero.
+// It should be a fairly straightforward exercise to implement gradual
+// underflow with correct rounding.
+//
+//===----------------------------------------------------------------------===//
+
+#define QUAD_PRECISION
+#include "fp_lib.h"
+
+#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
+COMPILER_RT_ABI fp_t __divtf3(fp_t a, fp_t b) {
+
+ const unsigned int aExponent = toRep(a) >> significandBits & maxExponent;
+ const unsigned int bExponent = toRep(b) >> significandBits & maxExponent;
+ const rep_t quotientSign = (toRep(a) ^ toRep(b)) & signBit;
+
+ rep_t aSignificand = toRep(a) & significandMask;
+ rep_t bSignificand = toRep(b) & significandMask;
+ int scale = 0;
+
+ // Detect if a or b is zero, denormal, infinity, or NaN.
+ if (aExponent - 1U >= maxExponent - 1U ||
+ bExponent - 1U >= maxExponent - 1U) {
+
+ const rep_t aAbs = toRep(a) & absMask;
+ const rep_t bAbs = toRep(b) & absMask;
+
+ // NaN / anything = qNaN
+ if (aAbs > infRep)
+ return fromRep(toRep(a) | quietBit);
+ // anything / NaN = qNaN
+ if (bAbs > infRep)
+ return fromRep(toRep(b) | quietBit);
+
+ if (aAbs == infRep) {
+ // infinity / infinity = NaN
+ if (bAbs == infRep)
+ return fromRep(qnanRep);
+ // infinity / anything else = +/- infinity
+ else
+ return fromRep(aAbs | quotientSign);
+ }
+
+ // anything else / infinity = +/- 0
+ if (bAbs == infRep)
+ return fromRep(quotientSign);
+
+ if (!aAbs) {
+ // zero / zero = NaN
+ if (!bAbs)
+ return fromRep(qnanRep);
+ // zero / anything else = +/- zero
+ else
+ return fromRep(quotientSign);
+ }
+ // anything else / zero = +/- infinity
+ if (!bAbs)
+ return fromRep(infRep | quotientSign);
+
+ // One or both of a or b is denormal. The other (if applicable) is a
+ // normal number. Renormalize one or both of a and b, and set scale to
+ // include the necessary exponent adjustment.
+ if (aAbs < implicitBit)
+ scale += normalize(&aSignificand);
+ if (bAbs < implicitBit)
+ scale -= normalize(&bSignificand);
+ }
+
+ // Set the implicit significand bit. If we fell through from the
+ // denormal path it was already set by normalize( ), but setting it twice
+ // won't hurt anything.
+ aSignificand |= implicitBit;
+ bSignificand |= implicitBit;
+ int quotientExponent = aExponent - bExponent + scale;
+
+ // Align the significand of b as a Q63 fixed-point number in the range
+ // [1, 2.0) and get a Q64 approximate reciprocal using a small minimax
+ // polynomial approximation: reciprocal = 3/4 + 1/sqrt(2) - b/2. This
+ // is accurate to about 3.5 binary digits.
+ const uint64_t q63b = bSignificand >> 49;
+ uint64_t recip64 = UINT64_C(0x7504f333F9DE6484) - q63b;
+ // 0x7504f333F9DE6484 / 2^64 + 1 = 3/4 + 1/sqrt(2)
+
+ // Now refine the reciprocal estimate using a Newton-Raphson iteration:
+ //
+ // x1 = x0 * (2 - x0 * b)
+ //
+ // This doubles the number of correct binary digits in the approximation
+ // with each iteration.
+ uint64_t correction64;
+ correction64 = -((rep_t)recip64 * q63b >> 64);
+ recip64 = (rep_t)recip64 * correction64 >> 63;
+ correction64 = -((rep_t)recip64 * q63b >> 64);
+ recip64 = (rep_t)recip64 * correction64 >> 63;
+ correction64 = -((rep_t)recip64 * q63b >> 64);
+ recip64 = (rep_t)recip64 * correction64 >> 63;
+ correction64 = -((rep_t)recip64 * q63b >> 64);
+ recip64 = (rep_t)recip64 * correction64 >> 63;
+ correction64 = -((rep_t)recip64 * q63b >> 64);
+ recip64 = (rep_t)recip64 * correction64 >> 63;
+
+ // The reciprocal may have overflowed to zero if the upper half of b is
+ // exactly 1.0. This would sabotage the full-width final stage of the
+ // computation that follows, so we adjust the reciprocal down by one bit.
+ recip64--;
+
+ // We need to perform one more iteration to get us to 112 binary digits;
+ // The last iteration needs to happen with extra precision.
+ const uint64_t q127blo = bSignificand << 15;
+ rep_t correction, reciprocal;
+
+ // NOTE: This operation is equivalent to __multi3, which is not implemented
+ // on some architectures.
+ rep_t r64q63, r64q127, r64cH, r64cL, dummy;
+ wideMultiply((rep_t)recip64, (rep_t)q63b, &dummy, &r64q63);
+ wideMultiply((rep_t)recip64, (rep_t)q127blo, &dummy, &r64q127);
+
+ correction = -(r64q63 + (r64q127 >> 64));
+
+ uint64_t cHi = correction >> 64;
+ uint64_t cLo = correction;
+
+ wideMultiply((rep_t)recip64, (rep_t)cHi, &dummy, &r64cH);
+ wideMultiply((rep_t)recip64, (rep_t)cLo, &dummy, &r64cL);
+
+ reciprocal = r64cH + (r64cL >> 64);
+
+ // Adjust the final 128-bit reciprocal estimate downward to ensure that it
+ // is strictly smaller than the infinitely precise exact reciprocal. Because
+ // the computation of the Newton-Raphson step is truncating at every step,
+ // this adjustment is small; most of the work is already done.
+ reciprocal -= 2;
+
+ // The numerical reciprocal is accurate to within 2^-112, lies in the
+ // interval [0.5, 1.0), and is strictly smaller than the true reciprocal
+ // of b. Multiplying a by this reciprocal thus gives a numerical q = a/b
+ // in Q127 with the following properties:
+ //
+ // 1. q < a/b
+ // 2. q is in the interval [0.5, 2.0)
+ // 3. The error in q is bounded away from 2^-113 (actually, we have a
+ // couple of bits to spare, but this is all we need).
+
+ // We need a 128 x 128 multiply high to compute q, which isn't a basic
+ // operation in C, so we need to be a little bit fussy.
+ rep_t quotient, quotientLo;
+ wideMultiply(aSignificand << 2, reciprocal, &quotient, &quotientLo);
+
+ // Two cases: quotient is in [0.5, 1.0) or quotient is in [1.0, 2.0).
+ // In either case, we are going to compute a residual of the form
+ //
+ // r = a - q*b
+ //
+ // We know from the construction of q that r satisfies:
+ //
+ // 0 <= r < ulp(q)*b
+ //
+ // If r is greater than 1/2 ulp(q)*b, then q rounds up. Otherwise, we
+ // already have the correct result. The exact halfway case cannot occur.
+ // We also take this time to right shift quotient if it falls in the [1,2)
+ // range and adjust the exponent accordingly.
+ rep_t residual;
+ rep_t qb;
+
+ if (quotient < (implicitBit << 1)) {
+ wideMultiply(quotient, bSignificand, &dummy, &qb);
+ residual = (aSignificand << 113) - qb;
+ quotientExponent--;
+ } else {
+ quotient >>= 1;
+ wideMultiply(quotient, bSignificand, &dummy, &qb);
+ residual = (aSignificand << 112) - qb;
+ }
+
+ const int writtenExponent = quotientExponent + exponentBias;
+
+ if (writtenExponent >= maxExponent) {
+ // If we have overflowed the exponent, return infinity.
+ return fromRep(infRep | quotientSign);
+ } else if (writtenExponent < 1) {
+ if (writtenExponent == 0) {
+ // Check whether the rounded result is normal.
+ const bool round = (residual << 1) > bSignificand;
+ // Clear the implicit bit.
+ rep_t absResult = quotient & significandMask;
+ // Round.
+ absResult += round;
+ if (absResult & ~significandMask) {
+ // The rounded result is normal; return it.
+ return fromRep(absResult | quotientSign);
+ }
+ }
+ // Flush denormals to zero. In the future, it would be nice to add
+ // code to round them correctly.
+ return fromRep(quotientSign);
+ } else {
+ const bool round = (residual << 1) >= bSignificand;
+ // Clear the implicit bit.
+ rep_t absResult = quotient & significandMask;
+ // Insert the exponent.
+ absResult |= (rep_t)writtenExponent << significandBits;
+ // Round.
+ absResult += round;
+ // Insert the sign and return.
+ const long double result = fromRep(absResult | quotientSign);
+ return result;
+ }
+}
+
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/divtf3.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
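
wideMultiply, used above because a 128 x 128 multiply high is not a basic C operation, is the schoolbook split into halves. The same construction one level down -- a 64 x 64 multiply high built from 32-bit partial products -- sketched and checked against __int128 where available:

    #include <stdint.h>
    #include <stdio.h>

    /* High 64 bits of a 64x64 product via 32-bit partial products; the
       quad-precision code applies the same scheme at twice the width. */
    static uint64_t mulhi64(uint64_t a, uint64_t b) {
      uint64_t aLo = (uint32_t)a, aHi = a >> 32;
      uint64_t bLo = (uint32_t)b, bHi = b >> 32;
      uint64_t mid1 = aHi * bLo + ((aLo * bLo) >> 32);
      uint64_t mid2 = aLo * bHi + (uint32_t)mid1;
      return aHi * bHi + (mid1 >> 32) + (mid2 >> 32);
    }

    int main(void) {
      uint64_t a = 0xDEADBEEFCAFEBABEULL, b = 0x123456789ABCDEF0ULL;
    #ifdef __SIZEOF_INT128__
      uint64_t want = (uint64_t)(((__uint128_t)a * b) >> 64);
      printf("match: %d\n", mulhi64(a, b) == want); /* match: 1 */
    #else
      printf("0x%016llx\n", (unsigned long long)mulhi64(a, b));
    #endif
      return 0;
    }
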
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/divti3.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/divti3.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/divti3.c (revision 351984)
@@ -0,0 +1,29 @@
+//===-- divti3.c - Implement __divti3 -------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __divti3 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+#ifdef CRT_HAS_128BIT
+
+// Returns: a / b
+
+COMPILER_RT_ABI ti_int __divti3(ti_int a, ti_int b) {
+ const int bits_in_tword_m1 = (int)(sizeof(ti_int) * CHAR_BIT) - 1;
+ ti_int s_a = a >> bits_in_tword_m1; // s_a = a < 0 ? -1 : 0
+ ti_int s_b = b >> bits_in_tword_m1; // s_b = b < 0 ? -1 : 0
+ a = (a ^ s_a) - s_a; // negate if s_a == -1
+ b = (b ^ s_b) - s_b; // negate if s_b == -1
+ s_a ^= s_b; // sign of quotient
+ return (__udivmodti4(a, b, (tu_int *)0) ^ s_a) - s_a; // negate if s_a == -1
+}
+
+#endif // CRT_HAS_128BIT
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/divti3.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/divxc3.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/divxc3.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/divxc3.c (revision 351984)
@@ -0,0 +1,55 @@
+//===-- divxc3.c - Implement __divxc3 -------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __divxc3 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#if !_ARCH_PPC
+
+#include "int_lib.h"
+#include "int_math.h"
+
+// Returns: the quotient of (a + ib) / (c + id)
+
+COMPILER_RT_ABI Lcomplex __divxc3(long double __a, long double __b,
+ long double __c, long double __d) {
+ int __ilogbw = 0;
+ long double __logbw = crt_logbl(crt_fmaxl(crt_fabsl(__c), crt_fabsl(__d)));
+ if (crt_isfinite(__logbw)) {
+ __ilogbw = (int)__logbw;
+ __c = crt_scalbnl(__c, -__ilogbw);
+ __d = crt_scalbnl(__d, -__ilogbw);
+ }
+ long double __denom = __c * __c + __d * __d;
+ Lcomplex z;
+ COMPLEX_REAL(z) = crt_scalbnl((__a * __c + __b * __d) / __denom, -__ilogbw);
+ COMPLEX_IMAGINARY(z) =
+ crt_scalbnl((__b * __c - __a * __d) / __denom, -__ilogbw);
+ if (crt_isnan(COMPLEX_REAL(z)) && crt_isnan(COMPLEX_IMAGINARY(z))) {
+ if ((__denom == 0) && (!crt_isnan(__a) || !crt_isnan(__b))) {
+ COMPLEX_REAL(z) = crt_copysignl(CRT_INFINITY, __c) * __a;
+ COMPLEX_IMAGINARY(z) = crt_copysignl(CRT_INFINITY, __c) * __b;
+ } else if ((crt_isinf(__a) || crt_isinf(__b)) && crt_isfinite(__c) &&
+ crt_isfinite(__d)) {
+ __a = crt_copysignl(crt_isinf(__a) ? 1 : 0, __a);
+ __b = crt_copysignl(crt_isinf(__b) ? 1 : 0, __b);
+ COMPLEX_REAL(z) = CRT_INFINITY * (__a * __c + __b * __d);
+ COMPLEX_IMAGINARY(z) = CRT_INFINITY * (__b * __c - __a * __d);
+ } else if (crt_isinf(__logbw) && __logbw > 0 && crt_isfinite(__a) &&
+ crt_isfinite(__b)) {
+ __c = crt_copysignl(crt_isinf(__c) ? 1 : 0, __c);
+ __d = crt_copysignl(crt_isinf(__d) ? 1 : 0, __d);
+ COMPLEX_REAL(z) = 0 * (__a * __c + __b * __d);
+ COMPLEX_IMAGINARY(z) = 0 * (__b * __c - __a * __d);
+ }
+ }
+ return z;
+}
+
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/divxc3.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/emutls.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/emutls.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/emutls.c (revision 351984)
@@ -0,0 +1,383 @@
+//===---------- emutls.c - Implements __emutls_get_address ---------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "int_lib.h"
+
+#ifdef __BIONIC__
+// There are 4 pthread key cleanup rounds on Bionic. Delay emutls deallocation
+// to round 2. We need to delay deallocation because:
+// - Android versions older than M lack __cxa_thread_atexit_impl, so apps
+// use a pthread key destructor to call C++ destructors.
+// - Apps might use __thread/thread_local variables in pthread destructors.
+// We can't wait until the final two rounds, because jemalloc needs two rounds
+// after the final malloc/free call to free its thread-specific data (see
+// https://reviews.llvm.org/D46978#1107507).
+#define EMUTLS_SKIP_DESTRUCTOR_ROUNDS 1
+#else
+#define EMUTLS_SKIP_DESTRUCTOR_ROUNDS 0
+#endif
+
+typedef struct emutls_address_array {
+ uintptr_t skip_destructor_rounds;
+ uintptr_t size; // number of elements in the 'data' array
+ void *data[];
+} emutls_address_array;
+
+static void emutls_shutdown(emutls_address_array *array);
+
+#ifndef _WIN32
+
+#include <pthread.h>
+
+static pthread_mutex_t emutls_mutex = PTHREAD_MUTEX_INITIALIZER;
+static pthread_key_t emutls_pthread_key;
+static bool emutls_key_created = false;
+
+typedef unsigned int gcc_word __attribute__((mode(word)));
+typedef unsigned int gcc_pointer __attribute__((mode(pointer)));
+
+// Default is not to use posix_memalign, so systems like Android
+// can use thread local data without heavier POSIX memory allocators.
+#ifndef EMUTLS_USE_POSIX_MEMALIGN
+#define EMUTLS_USE_POSIX_MEMALIGN 0
+#endif
+
+static __inline void *emutls_memalign_alloc(size_t align, size_t size) {
+ void *base;
+#if EMUTLS_USE_POSIX_MEMALIGN
+ if (posix_memalign(&base, align, size) != 0)
+ abort();
+#else
+#define EXTRA_ALIGN_PTR_BYTES (align - 1 + sizeof(void *))
+ char *object;
+ if ((object = (char *)malloc(EXTRA_ALIGN_PTR_BYTES + size)) == NULL)
+ abort();
+ base = (void *)(((uintptr_t)(object + EXTRA_ALIGN_PTR_BYTES)) &
+ ~(uintptr_t)(align - 1));
+
+ ((void **)base)[-1] = object;
+#endif
+ return base;
+}
+
+static __inline void emutls_memalign_free(void *base) {
+#if EMUTLS_USE_POSIX_MEMALIGN
+ free(base);
+#else
+ // The mallocated address is in ((void**)base)[-1]
+ free(((void **)base)[-1]);
+#endif
+}
+
+static __inline void emutls_setspecific(emutls_address_array *value) {
+ pthread_setspecific(emutls_pthread_key, (void *)value);
+}
+
+static __inline emutls_address_array *emutls_getspecific() {
+ return (emutls_address_array *)pthread_getspecific(emutls_pthread_key);
+}
+
+static void emutls_key_destructor(void *ptr) {
+ emutls_address_array *array = (emutls_address_array *)ptr;
+ if (array->skip_destructor_rounds > 0) {
+ // emutls is deallocated using a pthread key destructor. These
+ // destructors are called in several rounds to accommodate destructor
+ // functions that (re)initialize key values with pthread_setspecific.
+ // Delay the emutls deallocation to accommodate other end-of-thread
+ // cleanup tasks like calling thread_local destructors (e.g. the
+ // __cxa_thread_atexit fallback in libc++abi).
+ array->skip_destructor_rounds--;
+ emutls_setspecific(array);
+ } else {
+ emutls_shutdown(array);
+ free(ptr);
+ }
+}
+
+static __inline void emutls_init(void) {
+ if (pthread_key_create(&emutls_pthread_key, emutls_key_destructor) != 0)
+ abort();
+ emutls_key_created = true;
+}
+
+static __inline void emutls_init_once(void) {
+ static pthread_once_t once = PTHREAD_ONCE_INIT;
+ pthread_once(&once, emutls_init);
+}
+
+static __inline void emutls_lock() { pthread_mutex_lock(&emutls_mutex); }
+
+static __inline void emutls_unlock() { pthread_mutex_unlock(&emutls_mutex); }
+
+#else // _WIN32
+
+#include <assert.h>
+#include <malloc.h>
+#include <stdio.h>
+#include <windows.h>
+
+static LPCRITICAL_SECTION emutls_mutex;
+static DWORD emutls_tls_index = TLS_OUT_OF_INDEXES;
+
+typedef uintptr_t gcc_word;
+typedef void *gcc_pointer;
+
+static void win_error(DWORD last_err, const char *hint) {
+ char *buffer = NULL;
+ if (FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER |
+ FORMAT_MESSAGE_FROM_SYSTEM |
+ FORMAT_MESSAGE_MAX_WIDTH_MASK,
+ NULL, last_err, 0, (LPSTR)&buffer, 1, NULL)) {
+ fprintf(stderr, "Windows error: %s\n", buffer);
+ } else {
+ fprintf(stderr, "Unkown Windows error: %s\n", hint);
+ }
+ LocalFree(buffer);
+}
+
+static __inline void win_abort(DWORD last_err, const char *hint) {
+ win_error(last_err, hint);
+ abort();
+}
+
+static __inline void *emutls_memalign_alloc(size_t align, size_t size) {
+ void *base = _aligned_malloc(size, align);
+ if (!base)
+ win_abort(GetLastError(), "_aligned_malloc");
+ return base;
+}
+
+static __inline void emutls_memalign_free(void *base) { _aligned_free(base); }
+
+static void emutls_exit(void) {
+ if (emutls_mutex) {
+ DeleteCriticalSection(emutls_mutex);
+ _aligned_free(emutls_mutex);
+ emutls_mutex = NULL;
+ }
+ if (emutls_tls_index != TLS_OUT_OF_INDEXES) {
+ emutls_shutdown((emutls_address_array *)TlsGetValue(emutls_tls_index));
+ TlsFree(emutls_tls_index);
+ emutls_tls_index = TLS_OUT_OF_INDEXES;
+ }
+}
+
+#pragma warning(push)
+#pragma warning(disable : 4100)
+static BOOL CALLBACK emutls_init(PINIT_ONCE p0, PVOID p1, PVOID *p2) {
+ emutls_mutex =
+ (LPCRITICAL_SECTION)_aligned_malloc(sizeof(CRITICAL_SECTION), 16);
+ if (!emutls_mutex) {
+ win_error(GetLastError(), "_aligned_malloc");
+ return FALSE;
+ }
+ InitializeCriticalSection(emutls_mutex);
+
+ emutls_tls_index = TlsAlloc();
+ if (emutls_tls_index == TLS_OUT_OF_INDEXES) {
+ emutls_exit();
+ win_error(GetLastError(), "TlsAlloc");
+ return FALSE;
+ }
+ atexit(&emutls_exit);
+ return TRUE;
+}
+
+static __inline void emutls_init_once(void) {
+ static INIT_ONCE once;
+ InitOnceExecuteOnce(&once, emutls_init, NULL, NULL);
+}
+
+static __inline void emutls_lock() { EnterCriticalSection(emutls_mutex); }
+
+static __inline void emutls_unlock() { LeaveCriticalSection(emutls_mutex); }
+
+static __inline void emutls_setspecific(emutls_address_array *value) {
+ if (TlsSetValue(emutls_tls_index, (LPVOID)value) == 0)
+ win_abort(GetLastError(), "TlsSetValue");
+}
+
+static __inline emutls_address_array *emutls_getspecific() {
+ LPVOID value = TlsGetValue(emutls_tls_index);
+ if (value == NULL) {
+ const DWORD err = GetLastError();
+ if (err != ERROR_SUCCESS)
+ win_abort(err, "TlsGetValue");
+ }
+ return (emutls_address_array *)value;
+}
+
+// Provide atomic load/store functions for emutls_get_index if built with MSVC.
+#if !defined(__ATOMIC_RELEASE)
+#include <intrin.h>
+
+enum { __ATOMIC_ACQUIRE = 2, __ATOMIC_RELEASE = 3 };
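+// These values match the memory-order constants used by GCC/Clang's
+// __atomic built-ins (__ATOMIC_ACQUIRE == 2, __ATOMIC_RELEASE == 3).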
+
+static __inline uintptr_t __atomic_load_n(void *ptr, unsigned type) {
+ assert(type == __ATOMIC_ACQUIRE);
+ // These return the previous value - but since we do an OR with 0,
+ // it's equivalent to a plain load.
+#ifdef _WIN64
+ return InterlockedOr64(ptr, 0);
+#else
+ return InterlockedOr(ptr, 0);
+#endif
+}
+
+static __inline void __atomic_store_n(void *ptr, uintptr_t val, unsigned type) {
+ assert(type == __ATOMIC_RELEASE);
+ InterlockedExchangePointer((void *volatile *)ptr, (void *)val);
+}
+
+#endif // __ATOMIC_RELEASE
+
+#pragma warning(pop)
+
+#endif // _WIN32
+
+static size_t emutls_num_object = 0; // number of TLS object indexes assigned
+
+// Free the allocated TLS data
+static void emutls_shutdown(emutls_address_array *array) {
+ if (array) {
+ uintptr_t i;
+ for (i = 0; i < array->size; ++i) {
+ if (array->data[i])
+ emutls_memalign_free(array->data[i]);
+ }
+ }
+}
+
+// For every TLS variable xyz there is one __emutls_control variable named
+// __emutls_v.xyz. If xyz has a non-zero initial value, __emutls_v.xyz's
+// "value" points to __emutls_t.xyz, which holds that initial value.
+typedef struct __emutls_control {
+ // Must use gcc_word here, instead of size_t, to match GCC. When
+ // gcc_word is larger than size_t, the upper extra bits are all
+ // zeros. We can use variables of size_t to operate on size and
+ // align.
+ gcc_word size; // size of the object in bytes
+ gcc_word align; // alignment of the object in bytes
+ union {
+ uintptr_t index; // data[index-1] is the object address
+ void *address; // object address, when in single thread env
+ } object;
+ void *value; // null or non-zero initial value for the object
+} __emutls_control;
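+
+// Illustration (not part of this file): for a hypothetical
+//   __thread int xyz = 42;
+// an emutls-targeting compiler emits, conceptually,
+//   static int __emutls_t.xyz = 42;                      // initial value
+//   __emutls_control __emutls_v.xyz =
+//       {sizeof(int), _Alignof(int), {0}, &__emutls_t.xyz};
+// and rewrites every access to xyz as
+//   *(int *)__emutls_get_address(&__emutls_v.xyz);
+// (The dotted names are assembler-level symbols and cannot be spelled in C.)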
+
+// Emulated TLS objects are always allocated at run-time.
+static __inline void *emutls_allocate_object(__emutls_control *control) {
+ // Use standard C types, check with gcc's emutls.o.
+ COMPILE_TIME_ASSERT(sizeof(uintptr_t) == sizeof(gcc_pointer));
+ COMPILE_TIME_ASSERT(sizeof(uintptr_t) == sizeof(void *));
+
+ size_t size = control->size;
+ size_t align = control->align;
+ void *base;
+ if (align < sizeof(void *))
+ align = sizeof(void *);
+ // Make sure that align is power of 2.
+ if ((align & (align - 1)) != 0)
+ abort();
+
+ base = emutls_memalign_alloc(align, size);
+ if (control->value)
+ memcpy(base, control->value, size);
+ else
+ memset(base, 0, size);
+ return base;
+}
+
+// Returns control->object.index; set index if not allocated yet.
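+// Uses double-checked locking: a nonzero index observed by the acquire load
+// on the fast path pairs with the release store made under emutls_mutex, so
+// each control record is assigned exactly one index.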
+static __inline uintptr_t emutls_get_index(__emutls_control *control) {
+ uintptr_t index = __atomic_load_n(&control->object.index, __ATOMIC_ACQUIRE);
+ if (!index) {
+ emutls_init_once();
+ emutls_lock();
+ index = control->object.index;
+ if (!index) {
+ index = ++emutls_num_object;
+ __atomic_store_n(&control->object.index, index, __ATOMIC_RELEASE);
+ }
+ emutls_unlock();
+ }
+ return index;
+}
+
+// Updates newly allocated thread local emutls_address_array.
+static __inline void emutls_check_array_set_size(emutls_address_array *array,
+ uintptr_t size) {
+ if (array == NULL)
+ abort();
+ array->size = size;
+ emutls_setspecific(array);
+}
+
+// Returns the new size of the 'data' array, in number of elements,
+// which must be no smaller than the given index.
+static __inline uintptr_t emutls_new_data_array_size(uintptr_t index) {
+  // The allocation needs extra slots to store the emutls_address_array
+  // header. Round the total size up to a multiple of 16 pointer-sized words.
+ uintptr_t header_words = sizeof(emutls_address_array) / sizeof(void *);
+ return ((index + header_words + 15) & ~((uintptr_t)15)) - header_words;
+}
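+//
+// Worked example, assuming a two-pointer-word header: for index == 1,
+// ((1 + 2 + 15) & ~15) - 2 == 14, so the first allocation provides 14 data
+// slots and the whole block spans 16 pointer-sized words.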
+
+// Returns the size in bytes required for an emutls_address_array with
+// N elements in its data field.
+static __inline uintptr_t emutls_asize(uintptr_t N) {
+ return N * sizeof(void *) + sizeof(emutls_address_array);
+}
+
+// Returns the thread local emutls_address_array.
+// Extends its size if necessary to hold address at index.
+static __inline emutls_address_array *
+emutls_get_address_array(uintptr_t index) {
+ emutls_address_array *array = emutls_getspecific();
+ if (array == NULL) {
+ uintptr_t new_size = emutls_new_data_array_size(index);
+ array = (emutls_address_array *)malloc(emutls_asize(new_size));
+ if (array) {
+ memset(array->data, 0, new_size * sizeof(void *));
+ array->skip_destructor_rounds = EMUTLS_SKIP_DESTRUCTOR_ROUNDS;
+ }
+ emutls_check_array_set_size(array, new_size);
+ } else if (index > array->size) {
+ uintptr_t orig_size = array->size;
+ uintptr_t new_size = emutls_new_data_array_size(index);
+ array = (emutls_address_array *)realloc(array, emutls_asize(new_size));
+ if (array)
+ memset(array->data + orig_size, 0,
+ (new_size - orig_size) * sizeof(void *));
+ emutls_check_array_set_size(array, new_size);
+ }
+ return array;
+}
+
+void *__emutls_get_address(__emutls_control *control) {
+ uintptr_t index = emutls_get_index(control);
+ emutls_address_array *array = emutls_get_address_array(index--);
+ if (array->data[index] == NULL)
+ array->data[index] = emutls_allocate_object(control);
+ return array->data[index];
+}
+
+#ifdef __BIONIC__
+// Called by Bionic on dlclose to delete the emutls pthread key.
+__attribute__((visibility("hidden"))) void __emutls_unregister_key(void) {
+ if (emutls_key_created) {
+ pthread_key_delete(emutls_pthread_key);
+ emutls_key_created = false;
+ }
+}
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/emutls.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/enable_execute_stack.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/enable_execute_stack.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/enable_execute_stack.c (revision 351984)
@@ -0,0 +1,67 @@
+//===-- enable_execute_stack.c - Implement __enable_execute_stack ---------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+#ifndef _WIN32
+#include <sys/mman.h>
+#endif
+
+// #include "config.h"
+// FIXME: CMake - include when cmake system is ready.
+// Remove #define HAVE_SYSCONF 1 line.
+#define HAVE_SYSCONF 1
+
+#ifdef _WIN32
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#else
+#ifndef __APPLE__
+#include <unistd.h>
+#endif // __APPLE__
+#endif // _WIN32
+
+#if __LP64__
+#define TRAMPOLINE_SIZE 48
+#else
+#define TRAMPOLINE_SIZE 40
+#endif
+
+// The compiler generates calls to __enable_execute_stack() when creating
+// trampoline functions on the stack for use with nested functions.
+// It is expected to mark the page(s) containing the address and the next
+// TRAMPOLINE_SIZE bytes (40 or 48, depending on the ABI) as executable.
+// Since the stack is normally mapped rw-, that means changing the
+// protection on those page(s) to rwx.
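+//
+// Worked example of the page math below (pageSize == 4096, addr == 0x1001fe0):
+// startPage == 0x1001000 and endPage == 0x1003000, so mprotect() covers 8192
+// bytes, keeping the whole trampoline executable even when it straddles a
+// page boundary.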
+
+COMPILER_RT_ABI void __enable_execute_stack(void *addr) {
+
+#if _WIN32
+ MEMORY_BASIC_INFORMATION mbi;
+ if (!VirtualQuery(addr, &mbi, sizeof(mbi)))
+ return; // We should probably assert here because there is no return value
+ VirtualProtect(mbi.BaseAddress, mbi.RegionSize, PAGE_EXECUTE_READWRITE,
+ &mbi.Protect);
+#else
+#if __APPLE__
+ // On Darwin, pagesize is always 4096 bytes
+ const uintptr_t pageSize = 4096;
+#elif !defined(HAVE_SYSCONF)
+#error "HAVE_SYSCONF not defined! See enable_execute_stack.c"
+#else
+ const uintptr_t pageSize = sysconf(_SC_PAGESIZE);
+#endif // __APPLE__
+
+ const uintptr_t pageAlignMask = ~(pageSize - 1);
+ uintptr_t p = (uintptr_t)addr;
+ unsigned char *startPage = (unsigned char *)(p & pageAlignMask);
+ unsigned char *endPage =
+ (unsigned char *)((p + TRAMPOLINE_SIZE + pageSize) & pageAlignMask);
+ size_t length = endPage - startPage;
+ (void)mprotect((void *)startPage, length, PROT_READ | PROT_WRITE | PROT_EXEC);
+#endif
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/enable_execute_stack.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/eprintf.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/eprintf.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/eprintf.c (revision 351984)
@@ -0,0 +1,27 @@
+//===---------- eprintf.c - Implements __eprintf --------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+#include <stdio.h>
+
+// __eprintf() was used in an old version of <assert.h>.
+// It can eventually go away, but it is needed when linking
+// .o files built with the old <assert.h>.
+//
+// It should never be exported from a dylib, so it is marked
+// visibility hidden.
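+//
+// A hypothetical old-style call site, for illustration only (the actual
+// format string came from the old <assert.h>):
+//   __eprintf("failed assertion `%s' at line %s of %s\n", "x > 0", "42",
+//             "demo.c");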
+#ifndef _WIN32
+__attribute__((visibility("hidden")))
+#endif
+COMPILER_RT_ABI void
+__eprintf(const char *format, const char *assertion_expression,
+ const char *line, const char *file) {
+ fprintf(stderr, format, assertion_expression, line, file);
+ fflush(stderr);
+ compilerrt_abort();
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/eprintf.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/extenddftf2.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/extenddftf2.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/extenddftf2.c (revision 351984)
@@ -0,0 +1,21 @@
+//===-- lib/extenddftf2.c - double -> quad conversion -------------*- C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#define QUAD_PRECISION
+#include "fp_lib.h"
+
+#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
+#define SRC_DOUBLE
+#define DST_QUAD
+#include "fp_extend_impl.inc"
+
+COMPILER_RT_ABI long double __extenddftf2(double a) {
+ return __extendXfYf2__(a);
+}
+
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/extenddftf2.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/extendhfsf2.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/extendhfsf2.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/extendhfsf2.c (revision 351984)
@@ -0,0 +1,27 @@
+//===-- lib/extendhfsf2.c - half -> single conversion -------------*- C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#define SRC_HALF
+#define DST_SINGLE
+#include "fp_extend_impl.inc"
+
+// Use a forwarding definition and noinline to implement a poor man's alias,
+// as there isn't a good cross-platform way of defining one.
+COMPILER_RT_ABI NOINLINE float __extendhfsf2(uint16_t a) {
+ return __extendXfYf2__(a);
+}
+
+COMPILER_RT_ABI float __gnu_h2f_ieee(uint16_t a) { return __extendhfsf2(a); }
+
+#if defined(__ARM_EABI__)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+AEABI_RTABI float __aeabi_h2f(uint16_t a) { return __extendhfsf2(a); }
+#else
+COMPILER_RT_ALIAS(__extendhfsf2, __aeabi_h2f)
+#endif
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/extendhfsf2.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/extendsfdf2.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/extendsfdf2.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/extendsfdf2.c (revision 351984)
@@ -0,0 +1,21 @@
+//===-- lib/extendsfdf2.c - single -> double conversion -----------*- C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#define SRC_SINGLE
+#define DST_DOUBLE
+#include "fp_extend_impl.inc"
+
+COMPILER_RT_ABI double __extendsfdf2(float a) { return __extendXfYf2__(a); }
+
+#if defined(__ARM_EABI__)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+AEABI_RTABI double __aeabi_f2d(float a) { return __extendsfdf2(a); }
+#else
+COMPILER_RT_ALIAS(__extendsfdf2, __aeabi_f2d)
+#endif
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/extendsfdf2.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/extendsftf2.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/extendsftf2.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/extendsftf2.c (revision 351984)
@@ -0,0 +1,21 @@
+//===-- lib/extendsftf2.c - single -> quad conversion -------------*- C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#define QUAD_PRECISION
+#include "fp_lib.h"
+
+#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
+#define SRC_SINGLE
+#define DST_QUAD
+#include "fp_extend_impl.inc"
+
+COMPILER_RT_ABI long double __extendsftf2(float a) {
+ return __extendXfYf2__(a);
+}
+
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/extendsftf2.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ffsdi2.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ffsdi2.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ffsdi2.c (revision 351984)
@@ -0,0 +1,27 @@
+//===-- ffsdi2.c - Implement __ffsdi2 -------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __ffsdi2 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+// Returns: the index of the least significant 1-bit in a, or
+// the value zero if a is zero. The least significant bit is index one.
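+// Examples: __ffsdi2(0) == 0, __ffsdi2(1) == 1, __ffsdi2(0x10) == 5, and
+// __ffsdi2((di_int)1 << 32) == 33.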
+
+COMPILER_RT_ABI si_int __ffsdi2(di_int a) {
+ dwords x;
+ x.all = a;
+ if (x.s.low == 0) {
+ if (x.s.high == 0)
+ return 0;
+ return __builtin_ctz(x.s.high) + (1 + sizeof(si_int) * CHAR_BIT);
+ }
+ return __builtin_ctz(x.s.low) + 1;
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ffsdi2.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ffssi2.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ffssi2.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ffssi2.c (revision 351984)
@@ -0,0 +1,23 @@
+//===-- ffssi2.c - Implement __ffssi2 -------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __ffssi2 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+// Returns: the index of the least significant 1-bit in a, or
+// the value zero if a is zero. The least significant bit is index one.
+
+COMPILER_RT_ABI si_int __ffssi2(si_int a) {
+ if (a == 0) {
+ return 0;
+ }
+ return __builtin_ctz(a) + 1;
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ffssi2.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ffsti2.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ffsti2.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ffsti2.c (revision 351984)
@@ -0,0 +1,31 @@
+//===-- ffsti2.c - Implement __ffsti2 -------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __ffsti2 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+#ifdef CRT_HAS_128BIT
+
+// Returns: the index of the least significant 1-bit in a, or
+// the value zero if a is zero. The least significant bit is index one.
+
+COMPILER_RT_ABI si_int __ffsti2(ti_int a) {
+ twords x;
+ x.all = a;
+ if (x.s.low == 0) {
+ if (x.s.high == 0)
+ return 0;
+ return __builtin_ctzll(x.s.high) + (1 + sizeof(di_int) * CHAR_BIT);
+ }
+ return __builtin_ctzll(x.s.low) + 1;
+}
+
+#endif // CRT_HAS_128BIT
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ffsti2.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixdfdi.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixdfdi.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixdfdi.c (revision 351984)
@@ -0,0 +1,44 @@
+//===-- fixdfdi.c - Implement __fixdfdi -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#define DOUBLE_PRECISION
+#include "fp_lib.h"
+
+#ifndef __SOFT_FP__
+// Support for systems that have hardware floating-point; can set the invalid
+// flag as a side-effect of computation.
+
+COMPILER_RT_ABI du_int __fixunsdfdi(double a);
+
+COMPILER_RT_ABI di_int __fixdfdi(double a) {
+ if (a < 0.0) {
+ return -__fixunsdfdi(-a);
+ }
+ return __fixunsdfdi(a);
+}
+
+#else
+// Support for systems that don't have hardware floating-point; there are no
+// flags to set, and we don't want to code-gen to an unknown soft-float
+// implementation.
+
+typedef di_int fixint_t;
+typedef du_int fixuint_t;
+#include "fp_fixint_impl.inc"
+
+COMPILER_RT_ABI di_int __fixdfdi(fp_t a) { return __fixint(a); }
+
+#endif
+
+#if defined(__ARM_EABI__)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+AEABI_RTABI di_int __aeabi_d2lz(fp_t a) { return __fixdfdi(a); }
+#else
+COMPILER_RT_ALIAS(__fixdfdi, __aeabi_d2lz)
+#endif
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixdfdi.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixdfsi.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixdfsi.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixdfsi.c (revision 351984)
@@ -0,0 +1,23 @@
+//===-- fixdfsi.c - Implement __fixdfsi -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#define DOUBLE_PRECISION
+#include "fp_lib.h"
+typedef si_int fixint_t;
+typedef su_int fixuint_t;
+#include "fp_fixint_impl.inc"
+
+COMPILER_RT_ABI si_int __fixdfsi(fp_t a) { return __fixint(a); }
+
+#if defined(__ARM_EABI__)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+AEABI_RTABI si_int __aeabi_d2iz(fp_t a) { return __fixdfsi(a); }
+#else
+COMPILER_RT_ALIAS(__fixdfsi, __aeabi_d2iz)
+#endif
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixdfsi.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixdfti.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixdfti.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixdfti.c (revision 351984)
@@ -0,0 +1,21 @@
+//===-- fixdfti.c - Implement __fixdfti -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+#ifdef CRT_HAS_128BIT
+#define DOUBLE_PRECISION
+#include "fp_lib.h"
+
+typedef ti_int fixint_t;
+typedef tu_int fixuint_t;
+#include "fp_fixint_impl.inc"
+
+COMPILER_RT_ABI ti_int __fixdfti(fp_t a) { return __fixint(a); }
+
+#endif // CRT_HAS_128BIT
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixdfti.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixsfdi.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixsfdi.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixsfdi.c (revision 351984)
@@ -0,0 +1,44 @@
+//===-- fixsfdi.c - Implement __fixsfdi -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#define SINGLE_PRECISION
+#include "fp_lib.h"
+
+#ifndef __SOFT_FP__
+// Support for systems that have hardware floating-point; can set the invalid
+// flag as a side-effect of computation.
+
+COMPILER_RT_ABI du_int __fixunssfdi(float a);
+
+COMPILER_RT_ABI di_int __fixsfdi(float a) {
+ if (a < 0.0f) {
+ return -__fixunssfdi(-a);
+ }
+ return __fixunssfdi(a);
+}
+
+#else
+// Support for systems that don't have hardware floating-point; there are no
+// flags to set, and we don't want to code-gen to an unknown soft-float
+// implementation.
+
+typedef di_int fixint_t;
+typedef du_int fixuint_t;
+#include "fp_fixint_impl.inc"
+
+COMPILER_RT_ABI di_int __fixsfdi(fp_t a) { return __fixint(a); }
+
+#endif
+
+#if defined(__ARM_EABI__)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+AEABI_RTABI di_int __aeabi_f2lz(fp_t a) { return __fixsfdi(a); }
+#else
+COMPILER_RT_ALIAS(__fixsfdi, __aeabi_f2lz)
+#endif
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixsfdi.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixsfsi.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixsfsi.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixsfsi.c (revision 351984)
@@ -0,0 +1,23 @@
+//===-- fixsfsi.c - Implement __fixsfsi -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#define SINGLE_PRECISION
+#include "fp_lib.h"
+typedef si_int fixint_t;
+typedef su_int fixuint_t;
+#include "fp_fixint_impl.inc"
+
+COMPILER_RT_ABI si_int __fixsfsi(fp_t a) { return __fixint(a); }
+
+#if defined(__ARM_EABI__)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+AEABI_RTABI si_int __aeabi_f2iz(fp_t a) { return __fixsfsi(a); }
+#else
+COMPILER_RT_ALIAS(__fixsfsi, __aeabi_f2iz)
+#endif
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixsfsi.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixsfti.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixsfti.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixsfti.c (revision 351984)
@@ -0,0 +1,21 @@
+//===-- fixsfti.c - Implement __fixsfti -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+#ifdef CRT_HAS_128BIT
+#define SINGLE_PRECISION
+#include "fp_lib.h"
+
+typedef ti_int fixint_t;
+typedef tu_int fixuint_t;
+#include "fp_fixint_impl.inc"
+
+COMPILER_RT_ABI ti_int __fixsfti(fp_t a) { return __fixint(a); }
+
+#endif // CRT_HAS_128BIT
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixsfti.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixtfdi.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixtfdi.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixtfdi.c (revision 351984)
@@ -0,0 +1,18 @@
+//===-- fixtfdi.c - Implement __fixtfdi -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#define QUAD_PRECISION
+#include "fp_lib.h"
+
+#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
+typedef di_int fixint_t;
+typedef du_int fixuint_t;
+#include "fp_fixint_impl.inc"
+
+COMPILER_RT_ABI di_int __fixtfdi(fp_t a) { return __fixint(a); }
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixtfdi.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixtfsi.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixtfsi.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixtfsi.c (revision 351984)
@@ -0,0 +1,18 @@
+//===-- fixtfsi.c - Implement __fixtfsi -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#define QUAD_PRECISION
+#include "fp_lib.h"
+
+#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
+typedef si_int fixint_t;
+typedef su_int fixuint_t;
+#include "fp_fixint_impl.inc"
+
+COMPILER_RT_ABI si_int __fixtfsi(fp_t a) { return __fixint(a); }
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixtfsi.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixtfti.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixtfti.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixtfti.c (revision 351984)
@@ -0,0 +1,18 @@
+//===-- fixtfti.c - Implement __fixtfti -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#define QUAD_PRECISION
+#include "fp_lib.h"
+
+#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
+typedef ti_int fixint_t;
+typedef tu_int fixuint_t;
+#include "fp_fixint_impl.inc"
+
+COMPILER_RT_ABI ti_int __fixtfti(fp_t a) { return __fixint(a); }
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixtfti.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixunsdfdi.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixunsdfdi.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixunsdfdi.c (revision 351984)
@@ -0,0 +1,42 @@
+//===-- fixunsdfdi.c - Implement __fixunsdfdi -----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#define DOUBLE_PRECISION
+#include "fp_lib.h"
+
+#ifndef __SOFT_FP__
+// Support for systems that have hardware floating-point; can set the invalid
+// flag as a side-effect of computation.
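+//
+// The conversion splits the value at the 2^32 boundary. Worked example: for
+// a == 4294967301.0 (i.e. 0x1p32 + 5), high == 1 and low == 5, so the result
+// is ((du_int)1 << 32) | 5.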
+
+COMPILER_RT_ABI du_int __fixunsdfdi(double a) {
+ if (a <= 0.0)
+ return 0;
+  su_int high = a / 4294967296.f;               // i.e. a / 0x1p32f
+  su_int low = a - (double)high * 4294967296.f; // i.e. a - high * 0x1p32f
+ return ((du_int)high << 32) | low;
+}
+
+#else
+// Support for systems that don't have hardware floating-point; there are no
+// flags to set, and we don't want to code-gen to an unknown soft-float
+// implementation.
+
+typedef du_int fixuint_t;
+#include "fp_fixuint_impl.inc"
+
+COMPILER_RT_ABI du_int __fixunsdfdi(fp_t a) { return __fixuint(a); }
+
+#endif
+
+#if defined(__ARM_EABI__)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+AEABI_RTABI du_int __aeabi_d2ulz(fp_t a) { return __fixunsdfdi(a); }
+#else
+COMPILER_RT_ALIAS(__fixunsdfdi, __aeabi_d2ulz)
+#endif
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixunsdfdi.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixunsdfsi.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixunsdfsi.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixunsdfsi.c (revision 351984)
@@ -0,0 +1,22 @@
+//===-- fixunsdfsi.c - Implement __fixunsdfsi -----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#define DOUBLE_PRECISION
+#include "fp_lib.h"
+typedef su_int fixuint_t;
+#include "fp_fixuint_impl.inc"
+
+COMPILER_RT_ABI su_int __fixunsdfsi(fp_t a) { return __fixuint(a); }
+
+#if defined(__ARM_EABI__)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+AEABI_RTABI su_int __aeabi_d2uiz(fp_t a) { return __fixunsdfsi(a); }
+#else
+COMPILER_RT_ALIAS(__fixunsdfsi, __aeabi_d2uiz)
+#endif
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixunsdfsi.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixunsdfti.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixunsdfti.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixunsdfti.c (revision 351984)
@@ -0,0 +1,18 @@
+//===-- fixunsdfti.c - Implement __fixunsdfti -----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+#ifdef CRT_HAS_128BIT
+#define DOUBLE_PRECISION
+#include "fp_lib.h"
+typedef tu_int fixuint_t;
+#include "fp_fixuint_impl.inc"
+
+COMPILER_RT_ABI tu_int __fixunsdfti(fp_t a) { return __fixuint(a); }
+#endif // CRT_HAS_128BIT
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixunsdfti.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixunssfdi.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixunssfdi.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixunssfdi.c (revision 351984)
@@ -0,0 +1,43 @@
+//===-- fixunssfdi.c - Implement __fixunssfdi -----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#define SINGLE_PRECISION
+#include "fp_lib.h"
+
+#ifndef __SOFT_FP__
+// Support for systems that have hardware floating-point; can set the invalid
+// flag as a side-effect of computation.
+
+COMPILER_RT_ABI du_int __fixunssfdi(float a) {
+ if (a <= 0.0f)
+ return 0;
+ double da = a;
+  su_int high = da / 4294967296.f;               // i.e. da / 0x1p32f
+  su_int low = da - (double)high * 4294967296.f; // i.e. da - high * 0x1p32f
+ return ((du_int)high << 32) | low;
+}
+
+#else
+// Support for systems that don't have hardware floating-point; there are no
+// flags to set, and we don't want to code-gen to an unknown soft-float
+// implementation.
+
+typedef du_int fixuint_t;
+#include "fp_fixuint_impl.inc"
+
+COMPILER_RT_ABI du_int __fixunssfdi(fp_t a) { return __fixuint(a); }
+
+#endif
+
+#if defined(__ARM_EABI__)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+AEABI_RTABI du_int __aeabi_f2ulz(fp_t a) { return __fixunssfdi(a); }
+#else
+COMPILER_RT_ALIAS(__fixunssfdi, __aeabi_f2ulz)
+#endif
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixunssfdi.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixunssfsi.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixunssfsi.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixunssfsi.c (revision 351984)
@@ -0,0 +1,26 @@
+//===-- fixunssfsi.c - Implement __fixunssfsi -----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __fixunssfsi for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#define SINGLE_PRECISION
+#include "fp_lib.h"
+typedef su_int fixuint_t;
+#include "fp_fixuint_impl.inc"
+
+COMPILER_RT_ABI su_int __fixunssfsi(fp_t a) { return __fixuint(a); }
+
+#if defined(__ARM_EABI__)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+AEABI_RTABI su_int __aeabi_f2uiz(fp_t a) { return __fixunssfsi(a); }
+#else
+COMPILER_RT_ALIAS(__fixunssfsi, __aeabi_f2uiz)
+#endif
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixunssfsi.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixunssfti.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixunssfti.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixunssfti.c (revision 351984)
@@ -0,0 +1,21 @@
+//===-- fixunssfti.c - Implement __fixunssfti -----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __fixunssfti for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#define SINGLE_PRECISION
+#include "fp_lib.h"
+
+#if defined(CRT_HAS_128BIT)
+typedef tu_int fixuint_t;
+#include "fp_fixuint_impl.inc"
+
+COMPILER_RT_ABI tu_int __fixunssfti(fp_t a) { return __fixuint(a); }
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixunssfti.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixunstfdi.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixunstfdi.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixunstfdi.c (revision 351984)
@@ -0,0 +1,17 @@
+//===-- fixunstfdi.c - Implement __fixunstfdi -----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#define QUAD_PRECISION
+#include "fp_lib.h"
+
+#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
+typedef du_int fixuint_t;
+#include "fp_fixuint_impl.inc"
+
+COMPILER_RT_ABI du_int __fixunstfdi(fp_t a) { return __fixuint(a); }
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixunstfdi.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixunstfsi.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixunstfsi.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixunstfsi.c (revision 351984)
@@ -0,0 +1,17 @@
+//===-- fixunstfsi.c - Implement __fixunstfsi -----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#define QUAD_PRECISION
+#include "fp_lib.h"
+
+#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
+typedef su_int fixuint_t;
+#include "fp_fixuint_impl.inc"
+
+COMPILER_RT_ABI su_int __fixunstfsi(fp_t a) { return __fixuint(a); }
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixunstfsi.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixunstfti.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixunstfti.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixunstfti.c (revision 351984)
@@ -0,0 +1,17 @@
+//===-- fixunstfti.c - Implement __fixunstfti -----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#define QUAD_PRECISION
+#include "fp_lib.h"
+
+#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
+typedef tu_int fixuint_t;
+#include "fp_fixuint_impl.inc"
+
+COMPILER_RT_ABI tu_int __fixunstfti(fp_t a) { return __fixuint(a); }
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixunstfti.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixunsxfdi.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixunsxfdi.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixunsxfdi.c (revision 351984)
@@ -0,0 +1,39 @@
+//===-- fixunsxfdi.c - Implement __fixunsxfdi -----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __fixunsxfdi for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#if !_ARCH_PPC
+
+#include "int_lib.h"
+
+// Returns: convert a to an unsigned long long, rounding toward zero.
+// Negative values all become zero.
+
+// Assumptions: long double is an Intel 80-bit floating point type padded with
+// 6 bytes; du_int is a 64-bit integral type; the value in long double is
+// representable in du_int or is negative (no range checking performed).
+
+// gggg gggg gggg gggg gggg gggg gggg gggg | gggg gggg gggg gggg seee eeee eeee
+// eeee | 1mmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm
+// mmmm mmmm mmmm
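+//
+// Example: for a == 1.0L the biased exponent field holds 16383, so e == 0 and
+// the explicit integer-bit mantissa (1 << 63) shifted right by 63 - 0 gives 1.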
+
+COMPILER_RT_ABI du_int __fixunsxfdi(long double a) {
+ long_double_bits fb;
+ fb.f = a;
+ int e = (fb.u.high.s.low & 0x00007FFF) - 16383;
+ if (e < 0 || (fb.u.high.s.low & 0x00008000))
+ return 0;
+ if ((unsigned)e > sizeof(du_int) * CHAR_BIT)
+ return ~(du_int)0;
+ return fb.u.low.all >> (63 - e);
+}
+
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixunsxfdi.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixunsxfsi.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixunsxfsi.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixunsxfsi.c (revision 351984)
@@ -0,0 +1,39 @@
+//===-- fixunsxfsi.c - Implement __fixunsxfsi -----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __fixunsxfsi for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#if !_ARCH_PPC
+
+#include "int_lib.h"
+
+// Returns: convert a to an unsigned int, rounding toward zero.
+// Negative values all become zero.
+
+// Assumptions: long double is an Intel 80-bit floating point type padded with
+// 6 bytes; su_int is a 32-bit integral type; the value in long double is
+// representable in su_int or is negative.
+
+// gggg gggg gggg gggg gggg gggg gggg gggg | gggg gggg gggg gggg seee eeee eeee
+// eeee | 1mmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm
+// mmmm mmmm mmmm
+
+COMPILER_RT_ABI su_int __fixunsxfsi(long double a) {
+ long_double_bits fb;
+ fb.f = a;
+ int e = (fb.u.high.s.low & 0x00007FFF) - 16383;
+ if (e < 0 || (fb.u.high.s.low & 0x00008000))
+ return 0;
+ if ((unsigned)e > sizeof(su_int) * CHAR_BIT)
+ return ~(su_int)0;
+ return fb.u.low.s.high >> (31 - e);
+}
+
+#endif // !_ARCH_PPC
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixunsxfsi.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixunsxfti.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixunsxfti.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixunsxfti.c (revision 351984)
@@ -0,0 +1,44 @@
+//===-- fixunsxfti.c - Implement __fixunsxfti -----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __fixunsxfti for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+#ifdef CRT_HAS_128BIT
+
+// Returns: convert a to an unsigned 128-bit integer (tu_int), rounding
+// toward zero.
+// Negative values all become zero.
+
+// Assumptions: long double is an Intel 80-bit floating point type padded with
+// 6 bytes; tu_int is a 128-bit integral type; the value in long double is
+// representable in tu_int or is negative.
+
+// gggg gggg gggg gggg gggg gggg gggg gggg | gggg gggg gggg gggg seee eeee eeee
+// eeee | 1mmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm
+// mmmm mmmm mmmm
+
+COMPILER_RT_ABI tu_int __fixunsxfti(long double a) {
+ long_double_bits fb;
+ fb.f = a;
+ int e = (fb.u.high.s.low & 0x00007FFF) - 16383;
+ if (e < 0 || (fb.u.high.s.low & 0x00008000))
+ return 0;
+ if ((unsigned)e > sizeof(tu_int) * CHAR_BIT)
+ return ~(tu_int)0;
+ tu_int r = fb.u.low.all;
+ if (e > 63)
+ r <<= (e - 63);
+ else
+ r >>= (63 - e);
+ return r;
+}
+
+#endif // CRT_HAS_128BIT
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixunsxfti.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixxfdi.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixxfdi.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixxfdi.c (revision 351984)
@@ -0,0 +1,43 @@
+//===-- fixxfdi.c - Implement __fixxfdi -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __fixxfdi for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#if !_ARCH_PPC
+
+#include "int_lib.h"
+
+// Returns: convert a to a signed long long, rounding toward zero.
+
+// Assumptions: long double is an Intel 80-bit floating point type padded with
+// 6 bytes; di_int is a 64-bit integral type; the value in long double is
+// representable in di_int (no range checking performed).
+
+// gggg gggg gggg gggg gggg gggg gggg gggg | gggg gggg gggg gggg seee eeee eeee
+// eeee | 1mmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm
+// mmmm mmmm mmmm
+
+COMPILER_RT_ABI di_int __fixxfdi(long double a) {
+ const di_int di_max = (di_int)((~(du_int)0) / 2);
+ const di_int di_min = -di_max - 1;
+ long_double_bits fb;
+ fb.f = a;
+ int e = (fb.u.high.s.low & 0x00007FFF) - 16383;
+ if (e < 0)
+ return 0;
+ if ((unsigned)e >= sizeof(di_int) * CHAR_BIT)
+ return a > 0 ? di_max : di_min;
+ di_int s = -(si_int)((fb.u.high.s.low & 0x00008000) >> 15);
+ di_int r = fb.u.low.all;
+ r = (du_int)r >> (63 - e);
+ return (r ^ s) - s;
+}
+
+#endif // !_ARCH_PPC
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixxfdi.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
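The `(r ^ s) - s` on the builtin's last line is the standard branchless conditional negation: s is 0 for non-negative inputs and all-ones for negative ones, so the expression yields r unchanged or its two's-complement negation. A minimal sketch of just that identity (the helper name is illustrative):

#include <stdint.h>
#include <stdio.h>

static int64_t apply_sign(int64_t r, int64_t s) {
  return (r ^ s) - s; // r if s == 0, -r if s == -1
}

int main(void) {
  printf("%lld %lld\n", (long long)apply_sign(42, 0),
         (long long)apply_sign(42, -1)); // prints: 42 -42
  return 0;
}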
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixxfti.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixxfti.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixxfti.c (revision 351984)
@@ -0,0 +1,46 @@
+//===-- fixxfti.c - Implement __fixxfti -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __fixxfti for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+#ifdef CRT_HAS_128BIT
+
+// Returns: convert a to a signed 128-bit integer (ti_int), rounding toward
+// zero.
+
+// Assumptions: long double is an Intel 80-bit floating point type padded with
+// 6 bytes; ti_int is a 128-bit integral type; the value in long double is
+// representable in ti_int (exponents past the width of ti_int saturate to
+// ti_max or ti_min).
+
+// gggg gggg gggg gggg gggg gggg gggg gggg | gggg gggg gggg gggg seee eeee eeee
+// eeee | 1mmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm
+// mmmm mmmm mmmm
+
+COMPILER_RT_ABI ti_int __fixxfti(long double a) {
+ const ti_int ti_max = (ti_int)((~(tu_int)0) / 2);
+ const ti_int ti_min = -ti_max - 1;
+ long_double_bits fb;
+ fb.f = a;
+ int e = (fb.u.high.s.low & 0x00007FFF) - 16383;
+ if (e < 0)
+ return 0;
+ ti_int s = -(si_int)((fb.u.high.s.low & 0x00008000) >> 15);
+ ti_int r = fb.u.low.all;
+ if ((unsigned)e >= sizeof(ti_int) * CHAR_BIT)
+ return a > 0 ? ti_max : ti_min;
+ if (e > 63)
+ r <<= (e - 63);
+ else
+ r >>= (63 - e);
+ return (r ^ s) - s;
+}
+
+#endif // CRT_HAS_128BIT
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fixxfti.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
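The ti_max/ti_min constants above are the usual portable construction of a two's-complement type's extremes: halving the all-ones unsigned value gives 2^(N-1) - 1, and the minimum is one less than its negation. A minimal 64-bit sketch of the same construction (illustrative only):

#include <assert.h>
#include <stdint.h>

int main(void) {
  const int64_t di_max = (int64_t)((~(uint64_t)0) / 2); // 2^63 - 1
  const int64_t di_min = -di_max - 1;                   // -2^63
  assert(di_max == INT64_MAX && di_min == INT64_MIN);
  return 0;
}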
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatdidf.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatdidf.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatdidf.c (revision 351984)
@@ -0,0 +1,103 @@
+//===-- floatdidf.c - Implement __floatdidf -------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __floatdidf for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+// Returns: convert a to a double, rounding toward even.
+
+// Assumptions: double is an IEEE 64-bit floating point type;
+// di_int is a 64-bit integral type.
+
+// seee eeee eeee mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm
+// mmmm
+
+#ifndef __SOFT_FP__
+// Support for systems that have hardware floating-point; we'll set the inexact
+// flag as a side-effect of this computation.
+
+COMPILER_RT_ABI double __floatdidf(di_int a) {
+ static const double twop52 = 4503599627370496.0; // 0x1.0p52
+ static const double twop32 = 4294967296.0; // 0x1.0p32
+
+ union {
+ int64_t x;
+ double d;
+ } low = {.d = twop52};
+
+ const double high = (int32_t)(a >> 32) * twop32;
+ low.x |= a & INT64_C(0x00000000ffffffff);
+
+ const double result = (high - twop52) + low.d;
+ return result;
+}
+
+#else
+// Support for systems that don't have hardware floating-point; there are no
+// flags to set, and we don't want to code-gen to an unknown soft-float
+// implementation.
+
+COMPILER_RT_ABI double __floatdidf(di_int a) {
+ if (a == 0)
+ return 0.0;
+ const unsigned N = sizeof(di_int) * CHAR_BIT;
+ const di_int s = a >> (N - 1);
+ a = (a ^ s) - s;
+ int sd = N - __builtin_clzll(a); // number of significant digits
+ int e = sd - 1; // exponent
+ if (sd > DBL_MANT_DIG) {
+ // start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
+ // finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
+ // 12345678901234567890123456
+ // 1 = msb 1 bit
+ // P = bit DBL_MANT_DIG-1 bits to the right of 1
+ // Q = bit DBL_MANT_DIG bits to the right of 1
+ // R = "or" of all bits to the right of Q
+ switch (sd) {
+ case DBL_MANT_DIG + 1:
+ a <<= 1;
+ break;
+ case DBL_MANT_DIG + 2:
+ break;
+ default:
+ a = ((du_int)a >> (sd - (DBL_MANT_DIG + 2))) |
+ ((a & ((du_int)(-1) >> ((N + DBL_MANT_DIG + 2) - sd))) != 0);
+ };
+ // finish:
+ a |= (a & 4) != 0; // Or P into R
+ ++a; // round - this step may add a significant bit
+ a >>= 2; // dump Q and R
+ // a is now rounded to DBL_MANT_DIG or DBL_MANT_DIG+1 bits
+ if (a & ((du_int)1 << DBL_MANT_DIG)) {
+ a >>= 1;
+ ++e;
+ }
+ // a is now rounded to DBL_MANT_DIG bits
+ } else {
+ a <<= (DBL_MANT_DIG - sd);
+ // a is now rounded to DBL_MANT_DIG bits
+ }
+ double_bits fb;
+ fb.u.s.high = ((su_int)s & 0x80000000) | // sign
+ ((e + 1023) << 20) | // exponent
+ ((su_int)(a >> 32) & 0x000FFFFF); // mantissa-high
+ fb.u.s.low = (su_int)a; // mantissa-low
+ return fb.f;
+}
+#endif
+
+#if defined(__ARM_EABI__)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+AEABI_RTABI double __aeabi_l2d(di_int a) { return __floatdidf(a); }
+#else
+COMPILER_RT_ALIAS(__floatdidf, __aeabi_l2d)
+#endif
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatdidf.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
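The hardware path above avoids bit-twiddling entirely: the low 32 bits ride in the payload of 2^52 (recovered exactly by subtracting 2^52), the high 32 bits convert exactly through int32, and the final addition performs the only rounding. A minimal sketch of the same trick using memcpy instead of the union, assuming a two's-complement target where the arithmetic right shift behaves as the builtin expects (the helper name is illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static double di_to_double(int64_t a) {
  const double twop52 = 4503599627370496.0; // 0x1.0p52
  const double twop32 = 4294967296.0;       // 0x1.0p32
  uint64_t bits;
  double low;
  memcpy(&bits, &twop52, 8);
  bits |= (uint64_t)a & 0xffffffffu; // low 32 bits into 2^52's mantissa
  memcpy(&low, &bits, 8);
  const double high = (int32_t)(a >> 32) * twop32; // exact conversion
  return (high - twop52) + low;      // the single rounding happens here
}

int main(void) {
  printf("%.1f\n", di_to_double(-123456789012345LL)); // -123456789012345.0
  return 0;
}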
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatdisf.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatdisf.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatdisf.c (revision 351984)
@@ -0,0 +1,75 @@
+//===-- floatdisf.c - Implement __floatdisf -------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __floatdisf for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+// Returns: convert a to a float, rounding toward even.
+
+// Assumptions: float is an IEEE 32-bit floating point type;
+// di_int is a 64-bit integral type.
+
+// seee eeee emmm mmmm mmmm mmmm mmmm mmmm
+
+#include "int_lib.h"
+
+COMPILER_RT_ABI float __floatdisf(di_int a) {
+ if (a == 0)
+ return 0.0F;
+ const unsigned N = sizeof(di_int) * CHAR_BIT;
+ const di_int s = a >> (N - 1);
+ a = (a ^ s) - s;
+ int sd = N - __builtin_clzll(a); // number of significant digits
+ int e = sd - 1; // exponent
+ if (sd > FLT_MANT_DIG) {
+ // start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
+ // finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
+ // 12345678901234567890123456
+ // 1 = msb 1 bit
+ // P = bit FLT_MANT_DIG-1 bits to the right of 1
+ // Q = bit FLT_MANT_DIG bits to the right of 1
+ // R = "or" of all bits to the right of Q
+ switch (sd) {
+ case FLT_MANT_DIG + 1:
+ a <<= 1;
+ break;
+ case FLT_MANT_DIG + 2:
+ break;
+ default:
+ a = ((du_int)a >> (sd - (FLT_MANT_DIG + 2))) |
+ ((a & ((du_int)(-1) >> ((N + FLT_MANT_DIG + 2) - sd))) != 0);
+ };
+ // finish:
+ a |= (a & 4) != 0; // Or P into R
+ ++a; // round - this step may add a significant bit
+ a >>= 2; // dump Q and R
+ // a is now rounded to FLT_MANT_DIG or FLT_MANT_DIG+1 bits
+ if (a & ((du_int)1 << FLT_MANT_DIG)) {
+ a >>= 1;
+ ++e;
+ }
+ // a is now rounded to FLT_MANT_DIG bits
+ } else {
+ a <<= (FLT_MANT_DIG - sd);
+ // a is now rounded to FLT_MANT_DIG bits
+ }
+ float_bits fb;
+ fb.u = ((su_int)s & 0x80000000) | // sign
+ ((e + 127) << 23) | // exponent
+ ((su_int)a & 0x007FFFFF); // mantissa
+ return fb.f;
+}
+
+#if defined(__ARM_EABI__)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+AEABI_RTABI float __aeabi_l2f(di_int a) { return __floatdisf(a); }
+#else
+COMPILER_RT_ALIAS(__floatdisf, __aeabi_l2f)
+#endif
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatdisf.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
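The soft-float path's P/Q/R dance is round-to-nearest-even done entirely in integers: keep the mantissa plus two extra bits, fold everything below into the sticky bit R, OR the mantissa's low bit P into R so that exact ties round toward even, then add one and drop the two extra bits. A scaled-down sketch rounding to a 4-bit significand (the function is illustrative, not from the library):

#include <stdint.h>
#include <stdio.h>

// Round x, which has sd significant bits, to a 4-bit significand,
// nearest-even, using the same steps as the builtin (MANT_DIG = 4).
static uint32_t round_to_4bits(uint32_t x, int sd) {
  if (sd <= 4)
    return x << (4 - sd);
  switch (sd) {
  case 5:              // 1mmPQ: append R = 0
    x <<= 1;
    break;
  case 6:              // 1mmPQR: already in place
    break;
  default:             // fold all bits below Q into the sticky bit R
    x = (x >> (sd - 6)) | ((x & ((1u << (sd - 6)) - 1)) != 0);
  }
  x |= (x & 4) != 0;   // or P into R: exact ties will round to even
  ++x;                 // round
  x >>= 2;             // drop Q and R
  if (x & (1u << 4))   // rounding carried out of 4 bits: renormalize
    x >>= 1;           // (the builtin also increments the exponent here)
  return x;
}

int main(void) {
  // 230 = 0b1110_0110: the discarded 0b0110 is below half, so the 4-bit
  // significand stays 0b1110; 248 = 0b1111_1000 is an exact tie on an odd
  // significand, rounds up, and the carry renormalizes to 0b1000.
  printf("%u %u\n", round_to_4bits(230u, 8), round_to_4bits(248u, 8));
  return 0;
}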
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatditf.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatditf.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatditf.c (revision 351984)
@@ -0,0 +1,49 @@
+//===-- lib/floatditf.c - integer -> quad-precision conversion ----*- C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements di_int to quad-precision conversion for the
+// compiler-rt library in the IEEE-754 default round-to-nearest, ties-to-even
+// mode.
+//
+//===----------------------------------------------------------------------===//
+
+#define QUAD_PRECISION
+#include "fp_lib.h"
+
+#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
+COMPILER_RT_ABI fp_t __floatditf(di_int a) {
+
+ const int aWidth = sizeof a * CHAR_BIT;
+
+ // Handle zero as a special case to protect clz
+ if (a == 0)
+ return fromRep(0);
+
+ // All other cases begin by extracting the sign and absolute value of a
+ rep_t sign = 0;
+ du_int aAbs = (du_int)a;
+ if (a < 0) {
+ sign = signBit;
+ aAbs = ~(du_int)a + 1U;
+ }
+
+ // Exponent of (fp_t)a is the width of abs(a).
+ const int exponent = (aWidth - 1) - __builtin_clzll(aAbs);
+ rep_t result;
+
+ // Shift a into the significand field, rounding if it is a right-shift
+ const int shift = significandBits - exponent;
+ result = (rep_t)aAbs << shift ^ implicitBit;
+
+ // Insert the exponent
+ result += (rep_t)(exponent + exponentBias) << significandBits;
+ // Insert the sign bit and return
+ return fromRep(result | sign);
+}
+
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatditf.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
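One detail worth calling out above: the absolute value is formed as `~(du_int)a + 1U` in the unsigned type because negating the most negative di_int would be signed overflow. A minimal sketch of the difference (illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void) {
  int64_t a = INT64_MIN;
  // -a would be signed overflow (undefined behavior); the unsigned
  // complement-and-add wraps cleanly to 2^63.
  uint64_t aAbs = ~(uint64_t)a + 1U;
  printf("%llu\n", (unsigned long long)aAbs); // 9223372036854775808
  return 0;
}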
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatdixf.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatdixf.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatdixf.c (revision 351984)
@@ -0,0 +1,41 @@
+//===-- floatdixf.c - Implement __floatdixf -------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __floatdixf for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#if !_ARCH_PPC
+
+#include "int_lib.h"
+
+// Returns: convert a to a long double, rounding toward even.
+
+// Assumptions: long double is an IEEE 80-bit floating point type padded to
+// 128 bits; di_int is a 64-bit integral type.
+
+// gggg gggg gggg gggg gggg gggg gggg gggg | gggg gggg gggg gggg seee eeee eeee
+// eeee | 1mmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm
+// mmmm mmmm mmmm
+
+COMPILER_RT_ABI long double __floatdixf(di_int a) {
+ if (a == 0)
+ return 0.0;
+ const unsigned N = sizeof(di_int) * CHAR_BIT;
+ const di_int s = a >> (N - 1);
+ a = (a ^ s) - s;
+ int clz = __builtin_clzll(a);
+ int e = (N - 1) - clz; // exponent
+ long_double_bits fb;
+ fb.u.high.s.low = ((su_int)s & 0x00008000) | // sign
+ (e + 16383); // exponent
+ fb.u.low.all = a << clz; // mantissa
+ return fb.f;
+}
+
+#endif // !_ARCH_PPC
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatdixf.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
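Because the 80-bit format stores its integer bit explicitly and its significand is a full 64 bits wide, every 64-bit integer converts without rounding: the significand is simply the absolute value shifted so its leading 1 lands in bit 63. A minimal sketch of that normalization step (illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void) {
  uint64_t a = 40;              // 0b101000
  int clz = __builtin_clzll(a); // 58 leading zeros
  int e = 63 - clz;             // exponent: 5
  uint64_t mant = a << clz;     // leading 1 now at bit 63
  printf("e=%d mant=%#llx\n", e, (unsigned long long)mant);
  // mant = 0xa000000000000000 encodes 1.01 binary, and 1.25 * 2^5 == 40.
  return 0;
}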
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatsidf.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatsidf.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatsidf.c (revision 351984)
@@ -0,0 +1,57 @@
+//===-- lib/floatsidf.c - integer -> double-precision conversion --*- C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements integer to double-precision conversion for the
+// compiler-rt library in the IEEE-754 default round-to-nearest, ties-to-even
+// mode.
+//
+//===----------------------------------------------------------------------===//
+
+#define DOUBLE_PRECISION
+#include "fp_lib.h"
+
+#include "int_lib.h"
+
+COMPILER_RT_ABI fp_t __floatsidf(int a) {
+
+ const int aWidth = sizeof a * CHAR_BIT;
+
+ // Handle zero as a special case to protect clz
+ if (a == 0)
+ return fromRep(0);
+
+ // All other cases begin by extracting the sign and absolute value of a
+ rep_t sign = 0;
+ if (a < 0) {
+ sign = signBit;
+ a = -a;
+ }
+
+ // Exponent of (fp_t)a is the width of abs(a).
+ const int exponent = (aWidth - 1) - __builtin_clz(a);
+ rep_t result;
+
+ // Shift a into the significand field and clear the implicit bit. Extra
+ // cast to unsigned int is necessary to get the correct behavior for
+ // the input INT_MIN.
+ const int shift = significandBits - exponent;
+ result = (rep_t)(unsigned int)a << shift ^ implicitBit;
+
+ // Insert the exponent
+ result += (rep_t)(exponent + exponentBias) << significandBits;
+ // Insert the sign bit and return
+ return fromRep(result | sign);
+}
+
+#if defined(__ARM_EABI__)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+AEABI_RTABI fp_t __aeabi_i2d(int a) { return __floatsidf(a); }
+#else
+COMPILER_RT_ALIAS(__floatsidf, __aeabi_i2d)
+#endif
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatsidf.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
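The "extra cast to unsigned int" comment above deserves a concrete illustration: after the negation wraps INT_MIN to 0x80000000 on two's-complement targets, widening through the signed type would sign-extend and smear ones across the high half of rep_t, while the unsigned cast zero-extends. A minimal sketch (assumes a two's-complement target):

#include <stdint.h>
#include <stdio.h>

int main(void) {
  int a = INT32_MIN; // the wrapped result of negating INT_MIN
  printf("%#llx\n", (unsigned long long)(uint64_t)a);               // 0xffffffff80000000
  printf("%#llx\n", (unsigned long long)(uint64_t)(unsigned int)a); // 0x80000000
  return 0;
}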
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatsisf.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatsisf.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatsisf.c (revision 351984)
@@ -0,0 +1,65 @@
+//===-- lib/floatsisf.c - integer -> single-precision conversion --*- C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements integer to single-precision conversion for the
+// compiler-rt library in the IEEE-754 default round-to-nearest, ties-to-even
+// mode.
+//
+//===----------------------------------------------------------------------===//
+
+#define SINGLE_PRECISION
+#include "fp_lib.h"
+
+#include "int_lib.h"
+
+COMPILER_RT_ABI fp_t __floatsisf(int a) {
+
+ const int aWidth = sizeof a * CHAR_BIT;
+
+ // Handle zero as a special case to protect clz
+ if (a == 0)
+ return fromRep(0);
+
+ // All other cases begin by extracting the sign and absolute value of a
+ rep_t sign = 0;
+ if (a < 0) {
+ sign = signBit;
+ a = -a;
+ }
+
+ // Exponent of (fp_t)a is the width of abs(a).
+ const int exponent = (aWidth - 1) - __builtin_clz(a);
+ rep_t result;
+
+ // Shift a into the significand field, rounding if it is a right-shift
+ if (exponent <= significandBits) {
+ const int shift = significandBits - exponent;
+ result = (rep_t)a << shift ^ implicitBit;
+ } else {
+ const int shift = exponent - significandBits;
+ result = (rep_t)a >> shift ^ implicitBit;
+ rep_t round = (rep_t)a << (typeWidth - shift);
+ if (round > signBit)
+ result++;
+ if (round == signBit)
+ result += result & 1;
+ }
+
+ // Insert the exponent
+ result += (rep_t)(exponent + exponentBias) << significandBits;
+ // Insert the sign bit and return
+ return fromRep(result | sign);
+}
+
+#if defined(__ARM_EABI__)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+AEABI_RTABI fp_t __aeabi_i2f(int a) { return __floatsisf(a); }
+#else
+COMPILER_RT_ALIAS(__floatsisf, __aeabi_i2f)
+#endif
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatsisf.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
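When the integer has more bits than the significand can hold, the builtin above left-aligns the discarded bits into a full word and compares them against the half-way pattern: strictly above half rounds up, exactly half rounds to even. A minimal sketch of that comparison with a 32-bit rep_t (the helper is illustrative; shift must be in 1..31):

#include <stdint.h>
#include <stdio.h>

static uint32_t shift_round(uint32_t a, int shift) {
  uint32_t result = a >> shift;
  uint32_t round = a << (32 - shift); // discarded bits, left-aligned
  if (round > 0x80000000u)
    result++;                         // above half: round up
  if (round == 0x80000000u)
    result += result & 1;             // exact tie: round to even
  return result;
}

int main(void) {
  printf("%u %u %u\n",
         shift_round(0x29u, 4),  // 41/16 = 2.5625 -> 3
         shift_round(0x28u, 4),  // 40/16 = 2.5, tie -> 2 (even)
         shift_round(0x38u, 4)); // 56/16 = 3.5, tie -> 4 (even)
  return 0;
}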
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatsitf.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatsitf.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatsitf.c (revision 351984)
@@ -0,0 +1,49 @@
+//===-- lib/floatsitf.c - integer -> quad-precision conversion ----*- C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements integer to quad-precision conversion for the
+// compiler-rt library in the IEEE-754 default round-to-nearest, ties-to-even
+// mode.
+//
+//===----------------------------------------------------------------------===//
+
+#define QUAD_PRECISION
+#include "fp_lib.h"
+
+#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
+COMPILER_RT_ABI fp_t __floatsitf(int a) {
+
+ const int aWidth = sizeof a * CHAR_BIT;
+
+ // Handle zero as a special case to protect clz
+ if (a == 0)
+ return fromRep(0);
+
+ // All other cases begin by extracting the sign and absolute value of a
+ rep_t sign = 0;
+ unsigned aAbs = (unsigned)a;
+ if (a < 0) {
+ sign = signBit;
+ aAbs = ~(unsigned)a + 1U;
+ }
+
+ // Exponent of (fp_t)a is the width of abs(a).
+ const int exponent = (aWidth - 1) - __builtin_clz(aAbs);
+ rep_t result;
+
+ // Shift a into the significand field and clear the implicit bit.
+ const int shift = significandBits - exponent;
+ result = (rep_t)aAbs << shift ^ implicitBit;
+
+ // Insert the exponent
+ result += (rep_t)(exponent + exponentBias) << significandBits;
+ // Insert the sign bit and return
+ return fromRep(result | sign);
+}
+
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatsitf.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floattidf.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floattidf.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floattidf.c (revision 351984)
@@ -0,0 +1,73 @@
+//===-- floattidf.c - Implement __floattidf -------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __floattidf for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+#ifdef CRT_HAS_128BIT
+
+// Returns: convert a to a double, rounding toward even.
+
+// Assumptions: double is an IEEE 64-bit floating point type;
+// ti_int is a 128-bit integral type.
+
+// seee eeee eeee mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm
+// mmmm
+
+COMPILER_RT_ABI double __floattidf(ti_int a) {
+ if (a == 0)
+ return 0.0;
+ const unsigned N = sizeof(ti_int) * CHAR_BIT;
+ const ti_int s = a >> (N - 1);
+ a = (a ^ s) - s;
+ int sd = N - __clzti2(a); // number of significant digits
+ int e = sd - 1; // exponent
+ if (sd > DBL_MANT_DIG) {
+ // start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
+ // finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
+ // 12345678901234567890123456
+ // 1 = msb 1 bit
+ // P = bit DBL_MANT_DIG-1 bits to the right of 1
+ // Q = bit DBL_MANT_DIG bits to the right of 1
+ // R = "or" of all bits to the right of Q
+ switch (sd) {
+ case DBL_MANT_DIG + 1:
+ a <<= 1;
+ break;
+ case DBL_MANT_DIG + 2:
+ break;
+ default:
+ a = ((tu_int)a >> (sd - (DBL_MANT_DIG + 2))) |
+ ((a & ((tu_int)(-1) >> ((N + DBL_MANT_DIG + 2) - sd))) != 0);
+ };
+ // finish:
+ a |= (a & 4) != 0; // Or P into R
+ ++a; // round - this step may add a significant bit
+ a >>= 2; // dump Q and R
+ // a is now rounded to DBL_MANT_DIG or DBL_MANT_DIG+1 bits
+ if (a & ((tu_int)1 << DBL_MANT_DIG)) {
+ a >>= 1;
+ ++e;
+ }
+ // a is now rounded to DBL_MANT_DIG bits
+ } else {
+ a <<= (DBL_MANT_DIG - sd);
+ // a is now rounded to DBL_MANT_DIG bits
+ }
+ double_bits fb;
+ fb.u.s.high = ((su_int)s & 0x80000000) | // sign
+ ((e + 1023) << 20) | // exponent
+ ((su_int)(a >> 32) & 0x000FFFFF); // mantissa-high
+ fb.u.s.low = (su_int)a; // mantissa-low
+ return fb.f;
+}
+
+#endif // CRT_HAS_128BIT
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floattidf.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
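__clzti2, used above because __builtin_clzll only covers 64 bits, is compiler-rt's 128-bit count-leading-zeros. A minimal equivalent, assuming a compiler with __int128 support (the caller must guarantee a nonzero argument, exactly as the a == 0 early-out above does):

#include <stdint.h>

static int clz128(unsigned __int128 x) {
  // Undefined for x == 0, matching __builtin_clzll's contract.
  uint64_t hi = (uint64_t)(x >> 64);
  return hi ? __builtin_clzll(hi) : 64 + __builtin_clzll((uint64_t)x);
}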
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floattisf.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floattisf.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floattisf.c (revision 351984)
@@ -0,0 +1,71 @@
+//===-- floattisf.c - Implement __floattisf -------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __floattisf for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+#ifdef CRT_HAS_128BIT
+
+// Returns: convert a to a float, rounding toward even.
+
+// Assumptions: float is an IEEE 32-bit floating point type;
+// ti_int is a 128-bit integral type.
+
+// seee eeee emmm mmmm mmmm mmmm mmmm mmmm
+
+COMPILER_RT_ABI float __floattisf(ti_int a) {
+ if (a == 0)
+ return 0.0F;
+ const unsigned N = sizeof(ti_int) * CHAR_BIT;
+ const ti_int s = a >> (N - 1);
+ a = (a ^ s) - s;
+ int sd = N - __clzti2(a); // number of significant digits
+ int e = sd - 1; // exponent
+ if (sd > FLT_MANT_DIG) {
+ // start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
+ // finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
+ // 12345678901234567890123456
+ // 1 = msb 1 bit
+ // P = bit FLT_MANT_DIG-1 bits to the right of 1
+ // Q = bit FLT_MANT_DIG bits to the right of 1
+ // R = "or" of all bits to the right of Q
+ switch (sd) {
+ case FLT_MANT_DIG + 1:
+ a <<= 1;
+ break;
+ case FLT_MANT_DIG + 2:
+ break;
+ default:
+ a = ((tu_int)a >> (sd - (FLT_MANT_DIG + 2))) |
+ ((a & ((tu_int)(-1) >> ((N + FLT_MANT_DIG + 2) - sd))) != 0);
+ };
+ // finish:
+ a |= (a & 4) != 0; // Or P into R
+ ++a; // round - this step may add a significant bit
+ a >>= 2; // dump Q and R
+ // a is now rounded to FLT_MANT_DIG or FLT_MANT_DIG+1 bits
+ if (a & ((tu_int)1 << FLT_MANT_DIG)) {
+ a >>= 1;
+ ++e;
+ }
+ // a is now rounded to FLT_MANT_DIG bits
+ } else {
+ a <<= (FLT_MANT_DIG - sd);
+ // a is now rounded to FLT_MANT_DIG bits
+ }
+ float_bits fb;
+ fb.u = ((su_int)s & 0x80000000) | // sign
+ ((e + 127) << 23) | // exponent
+ ((su_int)a & 0x007FFFFF); // mantissa
+ return fb.f;
+}
+
+#endif // CRT_HAS_128BIT
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floattisf.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floattitf.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floattitf.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floattitf.c (revision 351984)
@@ -0,0 +1,78 @@
+//===-- lib/floattitf.c - int128 -> quad-precision conversion -----*- C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements ti_int to quad-precision conversion for the
+// compiler-rt library in the IEEE-754 default round-to-nearest, ties-to-even
+// mode.
+//
+//===----------------------------------------------------------------------===//
+
+#define QUAD_PRECISION
+#include "fp_lib.h"
+#include "int_lib.h"
+
+// Returns: convert a ti_int to a fp_t, rounding toward even.
+
+// Assumptions: fp_t is an IEEE 128-bit floating point type;
+// ti_int is a 128-bit integral type.
+
+// seee eeee eeee eeee mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm
+// mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm
+// mmmm mmmm mmmm
+
+#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
+COMPILER_RT_ABI fp_t __floattitf(ti_int a) {
+ if (a == 0)
+ return 0.0;
+ const unsigned N = sizeof(ti_int) * CHAR_BIT;
+ const ti_int s = a >> (N - 1);
+ a = (a ^ s) - s;
+ int sd = N - __clzti2(a); // number of significant digits
+ int e = sd - 1; // exponent
+ if (sd > LDBL_MANT_DIG) {
+ // start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
+ // finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
+ // 12345678901234567890123456
+ // 1 = msb 1 bit
+ // P = bit LDBL_MANT_DIG-1 bits to the right of 1
+ // Q = bit LDBL_MANT_DIG bits to the right of 1
+ // R = "or" of all bits to the right of Q
+ switch (sd) {
+ case LDBL_MANT_DIG + 1:
+ a <<= 1;
+ break;
+ case LDBL_MANT_DIG + 2:
+ break;
+ default:
+ a = ((tu_int)a >> (sd - (LDBL_MANT_DIG + 2))) |
+ ((a & ((tu_int)(-1) >> ((N + LDBL_MANT_DIG + 2) - sd))) != 0);
+ };
+ // finish:
+ a |= (a & 4) != 0; // Or P into R
+ ++a; // round - this step may add a significant bit
+ a >>= 2; // dump Q and R
+ // a is now rounded to LDBL_MANT_DIG or LDBL_MANT_DIG+1 bits
+ if (a & ((tu_int)1 << LDBL_MANT_DIG)) {
+ a >>= 1;
+ ++e;
+ }
+ // a is now rounded to LDBL_MANT_DIG bits
+ } else {
+ a <<= (LDBL_MANT_DIG - sd);
+ // a is now rounded to LDBL_MANT_DIG bits
+ }
+
+ long_double_bits fb;
+ fb.u.high.all = (s & 0x8000000000000000LL) // sign
+ | (du_int)(e + 16383) << 48 // exponent
+ | ((a >> 64) & 0x0000ffffffffffffLL); // significand
+ fb.u.low.all = (du_int)(a);
+ return fb.f;
+}
+
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floattitf.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
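The final packing above splits IEEE binary128's 112 explicit significand bits 48/64 across the two 64-bit halves, with the 15-bit biased exponent at bit 48 of the high half; the mask drops the implicit leading 1. A minimal sketch of the same field layout, assuming __int128 support (the values are illustrative):

#include <stdint.h>
#include <stdio.h>

int main(void) {
  // A 113-bit significand (implicit 1 at bit 112) with exponent 5.
  unsigned __int128 sig = ((unsigned __int128)1 << 112) | 12345;
  int e = 5;
  uint64_t high = ((uint64_t)(e + 16383) << 48) |                  // exponent
                  ((uint64_t)(sig >> 64) & 0x0000ffffffffffffULL); // top 48 bits,
                                                                   // implicit 1 masked off
  uint64_t low = (uint64_t)sig;                                    // low 64 bits
  printf("high=%#llx low=%#llx\n",
         (unsigned long long)high, (unsigned long long)low); // 0x4004... 0x3039
  return 0;
}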
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floattixf.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floattixf.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floattixf.c (revision 351984)
@@ -0,0 +1,73 @@
+//===-- floattixf.c - Implement __floattixf -------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __floattixf for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+#ifdef CRT_HAS_128BIT
+
+// Returns: convert a to a long double, rounding toward even.
+
+// Assumptions: long double is an IEEE 80-bit floating point type padded to
+// 128 bits; ti_int is a 128-bit integral type.
+
+// gggg gggg gggg gggg gggg gggg gggg gggg | gggg gggg gggg gggg seee eeee eeee
+// eeee | 1mmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm
+// mmmm mmmm mmmm
+
+COMPILER_RT_ABI long double __floattixf(ti_int a) {
+ if (a == 0)
+ return 0.0;
+ const unsigned N = sizeof(ti_int) * CHAR_BIT;
+ const ti_int s = a >> (N - 1);
+ a = (a ^ s) - s;
+ int sd = N - __clzti2(a); // number of significant digits
+ int e = sd - 1; // exponent
+ if (sd > LDBL_MANT_DIG) {
+ // start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
+ // finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
+ // 12345678901234567890123456
+ // 1 = msb 1 bit
+ // P = bit LDBL_MANT_DIG-1 bits to the right of 1
+ // Q = bit LDBL_MANT_DIG bits to the right of 1
+ // R = "or" of all bits to the right of Q
+ switch (sd) {
+ case LDBL_MANT_DIG + 1:
+ a <<= 1;
+ break;
+ case LDBL_MANT_DIG + 2:
+ break;
+ default:
+ a = ((tu_int)a >> (sd - (LDBL_MANT_DIG + 2))) |
+ ((a & ((tu_int)(-1) >> ((N + LDBL_MANT_DIG + 2) - sd))) != 0);
+ };
+ // finish:
+ a |= (a & 4) != 0; // Or P into R
+ ++a; // round - this step may add a significant bit
+ a >>= 2; // dump Q and R
+ // a is now rounded to LDBL_MANT_DIG or LDBL_MANT_DIG+1 bits
+ if (a & ((tu_int)1 << LDBL_MANT_DIG)) {
+ a >>= 1;
+ ++e;
+ }
+ // a is now rounded to LDBL_MANT_DIG bits
+ } else {
+ a <<= (LDBL_MANT_DIG - sd);
+ // a is now rounded to LDBL_MANT_DIG bits
+ }
+ long_double_bits fb;
+ fb.u.high.s.low = ((su_int)s & 0x8000) | // sign
+ (e + 16383); // exponent
+ fb.u.low.all = (du_int)a; // mantissa
+ return fb.f;
+}
+
+#endif // CRT_HAS_128BIT
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floattixf.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatundidf.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatundidf.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatundidf.c (revision 351984)
@@ -0,0 +1,106 @@
+//===-- floatundidf.c - Implement __floatundidf ---------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __floatundidf for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+// Returns: convert a to a double, rounding toward even.
+
+// Assumptions: double is an IEEE 64-bit floating point type;
+// du_int is a 64-bit integral type.
+
+// seee eeee eeee mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm
+// mmmm
+
+#include "int_lib.h"
+
+#ifndef __SOFT_FP__
+// Support for systems that have hardware floating-point; we'll set the inexact
+// flag as a side-effect of this computation.
+
+COMPILER_RT_ABI double __floatundidf(du_int a) {
+ static const double twop52 = 4503599627370496.0; // 0x1.0p52
+ static const double twop84 = 19342813113834066795298816.0; // 0x1.0p84
+ static const double twop84_plus_twop52 =
+ 19342813118337666422669312.0; // 0x1.00000001p84
+
+ union {
+ uint64_t x;
+ double d;
+ } high = {.d = twop84};
+ union {
+ uint64_t x;
+ double d;
+ } low = {.d = twop52};
+
+ high.x |= a >> 32;
+ low.x |= a & UINT64_C(0x00000000ffffffff);
+
+ const double result = (high.d - twop84_plus_twop52) + low.d;
+ return result;
+}
+
+#else
+// Support for systems that don't have hardware floating-point; there are no
+// flags to set, and we don't want to code-gen to an unknown soft-float
+// implementation.
+
+COMPILER_RT_ABI double __floatundidf(du_int a) {
+ if (a == 0)
+ return 0.0;
+ const unsigned N = sizeof(du_int) * CHAR_BIT;
+ int sd = N - __builtin_clzll(a); // number of significant digits
+ int e = sd - 1; // exponent
+ if (sd > DBL_MANT_DIG) {
+ // start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
+ // finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
+ // 12345678901234567890123456
+ // 1 = msb 1 bit
+ // P = bit DBL_MANT_DIG-1 bits to the right of 1
+ // Q = bit DBL_MANT_DIG bits to the right of 1
+ // R = "or" of all bits to the right of Q
+ switch (sd) {
+ case DBL_MANT_DIG + 1:
+ a <<= 1;
+ break;
+ case DBL_MANT_DIG + 2:
+ break;
+ default:
+ a = (a >> (sd - (DBL_MANT_DIG + 2))) |
+ ((a & ((du_int)(-1) >> ((N + DBL_MANT_DIG + 2) - sd))) != 0);
+ };
+ // finish:
+ a |= (a & 4) != 0; // Or P into R
+ ++a; // round - this step may add a significant bit
+ a >>= 2; // dump Q and R
+ // a is now rounded to DBL_MANT_DIG or DBL_MANT_DIG+1 bits
+ if (a & ((du_int)1 << DBL_MANT_DIG)) {
+ a >>= 1;
+ ++e;
+ }
+ // a is now rounded to DBL_MANT_DIG bits
+ } else {
+ a <<= (DBL_MANT_DIG - sd);
+ // a is now rounded to DBL_MANT_DIG bits
+ }
+ double_bits fb;
+ fb.u.s.high = ((e + 1023) << 20) | // exponent
+ ((su_int)(a >> 32) & 0x000FFFFF); // mantissa-high
+ fb.u.s.low = (su_int)a; // mantissa-low
+ return fb.f;
+}
+#endif
+
+#if defined(__ARM_EABI__)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+AEABI_RTABI double __aeabi_ul2d(du_int a) { return __floatundidf(a); }
+#else
+COMPILER_RT_ALIAS(__floatundidf, __aeabi_ul2d)
+#endif
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatundidf.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
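The hardware path here extends the 2^52 trick to a full unsigned 64-bit input: the high 32 bits ride in the payload of 2^84 and the low 32 bits in the payload of 2^52, so one subtraction of (2^84 + 2^52) cancels both offsets exactly and the final addition performs the only rounding. A minimal sketch using memcpy instead of the unions (the helper name is illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static double du_to_double(uint64_t a) {
  const double twop84 = 19342813113834066795298816.0; // 0x1.0p84
  const double twop52 = 4503599627370496.0;           // 0x1.0p52
  const double both = 19342813118337666422669312.0;   // 2^84 + 2^52
  uint64_t hi_bits, lo_bits;
  double hi, lo;
  memcpy(&hi_bits, &twop84, 8);
  memcpy(&lo_bits, &twop52, 8);
  hi_bits |= a >> 32;          // high word into 2^84's mantissa (exact)
  lo_bits |= a & 0xffffffffu;  // low word into 2^52's mantissa (exact)
  memcpy(&hi, &hi_bits, 8);
  memcpy(&lo, &lo_bits, 8);
  return (hi - both) + lo;     // subtraction is exact; one rounding
}

int main(void) {
  // 2^64 - 1 is not representable in double and rounds up to 2^64.
  printf("%.1f\n", du_to_double(~0ull)); // 18446744073709551616.0
  return 0;
}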
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatundisf.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatundisf.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatundisf.c (revision 351984)
@@ -0,0 +1,72 @@
+//===-- floatundisf.c - Implement __floatundisf ---------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __floatundisf for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+// Returns: convert a to a float, rounding toward even.
+
+// Assumptions: float is an IEEE 32-bit floating point type;
+// du_int is a 64-bit integral type.
+
+// seee eeee emmm mmmm mmmm mmmm mmmm mmmm
+
+#include "int_lib.h"
+
+COMPILER_RT_ABI float __floatundisf(du_int a) {
+ if (a == 0)
+ return 0.0F;
+ const unsigned N = sizeof(du_int) * CHAR_BIT;
+ int sd = N - __builtin_clzll(a); // number of significant digits
+ int e = sd - 1; // exponent
+ if (sd > FLT_MANT_DIG) {
+ // start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
+ // finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
+ // 12345678901234567890123456
+ // 1 = msb 1 bit
+ // P = bit FLT_MANT_DIG-1 bits to the right of 1
+ // Q = bit FLT_MANT_DIG bits to the right of 1
+ // R = "or" of all bits to the right of Q
+ switch (sd) {
+ case FLT_MANT_DIG + 1:
+ a <<= 1;
+ break;
+ case FLT_MANT_DIG + 2:
+ break;
+ default:
+ a = (a >> (sd - (FLT_MANT_DIG + 2))) |
+ ((a & ((du_int)(-1) >> ((N + FLT_MANT_DIG + 2) - sd))) != 0);
+ };
+ // finish:
+ a |= (a & 4) != 0; // Or P into R
+ ++a; // round - this step may add a significant bit
+ a >>= 2; // dump Q and R
+ // a is now rounded to FLT_MANT_DIG or FLT_MANT_DIG+1 bits
+ if (a & ((du_int)1 << FLT_MANT_DIG)) {
+ a >>= 1;
+ ++e;
+ }
+ // a is now rounded to FLT_MANT_DIG bits
+ } else {
+ a <<= (FLT_MANT_DIG - sd);
+ // a is now rounded to FLT_MANT_DIG bits
+ }
+ float_bits fb;
+ fb.u = ((e + 127) << 23) | // exponent
+ ((su_int)a & 0x007FFFFF); // mantissa
+ return fb.f;
+}
+
+#if defined(__ARM_EABI__)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+AEABI_RTABI float __aeabi_ul2f(du_int a) { return __floatundisf(a); }
+#else
+COMPILER_RT_ALIAS(__floatundisf, __aeabi_ul2f)
+#endif
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatundisf.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatunditf.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatunditf.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatunditf.c (revision 351984)
@@ -0,0 +1,40 @@
+//===-- lib/floatunditf.c - uint -> quad-precision conversion -----*- C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements du_int to quad-precision conversion for the
+// compiler-rt library in the IEEE-754 default round-to-nearest, ties-to-even
+// mode.
+//
+//===----------------------------------------------------------------------===//
+
+#define QUAD_PRECISION
+#include "fp_lib.h"
+
+#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
+COMPILER_RT_ABI fp_t __floatunditf(du_int a) {
+
+ const int aWidth = sizeof a * CHAR_BIT;
+
+ // Handle zero as a special case to protect clz
+ if (a == 0)
+ return fromRep(0);
+
+ // Exponent of (fp_t)a is the width of abs(a).
+ const int exponent = (aWidth - 1) - __builtin_clzll(a);
+ rep_t result;
+
+ // Shift a into the significand field and clear the implicit bit.
+ const int shift = significandBits - exponent;
+ result = (rep_t)a << shift ^ implicitBit;
+
+ // Insert the exponent
+ result += (rep_t)(exponent + exponentBias) << significandBits;
+ return fromRep(result);
+}
+
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatunditf.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatundixf.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatundixf.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatundixf.c (revision 351984)
@@ -0,0 +1,37 @@
+//===-- floatundixf.c - Implement __floatundixf ---------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __floatundixf for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#if !_ARCH_PPC
+
+#include "int_lib.h"
+
+// Returns: convert a to a long double, rounding toward even.
+
+// Assumptions: long double is an IEEE 80-bit floating point type padded to
+// 128 bits; du_int is a 64-bit integral type.
+
+// gggg gggg gggg gggg gggg gggg gggg gggg | gggg gggg gggg gggg seee eeee eeee
+// eeee | 1mmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm
+// mmmm mmmm mmmm
+COMPILER_RT_ABI long double __floatundixf(du_int a) {
+ if (a == 0)
+ return 0.0;
+ const unsigned N = sizeof(du_int) * CHAR_BIT;
+ int clz = __builtin_clzll(a);
+ int e = (N - 1) - clz; // exponent
+ long_double_bits fb;
+ fb.u.high.s.low = (e + 16383); // exponent
+ fb.u.low.all = a << clz; // mantissa
+ return fb.f;
+}
+
+#endif // !_ARCH_PPC
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatundixf.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatunsidf.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatunsidf.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatunsidf.c (revision 351984)
@@ -0,0 +1,47 @@
+//===-- lib/floatunsidf.c - uint -> double-precision conversion ---*- C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements unsigned integer to double-precision conversion for the
+// compiler-rt library in the IEEE-754 default round-to-nearest, ties-to-even
+// mode.
+//
+//===----------------------------------------------------------------------===//
+
+#define DOUBLE_PRECISION
+#include "fp_lib.h"
+
+#include "int_lib.h"
+
+COMPILER_RT_ABI fp_t __floatunsidf(unsigned int a) {
+
+ const int aWidth = sizeof a * CHAR_BIT;
+
+ // Handle zero as a special case to protect clz
+ if (a == 0)
+ return fromRep(0);
+
+ // Exponent of (fp_t)a is the width of abs(a).
+ const int exponent = (aWidth - 1) - __builtin_clz(a);
+ rep_t result;
+
+ // Shift a into the significand field and clear the implicit bit.
+ const int shift = significandBits - exponent;
+ result = (rep_t)a << shift ^ implicitBit;
+
+ // Insert the exponent
+ result += (rep_t)(exponent + exponentBias) << significandBits;
+ return fromRep(result);
+}
+
+#if defined(__ARM_EABI__)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+AEABI_RTABI fp_t __aeabi_ui2d(unsigned int a) { return __floatunsidf(a); }
+#else
+COMPILER_RT_ALIAS(__floatunsidf, __aeabi_ui2d)
+#endif
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatunsidf.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatunsisf.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatunsisf.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatunsisf.c (revision 351984)
@@ -0,0 +1,57 @@
+//===-- lib/floatunsisf.c - uint -> single-precision conversion ---*- C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements unsigned integer to single-precision conversion for the
+// compiler-rt library in the IEEE-754 default round-to-nearest, ties-to-even
+// mode.
+//
+//===----------------------------------------------------------------------===//
+
+#define SINGLE_PRECISION
+#include "fp_lib.h"
+
+#include "int_lib.h"
+
+COMPILER_RT_ABI fp_t __floatunsisf(unsigned int a) {
+
+ const int aWidth = sizeof a * CHAR_BIT;
+
+ // Handle zero as a special case to protect clz
+ if (a == 0)
+ return fromRep(0);
+
+ // Exponent of (fp_t)a is the width of abs(a).
+ const int exponent = (aWidth - 1) - __builtin_clz(a);
+ rep_t result;
+
+ // Shift a into the significand field, rounding if it is a right-shift
+ if (exponent <= significandBits) {
+ const int shift = significandBits - exponent;
+ result = (rep_t)a << shift ^ implicitBit;
+ } else {
+ const int shift = exponent - significandBits;
+ result = (rep_t)a >> shift ^ implicitBit;
+ rep_t round = (rep_t)a << (typeWidth - shift);
+ if (round > signBit)
+ result++;
+ if (round == signBit)
+ result += result & 1;
+ }
+
+ // Insert the exponent
+ result += (rep_t)(exponent + exponentBias) << significandBits;
+ return fromRep(result);
+}
+
+#if defined(__ARM_EABI__)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+AEABI_RTABI fp_t __aeabi_ui2f(unsigned int a) { return __floatunsisf(a); }
+#else
+COMPILER_RT_ALIAS(__floatunsisf, __aeabi_ui2f)
+#endif
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatunsisf.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatunsitf.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatunsitf.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatunsitf.c (revision 351984)
@@ -0,0 +1,40 @@
+//===-- lib/floatunsitf.c - uint -> quad-precision conversion -----*- C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements unsigned integer to quad-precision conversion for the
+// compiler-rt library in the IEEE-754 default round-to-nearest, ties-to-even
+// mode.
+//
+//===----------------------------------------------------------------------===//
+
+#define QUAD_PRECISION
+#include "fp_lib.h"
+
+#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
+COMPILER_RT_ABI fp_t __floatunsitf(unsigned int a) {
+
+ const int aWidth = sizeof a * CHAR_BIT;
+
+ // Handle zero as a special case to protect clz
+ if (a == 0)
+ return fromRep(0);
+
+ // Exponent of (fp_t)a is the width of abs(a).
+ const int exponent = (aWidth - 1) - __builtin_clz(a);
+ rep_t result;
+
+ // Shift a into the significand field and clear the implicit bit.
+ const int shift = significandBits - exponent;
+ result = (rep_t)a << shift ^ implicitBit;
+
+ // Insert the exponent
+ result += (rep_t)(exponent + exponentBias) << significandBits;
+ return fromRep(result);
+}
+
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatunsitf.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatuntidf.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatuntidf.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatuntidf.c (revision 351984)
@@ -0,0 +1,70 @@
+//===-- floatuntidf.c - Implement __floatuntidf ---------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __floatuntidf for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+#ifdef CRT_HAS_128BIT
+
+// Returns: convert a to a double, rounding toward even.
+
+// Assumptions: double is an IEEE 64-bit floating point type;
+// tu_int is a 128-bit integral type.
+
+// seee eeee eeee mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm
+// mmmm
+
+COMPILER_RT_ABI double __floatuntidf(tu_int a) {
+ if (a == 0)
+ return 0.0;
+ const unsigned N = sizeof(tu_int) * CHAR_BIT;
+ int sd = N - __clzti2(a); // number of significant digits
+ int e = sd - 1; // exponent
+ if (sd > DBL_MANT_DIG) {
+ // start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
+ // finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
+ // 12345678901234567890123456
+ // 1 = msb 1 bit
+ // P = bit DBL_MANT_DIG-1 bits to the right of 1
+ // Q = bit DBL_MANT_DIG bits to the right of 1
+ // R = "or" of all bits to the right of Q
+ switch (sd) {
+ case DBL_MANT_DIG + 1:
+ a <<= 1;
+ break;
+ case DBL_MANT_DIG + 2:
+ break;
+ default:
+ a = (a >> (sd - (DBL_MANT_DIG + 2))) |
+ ((a & ((tu_int)(-1) >> ((N + DBL_MANT_DIG + 2) - sd))) != 0);
+ };
+ // finish:
+ a |= (a & 4) != 0; // Or P into R
+ ++a; // round - this step may add a significant bit
+ a >>= 2; // dump Q and R
+ // a is now rounded to DBL_MANT_DIG or DBL_MANT_DIG+1 bits
+ if (a & ((tu_int)1 << DBL_MANT_DIG)) {
+ a >>= 1;
+ ++e;
+ }
+ // a is now rounded to DBL_MANT_DIG bits
+ } else {
+ a <<= (DBL_MANT_DIG - sd);
+ // a is now rounded to DBL_MANT_DIG bits
+ }
+ double_bits fb;
+ fb.u.s.high = ((e + 1023) << 20) | // exponent
+ ((su_int)(a >> 32) & 0x000FFFFF); // mantissa-high
+ fb.u.s.low = (su_int)a; // mantissa-low
+ return fb.f;
+}
+
+#endif // CRT_HAS_128BIT
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatuntidf.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatuntisf.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatuntisf.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatuntisf.c (revision 351984)
@@ -0,0 +1,68 @@
+//===-- floatuntisf.c - Implement __floatuntisf ---------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __floatuntisf for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+#ifdef CRT_HAS_128BIT
+
+// Returns: convert a to a float, rounding toward even.
+
+// Assumptions: float is an IEEE 32-bit floating point type;
+// tu_int is a 128-bit integral type.
+
+// seee eeee emmm mmmm mmmm mmmm mmmm mmmm
+
+COMPILER_RT_ABI float __floatuntisf(tu_int a) {
+ if (a == 0)
+ return 0.0F;
+ const unsigned N = sizeof(tu_int) * CHAR_BIT;
+ int sd = N - __clzti2(a); // number of significant digits
+ int e = sd - 1; // exponent
+ if (sd > FLT_MANT_DIG) {
+ // start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
+ // finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
+ // 12345678901234567890123456
+ // 1 = msb 1 bit
+ // P = bit FLT_MANT_DIG-1 bits to the right of 1
+ // Q = bit FLT_MANT_DIG bits to the right of 1
+ // R = "or" of all bits to the right of Q
+ switch (sd) {
+ case FLT_MANT_DIG + 1:
+ a <<= 1;
+ break;
+ case FLT_MANT_DIG + 2:
+ break;
+ default:
+ a = (a >> (sd - (FLT_MANT_DIG + 2))) |
+ ((a & ((tu_int)(-1) >> ((N + FLT_MANT_DIG + 2) - sd))) != 0);
+ };
+ // finish:
+ a |= (a & 4) != 0; // Or P into R
+ ++a; // round - this step may add a significant bit
+ a >>= 2; // dump Q and R
+ // a is now rounded to FLT_MANT_DIG or FLT_MANT_DIG+1 bits
+ if (a & ((tu_int)1 << FLT_MANT_DIG)) {
+ a >>= 1;
+ ++e;
+ }
+ // a is now rounded to FLT_MANT_DIG bits
+ } else {
+ a <<= (FLT_MANT_DIG - sd);
+ // a is now rounded to FLT_MANT_DIG bits
+ }
+ float_bits fb;
+ fb.u = ((e + 127) << 23) | // exponent
+ ((su_int)a & 0x007FFFFF); // mantissa
+ return fb.f;
+}
+
+#endif // CRT_HAS_128BIT
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatuntisf.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatuntitf.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatuntitf.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatuntitf.c (revision 351984)
@@ -0,0 +1,75 @@
+//===-- lib/floatuntitf.c - uint128 -> quad-precision conversion --*- C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements tu_int to quad-precision conversion for the
+// compiler-rt library in the IEEE-754 default round-to-nearest, ties-to-even
+// mode.
+//
+//===----------------------------------------------------------------------===//
+
+#define QUAD_PRECISION
+#include "fp_lib.h"
+#include "int_lib.h"
+
+// Returns: convert a tu_int to a fp_t, rounding toward even.
+
+// Assumption: fp_t is an IEEE 128 bit floating point type
+// tu_int is a 128 bit integral type
+
+// seee eeee eeee eeee mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm
+// mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm
+
+#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
+COMPILER_RT_ABI fp_t __floatuntitf(tu_int a) {
+ if (a == 0)
+ return 0.0;
+ const unsigned N = sizeof(tu_int) * CHAR_BIT;
+ int sd = N - __clzti2(a); // number of significant digits
+ int e = sd - 1; // exponent
+ if (sd > LDBL_MANT_DIG) {
+ // start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
+ // finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
+ // 12345678901234567890123456
+ // 1 = msb 1 bit
+ // P = bit LDBL_MANT_DIG-1 bits to the right of 1
+ // Q = bit LDBL_MANT_DIG bits to the right of 1
+ // R = "or" of all bits to the right of Q
+ switch (sd) {
+ case LDBL_MANT_DIG + 1:
+ a <<= 1;
+ break;
+ case LDBL_MANT_DIG + 2:
+ break;
+ default:
+ a = (a >> (sd - (LDBL_MANT_DIG + 2))) |
+ ((a & ((tu_int)(-1) >> ((N + LDBL_MANT_DIG + 2) - sd))) != 0);
+ };
+ // finish:
+ a |= (a & 4) != 0; // Or P into R
+ ++a; // round - this step may add a significant bit
+ a >>= 2; // dump Q and R
+ // a is now rounded to LDBL_MANT_DIG or LDBL_MANT_DIG+1 bits
+ if (a & ((tu_int)1 << LDBL_MANT_DIG)) {
+ a >>= 1;
+ ++e;
+ }
+ // a is now rounded to LDBL_MANT_DIG bits
+ } else {
+ a <<= (LDBL_MANT_DIG - sd);
+ // a is now rounded to LDBL_MANT_DIG bits
+ }
+
+ long_double_bits fb;
+ fb.u.high.all = (du_int)(e + 16383) << 48 // exponent
+ | ((a >> 64) & 0x0000ffffffffffffLL); // significand
+ fb.u.low.all = (du_int)(a);
+ return fb.f;
+}
+
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatuntitf.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatuntixf.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatuntixf.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatuntixf.c (revision 351984)
@@ -0,0 +1,70 @@
+//===-- floatuntixf.c - Implement __floatuntixf ---------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __floatuntixf for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+#ifdef CRT_HAS_128BIT
+
+// Returns: convert a to a long double, rounding toward even.
+
+// Assumption: long double is an IEEE 80 bit floating point type padded to
+// 128 bits
+// tu_int is a 128 bit integral type
+
+// gggg gggg gggg gggg gggg gggg gggg gggg | gggg gggg gggg gggg seee eeee eeee eeee
+// 1mmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm | mmmm mmmm mmmm mmmm mmmm mmmm mmmm mmmm
+
+COMPILER_RT_ABI long double __floatuntixf(tu_int a) {
+ if (a == 0)
+ return 0.0;
+ const unsigned N = sizeof(tu_int) * CHAR_BIT;
+ int sd = N - __clzti2(a); // number of significant digits
+ int e = sd - 1; // exponent
+ if (sd > LDBL_MANT_DIG) {
+ // start: 0000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQxxxxxxxxxxxxxxxxxx
+ // finish: 000000000000000000000000000000000000001xxxxxxxxxxxxxxxxxxxxxxPQR
+ // 12345678901234567890123456
+ // 1 = msb 1 bit
+ // P = bit LDBL_MANT_DIG-1 bits to the right of 1
+ // Q = bit LDBL_MANT_DIG bits to the right of 1
+ // R = "or" of all bits to the right of Q
+ switch (sd) {
+ case LDBL_MANT_DIG + 1:
+ a <<= 1;
+ break;
+ case LDBL_MANT_DIG + 2:
+ break;
+ default:
+ a = (a >> (sd - (LDBL_MANT_DIG + 2))) |
+ ((a & ((tu_int)(-1) >> ((N + LDBL_MANT_DIG + 2) - sd))) != 0);
+ };
+ // finish:
+ a |= (a & 4) != 0; // Or P into R
+ ++a; // round - this step may add a significant bit
+ a >>= 2; // dump Q and R
+ // a is now rounded to LDBL_MANT_DIG or LDBL_MANT_DIG+1 bits
+ if (a & ((tu_int)1 << LDBL_MANT_DIG)) {
+ a >>= 1;
+ ++e;
+ }
+ // a is now rounded to LDBL_MANT_DIG bits
+ } else {
+ a <<= (LDBL_MANT_DIG - sd);
+ // a is now rounded to LDBL_MANT_DIG bits
+ }
+ long_double_bits fb;
+ fb.u.high.s.low = (e + 16383); // exponent
+ fb.u.low.all = (du_int)a; // mantissa
+ return fb.f;
+}
+
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/floatuntixf.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
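
Unlike binary32/64, the x87 80-bit format keeps the leading significand bit explicit, which is why the packing above stores all 64 mantissa bits (fb.u.low.all = (du_int)a) rather than masking off an implicit one. A hedged sketch that dumps the pattern for 1.0L, assuming long double is x87 extended double on a little-endian target:

    #include <stdio.h>
    #include <string.h>

    int main(void) {
        long double x = 1.0L;
        unsigned char bytes[10];
        memcpy(bytes, &x, 10);  // 10 value bytes; storage may be padded to 12/16
        for (int i = 9; i >= 0; --i)
            printf("%02x", bytes[i]);
        // 3fff8000000000000000: biased exp 0x3fff, explicit integer bit set
        printf("\n");
        return 0;
    }
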
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fp_add_impl.inc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fp_add_impl.inc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fp_add_impl.inc (revision 351984)
@@ -0,0 +1,157 @@
+//===----- lib/fp_add_impl.inc - floating point addition ----------*- C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements soft-float addition with the IEEE-754 default rounding
+// (to nearest, ties to even).
+//
+//===----------------------------------------------------------------------===//
+
+#include "fp_lib.h"
+
+static __inline fp_t __addXf3__(fp_t a, fp_t b) {
+ rep_t aRep = toRep(a);
+ rep_t bRep = toRep(b);
+ const rep_t aAbs = aRep & absMask;
+ const rep_t bAbs = bRep & absMask;
+
+ // Detect if a or b is zero, infinity, or NaN.
+ if (aAbs - REP_C(1) >= infRep - REP_C(1) ||
+ bAbs - REP_C(1) >= infRep - REP_C(1)) {
+ // NaN + anything = qNaN
+ if (aAbs > infRep)
+ return fromRep(toRep(a) | quietBit);
+ // anything + NaN = qNaN
+ if (bAbs > infRep)
+ return fromRep(toRep(b) | quietBit);
+
+ if (aAbs == infRep) {
+ // +/-infinity + -/+infinity = qNaN
+ if ((toRep(a) ^ toRep(b)) == signBit)
+ return fromRep(qnanRep);
+ // +/-infinity + anything remaining = +/- infinity
+ else
+ return a;
+ }
+
+ // anything remaining + +/-infinity = +/-infinity
+ if (bAbs == infRep)
+ return b;
+
+ // zero + anything = anything
+ if (!aAbs) {
+ // We need to get the sign right for zero + zero.
+ if (!bAbs)
+ return fromRep(toRep(a) & toRep(b));
+ else
+ return b;
+ }
+
+ // anything + zero = anything
+ if (!bAbs)
+ return a;
+ }
+
+ // Swap a and b if necessary so that a has the larger absolute value.
+ if (bAbs > aAbs) {
+ const rep_t temp = aRep;
+ aRep = bRep;
+ bRep = temp;
+ }
+
+ // Extract the exponent and significand from the (possibly swapped) a and b.
+ int aExponent = aRep >> significandBits & maxExponent;
+ int bExponent = bRep >> significandBits & maxExponent;
+ rep_t aSignificand = aRep & significandMask;
+ rep_t bSignificand = bRep & significandMask;
+
+ // Normalize any denormals, and adjust the exponent accordingly.
+ if (aExponent == 0)
+ aExponent = normalize(&aSignificand);
+ if (bExponent == 0)
+ bExponent = normalize(&bSignificand);
+
+ // The sign of the result is the sign of the larger operand, a. If they
+ // have opposite signs, we are performing a subtraction. Otherwise, we
+ // perform addition.
+ const rep_t resultSign = aRep & signBit;
+ const bool subtraction = (aRep ^ bRep) & signBit;
+
+ // Shift the significands to give us round, guard and sticky, and set the
+ // implicit significand bit. If we fell through from the denormal path it
+ // was already set by normalize( ), but setting it twice won't hurt
+ // anything.
+ aSignificand = (aSignificand | implicitBit) << 3;
+ bSignificand = (bSignificand | implicitBit) << 3;
+
+ // Shift the significand of b by the difference in exponents, with a sticky
+ // bottom bit to get rounding correct.
+ const unsigned int align = aExponent - bExponent;
+ if (align) {
+ if (align < typeWidth) {
+ const bool sticky = bSignificand << (typeWidth - align);
+ bSignificand = bSignificand >> align | sticky;
+ } else {
+ bSignificand = 1; // Set the sticky bit. b is known to be non-zero.
+ }
+ }
+ if (subtraction) {
+ aSignificand -= bSignificand;
+ // If a == -b, return +zero.
+ if (aSignificand == 0)
+ return fromRep(0);
+
+ // If partial cancellation occurred, we need to left-shift the result
+ // and adjust the exponent.
+ if (aSignificand < implicitBit << 3) {
+ const int shift = rep_clz(aSignificand) - rep_clz(implicitBit << 3);
+ aSignificand <<= shift;
+ aExponent -= shift;
+ }
+ } else /* addition */ {
+ aSignificand += bSignificand;
+
+ // If the addition carried up, we need to right-shift the result and
+ // adjust the exponent.
+ if (aSignificand & implicitBit << 4) {
+ const bool sticky = aSignificand & 1;
+ aSignificand = aSignificand >> 1 | sticky;
+ aExponent += 1;
+ }
+ }
+
+ // If we have overflowed the type, return +/- infinity.
+ if (aExponent >= maxExponent)
+ return fromRep(infRep | resultSign);
+
+ if (aExponent <= 0) {
+ // The result is denormal before rounding. The exponent is zero and we
+ // need to shift the significand.
+ const int shift = 1 - aExponent;
+ const bool sticky = aSignificand << (typeWidth - shift);
+ aSignificand = aSignificand >> shift | sticky;
+ aExponent = 0;
+ }
+
+ // Low three bits are round, guard, and sticky.
+ const int roundGuardSticky = aSignificand & 0x7;
+
+ // Shift the significand into place, and mask off the implicit bit.
+ rep_t result = aSignificand >> 3 & significandMask;
+
+ // Insert the exponent and sign.
+ result |= (rep_t)aExponent << significandBits;
+ result |= resultSign;
+
+ // Perform the final rounding. The result may overflow to infinity, but
+ // that is the correct result in that case.
+ if (roundGuardSticky > 0x4)
+ result++;
+ if (roundGuardSticky == 0x4)
+ result += result & 1;
+ return fromRep(result);
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fp_add_impl.inc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
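
The three low bits kept by the << 3 above are the classic round, guard, and sticky bits, and the final two-step increment implements ties-to-even. A self-contained sketch of just that decision (the helper name is illustrative, not from the diff):

    #include <stdio.h>
    #include <stdint.h>

    // significand_x8 carries 3 extra low bits: round, guard, sticky.
    static uint64_t round_rgs(uint64_t significand_x8) {
        uint64_t result = significand_x8 >> 3;  // drop the extra bits
        unsigned rgs = significand_x8 & 0x7;
        if (rgs > 0x4)
            result++;               // more than halfway: round up
        else if (rgs == 0x4)
            result += result & 1;   // exactly halfway: round to even
        return result;
    }

    int main(void) {
        printf("%llu\n", (unsigned long long)round_rgs(0x15)); // 10.101b -> 3
        printf("%llu\n", (unsigned long long)round_rgs(0x14)); // 10.100b -> 2 (tie, stays even)
        printf("%llu\n", (unsigned long long)round_rgs(0x1C)); // 11.100b -> 4 (tie, goes even)
        return 0;
    }
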
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fp_extend.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fp_extend.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fp_extend.h (revision 351984)
@@ -0,0 +1,95 @@
+//===-- lib/fp_extend.h - low precision -> high precision conversion -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Set source and destination precision settings
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef FP_EXTEND_HEADER
+#define FP_EXTEND_HEADER
+
+#include "int_lib.h"
+
+#if defined SRC_SINGLE
+typedef float src_t;
+typedef uint32_t src_rep_t;
+#define SRC_REP_C UINT32_C
+static const int srcSigBits = 23;
+#define src_rep_t_clz __builtin_clz
+
+#elif defined SRC_DOUBLE
+typedef double src_t;
+typedef uint64_t src_rep_t;
+#define SRC_REP_C UINT64_C
+static const int srcSigBits = 52;
+static __inline int src_rep_t_clz(src_rep_t a) {
+#if defined __LP64__
+ return __builtin_clzl(a);
+#else
+ if (a & REP_C(0xffffffff00000000))
+ return __builtin_clz(a >> 32);
+ else
+ return 32 + __builtin_clz(a & REP_C(0xffffffff));
+#endif
+}
+
+#elif defined SRC_HALF
+typedef uint16_t src_t;
+typedef uint16_t src_rep_t;
+#define SRC_REP_C UINT16_C
+static const int srcSigBits = 10;
+#define src_rep_t_clz __builtin_clz
+
+#else
+#error Source should be half, single, or double precision!
+#endif // end source precision
+
+#if defined DST_SINGLE
+typedef float dst_t;
+typedef uint32_t dst_rep_t;
+#define DST_REP_C UINT32_C
+static const int dstSigBits = 23;
+
+#elif defined DST_DOUBLE
+typedef double dst_t;
+typedef uint64_t dst_rep_t;
+#define DST_REP_C UINT64_C
+static const int dstSigBits = 52;
+
+#elif defined DST_QUAD
+typedef long double dst_t;
+typedef __uint128_t dst_rep_t;
+#define DST_REP_C (__uint128_t)
+static const int dstSigBits = 112;
+
+#else
+#error Destination should be single, double, or quad precision!
+#endif // end destination precision
+
+// End of specialization parameters. Two helper routines for conversion to and
+// from the representation of floating-point data as integer values follow.
+
+static __inline src_rep_t srcToRep(src_t x) {
+ const union {
+ src_t f;
+ src_rep_t i;
+ } rep = {.f = x};
+ return rep.i;
+}
+
+static __inline dst_t dstFromRep(dst_rep_t x) {
+ const union {
+ dst_t f;
+ dst_rep_t i;
+ } rep = {.i = x};
+ return rep.f;
+}
+// End helper routines. Conversion implementation follows.
+
+#endif // FP_EXTEND_HEADER
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fp_extend.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
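
This header is parameterized entirely by the SRC_*/DST_* macros defined before inclusion. A sketch of how a widening builtin is assembled from it inside the compiler-rt tree (modeled on extendsfdf2.c; treat the exact file layout as an assumption):

    // extendsfdf2.c, approximately:
    #define SRC_SINGLE
    #define DST_DOUBLE
    #include "fp_extend_impl.inc"

    COMPILER_RT_ABI double __extendsfdf2(float a) { return __extendXfYf2__(a); }
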
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fp_extend_impl.inc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fp_extend_impl.inc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fp_extend_impl.inc (revision 351984)
@@ -0,0 +1,107 @@
+//===-- lib/fp_extend_impl.inc - low -> high precision conversion -*- C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a fairly generic conversion from a narrower to a wider
+// IEEE-754 floating-point type. The constants and types defined following the
+// includes below parameterize the conversion.
+//
+// It does not support types that don't use the usual IEEE-754 interchange
+// formats; specifically, some work would be needed to adapt it to
+// (for example) the Intel 80-bit format or PowerPC double-double format.
+//
+// Please note, however, that this implementation is only intended to support
+// *widening* operations; if you need to convert to a *narrower* floating-point
+// type (e.g. double -> float), then this routine will not do what you want it
+// to.
+//
+// It also requires that integer types at least as large as both formats
+// are available on the target platform; this may pose a problem when trying
+// to add support for quad on some 32-bit systems, for example. You also may
+// run into trouble finding an appropriate CLZ function for wide source types;
+// you will likely need to roll your own on some platforms.
+//
+// Finally, the following assumptions are made:
+//
+// 1. Floating-point types and integer types have the same endianness on the
+// target platform.
+//
+// 2. Quiet NaNs, if supported, are indicated by the leading bit of the
+// significand field being set.
+//
+//===----------------------------------------------------------------------===//
+
+#include "fp_extend.h"
+
+static __inline dst_t __extendXfYf2__(src_t a) {
+ // Various constants whose values follow from the type parameters.
+ // Any reasonable optimizer will fold and propagate all of these.
+ const int srcBits = sizeof(src_t) * CHAR_BIT;
+ const int srcExpBits = srcBits - srcSigBits - 1;
+ const int srcInfExp = (1 << srcExpBits) - 1;
+ const int srcExpBias = srcInfExp >> 1;
+
+ const src_rep_t srcMinNormal = SRC_REP_C(1) << srcSigBits;
+ const src_rep_t srcInfinity = (src_rep_t)srcInfExp << srcSigBits;
+ const src_rep_t srcSignMask = SRC_REP_C(1) << (srcSigBits + srcExpBits);
+ const src_rep_t srcAbsMask = srcSignMask - 1;
+ const src_rep_t srcQNaN = SRC_REP_C(1) << (srcSigBits - 1);
+ const src_rep_t srcNaNCode = srcQNaN - 1;
+
+ const int dstBits = sizeof(dst_t) * CHAR_BIT;
+ const int dstExpBits = dstBits - dstSigBits - 1;
+ const int dstInfExp = (1 << dstExpBits) - 1;
+ const int dstExpBias = dstInfExp >> 1;
+
+ const dst_rep_t dstMinNormal = DST_REP_C(1) << dstSigBits;
+
+ // Break a into a sign and representation of the absolute value.
+ const src_rep_t aRep = srcToRep(a);
+ const src_rep_t aAbs = aRep & srcAbsMask;
+ const src_rep_t sign = aRep & srcSignMask;
+ dst_rep_t absResult;
+
+ // If sizeof(src_rep_t) < sizeof(int), the subtraction result is promoted
+ // to (signed) int. To avoid that, explicitly cast to src_rep_t.
+ if ((src_rep_t)(aAbs - srcMinNormal) < srcInfinity - srcMinNormal) {
+ // a is a normal number.
+ // Extend to the destination type by shifting the significand and
+ // exponent into the proper position and rebiasing the exponent.
+ absResult = (dst_rep_t)aAbs << (dstSigBits - srcSigBits);
+ absResult += (dst_rep_t)(dstExpBias - srcExpBias) << dstSigBits;
+ }
+
+ else if (aAbs >= srcInfinity) {
+ // a is NaN or infinity.
+ // Conjure the result by beginning with infinity, then setting the qNaN
+ // bit (if needed) and right-aligning the rest of the trailing NaN
+ // payload field.
+ absResult = (dst_rep_t)dstInfExp << dstSigBits;
+ absResult |= (dst_rep_t)(aAbs & srcQNaN) << (dstSigBits - srcSigBits);
+ absResult |= (dst_rep_t)(aAbs & srcNaNCode) << (dstSigBits - srcSigBits);
+ }
+
+ else if (aAbs) {
+ // a is denormal.
+ // renormalize the significand and clear the leading bit, then insert
+ // the correct adjusted exponent in the destination type.
+ const int scale = src_rep_t_clz(aAbs) - src_rep_t_clz(srcMinNormal);
+ absResult = (dst_rep_t)aAbs << (dstSigBits - srcSigBits + scale);
+ absResult ^= dstMinNormal;
+ const int resultExponent = dstExpBias - srcExpBias - scale + 1;
+ absResult |= (dst_rep_t)resultExponent << dstSigBits;
+ }
+
+ else {
+ // a is zero.
+ absResult = 0;
+ }
+
+ // Apply the signbit to the absolute value.
+ const dst_rep_t result = absResult | (dst_rep_t)sign << (dstBits - srcBits);
+ return dstFromRep(result);
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fp_extend_impl.inc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
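
For a normal number, the conversion above is just "shift the significand, rebias the exponent". A standalone sketch of that path for float -> double, checked against the compiler's own conversion (assumes IEEE-754 types and matching integer/FP endianness, as the file itself does):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    int main(void) {
        float f = 3.5f;
        uint32_t srcRep;
        memcpy(&srcRep, &f, sizeof f);
        uint64_t absResult = (uint64_t)(srcRep & 0x7fffffffu) << (52 - 23);
        absResult += (uint64_t)(1023 - 127) << 52;   // rebias the exponent
        uint64_t dstRep = absResult | ((uint64_t)(srcRep >> 31) << 63);
        double d;
        memcpy(&d, &dstRep, sizeof d);
        printf("%g == %g\n", d, (double)f);          // 3.5 == 3.5
        return 0;
    }
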
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fp_fixint_impl.inc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fp_fixint_impl.inc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fp_fixint_impl.inc (revision 351984)
@@ -0,0 +1,40 @@
+//===-- lib/fp_fixint_impl.inc - Float -> integer conversion -----*- C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements float to integer conversion for the
+// compiler-rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "fp_lib.h"
+
+static __inline fixint_t __fixint(fp_t a) {
+ const fixint_t fixint_max = (fixint_t)((~(fixuint_t)0) / 2);
+ const fixint_t fixint_min = -fixint_max - 1;
+ // Break a into sign, exponent, significand parts.
+ const rep_t aRep = toRep(a);
+ const rep_t aAbs = aRep & absMask;
+ const fixint_t sign = aRep & signBit ? -1 : 1;
+ const int exponent = (aAbs >> significandBits) - exponentBias;
+ const rep_t significand = (aAbs & significandMask) | implicitBit;
+
+ // If exponent is negative, the result is zero.
+ if (exponent < 0)
+ return 0;
+
+ // If the value is too large for the integer type, saturate.
+ if ((unsigned)exponent >= sizeof(fixint_t) * CHAR_BIT)
+ return sign == 1 ? fixint_max : fixint_min;
+
+ // If 0 <= exponent < significandBits, right shift to get the result.
+ // Otherwise, shift left.
+ if (exponent < significandBits)
+ return sign * (significand >> (significandBits - exponent));
+ else
+ return sign * ((fixint_t)significand << (exponent - significandBits));
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fp_fixint_impl.inc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
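
A raw C cast of an out-of-range double to a signed integer is undefined behavior; __fixint instead saturates, as the exponent check above shows. A hedged specialization of the same logic to double -> int32 (the function name is illustrative):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    static int32_t fix_double_to_i32(double a) {
        uint64_t rep;
        memcpy(&rep, &a, sizeof a);
        uint64_t abs = rep & 0x7fffffffffffffffULL;
        int sign = (rep >> 63) ? -1 : 1;
        int exponent = (int)(abs >> 52) - 1023;
        uint64_t significand = (abs & 0xfffffffffffffULL) | (1ULL << 52);
        if (exponent < 0)
            return 0;                                  // |a| < 1
        if (exponent >= 32)
            return sign == 1 ? INT32_MAX : INT32_MIN;  // saturate
        // The left-shift branch of the generic code is unreachable here,
        // since the destination is narrower than the significand.
        return (int32_t)(sign * (int64_t)(significand >> (52 - exponent)));
    }

    int main(void) {
        printf("%d %d %d\n", fix_double_to_i32(-2.9),
               fix_double_to_i32(1e30), fix_double_to_i32(-1e30));
        // -2 2147483647 -2147483648
        return 0;
    }
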
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fp_fixuint_impl.inc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fp_fixuint_impl.inc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fp_fixuint_impl.inc (revision 351984)
@@ -0,0 +1,38 @@
+//===-- lib/fp_fixuint_impl.inc - Float -> unsigned conversion ---*- C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements float to unsigned integer conversion for the
+// compiler-rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "fp_lib.h"
+
+static __inline fixuint_t __fixuint(fp_t a) {
+ // Break a into sign, exponent, significand parts.
+ const rep_t aRep = toRep(a);
+ const rep_t aAbs = aRep & absMask;
+ const int sign = aRep & signBit ? -1 : 1;
+ const int exponent = (aAbs >> significandBits) - exponentBias;
+ const rep_t significand = (aAbs & significandMask) | implicitBit;
+
+ // If either the value or the exponent is negative, the result is zero.
+ if (sign == -1 || exponent < 0)
+ return 0;
+
+ // If the value is too large for the integer type, saturate.
+ if ((unsigned)exponent >= sizeof(fixuint_t) * CHAR_BIT)
+ return ~(fixuint_t)0;
+
+ // If 0 <= exponent < significandBits, right shift to get the result.
+ // Otherwise, shift left.
+ if (exponent < significandBits)
+ return significand >> (significandBits - exponent);
+ else
+ return (fixuint_t)significand << (exponent - significandBits);
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fp_fixuint_impl.inc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
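
The unsigned variant adds one more defined case: any negative input maps to zero. A companion sketch for double -> uint32, mirroring the checks above (again illustrative, not the diff's code):

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    static uint32_t fix_double_to_u32(double a) {
        uint64_t rep;
        memcpy(&rep, &a, sizeof a);
        uint64_t abs = rep & 0x7fffffffffffffffULL;
        int exponent = (int)(abs >> 52) - 1023;
        uint64_t significand = (abs & 0xfffffffffffffULL) | (1ULL << 52);
        if ((rep >> 63) || exponent < 0)
            return 0;                  // negative, or |a| < 1
        if (exponent >= 32)
            return UINT32_MAX;         // saturate
        return (uint32_t)(significand >> (52 - exponent));
    }

    int main(void) {
        printf("%u %u %u\n", fix_double_to_u32(-3.0),
               fix_double_to_u32(42.7), fix_double_to_u32(1e30));
        // 0 42 4294967295
        return 0;
    }
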
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fp_lib.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fp_lib.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fp_lib.h (revision 351984)
@@ -0,0 +1,319 @@
+//===-- lib/fp_lib.h - Floating-point utilities -------------------*- C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a configuration header for soft-float routines in compiler-rt.
+// This file does not provide any part of the compiler-rt interface, but defines
+// many useful constants and utility routines that are used in the
+// implementation of the soft-float routines in compiler-rt.
+//
+// Assumes that float, double and long double correspond to the IEEE-754
+// binary32, binary64, and binary128 types, respectively, and that integer
+// endianness matches floating point endianness on the target platform.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef FP_LIB_HEADER
+#define FP_LIB_HEADER
+
+#include "int_lib.h"
+#include "int_math.h"
+#include <limits.h>
+#include <stdbool.h>
+#include <stdint.h>
+
+// x86_64 FreeBSD prior to v9.3 defines fixed-width types incorrectly in
+// 32-bit mode.
+#if defined(__FreeBSD__) && defined(__i386__)
+#include <sys/param.h>
+#if __FreeBSD_version < 903000 // v9.3
+#define uint64_t unsigned long long
+#define int64_t long long
+#undef UINT64_C
+#define UINT64_C(c) (c##ULL)
+#endif
+#endif
+
+#if defined SINGLE_PRECISION
+
+typedef uint32_t rep_t;
+typedef int32_t srep_t;
+typedef float fp_t;
+#define REP_C UINT32_C
+#define significandBits 23
+
+static __inline int rep_clz(rep_t a) { return __builtin_clz(a); }
+
+// 32x32 --> 64 bit multiply
+static __inline void wideMultiply(rep_t a, rep_t b, rep_t *hi, rep_t *lo) {
+ const uint64_t product = (uint64_t)a * b;
+ *hi = product >> 32;
+ *lo = product;
+}
+COMPILER_RT_ABI fp_t __addsf3(fp_t a, fp_t b);
+
+#elif defined DOUBLE_PRECISION
+
+typedef uint64_t rep_t;
+typedef int64_t srep_t;
+typedef double fp_t;
+#define REP_C UINT64_C
+#define significandBits 52
+
+static __inline int rep_clz(rep_t a) {
+#if defined __LP64__
+ return __builtin_clzl(a);
+#else
+ if (a & REP_C(0xffffffff00000000))
+ return __builtin_clz(a >> 32);
+ else
+ return 32 + __builtin_clz(a & REP_C(0xffffffff));
+#endif
+}
+
+#define loWord(a) (a & 0xffffffffU)
+#define hiWord(a) (a >> 32)
+
+// 64x64 -> 128 wide multiply for platforms that don't have such an operation;
+// many 64-bit platforms have this operation, but they tend to have hardware
+// floating-point, so we don't bother with a special case for them here.
+static __inline void wideMultiply(rep_t a, rep_t b, rep_t *hi, rep_t *lo) {
+ // Each of the component 32x32 -> 64 products
+ const uint64_t plolo = loWord(a) * loWord(b);
+ const uint64_t plohi = loWord(a) * hiWord(b);
+ const uint64_t philo = hiWord(a) * loWord(b);
+ const uint64_t phihi = hiWord(a) * hiWord(b);
+ // Sum terms that contribute to lo in a way that allows us to get the carry
+ const uint64_t r0 = loWord(plolo);
+ const uint64_t r1 = hiWord(plolo) + loWord(plohi) + loWord(philo);
+ *lo = r0 + (r1 << 32);
+ // Sum terms contributing to hi with the carry from lo
+ *hi = hiWord(plohi) + hiWord(philo) + hiWord(r1) + phihi;
+}
+#undef loWord
+#undef hiWord
+
+COMPILER_RT_ABI fp_t __adddf3(fp_t a, fp_t b);
+
+#elif defined QUAD_PRECISION
+#if __LDBL_MANT_DIG__ == 113 && defined(__SIZEOF_INT128__)
+#define CRT_LDBL_128BIT
+typedef __uint128_t rep_t;
+typedef __int128_t srep_t;
+typedef long double fp_t;
+#define REP_C (__uint128_t)
+// Note: Since there is no explicit way to tell the compiler that a constant
+// is a 128-bit integer, we let the constant be cast to a 128-bit integer.
+#define significandBits 112
+
+static __inline int rep_clz(rep_t a) {
+ const union {
+ __uint128_t ll;
+#if _YUGA_BIG_ENDIAN
+ struct {
+ uint64_t high, low;
+ } s;
+#else
+ struct {
+ uint64_t low, high;
+ } s;
+#endif
+ } uu = {.ll = a};
+
+ uint64_t word;
+ uint64_t add;
+
+ if (uu.s.high) {
+ word = uu.s.high;
+ add = 0;
+ } else {
+ word = uu.s.low;
+ add = 64;
+ }
+ return __builtin_clzll(word) + add;
+}
+
+#define Word_LoMask UINT64_C(0x00000000ffffffff)
+#define Word_HiMask UINT64_C(0xffffffff00000000)
+#define Word_FullMask UINT64_C(0xffffffffffffffff)
+#define Word_1(a) (uint64_t)((a >> 96) & Word_LoMask)
+#define Word_2(a) (uint64_t)((a >> 64) & Word_LoMask)
+#define Word_3(a) (uint64_t)((a >> 32) & Word_LoMask)
+#define Word_4(a) (uint64_t)(a & Word_LoMask)
+
+// 128x128 -> 256 wide multiply for platforms that don't have such an operation;
+// many 64-bit platforms have this operation, but they tend to have hardware
+// floating-point, so we don't bother with a special case for them here.
+static __inline void wideMultiply(rep_t a, rep_t b, rep_t *hi, rep_t *lo) {
+
+ const uint64_t product11 = Word_1(a) * Word_1(b);
+ const uint64_t product12 = Word_1(a) * Word_2(b);
+ const uint64_t product13 = Word_1(a) * Word_3(b);
+ const uint64_t product14 = Word_1(a) * Word_4(b);
+ const uint64_t product21 = Word_2(a) * Word_1(b);
+ const uint64_t product22 = Word_2(a) * Word_2(b);
+ const uint64_t product23 = Word_2(a) * Word_3(b);
+ const uint64_t product24 = Word_2(a) * Word_4(b);
+ const uint64_t product31 = Word_3(a) * Word_1(b);
+ const uint64_t product32 = Word_3(a) * Word_2(b);
+ const uint64_t product33 = Word_3(a) * Word_3(b);
+ const uint64_t product34 = Word_3(a) * Word_4(b);
+ const uint64_t product41 = Word_4(a) * Word_1(b);
+ const uint64_t product42 = Word_4(a) * Word_2(b);
+ const uint64_t product43 = Word_4(a) * Word_3(b);
+ const uint64_t product44 = Word_4(a) * Word_4(b);
+
+ const __uint128_t sum0 = (__uint128_t)product44;
+ const __uint128_t sum1 = (__uint128_t)product34 + (__uint128_t)product43;
+ const __uint128_t sum2 =
+ (__uint128_t)product24 + (__uint128_t)product33 + (__uint128_t)product42;
+ const __uint128_t sum3 = (__uint128_t)product14 + (__uint128_t)product23 +
+ (__uint128_t)product32 + (__uint128_t)product41;
+ const __uint128_t sum4 =
+ (__uint128_t)product13 + (__uint128_t)product22 + (__uint128_t)product31;
+ const __uint128_t sum5 = (__uint128_t)product12 + (__uint128_t)product21;
+ const __uint128_t sum6 = (__uint128_t)product11;
+
+ const __uint128_t r0 = (sum0 & Word_FullMask) + ((sum1 & Word_LoMask) << 32);
+ const __uint128_t r1 = (sum0 >> 64) + ((sum1 >> 32) & Word_FullMask) +
+ (sum2 & Word_FullMask) + ((sum3 << 32) & Word_HiMask);
+
+ *lo = r0 + (r1 << 64);
+ *hi = (r1 >> 64) + (sum1 >> 96) + (sum2 >> 64) + (sum3 >> 32) + sum4 +
+ (sum5 << 32) + (sum6 << 64);
+}
+#undef Word_1
+#undef Word_2
+#undef Word_3
+#undef Word_4
+#undef Word_HiMask
+#undef Word_LoMask
+#undef Word_FullMask
+#endif // __LDBL_MANT_DIG__ == 113 && __SIZEOF_INT128__
+#else
+#error SINGLE_PRECISION, DOUBLE_PRECISION or QUAD_PRECISION must be defined.
+#endif
+
+#if defined(SINGLE_PRECISION) || defined(DOUBLE_PRECISION) || \
+ defined(CRT_LDBL_128BIT)
+#define typeWidth (sizeof(rep_t) * CHAR_BIT)
+#define exponentBits (typeWidth - significandBits - 1)
+#define maxExponent ((1 << exponentBits) - 1)
+#define exponentBias (maxExponent >> 1)
+
+#define implicitBit (REP_C(1) << significandBits)
+#define significandMask (implicitBit - 1U)
+#define signBit (REP_C(1) << (significandBits + exponentBits))
+#define absMask (signBit - 1U)
+#define exponentMask (absMask ^ significandMask)
+#define oneRep ((rep_t)exponentBias << significandBits)
+#define infRep exponentMask
+#define quietBit (implicitBit >> 1)
+#define qnanRep (exponentMask | quietBit)
+
+static __inline rep_t toRep(fp_t x) {
+ const union {
+ fp_t f;
+ rep_t i;
+ } rep = {.f = x};
+ return rep.i;
+}
+
+static __inline fp_t fromRep(rep_t x) {
+ const union {
+ fp_t f;
+ rep_t i;
+ } rep = {.i = x};
+ return rep.f;
+}
+
+static __inline int normalize(rep_t *significand) {
+ const int shift = rep_clz(*significand) - rep_clz(implicitBit);
+ *significand <<= shift;
+ return 1 - shift;
+}
+
+static __inline void wideLeftShift(rep_t *hi, rep_t *lo, int count) {
+ *hi = *hi << count | *lo >> (typeWidth - count);
+ *lo = *lo << count;
+}
+
+static __inline void wideRightShiftWithSticky(rep_t *hi, rep_t *lo,
+ unsigned int count) {
+ if (count < typeWidth) {
+ const bool sticky = *lo << (typeWidth - count);
+ *lo = *hi << (typeWidth - count) | *lo >> count | sticky;
+ *hi = *hi >> count;
+ } else if (count < 2 * typeWidth) {
+ const bool sticky = *hi << (2 * typeWidth - count) | *lo;
+ *lo = *hi >> (count - typeWidth) | sticky;
+ *hi = 0;
+ } else {
+ const bool sticky = *hi | *lo;
+ *lo = sticky;
+ *hi = 0;
+ }
+}
+
+// Implements logb methods (logb, logbf, logbl) for IEEE-754. This avoids
+// pulling in a libm dependency from compiler-rt, but is not meant to replace
+// it (i.e. code calling logb() should get the one from libm, not this), hence
+// the __compiler_rt prefix.
+static __inline fp_t __compiler_rt_logbX(fp_t x) {
+ rep_t rep = toRep(x);
+ int exp = (rep & exponentMask) >> significandBits;
+
+ // Abnormal cases:
+ // 1) +/- inf returns +inf; NaN returns NaN
+ // 2) 0.0 returns -inf
+ if (exp == maxExponent) {
+ if (((rep & signBit) == 0) || (x != x)) {
+ return x; // NaN or +inf: return x
+ } else {
+ return -x; // -inf: return -x
+ }
+ } else if (x == 0.0) {
+ // 0.0: return -inf
+ return fromRep(infRep | signBit);
+ }
+
+ if (exp != 0) {
+ // Normal number
+ return exp - exponentBias; // Unbias exponent
+ } else {
+ // Subnormal number; normalize and repeat
+ rep &= absMask;
+ const int shift = 1 - normalize(&rep);
+ exp = (rep & exponentMask) >> significandBits;
+ return exp - exponentBias - shift; // Unbias exponent
+ }
+}
+#endif
+
+#if defined(SINGLE_PRECISION)
+static __inline fp_t __compiler_rt_logbf(fp_t x) {
+ return __compiler_rt_logbX(x);
+}
+#elif defined(DOUBLE_PRECISION)
+static __inline fp_t __compiler_rt_logb(fp_t x) {
+ return __compiler_rt_logbX(x);
+}
+#elif defined(QUAD_PRECISION)
+#if defined(CRT_LDBL_128BIT)
+static __inline fp_t __compiler_rt_logbl(fp_t x) {
+ return __compiler_rt_logbX(x);
+}
+#else
+// The generic implementation only works for IEEE-754 floating point. For other
+// floating point types, continue to rely on the libm implementation for now.
+static __inline long double __compiler_rt_logbl(long double x) {
+ return crt_logbl(x);
+}
+#endif
+#endif
+
+#endif // FP_LIB_HEADER
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fp_lib.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
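
The 64x64 -> 128 wideMultiply above is a schoolbook product of 32-bit halves with careful carry collection. A quick cross-check against the compiler's native 128-bit multiply, where the __uint128_t extension is available:

    #include <stdio.h>
    #include <stdint.h>

    static void wideMultiply64(uint64_t a, uint64_t b, uint64_t *hi, uint64_t *lo) {
        const uint64_t plolo = (a & 0xffffffffU) * (b & 0xffffffffU);
        const uint64_t plohi = (a & 0xffffffffU) * (b >> 32);
        const uint64_t philo = (a >> 32) * (b & 0xffffffffU);
        const uint64_t phihi = (a >> 32) * (b >> 32);
        // Sum the cross terms so the carry out of the low word is captured.
        const uint64_t r0 = plolo & 0xffffffffU;
        const uint64_t r1 = (plolo >> 32) + (plohi & 0xffffffffU) + (philo & 0xffffffffU);
        *lo = r0 + (r1 << 32);
        *hi = (plohi >> 32) + (philo >> 32) + (r1 >> 32) + phihi;
    }

    int main(void) {
        uint64_t a = 0xdeadbeefcafebabeULL, b = 0x0123456789abcdefULL, hi, lo;
        wideMultiply64(a, b, &hi, &lo);
        unsigned __int128 p = (unsigned __int128)a * b;
        puts(hi == (uint64_t)(p >> 64) && lo == (uint64_t)p ? "match" : "mismatch");
        return 0;
    }
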
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fp_mul_impl.inc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fp_mul_impl.inc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fp_mul_impl.inc (revision 351984)
@@ -0,0 +1,128 @@
+//===---- lib/fp_mul_impl.inc - floating point multiplication -----*- C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements soft-float multiplication with the IEEE-754 default
+// rounding (to nearest, ties to even).
+//
+//===----------------------------------------------------------------------===//
+
+#include "fp_lib.h"
+
+static __inline fp_t __mulXf3__(fp_t a, fp_t b) {
+ const unsigned int aExponent = toRep(a) >> significandBits & maxExponent;
+ const unsigned int bExponent = toRep(b) >> significandBits & maxExponent;
+ const rep_t productSign = (toRep(a) ^ toRep(b)) & signBit;
+
+ rep_t aSignificand = toRep(a) & significandMask;
+ rep_t bSignificand = toRep(b) & significandMask;
+ int scale = 0;
+
+ // Detect if a or b is zero, denormal, infinity, or NaN.
+ if (aExponent - 1U >= maxExponent - 1U ||
+ bExponent - 1U >= maxExponent - 1U) {
+
+ const rep_t aAbs = toRep(a) & absMask;
+ const rep_t bAbs = toRep(b) & absMask;
+
+ // NaN * anything = qNaN
+ if (aAbs > infRep)
+ return fromRep(toRep(a) | quietBit);
+ // anything * NaN = qNaN
+ if (bAbs > infRep)
+ return fromRep(toRep(b) | quietBit);
+
+ if (aAbs == infRep) {
+ // infinity * non-zero = +/- infinity
+ if (bAbs)
+ return fromRep(aAbs | productSign);
+ // infinity * zero = NaN
+ else
+ return fromRep(qnanRep);
+ }
+
+ if (bAbs == infRep) {
+ // non-zero * infinity = +/- infinity
+ if (aAbs)
+ return fromRep(bAbs | productSign);
+ // zero * infinity = NaN
+ else
+ return fromRep(qnanRep);
+ }
+
+ // zero * anything = +/- zero
+ if (!aAbs)
+ return fromRep(productSign);
+ // anything * zero = +/- zero
+ if (!bAbs)
+ return fromRep(productSign);
+
+ // One or both of a or b is denormal. The other (if applicable) is a
+ // normal number. Renormalize one or both of a and b, and set scale to
+ // include the necessary exponent adjustment.
+ if (aAbs < implicitBit)
+ scale += normalize(&aSignificand);
+ if (bAbs < implicitBit)
+ scale += normalize(&bSignificand);
+ }
+
+ // Set the implicit significand bit. If we fell through from the
+ // denormal path it was already set by normalize( ), but setting it twice
+ // won't hurt anything.
+ aSignificand |= implicitBit;
+ bSignificand |= implicitBit;
+
+ // Perform a basic multiplication on the significands. One of them must be
+ // shifted beforehand to be aligned with the exponent.
+ rep_t productHi, productLo;
+ wideMultiply(aSignificand, bSignificand << exponentBits, &productHi,
+ &productLo);
+
+ int productExponent = aExponent + bExponent - exponentBias + scale;
+
+ // Normalize the significand and adjust the exponent if needed.
+ if (productHi & implicitBit)
+ productExponent++;
+ else
+ wideLeftShift(&productHi, &productLo, 1);
+
+ // If we have overflowed the type, return +/- infinity.
+ if (productExponent >= maxExponent)
+ return fromRep(infRep | productSign);
+
+ if (productExponent <= 0) {
+ // The result is denormal before rounding.
+ //
+ // If the result is so small that it just underflows to zero, return
+ // zero with the appropriate sign. Mathematically, there is no need to
+ // handle this case separately, but we make it a special case to
+ // simplify the shift logic.
+ const unsigned int shift = REP_C(1) - (unsigned int)productExponent;
+ if (shift >= typeWidth)
+ return fromRep(productSign);
+
+ // Otherwise, shift the significand of the result so that the round
+ // bit is the high bit of productLo.
+ wideRightShiftWithSticky(&productHi, &productLo, shift);
+ } else {
+ // The result is normal before rounding. Insert the exponent.
+ productHi &= significandMask;
+ productHi |= (rep_t)productExponent << significandBits;
+ }
+
+ // Insert the sign of the result.
+ productHi |= productSign;
+
+ // Perform the final rounding. The final result may overflow to infinity,
+ // or underflow to zero, but those are the correct results in those cases.
+ // We use the default IEEE-754 round-to-nearest, ties-to-even rounding mode.
+ if (productLo > signBit)
+ productHi++;
+ if (productLo == signBit)
+ productHi += productHi & 1;
+ return fromRep(productHi);
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fp_mul_impl.inc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
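
Stripped of the special cases, the normal path above is: multiply the two implicit-bit-extended significands, renormalize the double-width product, and rebias the exponent. A sketch for float with inputs chosen so the product is exact and no rounding step is needed:

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>

    int main(void) {
        float a = 1.5f, b = 1.25f;
        uint32_t aRep, bRep;
        memcpy(&aRep, &a, sizeof a);
        memcpy(&bRep, &b, sizeof b);
        uint64_t aSig = (aRep & 0x7fffff) | 0x800000;  // set implicit bit
        uint64_t bSig = (bRep & 0x7fffff) | 0x800000;
        uint64_t product = aSig * bSig;                // 47 or 48 bits
        int exponent = (int)(aRep >> 23) + (int)(bRep >> 23) - 127;
        if (product & (1ULL << 47))
            exponent++;                                // product in [2, 4)
        else
            product <<= 1;                             // normalize to bit 47
        uint32_t rep = ((uint32_t)exponent << 23) |
                       (uint32_t)((product >> 24) & 0x7fffff);
        float r;
        memcpy(&r, &rep, sizeof r);
        printf("%g * %g = %g\n", a, b, r);             // 1.5 * 1.25 = 1.875
        return 0;
    }
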
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fp_trunc.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fp_trunc.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fp_trunc.h (revision 351984)
@@ -0,0 +1,81 @@
+//===-- lib/fp_trunc.h - high -> low precision conversion --------*- C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Set source and destination precision settings
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef FP_TRUNC_HEADER
+#define FP_TRUNC_HEADER
+
+#include "int_lib.h"
+
+#if defined SRC_SINGLE
+typedef float src_t;
+typedef uint32_t src_rep_t;
+#define SRC_REP_C UINT32_C
+static const int srcSigBits = 23;
+
+#elif defined SRC_DOUBLE
+typedef double src_t;
+typedef uint64_t src_rep_t;
+#define SRC_REP_C UINT64_C
+static const int srcSigBits = 52;
+
+#elif defined SRC_QUAD
+typedef long double src_t;
+typedef __uint128_t src_rep_t;
+#define SRC_REP_C (__uint128_t)
+static const int srcSigBits = 112;
+
+#else
+#error Source should be single, double, or quad precision!
+#endif // end source precision
+
+#if defined DST_DOUBLE
+typedef double dst_t;
+typedef uint64_t dst_rep_t;
+#define DST_REP_C UINT64_C
+static const int dstSigBits = 52;
+
+#elif defined DST_SINGLE
+typedef float dst_t;
+typedef uint32_t dst_rep_t;
+#define DST_REP_C UINT32_C
+static const int dstSigBits = 23;
+
+#elif defined DST_HALF
+typedef uint16_t dst_t;
+typedef uint16_t dst_rep_t;
+#define DST_REP_C UINT16_C
+static const int dstSigBits = 10;
+
+#else
+#error Destination should be half, single, or double precision!
+#endif // end destination precision
+
+// End of specialization parameters. Two helper routines for conversion to and
+// from the representation of floating-point data as integer values follow.
+
+static __inline src_rep_t srcToRep(src_t x) {
+ const union {
+ src_t f;
+ src_rep_t i;
+ } rep = {.f = x};
+ return rep.i;
+}
+
+static __inline dst_t dstFromRep(dst_rep_t x) {
+ const union {
+ dst_t f;
+ dst_rep_t i;
+ } rep = {.i = x};
+ return rep.f;
+}
+
+#endif // FP_TRUNC_HEADER
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fp_trunc.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fp_trunc_impl.inc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fp_trunc_impl.inc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fp_trunc_impl.inc (revision 351984)
@@ -0,0 +1,132 @@
+//===-- lib/fp_trunc_impl.inc - high -> low precision conversion -*- C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements a fairly generic conversion from a wider to a narrower
+// IEEE-754 floating-point type in the default (round to nearest, ties to even)
+// rounding mode. The constants and types defined following the includes below
+// parameterize the conversion.
+//
+// This routine can be trivially adapted to support conversions to
+// half-precision or from quad-precision. It does not support types that don't
+// use the usual IEEE-754 interchange formats; specifically, some work would be
+// needed to adapt it to (for example) the Intel 80-bit format or PowerPC
+// double-double format.
+//
+// Please note, however, that this implementation is only intended to support
+// *narrowing* operations; if you need to convert to a *wider* floating-point
+// type (e.g. float -> double), then this routine will not do what you want it
+// to.
+//
+// It also requires that integer types at least as large as both formats
+// are available on the target platform; this may pose a problem when trying
+// to add support for quad on some 32-bit systems, for example.
+//
+// Finally, the following assumptions are made:
+//
+// 1. Floating-point types and integer types have the same endianness on the
+// target platform.
+//
+// 2. Quiet NaNs, if supported, are indicated by the leading bit of the
+// significand field being set.
+//
+//===----------------------------------------------------------------------===//
+
+#include "fp_trunc.h"
+
+static __inline dst_t __truncXfYf2__(src_t a) {
+ // Various constants whose values follow from the type parameters.
+ // Any reasonable optimizer will fold and propagate all of these.
+ const int srcBits = sizeof(src_t) * CHAR_BIT;
+ const int srcExpBits = srcBits - srcSigBits - 1;
+ const int srcInfExp = (1 << srcExpBits) - 1;
+ const int srcExpBias = srcInfExp >> 1;
+
+ const src_rep_t srcMinNormal = SRC_REP_C(1) << srcSigBits;
+ const src_rep_t srcSignificandMask = srcMinNormal - 1;
+ const src_rep_t srcInfinity = (src_rep_t)srcInfExp << srcSigBits;
+ const src_rep_t srcSignMask = SRC_REP_C(1) << (srcSigBits + srcExpBits);
+ const src_rep_t srcAbsMask = srcSignMask - 1;
+ const src_rep_t roundMask = (SRC_REP_C(1) << (srcSigBits - dstSigBits)) - 1;
+ const src_rep_t halfway = SRC_REP_C(1) << (srcSigBits - dstSigBits - 1);
+ const src_rep_t srcQNaN = SRC_REP_C(1) << (srcSigBits - 1);
+ const src_rep_t srcNaNCode = srcQNaN - 1;
+
+ const int dstBits = sizeof(dst_t) * CHAR_BIT;
+ const int dstExpBits = dstBits - dstSigBits - 1;
+ const int dstInfExp = (1 << dstExpBits) - 1;
+ const int dstExpBias = dstInfExp >> 1;
+
+ const int underflowExponent = srcExpBias + 1 - dstExpBias;
+ const int overflowExponent = srcExpBias + dstInfExp - dstExpBias;
+ const src_rep_t underflow = (src_rep_t)underflowExponent << srcSigBits;
+ const src_rep_t overflow = (src_rep_t)overflowExponent << srcSigBits;
+
+ const dst_rep_t dstQNaN = DST_REP_C(1) << (dstSigBits - 1);
+ const dst_rep_t dstNaNCode = dstQNaN - 1;
+
+ // Break a into a sign and representation of the absolute value.
+ const src_rep_t aRep = srcToRep(a);
+ const src_rep_t aAbs = aRep & srcAbsMask;
+ const src_rep_t sign = aRep & srcSignMask;
+ dst_rep_t absResult;
+
+ if (aAbs - underflow < aAbs - overflow) {
+ // The exponent of a is within the range of normal numbers in the
+ // destination format. We can convert by simply right-shifting with
+ // rounding and adjusting the exponent.
+ absResult = aAbs >> (srcSigBits - dstSigBits);
+ absResult -= (dst_rep_t)(srcExpBias - dstExpBias) << dstSigBits;
+
+ const src_rep_t roundBits = aAbs & roundMask;
+ // Round to nearest.
+ if (roundBits > halfway)
+ absResult++;
+ // Tie to even.
+ else if (roundBits == halfway)
+ absResult += absResult & 1;
+ } else if (aAbs > srcInfinity) {
+ // a is NaN.
+ // Conjure the result by beginning with infinity, setting the qNaN
+ // bit and inserting the (truncated) trailing NaN field.
+ absResult = (dst_rep_t)dstInfExp << dstSigBits;
+ absResult |= dstQNaN;
+ absResult |=
+ ((aAbs & srcNaNCode) >> (srcSigBits - dstSigBits)) & dstNaNCode;
+ } else if (aAbs >= overflow) {
+ // a overflows to infinity.
+ absResult = (dst_rep_t)dstInfExp << dstSigBits;
+ } else {
+ // a underflows on conversion to the destination type or is an exact
+ // zero. The result may be a denormal or zero. Extract the exponent
+ // to get the shift amount for the denormalization.
+ const int aExp = aAbs >> srcSigBits;
+ const int shift = srcExpBias - dstExpBias - aExp + 1;
+
+ const src_rep_t significand = (aRep & srcSignificandMask) | srcMinNormal;
+
+ // Right shift by the denormalization amount with sticky.
+ if (shift > srcSigBits) {
+ absResult = 0;
+ } else {
+ const bool sticky = significand << (srcBits - shift);
+ src_rep_t denormalizedSignificand = significand >> shift | sticky;
+ absResult = denormalizedSignificand >> (srcSigBits - dstSigBits);
+ const src_rep_t roundBits = denormalizedSignificand & roundMask;
+ // Round to nearest
+ if (roundBits > halfway)
+ absResult++;
+ // Ties to even
+ else if (roundBits == halfway)
+ absResult += absResult & 1;
+ }
+ }
+
+ // Apply the signbit to the absolute value.
+ const dst_rep_t result = absResult | sign >> (srcBits - dstBits);
+ return dstFromRep(result);
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/fp_trunc_impl.inc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
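
The opening test aAbs - underflow < aAbs - overflow above is a branchless unsigned range check: with wraparound arithmetic, (x - lo) < (x - hi) holds exactly when lo <= x < hi (for lo <= hi). A tiny demonstration:

    #include <stdio.h>

    static int in_range(unsigned x, unsigned lo, unsigned hi) {
        return (x - lo) < (x - hi);  // relies on unsigned wraparound
    }

    int main(void) {
        // below, at lo, inside, at hi -> expected: 0 1 1 0
        printf("%d %d %d %d\n", in_range(4, 5, 9), in_range(5, 5, 9),
               in_range(7, 5, 9), in_range(9, 5, 9));
        return 0;
    }
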
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/gcc_personality_v0.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/gcc_personality_v0.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/gcc_personality_v0.c (revision 351984)
@@ -0,0 +1,234 @@
+//===-- gcc_personality_v0.c - Implement __gcc_personality_v0 -------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+#include <unwind.h>
+#if defined(__arm__) && !defined(__ARM_DWARF_EH__) && \
+ !defined(__USING_SJLJ_EXCEPTIONS__)
+// When building with older compilers (e.g. clang <3.9), it is possible that we
+// have a version of unwind.h which does not provide the EHABI declarations
+// which are required for the C personality to conform to the specification. In
+// order to provide forward compatibility for such compilers, we re-declare the
+// necessary interfaces in the helper to permit a standalone compilation of the
+// builtins (which contains the C unwinding personality for historical reasons).
+#include "unwind-ehabi-helpers.h"
+#endif
+
+// Pointer encodings documented at:
+// http://refspecs.freestandards.org/LSB_1.3.0/gLSB/gLSB/ehframehdr.html
+
+#define DW_EH_PE_omit 0xff // no data follows
+
+#define DW_EH_PE_absptr 0x00
+#define DW_EH_PE_uleb128 0x01
+#define DW_EH_PE_udata2 0x02
+#define DW_EH_PE_udata4 0x03
+#define DW_EH_PE_udata8 0x04
+#define DW_EH_PE_sleb128 0x09
+#define DW_EH_PE_sdata2 0x0A
+#define DW_EH_PE_sdata4 0x0B
+#define DW_EH_PE_sdata8 0x0C
+
+#define DW_EH_PE_pcrel 0x10
+#define DW_EH_PE_textrel 0x20
+#define DW_EH_PE_datarel 0x30
+#define DW_EH_PE_funcrel 0x40
+#define DW_EH_PE_aligned 0x50
+#define DW_EH_PE_indirect 0x80 // gcc extension
+
+// read a uleb128 encoded value and advance pointer
+static uintptr_t readULEB128(const uint8_t **data) {
+ uintptr_t result = 0;
+ uintptr_t shift = 0;
+ unsigned char byte;
+ const uint8_t *p = *data;
+ do {
+ byte = *p++;
+ result |= (byte & 0x7f) << shift;
+ shift += 7;
+ } while (byte & 0x80);
+ *data = p;
+ return result;
+}
+
+// read a pointer encoded value and advance pointer
+static uintptr_t readEncodedPointer(const uint8_t **data, uint8_t encoding) {
+ const uint8_t *p = *data;
+ uintptr_t result = 0;
+
+ if (encoding == DW_EH_PE_omit)
+ return 0;
+
+ // first get value
+ switch (encoding & 0x0F) {
+ case DW_EH_PE_absptr:
+ result = *((const uintptr_t *)p);
+ p += sizeof(uintptr_t);
+ break;
+ case DW_EH_PE_uleb128:
+ result = readULEB128(&p);
+ break;
+ case DW_EH_PE_udata2:
+ result = *((const uint16_t *)p);
+ p += sizeof(uint16_t);
+ break;
+ case DW_EH_PE_udata4:
+ result = *((const uint32_t *)p);
+ p += sizeof(uint32_t);
+ break;
+ case DW_EH_PE_udata8:
+ result = *((const uint64_t *)p);
+ p += sizeof(uint64_t);
+ break;
+ case DW_EH_PE_sdata2:
+ result = *((const int16_t *)p);
+ p += sizeof(int16_t);
+ break;
+ case DW_EH_PE_sdata4:
+ result = *((const int32_t *)p);
+ p += sizeof(int32_t);
+ break;
+ case DW_EH_PE_sdata8:
+ result = *((const int64_t *)p);
+ p += sizeof(int64_t);
+ break;
+ case DW_EH_PE_sleb128:
+ default:
+ // not supported
+ compilerrt_abort();
+ break;
+ }
+
+ // then add relative offset
+ switch (encoding & 0x70) {
+ case DW_EH_PE_absptr:
+ // do nothing
+ break;
+ case DW_EH_PE_pcrel:
+ result += (uintptr_t)(*data);
+ break;
+ case DW_EH_PE_textrel:
+ case DW_EH_PE_datarel:
+ case DW_EH_PE_funcrel:
+ case DW_EH_PE_aligned:
+ default:
+ // not supported
+ compilerrt_abort();
+ break;
+ }
+
+ // then apply indirection
+ if (encoding & DW_EH_PE_indirect) {
+ result = *((const uintptr_t *)result);
+ }
+
+ *data = p;
+ return result;
+}
+
+#if defined(__arm__) && !defined(__USING_SJLJ_EXCEPTIONS__) && \
+ !defined(__ARM_DWARF_EH__)
+#define USING_ARM_EHABI 1
+_Unwind_Reason_Code __gnu_unwind_frame(struct _Unwind_Exception *,
+ struct _Unwind_Context *);
+#endif
+
+static inline _Unwind_Reason_Code
+continueUnwind(struct _Unwind_Exception *exceptionObject,
+ struct _Unwind_Context *context) {
+#if USING_ARM_EHABI
+ // On ARM EHABI the personality routine is responsible for actually
+ // unwinding a single stack frame before returning (ARM EHABI Sec. 6.1).
+ if (__gnu_unwind_frame(exceptionObject, context) != _URC_OK)
+ return _URC_FAILURE;
+#endif
+ return _URC_CONTINUE_UNWIND;
+}
+
+// The C compiler makes references to __gcc_personality_v0 in
+// the dwarf unwind information for translation units that use
+// __attribute__((cleanup(xx))) on local variables.
+// This personality routine is called by the system unwinder
+// on each frame as the stack is unwound during a C++ exception
+// throw through a C function compiled with -fexceptions.
+#if __USING_SJLJ_EXCEPTIONS__
+// The setjmp/longjmp-based exceptions personality routine has a
+// different name.
+COMPILER_RT_ABI _Unwind_Reason_Code __gcc_personality_sj0(
+ int version, _Unwind_Action actions, uint64_t exceptionClass,
+ struct _Unwind_Exception *exceptionObject, struct _Unwind_Context *context)
+#elif USING_ARM_EHABI
+// The ARM EHABI personality routine has a different signature.
+COMPILER_RT_ABI _Unwind_Reason_Code __gcc_personality_v0(
+ _Unwind_State state, struct _Unwind_Exception *exceptionObject,
+ struct _Unwind_Context *context)
+#else
+COMPILER_RT_ABI _Unwind_Reason_Code __gcc_personality_v0(
+ int version, _Unwind_Action actions, uint64_t exceptionClass,
+ struct _Unwind_Exception *exceptionObject, struct _Unwind_Context *context)
+#endif
+{
+ // Since C does not have catch clauses, there is nothing to do during
+ // phase 1 (the search phase).
+#if USING_ARM_EHABI
+ // After resuming from a cleanup we should also continue on to the next
+ // frame straight away.
+ if ((state & _US_ACTION_MASK) != _US_UNWIND_FRAME_STARTING)
+#else
+ if (actions & _UA_SEARCH_PHASE)
+#endif
+ return continueUnwind(exceptionObject, context);
+
+ // There is nothing to do if there is no LSDA for this frame.
+ const uint8_t *lsda = (uint8_t *)_Unwind_GetLanguageSpecificData(context);
+ if (lsda == (uint8_t *)0)
+ return continueUnwind(exceptionObject, context);
+
+ uintptr_t pc = (uintptr_t)_Unwind_GetIP(context) - 1;
+ uintptr_t funcStart = (uintptr_t)_Unwind_GetRegionStart(context);
+ uintptr_t pcOffset = pc - funcStart;
+
+ // Parse LSDA header.
+ uint8_t lpStartEncoding = *lsda++;
+ if (lpStartEncoding != DW_EH_PE_omit) {
+ readEncodedPointer(&lsda, lpStartEncoding);
+ }
+ uint8_t ttypeEncoding = *lsda++;
+ if (ttypeEncoding != DW_EH_PE_omit) {
+ readULEB128(&lsda);
+ }
+ // Walk call-site table looking for range that includes current PC.
+ uint8_t callSiteEncoding = *lsda++;
+ uint32_t callSiteTableLength = readULEB128(&lsda);
+ const uint8_t *callSiteTableStart = lsda;
+ const uint8_t *callSiteTableEnd = callSiteTableStart + callSiteTableLength;
+ const uint8_t *p = callSiteTableStart;
+ while (p < callSiteTableEnd) {
+ uintptr_t start = readEncodedPointer(&p, callSiteEncoding);
+ uintptr_t length = readEncodedPointer(&p, callSiteEncoding);
+ uintptr_t landingPad = readEncodedPointer(&p, callSiteEncoding);
+ readULEB128(&p); // action value not used for C code
+ if (landingPad == 0)
+ continue; // no landing pad for this entry
+ if ((start <= pcOffset) && (pcOffset < (start + length))) {
+ // Found landing pad for the PC.
+ // Set the instruction pointer so that we re-enter the function
+ // at the landing pad. The landing pad is created by the compiler
+ // to take two parameters in registers.
+ _Unwind_SetGR(context, __builtin_eh_return_data_regno(0),
+ (uintptr_t)exceptionObject);
+ _Unwind_SetGR(context, __builtin_eh_return_data_regno(1), 0);
+ _Unwind_SetIP(context, (funcStart + landingPad));
+ return _URC_INSTALL_CONTEXT;
+ }
+ }
+
+ // No landing pad found, continue unwinding.
+ return continueUnwind(exceptionObject, context);
+}
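
To see this personality routine in action, a C translation unit only needs a cleanup attribute and -fexceptions; a minimal sketch (the function names are illustrative):

    #include <stdio.h>

    static void drop_lock(int *lock) {      /* also runs during unwinding */
        printf("released lock %d\n", *lock);
    }

    /* If body() throws a C++ exception, the unwinder calls
       __gcc_personality_v0 for this frame, which finds the landing pad
       that invokes drop_lock() before unwinding continues upward. */
    void with_lock(void (*body)(void)) {
        int lock __attribute__((cleanup(drop_lock))) = 42;
        body();
    }

Compile with -fexceptions so the compiler emits the LSDA and the personality reference for this frame.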
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/gcc_personality_v0.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/common_entry_exit_abi1.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/common_entry_exit_abi1.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/common_entry_exit_abi1.S (revision 351984)
@@ -0,0 +1,102 @@
+//===----------------------Hexagon builtin routine ------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// Functions that implement common sequences in function prologues and epilogues
+// used to save code size
+
+ .macro FUNCTION_BEGIN name
+ .text
+ .globl \name
+ .type \name, @function
+ .falign
+\name:
+ .endm
+
+ .macro FUNCTION_END name
+ .size \name, . - \name
+ .endm
+
+ .macro FALLTHROUGH_TAIL_CALL name0 name1
+ .size \name0, . - \name0
+ .globl \name1
+ .type \name1, @function
+ .falign
+\name1:
+ .endm
+
+
+
+
+// Save r25:24 at fp+#-8 and r27:26 at fp+#-16.
+
+
+
+
+// The compiler knows that the __save_* functions clobber LR. No other
+// registers should be used without informing the compiler.
+
+// Since we can only issue one store per packet, we don't hurt performance by
+// simply jumping to the right point in this sequence of stores.
+
+FUNCTION_BEGIN __save_r24_through_r27
+ memd(fp+#-16) = r27:26
+FALLTHROUGH_TAIL_CALL __save_r24_through_r27 __save_r24_through_r25
+ {
+ memd(fp+#-8) = r25:24
+ jumpr lr
+ }
+FUNCTION_END __save_r24_through_r25
+
+
+
+
+// For each of the *_before_tailcall functions, jumpr lr is executed in parallel
+// with deallocframe. That way, the return gets the old value of lr, which is
+// where these functions need to return, and at the same time, lr gets the value
+// it needs going into the tail call.
+
+FUNCTION_BEGIN __restore_r24_through_r27_and_deallocframe_before_tailcall
+ r27:26 = memd(fp+#-16)
+FALLTHROUGH_TAIL_CALL __restore_r24_through_r27_and_deallocframe_before_tailcall __restore_r24_through_r25_and_deallocframe_before_tailcall
+ {
+ r25:24 = memd(fp+#-8)
+ deallocframe
+ jumpr lr
+ }
+FUNCTION_END __restore_r24_through_r25_and_deallocframe_before_tailcall
+
+
+
+
+// Here we use the extra load bandwidth to restore LR early, allowing the return
+// to occur in parallel with the deallocframe.
+
+FUNCTION_BEGIN __restore_r24_through_r27_and_deallocframe
+ {
+ lr = memw(fp+#4)
+ r27:26 = memd(fp+#-16)
+ }
+ {
+ r25:24 = memd(fp+#-8)
+ deallocframe
+ jumpr lr
+ }
+FUNCTION_END __restore_r24_through_r27_and_deallocframe
+
+
+
+
+// Here the load bandwidth is maximized.
+
+FUNCTION_BEGIN __restore_r24_through_r25_and_deallocframe
+ {
+ r25:24 = memd(fp+#-8)
+ deallocframe
+ }
+ jumpr lr
+FUNCTION_END __restore_r24_through_r25_and_deallocframe
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/common_entry_exit_abi1.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/common_entry_exit_abi2.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/common_entry_exit_abi2.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/common_entry_exit_abi2.S (revision 351984)
@@ -0,0 +1,267 @@
+//===----------------------Hexagon builtin routine ------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// Functions that implement common sequences in function prologues and epilogues
+// used to save code size
+
+ .macro FUNCTION_BEGIN name
+ .p2align 2
+ .section .text.\name,"ax",@progbits
+ .globl \name
+ .type \name, @function
+\name:
+ .endm
+
+ .macro FUNCTION_END name
+ .size \name, . - \name
+ .endm
+
+ .macro FALLTHROUGH_TAIL_CALL name0 name1
+ .p2align 2
+ .size \name0, . - \name0
+ .globl \name1
+ .type \name1, @function
+\name1:
+ .endm
+
+
+
+
+// Save r17:16 at fp+#-8, r19:18 at fp+#-16, r21:20 at fp+#-24, r23:22 at
+// fp+#-32, r25:24 at fp+#-40, and r27:26 at fp+#-48.
+// The compiler knows that the __save_* functions clobber LR. No other
+// registers should be used without informing the compiler.
+
+FUNCTION_BEGIN __save_r16_through_r27
+ {
+ memd(fp+#-48) = r27:26
+ memd(fp+#-40) = r25:24
+ }
+ {
+ memd(fp+#-32) = r23:22
+ memd(fp+#-24) = r21:20
+ }
+ {
+ memd(fp+#-16) = r19:18
+ memd(fp+#-8) = r17:16
+ jumpr lr
+ }
+FUNCTION_END __save_r16_through_r27
+
+FUNCTION_BEGIN __save_r16_through_r25
+ {
+ memd(fp+#-40) = r25:24
+ memd(fp+#-32) = r23:22
+ }
+ {
+ memd(fp+#-24) = r21:20
+ memd(fp+#-16) = r19:18
+ }
+ {
+ memd(fp+#-8) = r17:16
+ jumpr lr
+ }
+FUNCTION_END __save_r16_through_r25
+
+FUNCTION_BEGIN __save_r16_through_r23
+ {
+ memd(fp+#-32) = r23:22
+ memd(fp+#-24) = r21:20
+ }
+ {
+ memd(fp+#-16) = r19:18
+ memd(fp+#-8) = r17:16
+ jumpr lr
+ }
+FUNCTION_END __save_r16_through_r23
+
+FUNCTION_BEGIN __save_r16_through_r21
+ {
+ memd(fp+#-24) = r21:20
+ memd(fp+#-16) = r19:18
+ }
+ {
+ memd(fp+#-8) = r17:16
+ jumpr lr
+ }
+FUNCTION_END __save_r16_through_r21
+
+FUNCTION_BEGIN __save_r16_through_r19
+ {
+ memd(fp+#-16) = r19:18
+ memd(fp+#-8) = r17:16
+ jumpr lr
+ }
+FUNCTION_END __save_r16_through_r19
+
+FUNCTION_BEGIN __save_r16_through_r17
+ {
+ memd(fp+#-8) = r17:16
+ jumpr lr
+ }
+FUNCTION_END __save_r16_through_r17
+
+// For each of the *_before_tailcall functions, jumpr lr is executed in parallel
+// with deallocframe. That way, the return gets the old value of lr, which is
+// where these functions need to return, and at the same time, lr gets the value
+// it needs going into the tail call.
+
+
+FUNCTION_BEGIN __restore_r16_through_r27_and_deallocframe_before_tailcall
+ r27:26 = memd(fp+#-48)
+ {
+ r25:24 = memd(fp+#-40)
+ r23:22 = memd(fp+#-32)
+ }
+ {
+ r21:20 = memd(fp+#-24)
+ r19:18 = memd(fp+#-16)
+ }
+ {
+ r17:16 = memd(fp+#-8)
+ deallocframe
+ jumpr lr
+ }
+FUNCTION_END __restore_r16_through_r27_and_deallocframe_before_tailcall
+
+FUNCTION_BEGIN __restore_r16_through_r25_and_deallocframe_before_tailcall
+ {
+ r25:24 = memd(fp+#-40)
+ r23:22 = memd(fp+#-32)
+ }
+ {
+ r21:20 = memd(fp+#-24)
+ r19:18 = memd(fp+#-16)
+ }
+ {
+ r17:16 = memd(fp+#-8)
+ deallocframe
+ jumpr lr
+ }
+FUNCTION_END __restore_r16_through_r25_and_deallocframe_before_tailcall
+
+FUNCTION_BEGIN __restore_r16_through_r23_and_deallocframe_before_tailcall
+ {
+ r23:22 = memd(fp+#-32)
+ r21:20 = memd(fp+#-24)
+ }
+ r19:18 = memd(fp+#-16)
+ {
+ r17:16 = memd(fp+#-8)
+ deallocframe
+ jumpr lr
+ }
+FUNCTION_END __restore_r16_through_r23_and_deallocframe_before_tailcall
+
+
+FUNCTION_BEGIN __restore_r16_through_r21_and_deallocframe_before_tailcall
+ {
+ r21:20 = memd(fp+#-24)
+ r19:18 = memd(fp+#-16)
+ }
+ {
+ r17:16 = memd(fp+#-8)
+ deallocframe
+ jumpr lr
+ }
+FUNCTION_END __restore_r16_through_r21_and_deallocframe_before_tailcall
+
+FUNCTION_BEGIN __restore_r16_through_r19_and_deallocframe_before_tailcall
+ r19:18 = memd(fp+#-16)
+ {
+ r17:16 = memd(fp+#-8)
+ deallocframe
+ jumpr lr
+ }
+FUNCTION_END __restore_r16_through_r19_and_deallocframe_before_tailcall
+
+FUNCTION_BEGIN __restore_r16_through_r17_and_deallocframe_before_tailcall
+ {
+ r17:16 = memd(fp+#-8)
+ deallocframe
+ jumpr lr
+ }
+FUNCTION_END __restore_r16_through_r17_and_deallocframe_before_tailcall
+
+
+FUNCTION_BEGIN __restore_r16_through_r27_and_deallocframe
+ r27:26 = memd(fp+#-48)
+ {
+ r25:24 = memd(fp+#-40)
+ r23:22 = memd(fp+#-32)
+ }
+ {
+ r21:20 = memd(fp+#-24)
+ r19:18 = memd(fp+#-16)
+ }
+ {
+ r17:16 = memd(fp+#-8)
+ dealloc_return
+ }
+FUNCTION_END __restore_r16_through_r27_and_deallocframe
+
+FUNCTION_BEGIN __restore_r16_through_r25_and_deallocframe
+ {
+ r25:24 = memd(fp+#-40)
+ r23:22 = memd(fp+#-32)
+ }
+ {
+ r21:20 = memd(fp+#-24)
+ r19:18 = memd(fp+#-16)
+ }
+ {
+ r17:16 = memd(fp+#-8)
+ dealloc_return
+ }
+FUNCTION_END __restore_r16_through_r25_and_deallocframe
+
+FUNCTION_BEGIN __restore_r16_through_r23_and_deallocframe
+ {
+ r23:22 = memd(fp+#-32)
+ }
+ {
+ r21:20 = memd(fp+#-24)
+ r19:18 = memd(fp+#-16)
+ }
+ {
+ r17:16 = memd(fp+#-8)
+ dealloc_return
+ }
+FUNCTION_END __restore_r16_through_r23_and_deallocframe
+
+FUNCTION_BEGIN __restore_r16_through_r21_and_deallocframe
+ {
+ r21:20 = memd(fp+#-24)
+ r19:18 = memd(fp+#-16)
+ }
+ {
+ r17:16 = memd(fp+#-8)
+ dealloc_return
+ }
+FUNCTION_END __restore_r16_through_r21_and_deallocframe
+
+FUNCTION_BEGIN __restore_r16_through_r19_and_deallocframe
+ {
+ r19:18 = memd(fp+#-16)
+ r17:16 = memd(fp+#-8)
+ }
+ {
+ dealloc_return
+ }
+FUNCTION_END __restore_r16_through_r19_and_deallocframe
+
+FUNCTION_BEGIN __restore_r16_through_r17_and_deallocframe
+ {
+ r17:16 = memd(fp+#-8)
+ dealloc_return
+ }
+FUNCTION_END __restore_r16_through_r17_and_deallocframe
+
+FUNCTION_BEGIN __deallocframe
+ dealloc_return
+FUNCTION_END __deallocframe
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/common_entry_exit_abi2.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/common_entry_exit_legacy.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/common_entry_exit_legacy.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/common_entry_exit_legacy.S (revision 351984)
@@ -0,0 +1,156 @@
+//===----------------------Hexagon builtin routine ------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+
+// Functions that implement common sequences in function prologues and epilogues
+// used to save code size
+
+ .macro FUNCTION_BEGIN name
+ .text
+ .globl \name
+ .type \name, @function
+ .falign
+\name:
+ .endm
+
+ .macro FUNCTION_END name
+ .size \name, . - \name
+ .endm
+
+ .macro FALLTHROUGH_TAIL_CALL name0 name1
+ .size \name0, . - \name0
+ .globl \name1
+ .type \name1, @function
+ .falign
+\name1:
+ .endm
+
+
+
+
+// Save r27:26 at fp+#-8, r25:24 at fp+#-16, r23:22 at fp+#-24, r21:20 at
+// fp+#-32, r19:18 at fp+#-40, and r17:16 at fp+#-48.
+
+
+
+
+// The compiler knows that the __save_* functions clobber LR. No other
+// registers should be used without informing the compiler.
+
+// Since we can only issue one store per packet, we don't hurt performance by
+// simply jumping to the right point in this sequence of stores.
+
+FUNCTION_BEGIN __save_r27_through_r16
+ memd(fp+#-48) = r17:16
+FALLTHROUGH_TAIL_CALL __save_r27_through_r16 __save_r27_through_r18
+ memd(fp+#-40) = r19:18
+FALLTHROUGH_TAIL_CALL __save_r27_through_r18 __save_r27_through_r20
+ memd(fp+#-32) = r21:20
+FALLTHROUGH_TAIL_CALL __save_r27_through_r20 __save_r27_through_r22
+ memd(fp+#-24) = r23:22
+FALLTHROUGH_TAIL_CALL __save_r27_through_r22 __save_r27_through_r24
+ memd(fp+#-16) = r25:24
+ {
+ memd(fp+#-8) = r27:26
+ jumpr lr
+ }
+FUNCTION_END __save_r27_through_r24
+
+
+
+
+// For each of the *_before_sibcall functions, jumpr lr is executed in parallel
+// with deallocframe. That way, the return gets the old value of lr, which is
+// where these functions need to return, and at the same time, lr gets the value
+// it needs going into the sibcall.
+
+FUNCTION_BEGIN __restore_r27_through_r20_and_deallocframe_before_sibcall
+ {
+ r21:20 = memd(fp+#-32)
+ r23:22 = memd(fp+#-24)
+ }
+FALLTHROUGH_TAIL_CALL __restore_r27_through_r20_and_deallocframe_before_sibcall __restore_r27_through_r24_and_deallocframe_before_sibcall
+ {
+ r25:24 = memd(fp+#-16)
+ jump __restore_r27_through_r26_and_deallocframe_before_sibcall
+ }
+FUNCTION_END __restore_r27_through_r24_and_deallocframe_before_sibcall
+
+
+
+
+FUNCTION_BEGIN __restore_r27_through_r16_and_deallocframe_before_sibcall
+ r17:16 = memd(fp+#-48)
+FALLTHROUGH_TAIL_CALL __restore_r27_through_r16_and_deallocframe_before_sibcall __restore_r27_through_r18_and_deallocframe_before_sibcall
+ {
+ r19:18 = memd(fp+#-40)
+ r21:20 = memd(fp+#-32)
+ }
+FALLTHROUGH_TAIL_CALL __restore_r27_through_r18_and_deallocframe_before_sibcall __restore_r27_through_r22_and_deallocframe_before_sibcall
+ {
+ r23:22 = memd(fp+#-24)
+ r25:24 = memd(fp+#-16)
+ }
+FALLTHROUGH_TAIL_CALL __restore_r27_through_r22_and_deallocframe_before_sibcall __restore_r27_through_r26_and_deallocframe_before_sibcall
+ {
+ r27:26 = memd(fp+#-8)
+ deallocframe
+ jumpr lr
+ }
+FUNCTION_END __restore_r27_through_r26_and_deallocframe_before_sibcall
+
+
+
+
+// Here we use the extra load bandwidth to restore LR early, allowing the return
+// to occur in parallel with the deallocframe.
+
+FUNCTION_BEGIN __restore_r27_through_r16_and_deallocframe
+ {
+ r17:16 = memd(fp+#-48)
+ r19:18 = memd(fp+#-40)
+ }
+FALLTHROUGH_TAIL_CALL __restore_r27_through_r16_and_deallocframe __restore_r27_through_r20_and_deallocframe
+ {
+ r21:20 = memd(fp+#-32)
+ r23:22 = memd(fp+#-24)
+ }
+FALLTHROUGH_TAIL_CALL __restore_r27_through_r20_and_deallocframe __restore_r27_through_r24_and_deallocframe
+ {
+ lr = memw(fp+#4)
+ r25:24 = memd(fp+#-16)
+ }
+ {
+ r27:26 = memd(fp+#-8)
+ deallocframe
+ jumpr lr
+ }
+FUNCTION_END __restore_r27_through_r24_and_deallocframe
+
+
+
+
+// Here the load bandwidth is maximized for all three functions.
+
+FUNCTION_BEGIN __restore_r27_through_r18_and_deallocframe
+ {
+ r19:18 = memd(fp+#-40)
+ r21:20 = memd(fp+#-32)
+ }
+FALLTHROUGH_TAIL_CALL __restore_r27_through_r18_and_deallocframe __restore_r27_through_r22_and_deallocframe
+ {
+ r23:22 = memd(fp+#-24)
+ r25:24 = memd(fp+#-16)
+ }
+FALLTHROUGH_TAIL_CALL __restore_r27_through_r22_and_deallocframe __restore_r27_through_r26_and_deallocframe
+ {
+ r27:26 = memd(fp+#-8)
+ deallocframe
+ }
+ jumpr lr
+FUNCTION_END __restore_r27_through_r26_and_deallocframe
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/common_entry_exit_legacy.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/dfaddsub.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/dfaddsub.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/dfaddsub.S (revision 351984)
@@ -0,0 +1,396 @@
+//===----------------------Hexagon builtin routine ------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// Double Precision Add/Subtract
+
+#define A r1:0
+#define AH r1
+#define AL r0
+#define B r3:2
+#define BH r3
+#define BL r2
+
+#define EXPA r4
+#define EXPB r5
+#define EXPB_A r5:4
+
+#define ZTMP r7:6
+#define ZTMPH r7
+#define ZTMPL r6
+
+#define ATMP r13:12
+#define ATMPH r13
+#define ATMPL r12
+
+#define BTMP r9:8
+#define BTMPH r9
+#define BTMPL r8
+
+#define ATMP2 r11:10
+#define ATMP2H r11
+#define ATMP2L r10
+
+#define EXPDIFF r15
+#define EXTRACTOFF r14
+#define EXTRACTAMT r15:14
+
+#define TMP r28
+
+#define MANTBITS 52
+#define HI_MANTBITS 20
+#define EXPBITS 11
+#define BIAS 1024
+#define MANTISSA_TO_INT_BIAS 52
+#define SR_BIT_INEXACT 5
+
+#ifndef SR_ROUND_OFF
+#define SR_ROUND_OFF 22
+#endif
+
+#define NORMAL p3
+#define BIGB p2
+
+#define Q6_ALIAS(TAG) .global __qdsp_##TAG ; .set __qdsp_##TAG, __hexagon_##TAG
+#define FAST_ALIAS(TAG) .global __hexagon_fast_##TAG ; .set __hexagon_fast_##TAG, __hexagon_##TAG
+#define FAST2_ALIAS(TAG) .global __hexagon_fast2_##TAG ; .set __hexagon_fast2_##TAG, __hexagon_##TAG
+#define END(TAG) .size TAG,.-TAG
+
+ .text
+ .global __hexagon_adddf3
+ .global __hexagon_subdf3
+ .type __hexagon_adddf3, @function
+ .type __hexagon_subdf3, @function
+
+Q6_ALIAS(adddf3)
+FAST_ALIAS(adddf3)
+FAST2_ALIAS(adddf3)
+Q6_ALIAS(subdf3)
+FAST_ALIAS(subdf3)
+FAST2_ALIAS(subdf3)
+
+ .p2align 5
+__hexagon_adddf3:
+ {
+ EXPA = extractu(AH,#EXPBITS,#HI_MANTBITS)
+ EXPB = extractu(BH,#EXPBITS,#HI_MANTBITS)
+ ATMP = combine(##0x20000000,#0)
+ }
+ {
+ NORMAL = dfclass(A,#2)
+ NORMAL = dfclass(B,#2)
+ BTMP = ATMP
+ BIGB = cmp.gtu(EXPB,EXPA) // Is B substantially greater than A?
+ }
+ {
+ if (!NORMAL) jump .Ladd_abnormal // If abnormal, go to special code
+ if (BIGB) A = B // if B >> A, swap A and B
+ if (BIGB) B = A // If B >> A, swap A and B
+ if (BIGB) EXPB_A = combine(EXPA,EXPB) // swap exponents
+ }
+ {
+ ATMP = insert(A,#MANTBITS,#EXPBITS-2) // Q1.62
+ BTMP = insert(B,#MANTBITS,#EXPBITS-2) // Q1.62
+ EXPDIFF = sub(EXPA,EXPB)
+ ZTMP = combine(#62,#1)
+ }
+#undef BIGB
+#undef NORMAL
+#define B_POS p3
+#define A_POS p2
+#define NO_STICKIES p1
+.Ladd_continue:
+ {
+ EXPDIFF = min(EXPDIFF,ZTMPH) // If exponent difference >= ~60,
+ // will collapse to sticky bit
+ ATMP2 = neg(ATMP)
+ A_POS = cmp.gt(AH,#-1)
+ EXTRACTOFF = #0
+ }
+ {
+ if (!A_POS) ATMP = ATMP2
+ ATMP2 = extractu(BTMP,EXTRACTAMT)
+ BTMP = ASR(BTMP,EXPDIFF)
+#undef EXTRACTAMT
+#undef EXPDIFF
+#undef EXTRACTOFF
+#define ZERO r15:14
+ ZERO = #0
+ }
+ {
+ NO_STICKIES = cmp.eq(ATMP2,ZERO)
+ if (!NO_STICKIES.new) BTMPL = or(BTMPL,ZTMPL)
+ EXPB = add(EXPA,#-BIAS-60)
+ B_POS = cmp.gt(BH,#-1)
+ }
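
The extractu/or pair above folds every bit shifted out of the smaller operand into a single sticky bit, so rounding can still tell the result is inexact. The same idea in C (a sketch; the helper name is illustrative):

    #include <stdint.h>

    /* Right shift that ORs all lost bits into the LSB ("sticky" shift). */
    static uint64_t sticky_shr(uint64_t m, unsigned s) {
        uint64_t lost = (s >= 64) ? m : (m & ((1ULL << s) - 1));
        uint64_t kept = (s >= 64) ? 0 : (m >> s);
        return kept | (lost != 0);   /* inexactness survives the shift */
    }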
+ {
+ ATMP = add(ATMP,BTMP) // ADD!!!
+ ATMP2 = sub(ATMP,BTMP) // Negate and ADD --> SUB!!!
+ ZTMP = combine(#54,##2045)
+ }
+ {
+ p0 = cmp.gtu(EXPA,ZTMPH) // must be pretty high in case of large cancellation
+ p0 = !cmp.gtu(EXPA,ZTMPL)
+ if (!p0.new) jump:nt .Ladd_ovf_unf
+ if (!B_POS) ATMP = ATMP2 // if B neg, pick difference
+ }
+ {
+ A = convert_d2df(ATMP) // Convert to Double Precision, taking care of flags, etc. So nice!
+ p0 = cmp.eq(ATMPH,#0)
+ p0 = cmp.eq(ATMPL,#0)
+ if (p0.new) jump:nt .Ladd_zero // or maybe conversion handles zero case correctly?
+ }
+ {
+ AH += asl(EXPB,#HI_MANTBITS)
+ jumpr r31
+ }
+ .falign
+__hexagon_subdf3:
+ {
+ BH = togglebit(BH,#31)
+ jump __qdsp_adddf3
+ }
+
+
+ .falign
+.Ladd_zero:
+ // True zero, full cancellation
+ // +0 unless round towards negative infinity
+ {
+ TMP = USR
+ A = #0
+ BH = #1
+ }
+ {
+ TMP = extractu(TMP,#2,#22)
+ BH = asl(BH,#31)
+ }
+ {
+ p0 = cmp.eq(TMP,#2)
+ if (p0.new) AH = xor(AH,BH)
+ jumpr r31
+ }
+ .falign
+.Ladd_ovf_unf:
+ // Overflow or Denormal is possible
+ // Good news: Underflow flag is not possible!
+
+ // ATMP has 2's complement value
+ //
+ // EXPA has A's exponent, EXPB has EXPA-BIAS-60
+ //
+ // Convert, extract exponent, add adjustment.
+ // If > 2046, overflow
+ // If <= 0, denormal
+ //
+ // Note that we've not done our zero check yet, so do that too
+
+ {
+ A = convert_d2df(ATMP)
+ p0 = cmp.eq(ATMPH,#0)
+ p0 = cmp.eq(ATMPL,#0)
+ if (p0.new) jump:nt .Ladd_zero
+ }
+ {
+ TMP = extractu(AH,#EXPBITS,#HI_MANTBITS)
+ AH += asl(EXPB,#HI_MANTBITS)
+ }
+ {
+ EXPB = add(EXPB,TMP)
+ B = combine(##0x00100000,#0)
+ }
+ {
+ p0 = cmp.gt(EXPB,##BIAS+BIAS-2)
+ if (p0.new) jump:nt .Ladd_ovf
+ }
+ {
+ p0 = cmp.gt(EXPB,#0)
+ if (p0.new) jumpr:t r31
+ TMP = sub(#1,EXPB)
+ }
+ {
+ B = insert(A,#MANTBITS,#0)
+ A = ATMP
+ }
+ {
+ B = lsr(B,TMP)
+ }
+ {
+ A = insert(B,#63,#0)
+ jumpr r31
+ }
+ .falign
+.Ladd_ovf:
+ // We get either max finite value or infinity. Either way, overflow+inexact
+ {
+ A = ATMP // 2's complement value
+ TMP = USR
+ ATMP = combine(##0x7fefffff,#-1) // positive max finite
+ }
+ {
+ EXPB = extractu(TMP,#2,#SR_ROUND_OFF) // rounding bits
+ TMP = or(TMP,#0x28) // inexact + overflow
+ BTMP = combine(##0x7ff00000,#0) // positive infinity
+ }
+ {
+ USR = TMP
+ EXPB ^= lsr(AH,#31) // Does sign match rounding?
+ TMP = EXPB // unmodified rounding mode
+ }
+ {
+ p0 = !cmp.eq(TMP,#1) // If not round-to-zero and
+ p0 = !cmp.eq(EXPB,#2) // Not rounding the other way,
+ if (p0.new) ATMP = BTMP // we should get infinity
+ }
+ {
+ A = insert(ATMP,#63,#0) // insert inf/maxfinite, leave sign
+ }
+ {
+ p0 = dfcmp.eq(A,A)
+ jumpr r31
+ }
+
+.Ladd_abnormal:
+ {
+ ATMP = extractu(A,#63,#0) // strip off sign
+ BTMP = extractu(B,#63,#0) // strip off sign
+ }
+ {
+ p3 = cmp.gtu(ATMP,BTMP)
+ if (!p3.new) A = B // sort values
+ if (!p3.new) B = A // sort values
+ }
+ {
+ // Any NaN --> NaN, possibly raise invalid if sNaN
+ p0 = dfclass(A,#0x0f) // A not NaN?
+ if (!p0.new) jump:nt .Linvalid_nan_add
+ if (!p3) ATMP = BTMP
+ if (!p3) BTMP = ATMP
+ }
+ {
+ // Infinity + non-infinity number is infinity
+ // Infinity + infinity --> inf or nan
+ p1 = dfclass(A,#0x08) // A is infinity
+ if (p1.new) jump:nt .Linf_add
+ }
+ {
+ p2 = dfclass(B,#0x01) // B is zero
+ if (p2.new) jump:nt .LB_zero // so return A or special 0+0
+ ATMP = #0
+ }
+ // We are left with adding one or more subnormals
+ {
+ p0 = dfclass(A,#4)
+ if (p0.new) jump:nt .Ladd_two_subnormal
+ ATMP = combine(##0x20000000,#0)
+ }
+ {
+ EXPA = extractu(AH,#EXPBITS,#HI_MANTBITS)
+ EXPB = #1
+ // BTMP already ABS(B)
+ BTMP = asl(BTMP,#EXPBITS-2)
+ }
+#undef ZERO
+#define EXTRACTOFF r14
+#define EXPDIFF r15
+ {
+ ATMP = insert(A,#MANTBITS,#EXPBITS-2)
+ EXPDIFF = sub(EXPA,EXPB)
+ ZTMP = combine(#62,#1)
+ jump .Ladd_continue
+ }
+
+.Ladd_two_subnormal:
+ {
+ ATMP = extractu(A,#63,#0)
+ BTMP = extractu(B,#63,#0)
+ }
+ {
+ ATMP = neg(ATMP)
+ BTMP = neg(BTMP)
+ p0 = cmp.gt(AH,#-1)
+ p1 = cmp.gt(BH,#-1)
+ }
+ {
+ if (p0) ATMP = A
+ if (p1) BTMP = B
+ }
+ {
+ ATMP = add(ATMP,BTMP)
+ }
+ {
+ BTMP = neg(ATMP)
+ p0 = cmp.gt(ATMPH,#-1)
+ B = #0
+ }
+ {
+ if (!p0) A = BTMP
+ if (p0) A = ATMP
+ BH = ##0x80000000
+ }
+ {
+ if (!p0) AH = or(AH,BH)
+ p0 = dfcmp.eq(A,B)
+ if (p0.new) jump:nt .Lzero_plus_zero
+ }
+ {
+ jumpr r31
+ }
+
+.Linvalid_nan_add:
+ {
+ TMP = convert_df2sf(A) // will generate invalid if sNaN
+ p0 = dfclass(B,#0x0f) // if B is not NaN
+ if (p0.new) B = A // make it whatever A is
+ }
+ {
+ BL = convert_df2sf(B) // will generate invalid if sNaN
+ A = #-1
+ jumpr r31
+ }
+ .falign
+.LB_zero:
+ {
+ p0 = dfcmp.eq(ATMP,A) // is A also zero?
+ if (!p0.new) jumpr:t r31 // If not, just return A
+ }
+ // 0 + 0 is special.
+ // If the values are bitwise equal, they have the same sign, which is fine
+ // for all rounding modes.
+ // If they differ in sign, we return +0 for all rounding modes except
+ // round-down, which returns -0.
+.Lzero_plus_zero:
+ {
+ p0 = cmp.eq(A,B)
+ if (p0.new) jumpr:t r31
+ }
+ {
+ TMP = USR
+ }
+ {
+ TMP = extractu(TMP,#2,#SR_ROUND_OFF)
+ A = #0
+ }
+ {
+ p0 = cmp.eq(TMP,#2)
+ if (p0.new) AH = ##0x80000000
+ jumpr r31
+ }
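
The rounding-mode dependence handled here is observable from C; a minimal sketch using the standard fenv interface (FENV_ACCESS pragma support varies by compiler):

    #include <fenv.h>
    #include <stdio.h>

    int main(void) {
        #pragma STDC FENV_ACCESS ON
        fesetround(FE_DOWNWARD);   /* round toward negative infinity   */
        double z = 1.0 + (-1.0);   /* exact zero from full cancellation */
        printf("%g\n", z);         /* prints -0; +0 in all other modes  */
        return 0;
    }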
+.Linf_add:
+ // adding infinities is only OK if they are equal
+ {
+ p0 = !cmp.eq(AH,BH) // Do they have different signs
+ p0 = dfclass(B,#8) // And is B also infinite?
+ if (!p0.new) jumpr:t r31 // If not, just a normal inf
+ }
+ {
+ BL = ##0x7f800001 // sNAN
+ }
+ {
+ A = convert_sf2df(BL) // trigger invalid, set NaN
+ jumpr r31
+ }
+END(__hexagon_adddf3)
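
Callers normally reach these entry points through ordinary double arithmetic, but they can also be exercised directly; a sketch assuming a Hexagon toolchain that links these builtins:

    extern double __hexagon_adddf3(double, double);
    extern double __hexagon_subdf3(double, double);

    int check_addsub(void) {
        double s = __hexagon_adddf3(1.5, 2.25);  /* 3.75 */
        double d = __hexagon_subdf3(s, 0.75);    /* 3.0, via adddf3 with
                                                    the sign of B flipped */
        return s == 3.75 && d == 3.0;
    }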
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/dfaddsub.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/dfdiv.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/dfdiv.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/dfdiv.S (revision 351984)
@@ -0,0 +1,491 @@
+//===----------------------Hexagon builtin routine ------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// Double Precision Divide
+
+#define A r1:0
+#define AH r1
+#define AL r0
+
+#define B r3:2
+#define BH r3
+#define BL r2
+
+#define Q r5:4
+#define QH r5
+#define QL r4
+
+#define PROD r7:6
+#define PRODHI r7
+#define PRODLO r6
+
+#define SFONE r8
+#define SFDEN r9
+#define SFERROR r10
+#define SFRECIP r11
+
+#define EXPBA r13:12
+#define EXPB r13
+#define EXPA r12
+
+#define REMSUB2 r15:14
+
+
+
+#define SIGN r28
+
+#define Q_POSITIVE p3
+#define NORMAL p2
+#define NO_OVF_UNF p1
+#define P_TMP p0
+
+#define RECIPEST_SHIFT 3
+#define QADJ 61
+
+#define DFCLASS_NORMAL 0x02
+#define DFCLASS_NUMBER 0x0F
+#define DFCLASS_INFINITE 0x08
+#define DFCLASS_ZERO 0x01
+#define DFCLASS_NONZERO (DFCLASS_NUMBER ^ DFCLASS_ZERO)
+#define DFCLASS_NONINFINITE (DFCLASS_NUMBER ^ DFCLASS_INFINITE)
+
+#define DF_MANTBITS 52
+#define DF_EXPBITS 11
+#define SF_MANTBITS 23
+#define SF_EXPBITS 8
+#define DF_BIAS 0x3ff
+
+#define SR_ROUND_OFF 22
+
+#define Q6_ALIAS(TAG) .global __qdsp_##TAG ; .set __qdsp_##TAG, __hexagon_##TAG
+#define FAST_ALIAS(TAG) .global __hexagon_fast_##TAG ; .set __hexagon_fast_##TAG, __hexagon_##TAG
+#define FAST2_ALIAS(TAG) .global __hexagon_fast2_##TAG ; .set __hexagon_fast2_##TAG, __hexagon_##TAG
+#define END(TAG) .size TAG,.-TAG
+
+ .text
+ .global __hexagon_divdf3
+ .type __hexagon_divdf3,@function
+ Q6_ALIAS(divdf3)
+ FAST_ALIAS(divdf3)
+ FAST2_ALIAS(divdf3)
+ .p2align 5
+__hexagon_divdf3:
+ {
+ NORMAL = dfclass(A,#DFCLASS_NORMAL)
+ NORMAL = dfclass(B,#DFCLASS_NORMAL)
+ EXPBA = combine(BH,AH)
+ SIGN = xor(AH,BH)
+ }
+#undef A
+#undef AH
+#undef AL
+#undef B
+#undef BH
+#undef BL
+#define REM r1:0
+#define REMHI r1
+#define REMLO r0
+#define DENOM r3:2
+#define DENOMHI r3
+#define DENOMLO r2
+ {
+ if (!NORMAL) jump .Ldiv_abnormal
+ PROD = extractu(DENOM,#SF_MANTBITS,#DF_MANTBITS-SF_MANTBITS)
+ SFONE = ##0x3f800001
+ }
+ {
+ SFDEN = or(SFONE,PRODLO)
+ EXPB = extractu(EXPB,#DF_EXPBITS,#DF_MANTBITS-32)
+ EXPA = extractu(EXPA,#DF_EXPBITS,#DF_MANTBITS-32)
+ Q_POSITIVE = cmp.gt(SIGN,#-1)
+ }
+#undef SIGN
+#define ONE r28
+.Ldenorm_continue:
+ {
+ SFRECIP,P_TMP = sfrecipa(SFONE,SFDEN)
+ SFERROR = and(SFONE,#-2)
+ ONE = #1
+ EXPA = sub(EXPA,EXPB)
+ }
+#undef EXPB
+#define RECIPEST r13
+ {
+ SFERROR -= sfmpy(SFRECIP,SFDEN):lib
+ REMHI = insert(ONE,#DF_EXPBITS+1,#DF_MANTBITS-32)
+ RECIPEST = ##0x00800000 << RECIPEST_SHIFT
+ }
+ {
+ SFRECIP += sfmpy(SFRECIP,SFERROR):lib
+ DENOMHI = insert(ONE,#DF_EXPBITS+1,#DF_MANTBITS-32)
+ SFERROR = and(SFONE,#-2)
+ }
+ {
+ SFERROR -= sfmpy(SFRECIP,SFDEN):lib
+ QH = #-DF_BIAS+1
+ QL = #DF_BIAS-1
+ }
+ {
+ SFRECIP += sfmpy(SFRECIP,SFERROR):lib
+ NO_OVF_UNF = cmp.gt(EXPA,QH)
+ NO_OVF_UNF = !cmp.gt(EXPA,QL)
+ }
+ {
+ RECIPEST = insert(SFRECIP,#SF_MANTBITS,#RECIPEST_SHIFT)
+ Q = #0
+ EXPA = add(EXPA,#-QADJ)
+ }
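
The sfrecipa estimate plus the two sfmpy correction steps above are Newton-Raphson refinement: with error e = 1 - d*x, the update x' = x + x*e roughly doubles the number of correct bits per step. The recurrence in C (a sketch; the helper name is illustrative):

    /* One Newton-Raphson step toward 1/d from estimate x. */
    static float refine_recip(float d, float x) {
        float e = 1.0f - d * x;   /* residual error of the estimate  */
        return x + x * e;         /* quadratic convergence: e' = e*e */
    }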
+#undef SFERROR
+#undef SFRECIP
+#define TMP r10
+#define TMP1 r11
+ {
+ RECIPEST = add(RECIPEST,#((-3) << RECIPEST_SHIFT))
+ }
+
+#define DIV_ITER1B(QSHIFTINSN,QSHIFT,REMSHIFT,EXTRA) \
+ { \
+ PROD = mpyu(RECIPEST,REMHI); \
+ REM = asl(REM,# ## ( REMSHIFT )); \
+ }; \
+ { \
+ PRODLO = # ## 0; \
+ REM -= mpyu(PRODHI,DENOMLO); \
+ REMSUB2 = mpyu(PRODHI,DENOMHI); \
+ }; \
+ { \
+ Q += QSHIFTINSN(PROD, # ## ( QSHIFT )); \
+ REM -= asl(REMSUB2, # ## 32); \
+ EXTRA \
+ }
+
+
+ DIV_ITER1B(ASL,14,15,)
+ DIV_ITER1B(ASR,1,15,)
+ DIV_ITER1B(ASR,16,15,)
+ DIV_ITER1B(ASR,31,15,PROD=# ( 0 );)
+
+#undef REMSUB2
+#define TMPPAIR r15:14
+#define TMPPAIRHI r15
+#define TMPPAIRLO r14
+#undef RECIPEST
+#define EXPB r13
+ {
+ // compare or sub with carry
+ TMPPAIR = sub(REM,DENOM)
+ P_TMP = cmp.gtu(DENOM,REM)
+ // set up amt to add to q
+ if (!P_TMP.new) PRODLO = #2
+ }
+ {
+ Q = add(Q,PROD)
+ if (!P_TMP) REM = TMPPAIR
+ TMPPAIR = #0
+ }
+ {
+ P_TMP = cmp.eq(REM,TMPPAIR)
+ if (!P_TMP.new) QL = or(QL,ONE)
+ }
+ {
+ PROD = neg(Q)
+ }
+ {
+ if (!Q_POSITIVE) Q = PROD
+ }
+#undef REM
+#undef REMHI
+#undef REMLO
+#undef DENOM
+#undef DENOMLO
+#undef DENOMHI
+#define A r1:0
+#define AH r1
+#define AL r0
+#define B r3:2
+#define BH r3
+#define BL r2
+ {
+ A = convert_d2df(Q)
+ if (!NO_OVF_UNF) jump .Ldiv_ovf_unf
+ }
+ {
+ AH += asl(EXPA,#DF_MANTBITS-32)
+ jumpr r31
+ }
+
+.Ldiv_ovf_unf:
+ {
+ AH += asl(EXPA,#DF_MANTBITS-32)
+ EXPB = extractu(AH,#DF_EXPBITS,#DF_MANTBITS-32)
+ }
+ {
+ PROD = abs(Q)
+ EXPA = add(EXPA,EXPB)
+ }
+ {
+ P_TMP = cmp.gt(EXPA,##DF_BIAS+DF_BIAS) // overflow
+ if (P_TMP.new) jump:nt .Ldiv_ovf
+ }
+ {
+ P_TMP = cmp.gt(EXPA,#0)
+ if (P_TMP.new) jump:nt .Lpossible_unf // round up to normal possible...
+ }
+ // Underflow
+ // We know what the infinite range exponent should be (EXPA)
+ // Q is 2's complement, PROD is abs(Q)
+ // Normalize Q, shift right, add a high bit, convert, change exponent
+
+#define FUDGE1 7 // how much to shift right
+#define FUDGE2 4 // how many guard/round to keep at lsbs
+
+ {
+ EXPB = add(clb(PROD),#-1) // doesn't need to be added in since
+ EXPA = sub(#FUDGE1,EXPA) // we extract post-converted exponent
+ TMP = USR
+ TMP1 = #63
+ }
+ {
+ EXPB = min(EXPA,TMP1)
+ TMP1 = or(TMP,#0x030)
+ PROD = asl(PROD,EXPB)
+ EXPA = #0
+ }
+ {
+ TMPPAIR = extractu(PROD,EXPBA) // bits that will get shifted out
+ PROD = lsr(PROD,EXPB) // shift out bits
+ B = #1
+ }
+ {
+ P_TMP = cmp.gtu(B,TMPPAIR)
+ if (!P_TMP.new) PRODLO = or(BL,PRODLO)
+ PRODHI = setbit(PRODHI,#DF_MANTBITS-32+FUDGE2)
+ }
+ {
+ Q = neg(PROD)
+ P_TMP = bitsclr(PRODLO,#(1<<FUDGE2)-1)
+ if (!P_TMP.new) TMP = TMP1
+ }
+ {
+ USR = TMP
+ if (Q_POSITIVE) Q = PROD
+ TMP = #-DF_BIAS-(DF_MANTBITS+FUDGE2)
+ }
+ {
+ A = convert_d2df(Q)
+ }
+ {
+ AH += asl(TMP,#DF_MANTBITS-32)
+ jumpr r31
+ }
+
+
+.Lpossible_unf:
+ // If upper parts of Q were all F's, but abs(A) == 0x00100000_00000000, we rounded up to min_normal
+ // The answer is correct, but we need to raise Underflow
+ {
+ B = extractu(A,#63,#0)
+ TMPPAIR = combine(##0x00100000,#0) // min normal
+ TMP = #0x7FFF
+ }
+ {
+ P_TMP = dfcmp.eq(TMPPAIR,B) // Is everything zero in the rounded value...
+ P_TMP = bitsset(PRODHI,TMP) // but a bunch of bits set in the unrounded abs(quotient)?
+ }
+
+#if (__HEXAGON_ARCH__ == 60)
+ TMP = USR // If not, just return
+ if (!P_TMP) jumpr r31 // Else, we want to set Unf+Inexact
+ // Note that inexact is already set...
+#else
+ {
+ if (!P_TMP) jumpr r31 // If not, just return
+ TMP = USR // Else, we want to set Unf+Inexact
+ } // Note that inexact is already set...
+#endif
+ {
+ TMP = or(TMP,#0x30)
+ }
+ {
+ USR = TMP
+ }
+ {
+ p0 = dfcmp.eq(A,A)
+ jumpr r31
+ }
+
+.Ldiv_ovf:
+
+ // Raise Overflow, and choose the correct overflow value (saturated normal or infinity)
+
+ {
+ TMP = USR
+ B = combine(##0x7fefffff,#-1)
+ AH = mux(Q_POSITIVE,#0,#-1)
+ }
+ {
+ PROD = combine(##0x7ff00000,#0)
+ QH = extractu(TMP,#2,#SR_ROUND_OFF)
+ TMP = or(TMP,#0x28)
+ }
+ {
+ USR = TMP
+ QH ^= lsr(AH,#31)
+ QL = QH
+ }
+ {
+ p0 = !cmp.eq(QL,#1) // if not round-to-zero
+ p0 = !cmp.eq(QH,#2) // and not rounding the other way
+ if (p0.new) B = PROD // go to inf
+ p0 = dfcmp.eq(B,B) // get exceptions
+ }
+ {
+ A = insert(B,#63,#0)
+ jumpr r31
+ }
+
+#undef ONE
+#define SIGN r28
+#undef NORMAL
+#undef NO_OVF_UNF
+#define P_INF p1
+#define P_ZERO p2
+.Ldiv_abnormal:
+ {
+ P_TMP = dfclass(A,#DFCLASS_NUMBER)
+ P_TMP = dfclass(B,#DFCLASS_NUMBER)
+ Q_POSITIVE = cmp.gt(SIGN,#-1)
+ }
+ {
+ P_INF = dfclass(A,#DFCLASS_INFINITE)
+ P_INF = dfclass(B,#DFCLASS_INFINITE)
+ }
+ {
+ P_ZERO = dfclass(A,#DFCLASS_ZERO)
+ P_ZERO = dfclass(B,#DFCLASS_ZERO)
+ }
+ {
+ if (!P_TMP) jump .Ldiv_nan
+ if (P_INF) jump .Ldiv_invalid
+ }
+ {
+ if (P_ZERO) jump .Ldiv_invalid
+ }
+ {
+ P_ZERO = dfclass(A,#DFCLASS_NONZERO) // nonzero
+ P_ZERO = dfclass(B,#DFCLASS_NONINFINITE) // non-infinite
+ }
+ {
+ P_INF = dfclass(A,#DFCLASS_NONINFINITE) // non-infinite
+ P_INF = dfclass(B,#DFCLASS_NONZERO) // nonzero
+ }
+ {
+ if (!P_ZERO) jump .Ldiv_zero_result
+ if (!P_INF) jump .Ldiv_inf_result
+ }
+ // Now we've narrowed it down to (de)normal / (de)normal
+ // Set up A/EXPA B/EXPB and go back
+#undef P_ZERO
+#undef P_INF
+#define P_TMP2 p1
+ {
+ P_TMP = dfclass(A,#DFCLASS_NORMAL)
+ P_TMP2 = dfclass(B,#DFCLASS_NORMAL)
+ TMP = ##0x00100000
+ }
+ {
+ EXPBA = combine(BH,AH)
+ AH = insert(TMP,#DF_EXPBITS+1,#DF_MANTBITS-32) // clear out hidden bit, sign bit
+ BH = insert(TMP,#DF_EXPBITS+1,#DF_MANTBITS-32) // clear out hidden bit, sign bit
+ }
+ {
+ if (P_TMP) AH = or(AH,TMP) // if normal, add back in hidden bit
+ if (P_TMP2) BH = or(BH,TMP) // if normal, add back in hidden bit
+ }
+ {
+ QH = add(clb(A),#-DF_EXPBITS)
+ QL = add(clb(B),#-DF_EXPBITS)
+ TMP = #1
+ }
+ {
+ EXPA = extractu(EXPA,#DF_EXPBITS,#DF_MANTBITS-32)
+ EXPB = extractu(EXPB,#DF_EXPBITS,#DF_MANTBITS-32)
+ }
+ {
+ A = asl(A,QH)
+ B = asl(B,QL)
+ if (!P_TMP) EXPA = sub(TMP,QH)
+ if (!P_TMP2) EXPB = sub(TMP,QL)
+ } // recreate values needed by the resumed code
+ {
+ PROD = extractu(B,#SF_MANTBITS,#DF_MANTBITS-SF_MANTBITS)
+ }
+ {
+ SFDEN = or(SFONE,PRODLO)
+ jump .Ldenorm_continue
+ }
+
+.Ldiv_zero_result:
+ {
+ AH = xor(AH,BH)
+ B = #0
+ }
+ {
+ A = insert(B,#63,#0)
+ jumpr r31
+ }
+.Ldiv_inf_result:
+ {
+ p2 = dfclass(B,#DFCLASS_ZERO)
+ p2 = dfclass(A,#DFCLASS_NONINFINITE)
+ }
+ {
+ TMP = USR
+ if (!p2) jump 1f
+ AH = xor(AH,BH)
+ }
+ {
+ TMP = or(TMP,#0x04) // DBZ
+ }
+ {
+ USR = TMP
+ }
+1:
+ {
+ B = combine(##0x7ff00000,#0)
+ p0 = dfcmp.uo(B,B) // take possible exception
+ }
+ {
+ A = insert(B,#63,#0)
+ jumpr r31
+ }
+.Ldiv_nan:
+ {
+ p0 = dfclass(A,#0x10)
+ p1 = dfclass(B,#0x10)
+ if (!p0.new) A = B
+ if (!p1.new) B = A
+ }
+ {
+ QH = convert_df2sf(A) // get possible invalid exceptions
+ QL = convert_df2sf(B)
+ }
+ {
+ A = #-1
+ jumpr r31
+ }
+
+.Ldiv_invalid:
+ {
+ TMP = ##0x7f800001
+ }
+ {
+ A = convert_sf2df(TMP) // get invalid, get DF qNaN
+ jumpr r31
+ }
+END(__hexagon_divdf3)
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/dfdiv.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/dffma.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/dffma.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/dffma.S (revision 351984)
@@ -0,0 +1,696 @@
+//===----------------------Hexagon builtin routine ------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#define Q6_ALIAS(TAG) .global __qdsp_##TAG ; .set __qdsp_##TAG, __hexagon_##TAG
+#define END(TAG) .size TAG,.-TAG
+
+// Double Precision Fused Multiply-Add
+
+
+#define A r1:0
+#define AH r1
+#define AL r0
+#define B r3:2
+#define BH r3
+#define BL r2
+#define C r5:4
+#define CH r5
+#define CL r4
+
+
+
+#define BTMP r15:14
+#define BTMPH r15
+#define BTMPL r14
+
+#define ATMP r13:12
+#define ATMPH r13
+#define ATMPL r12
+
+#define CTMP r11:10
+#define CTMPH r11
+#define CTMPL r10
+
+#define PP_LL r9:8
+#define PP_LL_H r9
+#define PP_LL_L r8
+
+#define PP_ODD r7:6
+#define PP_ODD_H r7
+#define PP_ODD_L r6
+
+
+#define PP_HH r17:16
+#define PP_HH_H r17
+#define PP_HH_L r16
+
+#define EXPA r18
+#define EXPB r19
+#define EXPBA r19:18
+
+#define TMP r28
+
+#define P_TMP p0
+#define PROD_NEG p3
+#define EXACT p2
+#define SWAP p1
+
+#define MANTBITS 52
+#define HI_MANTBITS 20
+#define EXPBITS 11
+#define BIAS 1023
+#define STACKSPACE 32
+
+#define ADJUST 4
+
+#define FUDGE 7
+#define FUDGE2 3
+
+#ifndef SR_ROUND_OFF
+#define SR_ROUND_OFF 22
+#endif
+
+ // First, classify for normal values, and abort if abnormal
+ //
+ // Next, unpack mantissa into 0x1000_0000_0000_0000 + mant<<8
+ //
+ // Since we know that the 2 MSBs of the H registers are zero, the partial
+ // products that involve the H registers can never carry
+ //
+ // Try to buy X slots, at the expense of latency if needed
+ //
+ // We will have PP_HH with the upper bits of the product, PP_LL with the lower
+ // PP_HH can have a maximum of 0x03FF_FFFF_FFFF_FFFF or thereabouts
+ // PP_HH can have a minimum of 0x0100_0000_0000_0000
+ //
+ // 0x0100_0000_0000_0000 has EXP of EXPA+EXPB-BIAS
+ //
+ // We need to align CTMP.
+ // If CTMP >> PP, convert PP to 64 bit with sticky, align CTMP, and follow normal add
+ // If CTMP << PP align CTMP and add 128 bits. Then compute sticky
+ // If CTMP ~= PP, align CTMP and add 128 bits. May have massive cancellation.
+ //
+ // Convert partial product and CTMP to 2's complement prior to addition
+ //
+ // After we add, we need to normalize into upper 64 bits, then compute sticky.
+
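
The point of all this care is the single rounding that distinguishes fma(a,b,c) from a*b+c; a small demonstration in C:

    #include <math.h>
    #include <stdio.h>

    int main(void) {
        double a = 1.0 + 0x1p-27, b = 1.0 + 0x1p-27;
        double c = -(1.0 + 0x1p-26);
        printf("%a\n", a * b + c);    /* 0x0p+0: the 2^-54 product bit is
                                         lost when a*b is rounded first  */
        printf("%a\n", fma(a, b, c)); /* 0x1p-54: one rounding keeps it  */
        return 0;
    }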
+ .text
+ .global __hexagon_fmadf4
+ .type __hexagon_fmadf4,@function
+ .global __hexagon_fmadf5
+ .type __hexagon_fmadf5,@function
+ .global fma
+ .type fma,@function
+ Q6_ALIAS(fmadf5)
+ .p2align 5
+__hexagon_fmadf4:
+__hexagon_fmadf5:
+fma:
+ {
+ P_TMP = dfclass(A,#2)
+ P_TMP = dfclass(B,#2)
+ ATMP = #0
+ BTMP = #0
+ }
+ {
+ ATMP = insert(A,#MANTBITS,#EXPBITS-3)
+ BTMP = insert(B,#MANTBITS,#EXPBITS-3)
+ PP_ODD_H = ##0x10000000
+ allocframe(#STACKSPACE)
+ }
+ {
+ PP_LL = mpyu(ATMPL,BTMPL)
+ if (!P_TMP) jump .Lfma_abnormal_ab
+ ATMPH = or(ATMPH,PP_ODD_H)
+ BTMPH = or(BTMPH,PP_ODD_H)
+ }
+ {
+ P_TMP = dfclass(C,#2)
+ if (!P_TMP.new) jump:nt .Lfma_abnormal_c
+ CTMP = combine(PP_ODD_H,#0)
+ PP_ODD = combine(#0,PP_LL_H)
+ }
+.Lfma_abnormal_c_restart:
+ {
+ PP_ODD += mpyu(BTMPL,ATMPH)
+ CTMP = insert(C,#MANTBITS,#EXPBITS-3)
+ memd(r29+#0) = PP_HH
+ memd(r29+#8) = EXPBA
+ }
+ {
+ PP_ODD += mpyu(ATMPL,BTMPH)
+ EXPBA = neg(CTMP)
+ P_TMP = cmp.gt(CH,#-1)
+ TMP = xor(AH,BH)
+ }
+ {
+ EXPA = extractu(AH,#EXPBITS,#HI_MANTBITS)
+ EXPB = extractu(BH,#EXPBITS,#HI_MANTBITS)
+ PP_HH = combine(#0,PP_ODD_H)
+ if (!P_TMP) CTMP = EXPBA
+ }
+ {
+ PP_HH += mpyu(ATMPH,BTMPH)
+ PP_LL = combine(PP_ODD_L,PP_LL_L)
+#undef PP_ODD
+#undef PP_ODD_H
+#undef PP_ODD_L
+#undef ATMP
+#undef ATMPL
+#undef ATMPH
+#undef BTMP
+#undef BTMPL
+#undef BTMPH
+#define RIGHTLEFTSHIFT r13:12
+#define RIGHTSHIFT r13
+#define LEFTSHIFT r12
+
+ EXPA = add(EXPA,EXPB)
+#undef EXPB
+#undef EXPBA
+#define EXPC r19
+#define EXPCA r19:18
+ EXPC = extractu(CH,#EXPBITS,#HI_MANTBITS)
+ }
+ // PP_HH:PP_LL now has product
+ // CTMP is negated
+ // EXPA,B,C are extracted
+ // We need to negate PP
+ // Since we will be adding with carry later, if we need to negate,
+ // just invert all bits now, which we can do conditionally and in parallel
+#define PP_HH_TMP r15:14
+#define PP_LL_TMP r7:6
+ {
+ EXPA = add(EXPA,#-BIAS+(ADJUST))
+ PROD_NEG = !cmp.gt(TMP,#-1)
+ PP_LL_TMP = #0
+ PP_HH_TMP = #0
+ }
+ {
+ PP_LL_TMP = sub(PP_LL_TMP,PP_LL,PROD_NEG):carry
+ P_TMP = !cmp.gt(TMP,#-1)
+ SWAP = cmp.gt(EXPC,EXPA) // If C >> PP
+ if (SWAP.new) EXPCA = combine(EXPA,EXPC)
+ }
+ {
+ PP_HH_TMP = sub(PP_HH_TMP,PP_HH,PROD_NEG):carry
+ if (P_TMP) PP_LL = PP_LL_TMP
+#undef PP_LL_TMP
+#define CTMP2 r7:6
+#define CTMP2H r7
+#define CTMP2L r6
+ CTMP2 = #0
+ EXPC = sub(EXPA,EXPC)
+ }
+ {
+ if (P_TMP) PP_HH = PP_HH_TMP
+ P_TMP = cmp.gt(EXPC,#63)
+ if (SWAP) PP_LL = CTMP2
+ if (SWAP) CTMP2 = PP_LL
+ }
+#undef PP_HH_TMP
+//#define ONE r15:14
+//#define S_ONE r14
+#define ZERO r15:14
+#define S_ZERO r15
+#undef PROD_NEG
+#define P_CARRY p3
+ {
+ if (SWAP) PP_HH = CTMP // Swap C and PP
+ if (SWAP) CTMP = PP_HH
+ if (P_TMP) EXPC = add(EXPC,#-64)
+ TMP = #63
+ }
+ {
+ // If diff > 63, pre-shift-right by 64...
+ if (P_TMP) CTMP2 = CTMP
+ TMP = asr(CTMPH,#31)
+ RIGHTSHIFT = min(EXPC,TMP)
+ LEFTSHIFT = #0
+ }
+#undef C
+#undef CH
+#undef CL
+#define STICKIES r5:4
+#define STICKIESH r5
+#define STICKIESL r4
+ {
+ if (P_TMP) CTMP = combine(TMP,TMP) // sign extension of pre-shift-right-64
+ STICKIES = extract(CTMP2,RIGHTLEFTSHIFT)
+ CTMP2 = lsr(CTMP2,RIGHTSHIFT)
+ LEFTSHIFT = sub(#64,RIGHTSHIFT)
+ }
+ {
+ ZERO = #0
+ TMP = #-2
+ CTMP2 |= lsl(CTMP,LEFTSHIFT)
+ CTMP = asr(CTMP,RIGHTSHIFT)
+ }
+ {
+ P_CARRY = cmp.gtu(STICKIES,ZERO) // If we have sticky bits from C shift
+ if (P_CARRY.new) CTMP2L = and(CTMP2L,TMP) // make sure adding 1 == OR
+#undef ZERO
+#define ONE r15:14
+#define S_ONE r14
+ ONE = #1
+ STICKIES = #0
+ }
+ {
+ PP_LL = add(CTMP2,PP_LL,P_CARRY):carry // use the carry to add the sticky
+ }
+ {
+ PP_HH = add(CTMP,PP_HH,P_CARRY):carry
+ TMP = #62
+ }
+ // PP_HH:PP_LL now holds the sum
+ // We may need to normalize left by up to 62 bits (the all-sign-bits case
+ // below shifts by 62 and recomputes).
+ //
+ // Even with massive cancellation, the range we normalize by is limited.
+ {
+ LEFTSHIFT = add(clb(PP_HH),#-2)
+ if (!cmp.eq(LEFTSHIFT.new,TMP)) jump:t 1f // all sign bits?
+ }
+ // We had all sign bits, shift left by 62.
+ {
+ CTMP = extractu(PP_LL,#62,#2)
+ PP_LL = asl(PP_LL,#62)
+ EXPA = add(EXPA,#-62) // And adjust exponent of result
+ }
+ {
+ PP_HH = insert(CTMP,#62,#0) // Then shift 63
+ }
+ {
+ LEFTSHIFT = add(clb(PP_HH),#-2)
+ }
+ .falign
+1:
+ {
+ CTMP = asl(PP_HH,LEFTSHIFT)
+ STICKIES |= asl(PP_LL,LEFTSHIFT)
+ RIGHTSHIFT = sub(#64,LEFTSHIFT)
+ EXPA = sub(EXPA,LEFTSHIFT)
+ }
+ {
+ CTMP |= lsr(PP_LL,RIGHTSHIFT)
+ EXACT = cmp.gtu(ONE,STICKIES)
+ TMP = #BIAS+BIAS-2
+ }
+ {
+ if (!EXACT) CTMPL = or(CTMPL,S_ONE)
+ // If EXPA is overflow/underflow, jump to ovf_unf
+ P_TMP = !cmp.gt(EXPA,TMP)
+ P_TMP = cmp.gt(EXPA,#1)
+ if (!P_TMP.new) jump:nt .Lfma_ovf_unf
+ }
+ {
+ // XXX: FIXME: should PP_HH for check of zero be CTMP?
+ P_TMP = cmp.gtu(ONE,CTMP) // is result true zero?
+ A = convert_d2df(CTMP)
+ EXPA = add(EXPA,#-BIAS-60)
+ PP_HH = memd(r29+#0)
+ }
+ {
+ AH += asl(EXPA,#HI_MANTBITS)
+ EXPCA = memd(r29+#8)
+ if (!P_TMP) dealloc_return // not zero, return
+ }
+.Ladd_yields_zero:
+ // We had full cancellation. Return +/- zero (-0 when round-down)
+ {
+ TMP = USR
+ A = #0
+ }
+ {
+ TMP = extractu(TMP,#2,#SR_ROUND_OFF)
+ PP_HH = memd(r29+#0)
+ EXPCA = memd(r29+#8)
+ }
+ {
+ p0 = cmp.eq(TMP,#2)
+ if (p0.new) AH = ##0x80000000
+ dealloc_return
+ }
+
+#undef RIGHTLEFTSHIFT
+#undef RIGHTSHIFT
+#undef LEFTSHIFT
+#undef CTMP2
+#undef CTMP2H
+#undef CTMP2L
+
+.Lfma_ovf_unf:
+ {
+ p0 = cmp.gtu(ONE,CTMP)
+ if (p0.new) jump:nt .Ladd_yields_zero
+ }
+ {
+ A = convert_d2df(CTMP)
+ EXPA = add(EXPA,#-BIAS-60)
+ TMP = EXPA
+ }
+#define NEW_EXPB r7
+#define NEW_EXPA r6
+ {
+ AH += asl(EXPA,#HI_MANTBITS)
+ NEW_EXPB = extractu(AH,#EXPBITS,#HI_MANTBITS)
+ }
+ {
+ NEW_EXPA = add(EXPA,NEW_EXPB)
+ PP_HH = memd(r29+#0)
+ EXPCA = memd(r29+#8)
+#undef PP_HH
+#undef PP_HH_H
+#undef PP_HH_L
+#undef EXPCA
+#undef EXPC
+#undef EXPA
+#undef PP_LL
+#undef PP_LL_H
+#undef PP_LL_L
+#define EXPA r6
+#define EXPB r7
+#define EXPBA r7:6
+#define ATMP r9:8
+#define ATMPH r9
+#define ATMPL r8
+#undef NEW_EXPB
+#undef NEW_EXPA
+ ATMP = abs(CTMP)
+ }
+ {
+ p0 = cmp.gt(EXPA,##BIAS+BIAS)
+ if (p0.new) jump:nt .Lfma_ovf
+ }
+ {
+ p0 = cmp.gt(EXPA,#0)
+ if (p0.new) jump:nt .Lpossible_unf
+ }
+ {
+ // TMP has original EXPA.
+ // ATMP is corresponding value
+ // Normalize ATMP and shift right to correct location
+ EXPB = add(clb(ATMP),#-2) // Amount to left shift to normalize
+ EXPA = sub(#1+5,TMP) // Amount to right shift to denormalize
+ p3 = cmp.gt(CTMPH,#-1)
+ }
+ // Underflow
+ // We know that the infinite range exponent should be EXPA
+ // CTMP is 2's complement, ATMP is abs(CTMP)
+ {
+ EXPA = add(EXPA,EXPB) // how much to shift back right
+ ATMP = asl(ATMP,EXPB) // shift left
+ AH = USR
+ TMP = #63
+ }
+ {
+ EXPB = min(EXPA,TMP)
+ EXPA = #0
+ AL = #0x0030
+ }
+ {
+ B = extractu(ATMP,EXPBA)
+ ATMP = asr(ATMP,EXPB)
+ }
+ {
+ p0 = cmp.gtu(ONE,B)
+ if (!p0.new) ATMPL = or(ATMPL,S_ONE)
+ ATMPH = setbit(ATMPH,#HI_MANTBITS+FUDGE2)
+ }
+ {
+ CTMP = neg(ATMP)
+ p1 = bitsclr(ATMPL,#(1<<FUDGE2)-1)
+ if (!p1.new) AH = or(AH,AL)
+ B = #0
+ }
+ {
+ if (p3) CTMP = ATMP
+ USR = AH
+ TMP = #-BIAS-(MANTBITS+FUDGE2)
+ }
+ {
+ A = convert_d2df(CTMP)
+ }
+ {
+ AH += asl(TMP,#HI_MANTBITS)
+ dealloc_return
+ }
+.Lpossible_unf:
+ {
+ TMP = ##0x7fefffff
+ ATMP = abs(CTMP)
+ }
+ {
+ p0 = cmp.eq(AL,#0)
+ p0 = bitsclr(AH,TMP)
+ if (!p0.new) dealloc_return:t
+ TMP = #0x7fff
+ }
+ {
+ p0 = bitsset(ATMPH,TMP)
+ BH = USR
+ BL = #0x0030
+ }
+ {
+ if (p0) BH = or(BH,BL)
+ }
+ {
+ USR = BH
+ }
+ {
+ p0 = dfcmp.eq(A,A)
+ dealloc_return
+ }
+.Lfma_ovf:
+ {
+ TMP = USR
+ CTMP = combine(##0x7fefffff,#-1)
+ A = CTMP
+ }
+ {
+ ATMP = combine(##0x7ff00000,#0)
+ BH = extractu(TMP,#2,#SR_ROUND_OFF)
+ TMP = or(TMP,#0x28)
+ }
+ {
+ USR = TMP
+ BH ^= lsr(AH,#31)
+ BL = BH
+ }
+ {
+ p0 = !cmp.eq(BL,#1)
+ p0 = !cmp.eq(BH,#2)
+ }
+ {
+ p0 = dfcmp.eq(ATMP,ATMP)
+ if (p0.new) CTMP = ATMP
+ }
+ {
+ A = insert(CTMP,#63,#0)
+ dealloc_return
+ }
+#undef CTMP
+#undef CTMPH
+#undef CTMPL
+#define BTMP r11:10
+#define BTMPH r11
+#define BTMPL r10
+
+#undef STICKIES
+#undef STICKIESH
+#undef STICKIESL
+#define C r5:4
+#define CH r5
+#define CL r4
+
+.Lfma_abnormal_ab:
+ {
+ ATMP = extractu(A,#63,#0)
+ BTMP = extractu(B,#63,#0)
+ deallocframe
+ }
+ {
+ p3 = cmp.gtu(ATMP,BTMP)
+ if (!p3.new) A = B // sort values
+ if (!p3.new) B = A
+ }
+ {
+ p0 = dfclass(A,#0x0f) // A NaN?
+ if (!p0.new) jump:nt .Lnan
+ if (!p3) ATMP = BTMP
+ if (!p3) BTMP = ATMP
+ }
+ {
+ p1 = dfclass(A,#0x08) // A is infinity
+ p1 = dfclass(B,#0x0e) // B is nonzero
+ }
+ {
+ p0 = dfclass(A,#0x08) // a is inf
+ p0 = dfclass(B,#0x01) // b is zero
+ }
+ {
+ if (p1) jump .Lab_inf
+ p2 = dfclass(B,#0x01)
+ }
+ {
+ if (p0) jump .Linvalid
+ if (p2) jump .Lab_true_zero
+ TMP = ##0x7c000000
+ }
+ // We are left with a normal or subnormal times a subnormal, with A > B.
+ // If A and B are both very small, the result collapses to a single sticky
+ // bit; replace the lower 63 bits of A and B with 0x0010_0000_0000_0000,
+ // which yields equivalent results.
+ // If A and B might multiply to something bigger, decrease A's exponent,
+ // increase B's exponent, and start over.
+ {
+ p0 = bitsclr(AH,TMP)
+ if (p0.new) jump:nt .Lfma_ab_tiny
+ }
+ {
+ TMP = add(clb(BTMP),#-EXPBITS)
+ }
+ {
+ BTMP = asl(BTMP,TMP)
+ }
+ {
+ B = insert(BTMP,#63,#0)
+ AH -= asl(TMP,#HI_MANTBITS)
+ }
+ jump fma
+
+.Lfma_ab_tiny:
+ ATMP = combine(##0x00100000,#0)
+ {
+ A = insert(ATMP,#63,#0)
+ B = insert(ATMP,#63,#0)
+ }
+ jump fma
+
+.Lab_inf:
+ {
+ B = lsr(B,#63)
+ p0 = dfclass(C,#0x10)
+ }
+ {
+ A ^= asl(B,#63)
+ if (p0) jump .Lnan
+ }
+ {
+ p1 = dfclass(C,#0x08)
+ if (p1.new) jump:nt .Lfma_inf_plus_inf
+ }
+ // A*B is +/- inf, C is finite. Return A
+ {
+ jumpr r31
+ }
+ .falign
+.Lfma_inf_plus_inf:
+ { // adding infinities of different signs is invalid
+ p0 = dfcmp.eq(A,C)
+ if (!p0.new) jump:nt .Linvalid
+ }
+ {
+ jumpr r31
+ }
+
+.Lnan:
+ {
+ p0 = dfclass(B,#0x10)
+ p1 = dfclass(C,#0x10)
+ if (!p0.new) B = A
+ if (!p1.new) C = A
+ }
+ { // find sNaNs
+ BH = convert_df2sf(B)
+ BL = convert_df2sf(C)
+ }
+ {
+ BH = convert_df2sf(A)
+ A = #-1
+ jumpr r31
+ }
+
+.Linvalid:
+ {
+ TMP = ##0x7f800001 // sp snan
+ }
+ {
+ A = convert_sf2df(TMP)
+ jumpr r31
+ }
+
+.Lab_true_zero:
+ // B is zero, A is finite number
+ {
+ p0 = dfclass(C,#0x10)
+ if (p0.new) jump:nt .Lnan
+ if (p0.new) A = C
+ }
+ {
+ p0 = dfcmp.eq(B,C) // is C also zero?
+ AH = lsr(AH,#31) // get sign
+ }
+ {
+ BH ^= asl(AH,#31) // form correctly signed zero in B
+ if (!p0) A = C // If C is not zero, return C
+ if (!p0) jumpr r31
+ }
+ // B has correctly signed zero, C is also zero
+.Lzero_plus_zero:
+ {
+ p0 = cmp.eq(B,C) // yes, scalar equals. +0++0 or -0+-0
+ if (p0.new) jumpr:t r31
+ A = B
+ }
+ {
+ TMP = USR
+ }
+ {
+ TMP = extractu(TMP,#2,#SR_ROUND_OFF)
+ A = #0
+ }
+ {
+ p0 = cmp.eq(TMP,#2)
+ if (p0.new) AH = ##0x80000000
+ jumpr r31
+ }
+#undef BTMP
+#undef BTMPH
+#undef BTMPL
+#define CTMP r11:10
+ .falign
+.Lfma_abnormal_c:
+ // We know that AB is normal * normal
+ // C is not normal: zero, subnormal, inf, or NaN.
+ {
+ p0 = dfclass(C,#0x10) // is C NaN?
+ if (p0.new) jump:nt .Lnan
+ if (p0.new) A = C // move NaN to A
+ deallocframe
+ }
+ {
+ p0 = dfclass(C,#0x08) // is C inf?
+ if (p0.new) A = C // return C
+ if (p0.new) jumpr:nt r31
+ }
+ // zero or subnormal
+ // If we have a zero, and we know AB is normal*normal, we can just call normal multiply
+ {
+ p0 = dfclass(C,#0x01) // is C zero?
+ if (p0.new) jump:nt __hexagon_muldf3
+ TMP = #1
+ }
+ // Left with: subnormal
+ // Adjust C and jump back to restart
+ {
+ allocframe(#STACKSPACE) // oops, deallocated above, re-allocate frame
+ CTMP = #0
+ CH = insert(TMP,#EXPBITS,#HI_MANTBITS)
+ jump .Lfma_abnormal_c_restart
+ }
+END(fma)
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/dffma.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/dfminmax.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/dfminmax.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/dfminmax.S (revision 351984)
@@ -0,0 +1,75 @@
+//===----------------------Hexagon builtin routine ------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#define A r1:0
+#define B r3:2
+#define ATMP r5:4
+
+
+#define Q6_ALIAS(TAG) .global __qdsp_##TAG ; .set __qdsp_##TAG, __hexagon_##TAG
+#define END(TAG) .size TAG,.-TAG
+
+// Min and Max return A if B is NaN, or B if A is NaN
+// Otherwise, they return the smaller or bigger value
+//
+// If values are equal, we want to favor -0.0 for min and +0.0 for max.
+
+// Comparisons always return false for NaN.
+// In "if (isnan(A)) A = B; if (A > B) A = B;" at most one branch fires.
+
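
The behavior described above matches the C99 fmin/fmax contract, except that the signed-zero preference is this routine's choice (ISO C leaves that case unspecified); a quick demonstration:

    #include <math.h>
    #include <stdio.h>

    int main(void) {
        printf("%g\n", fmin(NAN, 2.0));  /* 2: the NaN operand is dropped */
        printf("%g\n", fmin(-0.0, 0.0)); /* -0 with this routine: A|B     */
        printf("%g\n", fmax(-0.0, 0.0)); /* +0 with this routine: A&B     */
        return 0;
    }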
+ .text
+ .global __hexagon_mindf3
+ .global __hexagon_maxdf3
+ .global fmin
+ .type fmin,@function
+ .global fmax
+ .type fmax,@function
+ .type __hexagon_mindf3,@function
+ .type __hexagon_maxdf3,@function
+ Q6_ALIAS(mindf3)
+ Q6_ALIAS(maxdf3)
+ .p2align 5
+__hexagon_mindf3:
+fmin:
+ {
+ p0 = dfclass(A,#0x10) // p0 is true if A is NaN
+ p1 = dfcmp.gt(A,B) // p1 is true if A > B (false if either is NaN)
+ ATMP = A
+ }
+ {
+ if (p0) A = B // if A is NaN use B
+ if (p1) A = B // gt is always false if either is NaN
+ p2 = dfcmp.eq(A,B) // if A == B
+ if (!p2.new) jumpr:t r31
+ }
+ // A == B, return A|B to select -0.0 over 0.0
+ {
+ A = or(ATMP,B)
+ jumpr r31
+ }
+END(__hexagon_mindf3)
+ .falign
+__hexagon_maxdf3:
+fmax:
+ {
+ p0 = dfclass(A,#0x10)
+ p1 = dfcmp.gt(B,A)
+ ATMP = A
+ }
+ {
+ if (p0) A = B
+ if (p1) A = B
+ p2 = dfcmp.eq(A,B)
+ if (!p2.new) jumpr:t r31
+ }
+ // A == B, return A&B to select 0.0 over -0.0
+ {
+ A = and(ATMP,B)
+ jumpr r31
+ }
+END(__hexagon_maxdf3)
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/dfminmax.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/dfmul.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/dfmul.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/dfmul.S (revision 351984)
@@ -0,0 +1,413 @@
+//===----------------------Hexagon builtin routine ------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// Double Precision Multiply
+#define A r1:0
+#define AH r1
+#define AL r0
+#define B r3:2
+#define BH r3
+#define BL r2
+
+#define BTMP r5:4
+#define BTMPH r5
+#define BTMPL r4
+
+#define PP_ODD r7:6
+#define PP_ODD_H r7
+#define PP_ODD_L r6
+
+#define ONE r9:8
+#define S_ONE r8
+#define S_ZERO r9
+
+#define PP_HH r11:10
+#define PP_HH_H r11
+#define PP_HH_L r10
+
+#define ATMP r13:12
+#define ATMPH r13
+#define ATMPL r12
+
+#define PP_LL r15:14
+#define PP_LL_H r15
+#define PP_LL_L r14
+
+#define TMP r28
+
+#define MANTBITS 52
+#define HI_MANTBITS 20
+#define EXPBITS 11
+#define BIAS 1024
+#define MANTISSA_TO_INT_BIAS 52
+
+// Constant to adjust the normalization amount in the denormal path:
+// the amount to right shift the partial product to get to a denorm.
+#define FUDGE 5
+
+#define Q6_ALIAS(TAG) .global __qdsp_##TAG ; .set __qdsp_##TAG, __hexagon_##TAG
+#define FAST_ALIAS(TAG) .global __hexagon_fast_##TAG ; .set __hexagon_fast_##TAG, __hexagon_##TAG
+#define FAST2_ALIAS(TAG) .global __hexagon_fast2_##TAG ; .set __hexagon_fast2_##TAG, __hexagon_##TAG
+#define END(TAG) .size TAG,.-TAG
+
+#define SR_ROUND_OFF 22
+ .text
+ .global __hexagon_muldf3
+ .type __hexagon_muldf3,@function
+ Q6_ALIAS(muldf3)
+ FAST_ALIAS(muldf3)
+ FAST2_ALIAS(muldf3)
+ .p2align 5
+__hexagon_muldf3:
+ {
+ p0 = dfclass(A,#2)
+ p0 = dfclass(B,#2)
+ ATMP = combine(##0x40000000,#0)
+ }
+ {
+ ATMP = insert(A,#MANTBITS,#EXPBITS-1)
+ BTMP = asl(B,#EXPBITS-1)
+ TMP = #-BIAS
+ ONE = #1
+ }
+ {
+ PP_ODD = mpyu(BTMPL,ATMPH)
+ BTMP = insert(ONE,#2,#62)
+ }
+ // since we know that the MSB of the H registers is zero, we should never carry
+ // H <= 2^31-1. L <= 2^32-1. Therefore, HL <= 2^63-2^32-2^31+1
+ // Adding 2 HLs, we get 2^64-3*2^32+2 maximum.
+ // Therefore, we can add 3 2^32-1 values safely without carry. We only need one.
+ {
+ PP_LL = mpyu(ATMPL,BTMPL)
+ PP_ODD += mpyu(ATMPL,BTMPH)
+ }
+ {
+ PP_ODD += lsr(PP_LL,#32)
+ PP_HH = mpyu(ATMPH,BTMPH)
+ BTMP = combine(##BIAS+BIAS-4,#0)
+ }
+ {
+ PP_HH += lsr(PP_ODD,#32)
+ if (!p0) jump .Lmul_abnormal
+ p1 = cmp.eq(PP_LL_L,#0) // 64 lsb's 0?
+ p1 = cmp.eq(PP_ODD_L,#0) // 64 lsb's 0?
+ }
+
+ // PP_HH can have a maximum of 0x3FFF_FFFF_FFFF_FFFF or thereabouts
+ // PP_HH can have a minimum of 0x1000_0000_0000_0000 or so
+
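
The packet sequence above is the classic 64x64->128-bit multiply built from 32-bit partial products. A generic C version needs explicit carry handling that the assembly can omit because the operands' high words have their top bits clear; a sketch:

    #include <stdint.h>

    static void mul64x64(uint64_t a, uint64_t b, uint64_t *hi, uint64_t *lo) {
        uint64_t al = (uint32_t)a, ah = a >> 32;
        uint64_t bl = (uint32_t)b, bh = b >> 32;
        uint64_t ll  = al * bl;               /* like PP_LL            */
        uint64_t mid = ah * bl, carry = 0;    /* like PP_ODD           */
        uint64_t t = mid + al * bh;
        if (t < mid) carry++;                 /* middle sum overflowed */
        mid = t + (ll >> 32);
        if (mid < t) carry++;
        *lo = (mid << 32) | (uint32_t)ll;
        *hi = ah * bh + (mid >> 32) + (carry << 32);
    }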
+#undef PP_ODD
+#undef PP_ODD_H
+#undef PP_ODD_L
+#define EXP10 r7:6
+#define EXP1 r7
+#define EXP0 r6
+ {
+ if (!p1) PP_HH_L = or(PP_HH_L,S_ONE)
+ EXP0 = extractu(AH,#EXPBITS,#HI_MANTBITS)
+ EXP1 = extractu(BH,#EXPBITS,#HI_MANTBITS)
+ }
+ {
+ PP_LL = neg(PP_HH)
+ EXP0 += add(TMP,EXP1)
+ TMP = xor(AH,BH)
+ }
+ {
+ if (!p2.new) PP_HH = PP_LL
+ p2 = cmp.gt(TMP,#-1)
+ p0 = !cmp.gt(EXP0,BTMPH)
+ p0 = cmp.gt(EXP0,BTMPL)
+ if (!p0.new) jump:nt .Lmul_ovf_unf
+ }
+ {
+ A = convert_d2df(PP_HH)
+ EXP0 = add(EXP0,#-BIAS-58)
+ }
+ {
+ AH += asl(EXP0,#HI_MANTBITS)
+ jumpr r31
+ }
+
+ .falign
+.Lpossible_unf:
+ // We ended up with a positive exponent,
+ // but we may have rounded up to an exponent of 1.
+ // If the exponent is 1 and we only reached it by rounding up,
+ // we also need to raise underflow.
+ // Fortunately, this is easy to detect: the result must be
+ // +/- 0x0010_0000_0000_0000, and the partial product must also
+ // have more than one bit set.
+ //
+ // Note: ATMP should hold abs(PP_HH)
+ // Note: BTMPL should hold 0x7FEFFFFF
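+ //
+ // (The smallest normal, +/-2^-1022, is 0x0010_0000_0000_0000 plus sign;
+ // AL == 0 together with bitsclr(AH,0x7FEFFFFF) matches exactly that
+ // pattern, so the test below is cheap. +/-0.0 would also match, but
+ // cannot occur on this path.)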
+ {
+ p0 = cmp.eq(AL,#0)
+ p0 = bitsclr(AH,BTMPL)
+ if (!p0.new) jumpr:t r31
+ BTMPH = #0x7fff
+ }
+ {
+ p0 = bitsset(ATMPH,BTMPH)
+ BTMPL = USR
+ BTMPH = #0x030
+ }
+ {
+ if (p0) BTMPL = or(BTMPL,BTMPH)
+ }
+ {
+ USR = BTMPL
+ }
+ {
+ p0 = dfcmp.eq(A,A)
+ jumpr r31
+ }
+ .falign
+.Lmul_ovf_unf:
+ {
+ A = convert_d2df(PP_HH)
+ ATMP = abs(PP_HH) // take absolute value
+ EXP1 = add(EXP0,#-BIAS-58)
+ }
+ {
+ AH += asl(EXP1,#HI_MANTBITS)
+ EXP1 = extractu(AH,#EXPBITS,#HI_MANTBITS)
+ BTMPL = ##0x7FEFFFFF
+ }
+ {
+ EXP1 += add(EXP0,##-BIAS-58)
+ //BTMPH = add(clb(ATMP),#-2)
+ BTMPH = #0
+ }
+ {
+ p0 = cmp.gt(EXP1,##BIAS+BIAS-2) // overflow
+ if (p0.new) jump:nt .Lmul_ovf
+ }
+ {
+ p0 = cmp.gt(EXP1,#0)
+ if (p0.new) jump:nt .Lpossible_unf
+ BTMPH = sub(EXP0,BTMPH)
+ TMP = #63 // max amount to shift
+ }
+ // Underflow
+ //
+ // PP_HH has the partial product with sticky LSB.
+ // PP_HH can have a maximum of 0x3FFF_FFFF_FFFF_FFFF or thereabouts
+ // PP_HH can have a minimum of 0x1000_0000_0000_0000 or so
+ // The exponent of PP_HH is in EXP1, which is non-positive (0 or negative)
+ // That's the exponent that happens after the normalization
+ //
+ // EXP0 has the exponent that, when added to the normalized value, is out of range.
+ //
+ // Strategy:
+ //
+ // * Shift the bits down, with a sticky bit, so that they are aligned according
+ // to the LZ count and the appropriate exponent, but not all the way to the
+ // mantissa field; keep the last few bits around.
+ // * Put a 1 near the MSB
+ // * Check the LSBs for inexact; if inexact also set underflow
+ // * Convert [u]d2df -- will correctly round according to rounding mode
+ // * Replace exponent field with zero
+
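+ // A hedged C sketch of the sticky-shift step described above; names are
+ // illustrative, not the original source:
+ //
+ // u64 p = pp_abs;                         // |partial product|
+ // unsigned sh = shift > 63 ? 63 : shift;  // never shift more than 63
+ // u64 lost = p & ((1ull << sh) - 1);      // bits about to fall off
+ // p >>= sh;
+ // if (lost) p |= 1;                       // sticky bit: inexact
+ // p |= 1ull << 55;                        // seed bit (HI_MANTBITS+3+32)
+ // // ...then convert with d2df and zero out the exponent field
+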
+ {
+ BTMPL = #0 // offset for extract
+ BTMPH = sub(#FUDGE,BTMPH) // amount to right shift
+ }
+ {
+ p3 = cmp.gt(PP_HH_H,#-1) // is it positive?
+ BTMPH = min(BTMPH,TMP) // Don't shift more than 63
+ PP_HH = ATMP
+ }
+ {
+ TMP = USR
+ PP_LL = extractu(PP_HH,BTMP)
+ }
+ {
+ PP_HH = asr(PP_HH,BTMPH)
+ BTMPL = #0x0030 // underflow flag
+ AH = insert(S_ZERO,#EXPBITS,#HI_MANTBITS)
+ }
+ {
+ p0 = cmp.gtu(ONE,PP_LL) // Did we extract all zeros?
+ if (!p0.new) PP_HH_L = or(PP_HH_L,S_ONE) // add sticky bit
+ PP_HH_H = setbit(PP_HH_H,#HI_MANTBITS+3) // Add back in a bit so we can use convert instruction
+ }
+ {
+ PP_LL = neg(PP_HH)
+ p1 = bitsclr(PP_HH_L,#0x7) // Are the LSB's clear?
+ if (!p1.new) TMP = or(BTMPL,TMP) // If not, Inexact+Underflow
+ }
+ {
+ if (!p3) PP_HH = PP_LL
+ USR = TMP
+ }
+ {
+ A = convert_d2df(PP_HH) // Do rounding
+ p0 = dfcmp.eq(A,A) // realize exception
+ }
+ {
+ AH = insert(S_ZERO,#EXPBITS-1,#HI_MANTBITS+1) // Insert correct exponent
+ jumpr r31
+ }
+ .falign
+.Lmul_ovf:
+ // We get either max finite value or infinity. Either way, overflow+inexact
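+ //
+ // Hedged sketch of the selection below, assuming the USR rounding field
+ // (bits 23:22) encodes 0=nearest, 1=toward zero, 2=down, 3=up:
+ //
+ // int rm = (usr >> SR_ROUND_OFF) & 3;
+ // int toward_zero = (rm == 1) || ((rm ^ sign) == 2);
+ // result = toward_zero ? max_finite : infinity; // sign re-inserted below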
+ {
+ TMP = USR
+ ATMP = combine(##0x7fefffff,#-1) // positive max finite
+ A = PP_HH
+ }
+ {
+ PP_LL_L = extractu(TMP,#2,#SR_ROUND_OFF) // rounding bits
+ TMP = or(TMP,#0x28) // inexact + overflow
+ BTMP = combine(##0x7ff00000,#0) // positive infinity
+ }
+ {
+ USR = TMP
+ PP_LL_L ^= lsr(AH,#31) // Does sign match rounding?
+ TMP = PP_LL_L // unmodified rounding mode
+ }
+ {
+ p0 = !cmp.eq(TMP,#1) // If not round-to-zero and
+ p0 = !cmp.eq(PP_LL_L,#2) // Not rounding the other way,
+ if (p0.new) ATMP = BTMP // we should get infinity
+ p0 = dfcmp.eq(A,A) // Realize FP exception if enabled
+ }
+ {
+ A = insert(ATMP,#63,#0) // insert inf/maxfinite, leave sign
+ jumpr r31
+ }
+
+.Lmul_abnormal:
+ {
+ ATMP = extractu(A,#63,#0) // strip off sign
+ BTMP = extractu(B,#63,#0) // strip off sign
+ }
+ {
+ p3 = cmp.gtu(ATMP,BTMP)
+ if (!p3.new) A = B // sort values (packet reads-before-writes
+ if (!p3.new) B = A // makes this pair a true swap)
+ }
+ {
+ // Any NaN --> NaN, possibly raise invalid if sNaN
+ p0 = dfclass(A,#0x0f) // A not NaN?
+ if (!p0.new) jump:nt .Linvalid_nan
+ if (!p3) ATMP = BTMP
+ if (!p3) BTMP = ATMP
+ }
+ {
+ // Infinity * nonzero number is infinity
+ p1 = dfclass(A,#0x08) // A is infinity
+ p1 = dfclass(B,#0x0e) // B is nonzero
+ }
+ {
+ // Infinity * zero --> NaN, raise invalid
+ // Other zeros return zero
+ p0 = dfclass(A,#0x08) // A is infinity
+ p0 = dfclass(B,#0x01) // B is zero
+ }
+ {
+ if (p1) jump .Ltrue_inf
+ p2 = dfclass(B,#0x01)
+ }
+ {
+ if (p0) jump .Linvalid_zeroinf
+ if (p2) jump .Ltrue_zero // so return zero
+ TMP = ##0x7c000000
+ }
+ // We are left with a normal or subnormal times a subnormal, with A > B.
+ // If A and B are both very small (exp(a) < BIAS-MANTBITS),
+ // we collapse to a single sticky bit, which we can round easily.
+ // If A*B might be bigger, decrease A's exponent, increase B's exponent,
+ // and try again.
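+ //
+ // Hedged C sketch of the rebalancing retry (illustrative names):
+ //
+ // int k = clz64(b_abs) - EXPBITS; // B's extra leading zeros
+ // b_mant <<= k;                   // normalize B's mantissa
+ // a_exp  -= k;                    // compensate: A*B is unchanged
+ // goto retry;                     // the jump __hexagon_muldf3 below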
+ {
+ p0 = bitsclr(AH,TMP)
+ if (p0.new) jump:nt .Lmul_tiny
+ }
+ {
+ TMP = cl0(BTMP)
+ }
+ {
+ TMP = add(TMP,#-EXPBITS)
+ }
+ {
+ BTMP = asl(BTMP,TMP)
+ }
+ {
+ B = insert(BTMP,#63,#0)
+ AH -= asl(TMP,#HI_MANTBITS)
+ }
+ jump __hexagon_muldf3
+.Lmul_tiny:
+ {
+ TMP = USR
+ A = xor(A,B) // get sign bit
+ }
+ {
+ TMP = or(TMP,#0x30) // Inexact + Underflow
+ A = insert(ONE,#63,#0) // put in rounded up value
+ BTMPH = extractu(TMP,#2,#SR_ROUND_OFF) // get rounding mode
+ }
+ {
+ USR = TMP
+ p0 = cmp.gt(BTMPH,#1) // rounding towards +/- infinity?
+ if (!p0.new) AL = #0 // if not, result is +/-0
+ BTMPH ^= lsr(AH,#31) // fold sign into mode: 3 = away from zero
+ }
+ {
+ p0 = cmp.eq(BTMPH,#3) // rounding away from zero (matching inf)?
+ if (!p0.new) AL = #0 // if not, flush to +/-0
+ jumpr r31
+ }
+.Linvalid_zeroinf:
+ {
+ TMP = USR
+ }
+ {
+ A = #-1
+ TMP = or(TMP,#2)
+ }
+ {
+ USR = TMP
+ }
+ {
+ p0 = dfcmp.uo(A,A) // force exception if enabled
+ jumpr r31
+ }
+.Linvalid_nan:
+ {
+ p0 = dfclass(B,#0x0f) // if B is not NaN
+ TMP = convert_df2sf(A) // will generate invalid if sNaN
+ if (p0.new) B = A // make it whatever A is
+ }
+ {
+ BL = convert_df2sf(B) // will generate invalid if sNaN
+ A = #-1
+ jumpr r31
+ }
+ .falign
+.Ltrue_zero:
+ {
+ A = B
+ B = A
+ }
+.Ltrue_inf:
+ {
+ BH = extract(BH,#1,#31)
+ }
+ {
+ AH ^= asl(BH,#31)
+ jumpr r31
+ }
+END(__hexagon_muldf3)
+
+#undef ATMP
+#undef ATMPL
+#undef ATMPH
+#undef BTMP
+#undef BTMPL
+#undef BTMPH
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/dfmul.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/dfsqrt.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/dfsqrt.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/dfsqrt.S (revision 351984)
@@ -0,0 +1,405 @@
+//===----------------------Hexagon builtin routine ------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// Double Precision square root
+
+#define EXP r28
+
+#define A r1:0
+#define AH r1
+#define AL r0
+
+#define SFSH r3:2
+#define SF_S r3
+#define SF_H r2
+
+#define SFHALF_SONE r5:4
+#define S_ONE r4
+#define SFHALF r5
+#define SF_D r6
+#define SF_E r7
+#define RECIPEST r8
+#define SFRAD r9
+
+#define FRACRAD r11:10
+#define FRACRADH r11
+#define FRACRADL r10
+
+#define ROOT r13:12
+#define ROOTHI r13
+#define ROOTLO r12
+
+#define PROD r15:14
+#define PRODHI r15
+#define PRODLO r14
+
+#define P_TMP p0
+#define P_EXP1 p1
+#define NORMAL p2
+
+#define SF_EXPBITS 8
+#define SF_MANTBITS 23
+
+#define DF_EXPBITS 11
+#define DF_MANTBITS 52
+
+#define DF_BIAS 0x3ff
+
+#define DFCLASS_ZERO 0x01
+#define DFCLASS_NORMAL 0x02
+#define DFCLASS_DENORMAL 0x04
+#define DFCLASS_INFINITE 0x08
+#define DFCLASS_NAN 0x10
+
+#define Q6_ALIAS(TAG) .global __qdsp_##TAG ; .set __qdsp_##TAG, __hexagon_##TAG; .type __qdsp_##TAG,@function
+#define FAST_ALIAS(TAG) .global __hexagon_fast_##TAG ; .set __hexagon_fast_##TAG, __hexagon_##TAG; .type __hexagon_fast_##TAG,@function
+#define FAST2_ALIAS(TAG) .global __hexagon_fast2_##TAG ; .set __hexagon_fast2_##TAG, __hexagon_##TAG; .type __hexagon_fast2_##TAG,@function
+#define END(TAG) .size TAG,.-TAG
+
+ .text
+ .global __hexagon_sqrtdf2
+ .type __hexagon_sqrtdf2,@function
+ .global __hexagon_sqrt
+ .type __hexagon_sqrt,@function
+ Q6_ALIAS(sqrtdf2)
+ Q6_ALIAS(sqrt)
+ FAST_ALIAS(sqrtdf2)
+ FAST_ALIAS(sqrt)
+ FAST2_ALIAS(sqrtdf2)
+ FAST2_ALIAS(sqrt)
+ .type sqrt,@function
+ .p2align 5
+__hexagon_sqrtdf2:
+__hexagon_sqrt:
+ {
+ PROD = extractu(A,#SF_MANTBITS+1,#DF_MANTBITS-SF_MANTBITS)
+ EXP = extractu(AH,#DF_EXPBITS,#DF_MANTBITS-32)
+ SFHALF_SONE = combine(##0x3f000004,#1)
+ }
+ {
+ NORMAL = dfclass(A,#DFCLASS_NORMAL) // Is it normal
+ NORMAL = cmp.gt(AH,#-1) // and positive?
+ if (!NORMAL.new) jump:nt .Lsqrt_abnormal
+ SFRAD = or(SFHALF,PRODLO)
+ }
+#undef NORMAL
+.Ldenormal_restart:
+ {
+ FRACRAD = A
+ SF_E,P_TMP = sfinvsqrta(SFRAD)
+ SFHALF = and(SFHALF,#-16)
+ SFSH = #0
+ }
+#undef A
+#undef AH
+#undef AL
+#define ERROR r1:0
+#define ERRORHI r1
+#define ERRORLO r0
+ // SF_E : reciprocal square root
+ // SF_H : half rsqrt
+ // SF_S : square root
+ // SF_D : error term
+ // SFHALF: 0.5
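+ //
+ // A hedged C sketch of the refinement below (the real code uses fused
+ // single-precision :lib ops seeded by sfinvsqrta; names illustrative):
+ //
+ // float e = rsqrt_est(x);    // ~1/sqrt(x)
+ // float s = x * e;           // s0 ~ sqrt(x)
+ // float h = 0.5f * e;        // h0 ~ 0.5/sqrt(x)
+ // float d = 0.5f - s * h;    // error term -> 0
+ // s += s * d; h += h * d;    // s1, h1
+ // d = 0.5f - s * h;
+ // h += h * d;                // h2: only h needs this extra step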
+ {
+ SF_S += sfmpy(SF_E,SFRAD):lib // s0: root
+ SF_H += sfmpy(SF_E,SFHALF):lib // h0: 0.5*y0. Could also decrement exponent...
+ SF_D = SFHALF
+#undef SFRAD
+#define SHIFTAMT r9
+ SHIFTAMT = and(EXP,#1)
+ }
+ {
+ SF_D -= sfmpy(SF_S,SF_H):lib // d0: 0.5-H*S = 0.5-0.5*~1
+ FRACRADH = insert(S_ONE,#DF_EXPBITS+1,#DF_MANTBITS-32) // replace upper bits with hidden
+ P_EXP1 = cmp.gtu(SHIFTAMT,#0)
+ }
+ {
+ SF_S += sfmpy(SF_S,SF_D):lib // s1: refine sqrt
+ SF_H += sfmpy(SF_H,SF_D):lib // h1: refine half-recip
+ SF_D = SFHALF
+ SHIFTAMT = mux(P_EXP1,#8,#9)
+ }
+ {
+ SF_D -= sfmpy(SF_S,SF_H):lib // d1: error term
+ FRACRAD = asl(FRACRAD,SHIFTAMT) // Move fracrad bits to right place
+ SHIFTAMT = mux(P_EXP1,#3,#2)
+ }
+ {
+ SF_H += sfmpy(SF_H,SF_D):lib // d2: rsqrt
+ // cool trick: half of 1/sqrt(x) has same mantissa as 1/sqrt(x).
+ PROD = asl(FRACRAD,SHIFTAMT) // fracrad<<(2+exp1)
+ }
+ {
+ SF_H = and(SF_H,##0x007fffff)
+ }
+ {
+ SF_H = add(SF_H,##0x00800000 - 3)
+ SHIFTAMT = mux(P_EXP1,#7,#8)
+ }
+ {
+ RECIPEST = asl(SF_H,SHIFTAMT)
+ SHIFTAMT = mux(P_EXP1,#15-(1+1),#15-(1+0))
+ }
+ {
+ ROOT = mpyu(RECIPEST,PRODHI) // root = mpyu_full(recipest,hi(fracrad<<(2+exp1)))
+ }
+
+#undef SFSH // r3:2
+#undef SF_H // r2
+#undef SF_S // r3
+#undef S_ONE // r4
+#undef SFHALF // r5
+#undef SFHALF_SONE // r5:4
+#undef SF_D // r6
+#undef SF_E // r7
+
+#define HL r3:2
+#define LL r5:4
+#define HH r7:6
+
+#undef P_EXP1
+#define P_CARRY0 p1
+#define P_CARRY1 p2
+#define P_CARRY2 p3
+
+ // Iteration 0
+ // Maybe we can save a cycle by starting with ERROR=asl(fracrad); then,
+ // as we multiply, we could shift and subtract instead of shift and add?
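+ //
+ // Hedged sketch of one correction step (illustrative):
+ // error = (frac << scale) - root*root;        // remainder at this scale
+ // root += ((error >> 32) * recipest) >> shift; // scaled Newton update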
+ {
+ ERROR = asl(FRACRAD,#15)
+ PROD = mpyu(ROOTHI,ROOTHI)
+ P_CARRY0 = cmp.eq(r0,r0)
+ }
+ {
+ ERROR -= asl(PROD,#15)
+ PROD = mpyu(ROOTHI,ROOTLO)
+ P_CARRY1 = cmp.eq(r0,r0)
+ }
+ {
+ ERROR -= lsr(PROD,#16)
+ P_CARRY2 = cmp.eq(r0,r0)
+ }
+ {
+ ERROR = mpyu(ERRORHI,RECIPEST)
+ }
+ {
+ ROOT += lsr(ERROR,SHIFTAMT)
+ SHIFTAMT = add(SHIFTAMT,#16)
+ ERROR = asl(FRACRAD,#31) // for next iter
+ }
+ // Iteration 1
+ {
+ PROD = mpyu(ROOTHI,ROOTHI)
+ ERROR -= mpyu(ROOTHI,ROOTLO) // amount is 31, no shift needed
+ }
+ {
+ ERROR -= asl(PROD,#31)
+ PROD = mpyu(ROOTLO,ROOTLO)
+ }
+ {
+ ERROR -= lsr(PROD,#33)
+ }
+ {
+ ERROR = mpyu(ERRORHI,RECIPEST)
+ }
+ {
+ ROOT += lsr(ERROR,SHIFTAMT)
+ SHIFTAMT = add(SHIFTAMT,#16)
+ ERROR = asl(FRACRAD,#47) // for next iter
+ }
+ // Iteration 2
+ {
+ PROD = mpyu(ROOTHI,ROOTHI)
+ }
+ {
+ ERROR -= asl(PROD,#47)
+ PROD = mpyu(ROOTHI,ROOTLO)
+ }
+ {
+ ERROR -= asl(PROD,#16) // bidir shr 31-47
+ PROD = mpyu(ROOTLO,ROOTLO)
+ }
+ {
+ ERROR -= lsr(PROD,#17) // 64-47
+ }
+ {
+ ERROR = mpyu(ERRORHI,RECIPEST)
+ }
+ {
+ ROOT += lsr(ERROR,SHIFTAMT)
+ }
+#undef ERROR
+#undef PROD
+#undef PRODHI
+#undef PRODLO
+#define REM_HI r15:14
+#define REM_HI_HI r15
+#define REM_LO r1:0
+#undef RECIPEST
+#undef SHIFTAMT
+#define TWOROOT_LO r9:8
+ // Adjust Root
+ {
+ HL = mpyu(ROOTHI,ROOTLO)
+ LL = mpyu(ROOTLO,ROOTLO)
+ REM_HI = #0
+ REM_LO = #0
+ }
+ {
+ HL += lsr(LL,#33)
+ LL += asl(HL,#33)
+ P_CARRY0 = cmp.eq(r0,r0)
+ }
+ {
+ HH = mpyu(ROOTHI,ROOTHI)
+ REM_LO = sub(REM_LO,LL,P_CARRY0):carry
+ TWOROOT_LO = #1
+ }
+ {
+ HH += lsr(HL,#31)
+ TWOROOT_LO += asl(ROOT,#1)
+ }
+#undef HL
+#undef LL
+#define REM_HI_TMP r3:2
+#define REM_HI_TMP_HI r3
+#define REM_LO_TMP r5:4
+ {
+ REM_HI = sub(FRACRAD,HH,P_CARRY0):carry
+ REM_LO_TMP = sub(REM_LO,TWOROOT_LO,P_CARRY1):carry
+#undef FRACRAD
+#undef HH
+#define ZERO r11:10
+#define ONE r7:6
+ ONE = #1
+ ZERO = #0
+ }
+ {
+ REM_HI_TMP = sub(REM_HI,ZERO,P_CARRY1):carry
+ ONE = add(ROOT,ONE)
+ EXP = add(EXP,#-DF_BIAS) // subtract bias --> signed exp
+ }
+ {
+ // If carry set, no borrow: result was still positive
+ if (P_CARRY1) ROOT = ONE
+ if (P_CARRY1) REM_LO = REM_LO_TMP
+ if (P_CARRY1) REM_HI = REM_HI_TMP
+ }
+ {
+ REM_LO_TMP = sub(REM_LO,TWOROOT_LO,P_CARRY2):carry
+ ONE = #1
+ EXP = asr(EXP,#1) // divide signed exp by 2
+ }
+ {
+ REM_HI_TMP = sub(REM_HI,ZERO,P_CARRY2):carry
+ ONE = add(ROOT,ONE)
+ }
+ {
+ if (P_CARRY2) ROOT = ONE
+ if (P_CARRY2) REM_LO = REM_LO_TMP
+ // since tworoot <= 2^32, remhi must be zero
+#undef REM_HI_TMP
+#undef REM_HI_TMP_HI
+#define S_ONE r2
+#define ADJ r3
+ S_ONE = #1
+ }
+ {
+ P_TMP = cmp.eq(REM_LO,ZERO) // is the remainder's low part zero (exact)?
+ if (!P_TMP.new) ROOTLO = or(ROOTLO,S_ONE) // if not, set the sticky bit
+ ADJ = cl0(ROOT)
+ EXP = add(EXP,#-63)
+ }
+#undef REM_LO
+#define RET r1:0
+#define RETHI r1
+ {
+ RET = convert_ud2df(ROOT) // set up mantissa, maybe set inexact flag
+ EXP = add(EXP,ADJ) // add back bias
+ }
+ {
+ RETHI += asl(EXP,#DF_MANTBITS-32) // add exponent adjust
+ jumpr r31
+ }
+#undef REM_LO_TMP
+#undef REM_HI_TMP
+#undef REM_HI_TMP_HI
+#undef REM_LO
+#undef REM_HI
+#undef TWOROOT_LO
+
+#undef RET
+#define A r1:0
+#define AH r1
+#define AL r0
+#undef S_ONE
+#define TMP r3:2
+#define TMPHI r3
+#define TMPLO r2
+#undef P_CARRY0
+#define P_NEG p1
+
+
+#define SFHALF r5
+#define SFRAD r9
+.Lsqrt_abnormal:
+ {
+ P_TMP = dfclass(A,#DFCLASS_ZERO) // zero?
+ if (P_TMP.new) jumpr:t r31
+ }
+ {
+ P_TMP = dfclass(A,#DFCLASS_NAN)
+ if (P_TMP.new) jump:nt .Lsqrt_nan
+ }
+ {
+ P_TMP = cmp.gt(AH,#-1)
+ if (!P_TMP.new) jump:nt .Lsqrt_invalid_neg
+ if (!P_TMP.new) EXP = ##0x7F800001 // sNaN
+ }
+ {
+ P_TMP = dfclass(A,#DFCLASS_INFINITE)
+ if (P_TMP.new) jumpr:nt r31
+ }
+ // If we got here, we're denormal
+ // prepare to restart
+ {
+ A = extractu(A,#DF_MANTBITS,#0) // Extract mantissa
+ }
+ {
+ EXP = add(clb(A),#-DF_EXPBITS) // how much to normalize?
+ }
+ {
+ A = asl(A,EXP) // Shift mantissa
+ EXP = sub(#1,EXP) // Form exponent
+ }
+ {
+ AH = insert(EXP,#1,#DF_MANTBITS-32) // insert lsb of exponent
+ }
+ {
+ TMP = extractu(A,#SF_MANTBITS+1,#DF_MANTBITS-SF_MANTBITS) // get sf value (mant+exp1)
+ SFHALF = ##0x3f000004 // form half constant
+ }
+ {
+ SFRAD = or(SFHALF,TMPLO) // form sf value
+ SFHALF = and(SFHALF,#-16)
+ jump .Ldenormal_restart // restart
+ }
+.Lsqrt_nan:
+ {
+ EXP = convert_df2sf(A) // if sNaN, get invalid
+ A = #-1 // qNaN
+ jumpr r31
+ }
+.Lsqrt_invalid_neg:
+ {
+ A = convert_sf2df(EXP) // Invalid,NaNval
+ jumpr r31
+ }
+END(__hexagon_sqrt)
+END(__hexagon_sqrtdf2)
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/dfsqrt.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/divdi3.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/divdi3.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/divdi3.S (revision 351984)
@@ -0,0 +1,84 @@
+//===----------------------Hexagon builtin routine ------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+ .macro FUNCTION_BEGIN name
+ .text
+ .p2align 5
+ .globl \name
+ .type \name, @function
+\name:
+ .endm
+
+ .macro FUNCTION_END name
+ .size \name, . - \name
+ .endm
+
+
+FUNCTION_BEGIN __hexagon_divdi3
+ {
+ p2 = tstbit(r1,#31)
+ p3 = tstbit(r3,#31)
+ }
+ {
+ r1:0 = abs(r1:0)
+ r3:2 = abs(r3:2)
+ }
+ {
+ r6 = cl0(r1:0) // count leading 0's of dividend (numerator)
+ r7 = cl0(r3:2) // count leading 0's of divisor (denominator)
+ r5:4 = r3:2 // divisor moved into working registers
+ r3:2 = r1:0 // dividend is the initial remainder, r3:2 contains remainder
+ }
+ {
+ p3 = xor(p2,p3)
+ r10 = sub(r7,r6) // left shift count for bit & divisor
+ r1:0 = #0 // initialize quotient to 0
+ r15:14 = #1 // initialize bit to 1
+ }
+ {
+ r11 = add(r10,#1) // loop count is 1 more than shift count
+ r13:12 = lsl(r5:4,r10) // shift divisor msb into same bit position as dividend msb
+ r15:14 = lsl(r15:14,r10) // shift the bit left by same amount as divisor
+ }
+ {
+ p0 = cmp.gtu(r5:4,r3:2) // check if divisor > dividend
+ loop0(1f,r11) // register loop
+ }
+ {
+ if (p0) jump .hexagon_divdi3_return // if divisor > dividend, we're done, so return
+ }
+ .falign
+1:
+ {
+ p0 = cmp.gtu(r13:12,r3:2) // set predicate reg if shifted divisor > current remainder
+ }
+ {
+ r7:6 = sub(r3:2, r13:12) // subtract shifted divisor from current remainder
+ r9:8 = add(r1:0, r15:14) // save current quotient to temp (r9:8)
+ }
+ {
+ r1:0 = vmux(p0, r1:0, r9:8) // choose either current quotient or new quotient (r9:8)
+ r3:2 = vmux(p0, r3:2, r7:6) // choose either current remainder or new remainder (r7:6)
+ }
+ {
+ r15:14 = lsr(r15:14, #1) // shift bit right by 1 for next iteration
+ r13:12 = lsr(r13:12, #1) // shift "shifted divisor" right by 1 for next iteration
+ }:endloop0
+
+.hexagon_divdi3_return:
+ {
+ r3:2 = neg(r1:0)
+ }
+ {
+ r1:0 = vmux(p3,r3:2,r1:0)
+ jumpr r31
+ }
+FUNCTION_END __hexagon_divdi3
+
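+// For reference (not part of the original source): a hedged C sketch of
+// the shift-subtract (restoring) division above; clz64() stands in for
+// the cl0 instruction, and d == 0 is undefined here as in the assembly.
+//
+// unsigned long long udiv64(unsigned long long n, unsigned long long d) {
+//   if (d > n) return 0;                // quotient is zero
+//   int sh = clz64(d) - clz64(n);       // align divisor MSB with dividend MSB
+//   unsigned long long bit = 1ull << sh, q = 0;
+//   d <<= sh;
+//   for (int i = 0; i <= sh; ++i) {     // sh+1 iterations, matching loop0
+//     if (n >= d) { n -= d; q |= bit; } // subtract whenever it fits
+//     d >>= 1; bit >>= 1;
+//   }
+//   return q;                           // n now holds the remainder
+// }
+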
+ .globl __qdsp_divdi3
+ .set __qdsp_divdi3, __hexagon_divdi3
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/divdi3.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/divsi3.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/divsi3.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/divsi3.S (revision 351984)
@@ -0,0 +1,83 @@
+//===----------------------Hexagon builtin routine ------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+
+
+ .macro FUNCTION_BEGIN name
+ .text
+ .p2align 5
+ .globl \name
+ .type \name, @function
+\name:
+ .endm
+
+ .macro FUNCTION_END name
+ .size \name, . - \name
+ .endm
+
+
+FUNCTION_BEGIN __hexagon_divsi3
+ {
+ p0 = cmp.ge(r0,#0)
+ p1 = cmp.ge(r1,#0)
+ r1 = abs(r0)
+ r2 = abs(r1)
+ }
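+ // NB: all reads in a packet happen before its writes, so abs(r1) above
+ // consumed the incoming divisor, not the |r0| just written into r1.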
+ {
+ r3 = cl0(r1)
+ r4 = cl0(r2)
+ r5 = sub(r1,r2)
+ p2 = cmp.gtu(r2,r1)
+ }
+#if (__HEXAGON_ARCH__ == 60)
+ {
+ r0 = #0
+ p1 = xor(p0,p1)
+ p0 = cmp.gtu(r2,r5)
+ }
+ if (p2) jumpr r31
+#else
+ {
+ r0 = #0
+ p1 = xor(p0,p1)
+ p0 = cmp.gtu(r2,r5)
+ if (p2) jumpr r31
+ }
+#endif
+ {
+ r0 = mux(p1,#-1,#1)
+ if (p0) jumpr r31
+ r4 = sub(r4,r3)
+ r3 = #1
+ }
+ {
+ r0 = #0
+ r3:2 = vlslw(r3:2,r4)
+ loop0(1f,r4)
+ }
+ .falign
+1:
+ {
+ p0 = cmp.gtu(r2,r1)
+ if (!p0.new) r1 = sub(r1,r2)
+ if (!p0.new) r0 = add(r0,r3)
+ r3:2 = vlsrw(r3:2,#1)
+ }:endloop0
+ {
+ p0 = cmp.gtu(r2,r1)
+ if (!p0.new) r0 = add(r0,r3)
+ if (!p1) jumpr r31
+ }
+ {
+ r0 = neg(r0)
+ jumpr r31
+ }
+FUNCTION_END __hexagon_divsi3
+
+ .globl __qdsp_divsi3
+ .set __qdsp_divsi3, __hexagon_divsi3
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/divsi3.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/fabs_opt.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/fabs_opt.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/fabs_opt.S (revision 351984)
@@ -0,0 +1,36 @@
+//===----------------------Hexagon builtin routine ------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+.macro FUNCTION_BEGIN name
+.text
+.p2align 5
+.globl \name
+.type \name, @function
+\name:
+.endm
+
+.macro FUNCTION_END name
+.size \name, . - \name
+.endm
+
+FUNCTION_BEGIN fabs
+ {
+ r1 = clrbit(r1, #31)
+ jumpr r31
+ }
+FUNCTION_END fabs
+
+FUNCTION_BEGIN fabsf
+ {
+ r0 = clrbit(r0, #31)
+ jumpr r31
+ }
+FUNCTION_END fabsf
+
+ .globl fabsl
+ .set fabsl, fabs
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/fabs_opt.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/fastmath2_dlib_asm.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/fastmath2_dlib_asm.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/fastmath2_dlib_asm.S (revision 351984)
@@ -0,0 +1,490 @@
+//===----------------------Hexagon builtin routine ------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/* ==================================================================== */
+/* FUNCTIONS Optimized double floating point operators */
+/* ==================================================================== */
+/* c = dadd_asm(a, b) */
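+/* Note (an assumption, not from the original source): the fast2_QDOUBLE
+   format appears to pack a signed 16-bit exponent into the low halfword
+   and the mantissa into the remaining upper bits; MANTMASK and EXP_MASK
+   in the reference code below select those two fields. */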
+/* ==================================================================== *
+fast2_QDOUBLE fast2_dadd(fast2_QDOUBLE a,fast2_QDOUBLE b) {
+ fast2_QDOUBLE c;
+ lint manta = a & MANTMASK;
+ int expa = Q6_R_sxth_R(a) ;
+ lint mantb = b & MANTMASK;
+ int expb = Q6_R_sxth_R(b) ;
+ int exp, expdiff, j, k, hi, lo, cn;
+ lint mant;
+
+ expdiff = (int) Q6_P_vabsdiffh_PP(a, b);
+ expdiff = Q6_R_sxth_R(expdiff) ;
+ if (expdiff > 63) { expdiff = 62;}
+ if (expa > expb) {
+ exp = expa + 1;
+ expa = 1;
+ expb = expdiff + 1;
+ } else {
+ exp = expb + 1;
+ expb = 1;
+ expa = expdiff + 1;
+ }
+ mant = (manta>>expa) + (mantb>>expb);
+
+ hi = (int) (mant>>32);
+ lo = (int) (mant);
+
+ k = Q6_R_normamt_R(hi);
+ if(hi == 0 || hi == -1) k = 31+Q6_R_normamt_R(lo);
+
+ mant = (mant << k);
+ cn = (mant == 0x8000000000000000LL);
+ exp = exp - k + cn;
+
+ if (mant == 0 || mant == -1) exp = 0x8001;
+ c = (mant & MANTMASK) | (((lint) exp) & EXP_MASK);
+ return(c);
+ }
+ * ==================================================================== */
+ .text
+ .global fast2_dadd_asm
+ .type fast2_dadd_asm, @function
+fast2_dadd_asm:
+#define manta R0
+#define mantexpa R1:0
+#define lmanta R1:0
+#define mantb R2
+#define mantexpb R3:2
+#define lmantb R3:2
+#define expa R4
+#define expb R5
+#define mantexpd R7:6
+#define expd R6
+#define exp R8
+#define c63 R9
+#define lmant R1:0
+#define manth R1
+#define mantl R0
+#define minmin R11:10 // exactly 0x0000000000008001LL
+#define minminl R10
+#define k R4
+#define ce P0
+ .falign
+ {
+ mantexpd = VABSDIFFH(mantexpa, mantexpb) //represented as 0x08001LL
+ c63 = #62
+ expa = SXTH(manta)
+ expb = SXTH(mantb)
+ } {
+ expd = SXTH(expd)
+ ce = CMP.GT(expa, expb);
+ if ( ce.new) exp = add(expa, #1)
+ if (!ce.new) exp = add(expb, #1)
+ } {
+ if ( ce) expa = #1
+ if (!ce) expb = #1
+ manta.L = #0
+ expd = MIN(expd, c63)
+ } {
+ if (!ce) expa = add(expd, #1)
+ if ( ce) expb = add(expd, #1)
+ mantb.L = #0
+ minmin = #0
+ } {
+ lmanta = ASR(lmanta, expa)
+ lmantb = ASR(lmantb, expb)
+ } {
+ lmant = add(lmanta, lmantb)
+ minminl.L = #0x8001
+ } {
+ k = clb(lmant)
+ c63 = #58
+ } {
+ k = add(k, #-1)
+ p0 = cmp.gt(k, c63)
+ } {
+ mantexpa = ASL(lmant, k)
+ exp = SUB(exp, k)
+ if(p0) jump .Ldenorma
+ } {
+ manta = insert(exp, #16, #0)
+ jumpr r31
+ }
+.Ldenorma:
+ {
+ mantexpa = minmin
+ jumpr r31
+ }
+/* =================================================================== *
+ fast2_QDOUBLE fast2_dsub(fast2_QDOUBLE a,fast2_QDOUBLE b) {
+ fast2_QDOUBLE c;
+ lint manta = a & MANTMASK;
+ int expa = Q6_R_sxth_R(a) ;
+ lint mantb = b & MANTMASK;
+ int expb = Q6_R_sxth_R(b) ;
+ int exp, expdiff, j, k;
+ lint mant;
+
+ expdiff = (int) Q6_P_vabsdiffh_PP(a, b);
+ expdiff = Q6_R_sxth_R(expdiff) ;
+ if (expdiff > 63) { expdiff = 62;}
+ if (expa > expb) {
+ exp = expa + 1;
+ expa = 1;
+ expb = expdiff + 1;
+ } else {
+ exp = expb + 1;
+ expb = 1;
+ expa = expdiff + 1;
+ }
+ mant = (manta>>expa) - (mantb>>expb);
+ k = Q6_R_clb_P(mant)-1;
+ mant = (mant << k);
+ exp = exp - k;
+ if (mant == 0 || mant == -1) exp = 0x8001;
+ c = (mant & MANTMASK) | (((lint) exp) & EXP_MASK);
+ return(c);
+ }
+ * ==================================================================== */
+ .text
+ .global fast2_dsub_asm
+ .type fast2_dsub_asm, @function
+fast2_dsub_asm:
+
+#define manta R0
+#define mantexpa R1:0
+#define lmanta R1:0
+#define mantb R2
+#define mantexpb R3:2
+#define lmantb R3:2
+#define expa R4
+#define expb R5
+#define mantexpd R7:6
+#define expd R6
+#define exp R8
+#define c63 R9
+#define lmant R1:0
+#define manth R1
+#define mantl R0
+#define minmin R11:10 // exactly 0x0000000000008001LL
+#define minminl R10
+#define k R4
+#define ce P0
+ .falign
+ {
+ mantexpd = VABSDIFFH(mantexpa, mantexpb) //represented as 0x08001LL
+ c63 = #62
+ expa = SXTH(manta)
+ expb = SXTH(mantb)
+ } {
+ expd = SXTH(expd)
+ ce = CMP.GT(expa, expb);
+ if ( ce.new) exp = add(expa, #1)
+ if (!ce.new) exp = add(expb, #1)
+ } {
+ if ( ce) expa = #1
+ if (!ce) expb = #1
+ manta.L = #0
+ expd = MIN(expd, c63)
+ } {
+ if (!ce) expa = add(expd, #1)
+ if ( ce) expb = add(expd, #1)
+ mantb.L = #0
+ minmin = #0
+ } {
+ lmanta = ASR(lmanta, expa)
+ lmantb = ASR(lmantb, expb)
+ } {
+ lmant = sub(lmanta, lmantb)
+ minminl.L = #0x8001
+ } {
+ k = clb(lmant)
+ c63 = #58
+ } {
+ k = add(k, #-1)
+ p0 = cmp.gt(k, c63)
+ } {
+ mantexpa = ASL(lmant, k)
+ exp = SUB(exp, k)
+ if(p0) jump .Ldenorm
+ } {
+ manta = insert(exp, #16, #0)
+ jumpr r31
+ }
+.Ldenorm:
+ {
+ mantexpa = minmin
+ jumpr r31
+ }
+/* ==================================================================== *
+ fast2_QDOUBLE fast2_dmpy(fast2_QDOUBLE a,fast2_QDOUBLE b) {
+ fast2_QDOUBLE c;
+ lint manta = a & MANTMASK;
+ int expa = Q6_R_sxth_R(a) ;
+ lint mantb = b & MANTMASK;
+ int expb = Q6_R_sxth_R(b) ;
+ int exp, k;
+ lint mant;
+ int hia, hib, hi, lo;
+ unsigned int loa, lob;
+
+ hia = (int)(a >> 32);
+ loa = Q6_R_extractu_RII((int)manta, 31, 1);
+ hib = (int)(b >> 32);
+ lob = Q6_R_extractu_RII((int)mantb, 31, 1);
+
+ mant = Q6_P_mpy_RR(hia, lob);
+ mant = Q6_P_mpyacc_RR(mant,hib, loa);
+ mant = (mant >> 30) + (Q6_P_mpy_RR(hia, hib)<<1);
+
+ hi = (int) (mant>>32);
+
+ k = Q6_R_normamt_R(hi);
+ mant = mant << k;
+ exp = expa + expb - k;
+ if (mant == 0 || mant == -1) exp = 0x8001;
+ c = (mant & MANTMASK) | (((lint) exp) & EXP_MASK);
+ return(c);
+ }
+ * ==================================================================== */
+ .text
+ .global fast2_dmpy_asm
+ .type fast2_dmpy_asm, @function
+fast2_dmpy_asm:
+
+#define mantal R0
+#define mantah R1
+#define mantexpa R1:0
+#define mantbl R2
+#define mantbh R3
+#define mantexpb R3:2
+#define expa R4
+#define expb R5
+#define c8001 R12
+#define mantexpd R7:6
+#define mantdh R7
+#define exp R8
+#define lmantc R11:10
+#define kb R9
+#define guard R11
+#define mantal_ R12
+#define mantbl_ R13
+#define min R15:14
+#define minh R15
+
+ .falign
+ {
+ mantbl_= lsr(mantbl, #16)
+ expb = sxth(mantbl)
+ expa = sxth(mantal)
+ mantal_= lsr(mantal, #16)
+ }
+ {
+ lmantc = mpy(mantah, mantbh)
+ mantexpd = mpy(mantah, mantbl_)
+ mantal.L = #0x0
+ min = #0
+ }
+ {
+ lmantc = add(lmantc, lmantc)
+ mantexpd+= mpy(mantbh, mantal_)
+ mantbl.L = #0x0
+ minh.H = #0x8000
+ }
+ {
+ mantexpd = asr(mantexpd, #15)
+ c8001.L = #0x8001
+ p1 = cmp.eq(mantexpa, mantexpb)
+ }
+ {
+ mantexpd = add(mantexpd, lmantc)
+ exp = add(expa, expb)
+ p2 = cmp.eq(mantexpa, min)
+ }
+ {
+ kb = clb(mantexpd)
+ mantexpb = abs(mantexpd)
+ guard = #58
+ }
+ {
+ p1 = and(p1, p2)
+ exp = sub(exp, kb)
+ kb = add(kb, #-1)
+ p0 = cmp.gt(kb, guard)
+ }
+ {
+ exp = add(exp, #1)
+ mantexpa = asl(mantexpd, kb)
+ if(p1) jump .Lsat //rarely happens
+ }
+ {
+ mantal = insert(exp,#16, #0)
+ if(!p0) jumpr r31
+ }
+ {
+ mantal = insert(c8001,#16, #0)
+ jumpr r31
+ }
+.Lsat:
+ {
+ mantexpa = #-1
+ }
+ {
+ mantexpa = lsr(mantexpa, #1)
+ }
+ {
+ mantal = insert(exp,#16, #0)
+ jumpr r31
+ }
+
+/* ==================================================================== *
+ int fast2_qd2f(fast2_QDOUBLE a) {
+ int exp;
+ long long int manta;
+ int ic, rnd, mantb;
+
+ manta = a>>32;
+ exp = Q6_R_sxth_R(a) ;
+ ic = 0x80000000 & manta;
+ manta = Q6_R_abs_R_sat(manta);
+ rnd = 0x40;
+ exp = (exp + 126);
+ if((manta & 0xff) == rnd) rnd = 0x00;
+ mantb = (manta + rnd)>>7;
+ if((manta & 0x7fffffc0) == 0x7fffffc0) {
+ manta = 0x0; exp++;
+ } else {
+ manta= mantb & 0x007fffff;
+ }
+ exp = (exp << 23) & 0x7fffffc0;
+ ic = Q6_R_addacc_RR(ic, exp, manta);
+ return (ic);
+ }
+ * ==================================================================== */
+
+ .text
+ .global fast2_qd2f_asm
+ .type fast2_qd2f_asm, @function
+fast2_qd2f_asm:
+#define mantah R1
+#define mantal R0
+#define cff R0
+#define mant R3
+#define expo R4
+#define rnd R5
+#define mask R6
+#define c07f R7
+#define c80 R0
+#define mantb R2
+#define ic R0
+
+ .falign
+ {
+ mant = abs(mantah):sat
+ expo = sxth(mantal)
+ rnd = #0x40
+ mask.L = #0xffc0
+ }
+ {
+ cff = extractu(mant, #8, #0)
+ p2 = cmp.gt(expo, #126)
+ p3 = cmp.ge(expo, #-126)
+ mask.H = #0x7fff
+ }
+ {
+ p1 = cmp.eq(cff,#0x40)
+ if(p1.new) rnd = #0
+ expo = add(expo, #126)
+ if(!p3) jump .Lmin
+ }
+ {
+ p0 = bitsset(mant, mask)
+ c80.L = #0x0000
+ mantb = add(mant, rnd)
+ c07f = lsr(mask, #8)
+ }
+ {
+ if(p0) expo = add(expo, #1)
+ if(p0) mant = #0
+ mantb = lsr(mantb, #7)
+ c80.H = #0x8000
+ }
+ {
+ ic = and(c80, mantah)
+ mask &= asl(expo, #23)
+ if(!p0) mant = and(mantb, c07f)
+ if(p2) jump .Lmax
+ }
+ {
+ ic += add(mask, mant)
+ jumpr r31
+ }
+.Lmax:
+ {
+ ic.L = #0xffff;
+ }
+ {
+ ic.H = #0x7f7f;
+ jumpr r31
+ }
+.Lmin:
+ {
+ ic = #0x0
+ jumpr r31
+ }
+
+/* ==================================================================== *
+fast2_QDOUBLE fast2_f2qd(int ia) {
+ lint exp;
+ lint mant;
+ fast2_QDOUBLE c;
+
+ mant = ((ia << 7) | 0x40000000)&0x7fffff80 ;
+ if (ia & 0x80000000) mant = -mant;
+ exp = ((ia >> 23) & 0xFFLL) - 126;
+ c = (mant<<32) | Q6_R_zxth_R(exp);
+ return(c);
+}
+ * ==================================================================== */
+ .text
+ .global fast2_f2qd_asm
+ .type fast2_f2qd_asm, @function
+fast2_f2qd_asm:
+#define ia R0
+#define mag R3
+#define mantr R1
+#define expr R0
+#define zero R2
+#define maxneg R5:4
+#define maxnegl R4
+ .falign
+ {
+ mantr = asl(ia, #7)
+ p0 = tstbit(ia, #31)
+ maxneg = #0
+ mag = add(ia,ia)
+ }
+ {
+ mantr = setbit(mantr, #30)
+ expr= extractu(ia,#8,#23)
+ maxnegl.L = #0x8001
+ p1 = cmp.eq(mag, #0)
+ }
+ {
+ mantr= extractu(mantr, #31, #0)
+ expr= add(expr, #-126)
+ zero = #0
+ if(p1) jump .Lminqd
+ }
+ {
+ expr = zxth(expr)
+ if(p0) mantr= sub(zero, mantr)
+ jumpr r31
+ }
+.Lminqd:
+ {
+ R1:0 = maxneg
+ jumpr r31
+ }
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/fastmath2_dlib_asm.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/fastmath2_ldlib_asm.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/fastmath2_ldlib_asm.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/fastmath2_ldlib_asm.S (revision 351984)
@@ -0,0 +1,344 @@
+//===----------------------Hexagon builtin routine ------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/* ==================================================================== *
+
+fast2_QLDOUBLE fast2_ldadd(fast2_QLDOUBLE a,fast2_QLDOUBLE b) {
+ fast2_QLDOUBLE c;
+ lint manta = a & MANTMASK;
+ int expa = Q6_R_sxth_R(a) ;
+ lint mantb = b & MANTMASK;
+ int expb = Q6_R_sxth_R(b) ;
+ int exp, expdiff, j, k, hi, lo, cn;
+ lint mant;
+
+ expdiff = (int) Q6_P_vabsdiffh_PP(a, b);
+ expdiff = Q6_R_sxth_R(expdiff) ;
+ if (expdiff > 63) { expdiff = 62;}
+ if (expa > expb) {
+ exp = expa + 1;
+ expa = 1;
+ expb = expdiff + 1;
+ } else {
+ exp = expb + 1;
+ expb = 1;
+ expa = expdiff + 1;
+ }
+ mant = (manta>>expa) + (mantb>>expb);
+
+ hi = (int) (mant>>32);
+ lo = (int) (mant);
+
+ k = Q6_R_normamt_R(hi);
+ if(hi == 0 || hi == -1) k = 31+Q6_R_normamt_R(lo);
+
+ mant = (mant << k);
+ cn = (mant == 0x8000000000000000LL);
+ exp = exp - k + cn;
+
+ if (mant == 0 || mant == -1) exp = 0x8001;
+ c = (mant & MANTMASK) | (((lint) exp) & EXP_MASK);
+ return(c);
+ }
+ * ==================================================================== */
+ .text
+ .global fast2_ldadd_asm
+ .type fast2_ldadd_asm, @function
+fast2_ldadd_asm:
+#define manta R1:0
+#define lmanta R1:0
+#define mantb R3:2
+#define lmantb R3:2
+#define expa R4
+#define expb R5
+#define expd R6
+#define exp R8
+#define c63 R9
+#define lmant R1:0
+#define k R4
+#define ce P0
+#define zero R3:2
+ .falign
+ {
+ expa = memw(r29+#8)
+ expb = memw(r29+#24)
+ r7 = r0
+ }
+ {
+ expd = sub(expa, expb):sat
+ ce = CMP.GT(expa, expb);
+ if ( ce.new) exp = add(expa, #1)
+ if (!ce.new) exp = add(expb, #1)
+ } {
+ expd = abs(expd):sat
+ if ( ce) expa = #1
+ if (!ce) expb = #1
+ c63 = #62
+ } {
+ expd = MIN(expd, c63)
+ manta = memd(r29+#0)
+ mantb = memd(r29+#16)
+ } {
+ if (!ce) expa = add(expd, #1)
+ if ( ce) expb = add(expd, #1)
+ } {
+ lmanta = ASR(lmanta, expa)
+ lmantb = ASR(lmantb, expb)
+ } {
+ lmant = add(lmanta, lmantb)
+ zero = #0
+ } {
+ k = clb(lmant)
+ c63.L =#0x0001
+ } {
+ exp -= add(k, #-1) //exp = exp - (k-1)
+ k = add(k, #-1)
+ p0 = cmp.gt(k, #58)
+ c63.H =#0x8000
+ } {
+ if(!p0)memw(r7+#8) = exp
+ lmant = ASL(lmant, k)
+ if(p0) jump .Ldenorma
+ } {
+ memd(r7+#0) = lmant
+ jumpr r31
+ }
+.Ldenorma:
+ memd(r7+#0) = zero
+ {
+ memw(r7+#8) = c63
+ jumpr r31
+ }
+/* =================================================================== *
+ fast2_QLDOUBLE fast2_ldsub(fast2_QLDOUBLE a,fast2_QLDOUBLE b) {
+ fast2_QLDOUBLE c;
+ lint manta = a & MANTMASK;
+ int expa = Q6_R_sxth_R(a) ;
+ lint mantb = b & MANTMASK;
+ int expb = Q6_R_sxth_R(b) ;
+ int exp, expdiff, j, k;
+ lint mant;
+
+ expdiff = (int) Q6_P_vabsdiffh_PP(a, b);
+ expdiff = Q6_R_sxth_R(expdiff) ;
+ if (expdiff > 63) { expdiff = 62;}
+ if (expa > expb) {
+ exp = expa + 1;
+ expa = 1;
+ expb = expdiff + 1;
+ } else {
+ exp = expb + 1;
+ expb = 1;
+ expa = expdiff + 1;
+ }
+ mant = (manta>>expa) - (mantb>>expb);
+ k = Q6_R_clb_P(mant)-1;
+ mant = (mant << k);
+ exp = exp - k;
+ if (mant == 0 || mant == -1) exp = 0x8001;
+ c = (mant & MANTMASK) | (((lint) exp) & EXP_MASK);
+ return(c);
+ }
+ * ==================================================================== */
+ .text
+ .global fast2_ldsub_asm
+ .type fast2_ldsub_asm, @function
+fast2_ldsub_asm:
+#define manta R1:0
+#define lmanta R1:0
+#define mantb R3:2
+#define lmantb R3:2
+#define expa R4
+#define expb R5
+#define expd R6
+#define exp R8
+#define c63 R9
+#define lmant R1:0
+#define k R4
+#define ce P0
+#define zero R3:2
+ .falign
+ {
+ expa = memw(r29+#8)
+ expb = memw(r29+#24)
+ r7 = r0
+ }
+ {
+ expd = sub(expa, expb):sat
+ ce = CMP.GT(expa, expb);
+ if ( ce.new) exp = add(expa, #1)
+ if (!ce.new) exp = add(expb, #1)
+ } {
+ expd = abs(expd):sat
+ if ( ce) expa = #1
+ if (!ce) expb = #1
+ c63 = #62
+ } {
+ expd = min(expd, c63)
+ manta = memd(r29+#0)
+ mantb = memd(r29+#16)
+ } {
+ if (!ce) expa = add(expd, #1)
+ if ( ce) expb = add(expd, #1)
+ } {
+ lmanta = ASR(lmanta, expa)
+ lmantb = ASR(lmantb, expb)
+ } {
+ lmant = sub(lmanta, lmantb)
+ zero = #0
+ } {
+ k = clb(lmant)
+ c63.L =#0x0001
+ } {
+ exp -= add(k, #-1) //exp = exp - (k-1)
+ k = add(k, #-1)
+ p0 = cmp.gt(k, #58)
+ c63.H =#0x8000
+ } {
+ if(!p0)memw(r7+#8) = exp
+ lmant = asl(lmant, k)
+ if(p0) jump .Ldenorma_s
+ } {
+ memd(r7+#0) = lmant
+ jumpr r31
+ }
+.Ldenorma_s:
+ memd(r7+#0) = zero
+ {
+ memw(r7+#8) = c63
+ jumpr r31
+ }
+
+/* ==================================================================== *
+ fast2_QLDOUBLE fast2_ldmpy(fast2_QLDOUBLE a,fast2_QLDOUBLE b) {
+ fast2_QLDOUBLE c;
+ lint manta = a & MANTMASK;
+ int expa = Q6_R_sxth_R(a) ;
+ lint mantb = b & MANTMASK;
+ int expb = Q6_R_sxth_R(b) ;
+ int exp, k;
+ lint mant;
+ int hia, hib, hi, lo;
+ unsigned int loa, lob;
+
+ hia = (int)(a >> 32);
+ loa = Q6_R_extractu_RII((int)manta, 31, 1);
+ hib = (int)(b >> 32);
+ lob = Q6_R_extractu_RII((int)mantb, 31, 1);
+
+ mant = Q6_P_mpy_RR(hia, lob);
+ mant = Q6_P_mpyacc_RR(mant,hib, loa);
+ mant = (mant >> 30) + (Q6_P_mpy_RR(hia, hib)<<1);
+
+ hi = (int) (mant>>32);
+
+ k = Q6_R_normamt_R(hi);
+ mant = mant << k;
+ exp = expa + expb - k;
+ if (mant == 0 || mant == -1) exp = 0x8001;
+ c = (mant & MANTMASK) | (((lint) exp) & EXP_MASK);
+ return(c);
+ }
+ * ==================================================================== */
+ .text
+ .global fast2_ldmpy_asm
+ .type fast2_ldmpy_asm, @function
+fast2_ldmpy_asm:
+
+#define mantxl_ R9
+#define mantxl R14
+#define mantxh R15
+#define mantx R15:14
+#define mantbl R2
+#define mantbl_ R8
+#define mantbh R3
+#define mantb R3:2
+#define expa R4
+#define expb R5
+#define c8001 R8
+#define mantd R7:6
+#define lmantc R11:10
+#define kp R9
+#define min R13:12
+#define minh R13
+#define max R13:12
+#define maxh R13
+#define ret R0
+
+ .falign
+ {
+ mantx = memd(r29+#0)
+ mantb = memd(r29+#16)
+ min = #0
+ }
+ {
+ mantbl_= extractu(mantbl, #31, #1)
+ mantxl_= extractu(mantxl, #31, #1)
+ minh.H = #0x8000
+ }
+ {
+ lmantc = mpy(mantxh, mantbh)
+ mantd = mpy(mantxh, mantbl_)
+ expa = memw(r29+#8)
+ expb = memw(r29+#24)
+ }
+ {
+ lmantc = add(lmantc, lmantc)
+ mantd += mpy(mantbh, mantxl_)
+ }
+ {
+ mantd = asr(mantd, #30)
+ c8001.L = #0x0001
+ p1 = cmp.eq(mantx, mantb)
+ }
+ {
+ mantd = add(mantd, lmantc)
+ expa= add(expa, expb)
+ p2 = cmp.eq(mantb, min)
+ }
+ {
+ kp = clb(mantd)
+ c8001.H = #0x8000
+ p1 = and(p1, p2)
+ }
+ {
+ expa-= add(kp, #-1)
+ kp = add(kp, #-1)
+ if(p1) jump .Lsat
+ }
+ {
+ mantd = asl(mantd, kp)
+ memw(ret+#8) = expa
+ p0 = cmp.gt(kp, #58)
+ if(p0.new) jump:nt .Ldenorm //rarely happens
+ }
+ {
+ memd(ret+#0) = mantd
+ jumpr r31
+ }
+.Lsat:
+ {
+ max = #0
+ expa+= add(kp, #1)
+ }
+ {
+ maxh.H = #0x4000
+ memw(ret+#8) = expa
+ }
+ {
+ memd(ret+#0) = max
+ jumpr r31
+ }
+.Ldenorm:
+ {
+ memw(ret+#8) = c8001
+ mantx = #0
+ }
+ {
+ memd(ret+#0) = mantx
+ jumpr r31
+ }
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/fastmath2_ldlib_asm.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/fastmath_dlib_asm.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/fastmath_dlib_asm.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/fastmath_dlib_asm.S (revision 351984)
@@ -0,0 +1,399 @@
+//===----------------------Hexagon builtin routine ------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+/* ==================================================================== */
+/* FUNCTIONS Optimized double floating point operators */
+/* ==================================================================== */
+/* c = dadd_asm(a, b) */
+/* ====================================================================
+
+QDOUBLE dadd(QDOUBLE a,QDOUBLE b) {
+ QDOUBLE c;
+ lint manta = a & MANTMASK;
+ int expa = HEXAGON_R_sxth_R(a) ;
+ lint mantb = b & MANTMASK;
+ int expb = HEXAGON_R_sxth_R(b) ;
+ int exp, expdiff, j, k, hi, lo, cn;
+ lint mant;
+
+ expdiff = (int) HEXAGON_P_vabsdiffh_PP(a, b);
+ expdiff = HEXAGON_R_sxth_R(expdiff) ;
+ if (expdiff > 63) { expdiff = 62;}
+ if (expa > expb) {
+ exp = expa + 1;
+ expa = 1;
+ expb = expdiff + 1;
+ } else {
+ exp = expb + 1;
+ expb = 1;
+ expa = expdiff + 1;
+ }
+ mant = (manta>>expa) + (mantb>>expb);
+
+ hi = (int) (mant>>32);
+ lo = (int) (mant);
+
+ k = HEXAGON_R_normamt_R(hi);
+ if(hi == 0 || hi == -1) k = 31+HEXAGON_R_normamt_R(lo);
+
+ mant = (mant << k);
+ cn = (mant == 0x8000000000000000LL);
+ exp = exp - k + cn;
+
+ if (mant == 0 || mant == -1) exp = 0x8001;
+ c = (mant & MANTMASK) | (((lint) exp) & EXP_MASK);
+ return(c);
+ }
+ * ==================================================================== */
+ .text
+ .global dadd_asm
+ .type dadd_asm, @function
+dadd_asm:
+
+#define manta R0
+#define mantexpa R1:0
+#define lmanta R1:0
+#define mantb R2
+#define mantexpb R3:2
+#define lmantb R3:2
+#define expa R4
+#define expb R5
+#define mantexpd R7:6
+#define expd R6
+#define exp R8
+#define c63 R9
+#define lmant R1:0
+#define manth R1
+#define mantl R0
+#define zero R7:6
+#define zerol R6
+#define minus R3:2
+#define minusl R2
+#define maxneg R9
+#define minmin R11:10 // exactly 0x8000000000000000LL
+#define minminh R11
+#define k R4
+#define kl R5
+#define ce P0
+ .falign
+ {
+ mantexpd = VABSDIFFH(mantexpa, mantexpb) //represented as 0x08001LL
+ c63 = #62
+ expa = SXTH(manta)
+ expb = SXTH(mantb)
+ } {
+ expd = SXTH(expd)
+ ce = CMP.GT(expa, expb);
+ if ( ce.new) exp = add(expa, #1)
+ if (!ce.new) exp = add(expb, #1)
+ } {
+ if ( ce) expa = #1
+ if (!ce) expb = #1
+ manta.L = #0
+ expd = MIN(expd, c63)
+ } {
+ if (!ce) expa = add(expd, #1)
+ if ( ce) expb = add(expd, #1)
+ mantb.L = #0
+ zero = #0
+ } {
+ lmanta = ASR(lmanta, expa)
+ lmantb = ASR(lmantb, expb)
+ minmin = #0
+ } {
+ lmant = add(lmanta, lmantb)
+ minus = #-1
+ minminh.H = #0x8000
+ } {
+ k = NORMAMT(manth)
+ kl = NORMAMT(mantl)
+ p0 = cmp.eq(manth, zerol)
+ p1 = cmp.eq(manth, minusl)
+ } {
+ p0 = OR(p0, p1)
+ if(p0.new) k = add(kl, #31)
+ maxneg.H = #0
+ } {
+ mantexpa = ASL(lmant, k)
+ exp = SUB(exp, k)
+ maxneg.L = #0x8001
+ } {
+ p0 = cmp.eq(mantexpa, zero)
+ p1 = cmp.eq(mantexpa, minus)
+ manta.L = #0
+ exp = ZXTH(exp)
+ } {
+ p2 = cmp.eq(mantexpa, minmin) //is result 0x80....0
+ if(p2.new) exp = add(exp, #1)
+ }
+#if (__HEXAGON_ARCH__ == 60)
+ {
+ p0 = OR(p0, p1)
+ if( p0.new) manta = OR(manta,maxneg)
+ if(!p0.new) manta = OR(manta,exp)
+ }
+ jumpr r31
+#else
+ {
+ p0 = OR(p0, p1)
+ if( p0.new) manta = OR(manta,maxneg)
+ if(!p0.new) manta = OR(manta,exp)
+ jumpr r31
+ }
+#endif
+/* =================================================================== *
+ QDOUBLE dsub(QDOUBLE a,QDOUBLE b) {
+ QDOUBLE c;
+ lint manta = a & MANTMASK;
+ int expa = HEXAGON_R_sxth_R(a) ;
+ lint mantb = b & MANTMASK;
+ int expb = HEXAGON_R_sxth_R(b) ;
+ int exp, expdiff, j, k, hi, lo, cn;
+ lint mant;
+
+ expdiff = (int) HEXAGON_P_vabsdiffh_PP(a, b);
+ expdiff = HEXAGON_R_sxth_R(expdiff) ;
+ if (expdiff > 63) { expdiff = 62;}
+ if (expa > expb) {
+ exp = expa + 1;
+ expa = 1;
+ expb = expdiff + 1;
+ } else {
+ exp = expb + 1;
+ expb = 1;
+ expa = expdiff + 1;
+ }
+ mant = (manta>>expa) - (mantb>>expb);
+
+ hi = (int) (mant>>32);
+ lo = (int) (mant);
+
+ k = HEXAGON_R_normamt_R(hi);
+ if(hi == 0 || hi == -1) k = 31+HEXAGON_R_normamt_R(lo);
+
+ mant = (mant << k);
+ cn = (mant == 0x8000000000000000LL);
+ exp = exp - k + cn;
+
+ if (mant == 0 || mant == -1) exp = 0x8001;
+ c = (mant & MANTMASK) | (((lint) exp) & EXP_MASK);
+ return(c);
+ }
+ * ==================================================================== */
+ .text
+ .global dsub_asm
+ .type dsub_asm, @function
+dsub_asm:
+
+#define manta R0
+#define mantexpa R1:0
+#define lmanta R1:0
+#define mantb R2
+#define mantexpb R3:2
+#define lmantb R3:2
+#define expa R4
+#define expb R5
+#define mantexpd R7:6
+#define expd R6
+#define exp R8
+#define c63 R9
+#define lmant R1:0
+#define manth R1
+#define mantl R0
+#define zero R7:6
+#define zerol R6
+#define minus R3:2
+#define minusl R2
+#define maxneg R9
+#define minmin R11:10 // exactly 0x8000000000000000LL
+#define minminh R11
+#define k R4
+#define kl R5
+#define ce P0
+ .falign
+ {
+ mantexpd = VABSDIFFH(mantexpa, mantexpb) //represented as 0x08001LL
+ c63 = #62
+ expa = SXTH(manta)
+ expb = SXTH(mantb)
+ } {
+ expd = SXTH(expd)
+ ce = CMP.GT(expa, expb);
+ if ( ce.new) exp = add(expa, #1)
+ if (!ce.new) exp = add(expb, #1)
+ } {
+ if ( ce) expa = #1
+ if (!ce) expb = #1
+ manta.L = #0
+ expd = MIN(expd, c63)
+ } {
+ if (!ce) expa = add(expd, #1)
+ if ( ce) expb = add(expd, #1)
+ mantb.L = #0
+ zero = #0
+ } {
+ lmanta = ASR(lmanta, expa)
+ lmantb = ASR(lmantb, expb)
+ minmin = #0
+ } {
+ lmant = sub(lmanta, lmantb)
+ minus = #-1
+ minminh.H = #0x8000
+ } {
+ k = NORMAMT(manth)
+ kl = NORMAMT(mantl)
+ p0 = cmp.eq(manth, zerol)
+ p1 = cmp.eq(manth, minusl)
+ } {
+ p0 = OR(p0, p1)
+ if(p0.new) k = add(kl, #31)
+ maxneg.H = #0
+ } {
+ mantexpa = ASL(lmant, k)
+ exp = SUB(exp, k)
+ maxneg.L = #0x8001
+ } {
+ p0 = cmp.eq(mantexpa, zero)
+ p1 = cmp.eq(mantexpa, minus)
+ manta.L = #0
+ exp = ZXTH(exp)
+ } {
+ p2 = cmp.eq(mantexpa, minmin) //is result 0x80....0
+ if(p2.new) exp = add(exp, #1)
+ }
+#if (__HEXAGON_ARCH__ == 60)
+ {
+ p0 = OR(p0, p1)
+ if( p0.new) manta = OR(manta,maxneg)
+ if(!p0.new) manta = OR(manta,exp)
+ }
+ jumpr r31
+#else
+ {
+ p0 = OR(p0, p1)
+ if( p0.new) manta = OR(manta,maxneg)
+ if(!p0.new) manta = OR(manta,exp)
+ jumpr r31
+ }
+#endif
+/* ==================================================================== *
+ QDOUBLE dmpy(QDOUBLE a,QDOUBLE b) {
+ QDOUBLE c;
+ lint manta = a & MANTMASK;
+ int expa = HEXAGON_R_sxth_R(a) ;
+ lint mantb = b & MANTMASK;
+ int expb = HEXAGON_R_sxth_R(b) ;
+ int exp, k;
+ lint mant;
+ int hia, hib, hi, lo;
+ unsigned int loa, lob;
+
+ hia = (int)(a >> 32);
+ loa = HEXAGON_R_extractu_RII((int)manta, 31, 1);
+ hib = (int)(b >> 32);
+ lob = HEXAGON_R_extractu_RII((int)mantb, 31, 1);
+
+ mant = HEXAGON_P_mpy_RR(hia, lob);
+ mant = HEXAGON_P_mpyacc_RR(mant,hib, loa);
+ mant = (mant >> 30) + (HEXAGON_P_mpy_RR(hia, hib)<<1);
+
+ hi = (int) (mant>>32);
+ lo = (int) (mant);
+
+ k = HEXAGON_R_normamt_R(hi);
+ if(hi == 0 || hi == -1) k = 31+HEXAGON_R_normamt_R(lo);
+ mant = mant << k;
+ exp = expa + expb - k;
+ if (mant == 0 || mant == -1) exp = 0x8001;
+ c = (mant & MANTMASK) | (((lint) exp) & EXP_MASK);
+ return(c);
+ }
+ * ==================================================================== */
+ .text
+ .global dmpy_asm
+ .type dmpy_asm, @function
+dmpy_asm:
+
+#define mantal R0
+#define mantah R1
+#define mantexpa R1:0
+#define mantbl R2
+#define mantbh R3
+#define mantexpb R3:2
+#define expa R4
+#define expb R5
+#define mantexpd R7:6
+#define exp R8
+#define lmantc R11:10
+#define mantch R11
+#define mantcl R10
+#define zero0 R7:6
+#define zero0l R6
+#define minus1 R3:2
+#define minus1l R2
+#define maxneg R9
+#define k R4
+#define kl R5
+
+ .falign
+ {
+ mantbl = lsr(mantbl, #16)
+ mantal = lsr(mantal, #16)
+ expa = sxth(mantal)
+ expb = sxth(mantbl)
+ }
+ {
+ lmantc = mpy(mantah, mantbh)
+ mantexpd = mpy(mantah, mantbl)
+ }
+ {
+ lmantc = add(lmantc, lmantc) //<<1
+ mantexpd+= mpy(mantbh, mantal)
+ }
+ {
+ lmantc += asr(mantexpd, #15)
+ exp = add(expa, expb)
+ zero0 = #0
+ minus1 = #-1
+ }
+ {
+ k = normamt(mantch)
+ kl = normamt(mantcl)
+ p0 = cmp.eq(mantch, zero0l)
+ p1 = cmp.eq(mantch, minus1l)
+ }
+ {
+ p0 = or(p0, p1)
+ if(p0.new) k = add(kl, #31)
+ maxneg.H = #0
+ }
+ {
+ mantexpa = asl(lmantc, k)
+ exp = sub(exp, k)
+ maxneg.L = #0x8001
+ }
+ {
+ p0 = cmp.eq(mantexpa, zero0)
+ p1 = cmp.eq(mantexpa, minus1)
+ mantal.L = #0
+ exp = zxth(exp)
+ }
+#if (__HEXAGON_ARCH__ == 60)
+ {
+ p0 = or(p0, p1)
+ if( p0.new) mantal = or(mantal,maxneg)
+ if(!p0.new) mantal = or(mantal,exp)
+ }
+ jumpr r31
+#else
+ {
+ p0 = or(p0, p1)
+ if( p0.new) mantal = or(mantal,maxneg)
+ if(!p0.new) mantal = or(mantal,exp)
+ jumpr r31
+ }
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/fastmath_dlib_asm.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/fma_opt.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/fma_opt.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/fma_opt.S (revision 351984)
@@ -0,0 +1,30 @@
+//===----------------------Hexagon builtin routine ------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+.macro FUNCTION_BEGIN name
+.text
+.p2align 5
+.globl \name
+.type \name, @function
+\name:
+.endm
+
+.macro FUNCTION_END name
+.size \name, . - \name
+.endm
+
+FUNCTION_BEGIN fmaf
+ r2 += sfmpy(r0, r1)
+ {
+ r0 = r2
+ jumpr r31
+ }
+FUNCTION_END fmaf
+
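+ // `fma` (the double-precision version) is presumably provided by another
+ // builtins source file; fmal simply aliases it here.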
+ .globl fmal
+ .set fmal, fma
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/fma_opt.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/fmax_opt.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/fmax_opt.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/fmax_opt.S (revision 351984)
@@ -0,0 +1,29 @@
+//===----------------------Hexagon builtin routine ------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+.macro FUNCTION_BEGIN name
+.text
+.p2align 5
+.globl \name
+.type \name, @function
+\name:
+.endm
+
+.macro FUNCTION_END name
+.size \name, . - \name
+.endm
+
+FUNCTION_BEGIN fmaxf
+ {
+ r0 = sfmax(r0, r1)
+ jumpr r31
+ }
+FUNCTION_END fmaxf
+
+ .globl fmaxl
+ .set fmaxl, fmax
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/fmax_opt.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/fmin_opt.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/fmin_opt.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/fmin_opt.S (revision 351984)
@@ -0,0 +1,29 @@
+//===----------------------Hexagon builtin routine ------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+.macro FUNCTION_BEGIN name
+.text
+.p2align 5
+.globl \name
+.type \name, @function
+\name:
+.endm
+
+.macro FUNCTION_END name
+.size \name, . - \name
+.endm
+
+FUNCTION_BEGIN fminf
+ {
+ r0 = sfmin(r0, r1)
+ jumpr r31
+ }
+FUNCTION_END fminf
+
+ .globl fminl
+ .set fminl, fmin
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/fmin_opt.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/memcpy_forward_vp4cp4n2.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/memcpy_forward_vp4cp4n2.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/memcpy_forward_vp4cp4n2.S (revision 351984)
@@ -0,0 +1,124 @@
+//===----------------------Hexagon builtin routine ------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// An optimized version of a memcpy which is equivalent to the following loop:
+//
+// volatile unsigned *dest;
+// unsigned *src;
+//
+// for (i = 0; i < num_words; ++i)
+// *dest++ = *src++;
+//
+// The corresponding C prototype for this function would be
+// void hexagon_memcpy_forward_vp4cp4n2(volatile unsigned *dest,
+// const unsigned *src,
+// unsigned num_words);
+//
+// *** Both dest and src must be aligned to 32-bit boundaries. ***
+// The code does not perform any runtime checks for this, and will fail
+// in bad ways if this requirement is not met.
+//
+// The "forward" in the name refers to the fact that the function copies
+// the words going forward in memory. It is incorrect to use this function
+// for cases where the original code copied words in any other order.
+//
+// *** This function is only for use by the compiler. ***
+// The only intended use is for the LLVM compiler to generate calls to
+// this function when a mem-copy loop, like the one above, is detected.
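+//
+// A rough C model of the structure below (hedged sketch; the helper name is
+// illustrative, not part of this file): copy up to the next 4 KiB page
+// boundary, then whole pages with one l2fetch per page, then the tail.
+//
+// void model(volatile unsigned *dst, const unsigned *src, unsigned n) {
+//   unsigned head = min(n, words_until_page_end(src)); // hypothetical helper
+//   for (unsigned i = 0; i < head; ++i) *dst++ = *src++;       // prolog
+//   n -= head;
+//   for (unsigned p = 0; p < (n >> 10); ++p)                   // whole pages
+//     for (unsigned i = 0; i < 1024; ++i) *dst++ = *src++;
+//   for (unsigned i = 0; i < (n & 1023); ++i) *dst++ = *src++; // epilog
+// }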
+
+ .text
+
+// Inputs:
+// r0: dest
+// r1: src
+// r2: num_words
+
+ .globl hexagon_memcpy_forward_vp4cp4n2
+ .balign 32
+ .type hexagon_memcpy_forward_vp4cp4n2,@function
+hexagon_memcpy_forward_vp4cp4n2:
+
+ // Compute r3 to be the number of words remaining in the current page.
+ // At the same time, compute r4 to be the number of 32-byte blocks
+ // remaining in the page (for prefetch).
+ {
+ r3 = sub(##4096, r1)
+ r5 = lsr(r2, #3)
+ }
+ {
+ // The word count before end-of-page is in the 12 lowest bits of r3.
+ // (If the address in r1 was already page-aligned, the bits are 0.)
+ r3 = extractu(r3, #10, #2)
+ r4 = extractu(r3, #7, #5)
+ }
+ {
+ r3 = minu(r2, r3)
+ r4 = minu(r5, r4)
+ }
+ {
+ r4 = or(r4, ##2105344) // 2105344 = 0x202000
+ p0 = cmp.eq(r3, #0)
+ if (p0.new) jump:nt .Lskipprolog
+ }
+ l2fetch(r1, r4)
+ {
+ loop0(.Lprolog, r3)
+ r2 = sub(r2, r3) // r2 = number of words left after the prolog.
+ }
+ .falign
+.Lprolog:
+ {
+ r4 = memw(r1++#4)
+ memw(r0++#4) = r4.new
+ } :endloop0
+.Lskipprolog:
+ {
+ // Let r3 = number of whole pages left (page = 1024 words).
+ r3 = lsr(r2, #10)
+ if (cmp.eq(r3.new, #0)) jump:nt .Lskipmain
+ }
+ {
+ loop1(.Lout, r3)
+ r2 = extractu(r2, #10, #0) // r2 = r2 & 1023
+ r3 = ##2105472 // r3 = 0x202080 (prefetch info)
+ }
+ // Iterate over pages.
+ .falign
+.Lout:
+ // Prefetch each individual page.
+ l2fetch(r1, r3)
+ loop0(.Lpage, #512)
+ .falign
+.Lpage:
+ r5:4 = memd(r1++#8)
+ {
+ memw(r0++#8) = r4
+ memw(r0+#4) = r5
+ } :endloop0:endloop1
+.Lskipmain:
+ {
+ r3 = ##2105344 // r3 = 0x202000 (prefetch info)
+ r4 = lsr(r2, #3) // r4 = number of 32-byte blocks remaining.
+ p0 = cmp.eq(r2, #0)
+ if (p0.new) jumpr:nt r31
+ }
+ {
+ r3 = or(r3, r4)
+ loop0(.Lepilog, r2)
+ }
+ l2fetch(r1, r3)
+ .falign
+.Lepilog:
+ {
+ r4 = memw(r1++#4)
+ memw(r0++#4) = r4.new
+ } :endloop0
+
+ jumpr r31
+
+.size hexagon_memcpy_forward_vp4cp4n2, . - hexagon_memcpy_forward_vp4cp4n2
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/memcpy_forward_vp4cp4n2.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/memcpy_likely_aligned.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/memcpy_likely_aligned.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/memcpy_likely_aligned.S (revision 351984)
@@ -0,0 +1,63 @@
+//===------------------------- memcopy routines ---------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+
+ .macro FUNCTION_BEGIN name
+ .text
+ .p2align 5
+ .globl \name
+ .type \name, @function
+\name:
+ .endm
+
+ .macro FUNCTION_END name
+ .size \name, . - \name
+ .endm
+
+FUNCTION_BEGIN __hexagon_memcpy_likely_aligned_min32bytes_mult8bytes
+ {
+ p0 = bitsclr(r1,#7)
+ p0 = bitsclr(r0,#7)
+ if (p0.new) r5:4 = memd(r1)
+ r3 = #-3
+ }
+ {
+ if (!p0) jump .Lmemcpy_call
+ if (p0) memd(r0++#8) = r5:4
+ if (p0) r5:4 = memd(r1+#8)
+ r3 += lsr(r2,#3)
+ }
+ {
+ memd(r0++#8) = r5:4
+ r5:4 = memd(r1+#16)
+ r1 = add(r1,#24)
+ loop0(1f,r3)
+ }
+ .falign
+1:
+ {
+ memd(r0++#8) = r5:4
+ r5:4 = memd(r1++#8)
+ }:endloop0
+ {
+ memd(r0) = r5:4
+ r0 -= add(r2,#-8)
+ jumpr r31
+ }
+FUNCTION_END __hexagon_memcpy_likely_aligned_min32bytes_mult8bytes
+
+.Lmemcpy_call:
+#ifdef __PIC__
+ jump memcpy@PLT
+#else
+ jump memcpy
+#endif
+
+ .globl __qdsp_memcpy_likely_aligned_min32bytes_mult8bytes
+ .set __qdsp_memcpy_likely_aligned_min32bytes_mult8bytes, \
+ __hexagon_memcpy_likely_aligned_min32bytes_mult8bytes
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/memcpy_likely_aligned.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/moddi3.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/moddi3.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/moddi3.S (revision 351984)
@@ -0,0 +1,82 @@
+//===----------------------Hexagon builtin routine ------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+ .macro FUNCTION_BEGIN name
+ .text
+ .p2align 5
+ .globl \name
+ .type \name, @function
+\name:
+ .endm
+
+ .macro FUNCTION_END name
+ .size \name, . - \name
+ .endm
+
+
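+// Hedged C model of the sign handling below ('umod64' and 'abs64' are
+// illustrative names, not symbols defined here): the magnitudes are divided
+// unsigned, and the final vmux gives the remainder the dividend's sign, as
+// C requires, e.g. -7 % 3 == -1 and 7 % -3 == 1.
+//
+// long long mod64(long long a, long long b) {
+//   unsigned long long r = umod64(abs64(a), abs64(b));
+//   return (a < 0) ? -(long long)r : (long long)r;
+// }
+//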
+FUNCTION_BEGIN __hexagon_moddi3
+ {
+ p3 = tstbit(r1,#31)
+ }
+ {
+ r1:0 = abs(r1:0)
+ r3:2 = abs(r3:2)
+ }
+ {
+ r6 = cl0(r1:0) // count leading 0's of dividend (numerator)
+ r7 = cl0(r3:2) // count leading 0's of divisor (denominator)
+ r5:4 = r3:2 // divisor moved into working registers
+ r3:2 = r1:0 // dividend is the initial remainder, r3:2 contains remainder
+ }
+ {
+ r10 = sub(r7,r6) // left shift count for bit & divisor
+ r1:0 = #0 // initialize quotient to 0
+ r15:14 = #1 // initialize bit to 1
+ }
+ {
+ r11 = add(r10,#1) // loop count is 1 more than shift count
+ r13:12 = lsl(r5:4,r10) // shift divisor msb into same bit position as dividend msb
+ r15:14 = lsl(r15:14,r10) // shift the bit left by same amount as divisor
+ }
+ {
+ p0 = cmp.gtu(r5:4,r3:2) // check if divisor > dividend
+ loop0(1f,r11) // register loop
+ }
+ {
+ if (p0) jump .hexagon_moddi3_return // if divisor > dividend, we're done, so return
+ }
+ .falign
+1:
+ {
+ p0 = cmp.gtu(r13:12,r3:2) // set predicate reg if shifted divisor > current remainder
+ }
+ {
+ r7:6 = sub(r3:2, r13:12) // subtract shifted divisor from current remainder
+ r9:8 = add(r1:0, r15:14) // save current quotient to temp (r9:8)
+ }
+ {
+ r1:0 = vmux(p0, r1:0, r9:8) // choose either current quotient or new quotient (r9:8)
+ r3:2 = vmux(p0, r3:2, r7:6) // choose either current remainder or new remainder (r7:6)
+ }
+ {
+ r15:14 = lsr(r15:14, #1) // shift bit right by 1 for next iteration
+ r13:12 = lsr(r13:12, #1) // shift "shifted divisor" right by 1 for next iteration
+ }:endloop0
+
+.hexagon_moddi3_return:
+ {
+ r1:0 = neg(r3:2)
+ }
+ {
+ r1:0 = vmux(p3,r1:0,r3:2)
+ jumpr r31
+ }
+FUNCTION_END __hexagon_moddi3
+
+ .globl __qdsp_moddi3
+ .set __qdsp_moddi3, __hexagon_moddi3
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/moddi3.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/modsi3.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/modsi3.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/modsi3.S (revision 351984)
@@ -0,0 +1,65 @@
+//===----------------------Hexagon builtin routine ------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+
+
+ .macro FUNCTION_BEGIN name
+ .text
+ .p2align 5
+ .globl \name
+ .type \name, @function
+\name:
+ .endm
+
+ .macro FUNCTION_END name
+ .size \name, . - \name
+ .endm
+
+
+FUNCTION_BEGIN __hexagon_modsi3
+ {
+ p2 = cmp.ge(r0,#0)
+ r2 = abs(r0)
+ r1 = abs(r1)
+ }
+ {
+ r3 = cl0(r2)
+ r4 = cl0(r1)
+ p0 = cmp.gtu(r1,r2)
+ }
+ {
+ r3 = sub(r4,r3)
+ if (p0) jumpr r31
+ }
+ {
+ p1 = cmp.eq(r3,#0)
+ loop0(1f,r3)
+ r0 = r2
+ r2 = lsl(r1,r3)
+ }
+ .falign
+1:
+ {
+ p0 = cmp.gtu(r2,r0)
+ if (!p0.new) r0 = sub(r0,r2)
+ r2 = lsr(r2,#1)
+ if (p1) r1 = #0
+ }:endloop0
+ {
+ p0 = cmp.gtu(r2,r0)
+ if (!p0.new) r0 = sub(r0,r1)
+ if (p2) jumpr r31
+ }
+ {
+ r0 = neg(r0)
+ jumpr r31
+ }
+FUNCTION_END __hexagon_modsi3
+
+ .globl __qdsp_modsi3
+ .set __qdsp_modsi3, __hexagon_modsi3
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/modsi3.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/sfdiv_opt.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/sfdiv_opt.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/sfdiv_opt.S (revision 351984)
@@ -0,0 +1,65 @@
+//===----------------------Hexagon builtin routine ------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+ .macro FUNCTION_BEGIN name
+ .text
+ .p2align 5
+ .globl \name
+ .type \name, @function
+\name:
+ .endm
+
+ .macro FUNCTION_END name
+ .size \name, . - \name
+ .endm
+
+#define Q6_ALIAS(TAG) .global __qdsp_##TAG ; .set __qdsp_##TAG, __hexagon_##TAG
+#define FAST_ALIAS(TAG) .global __hexagon_fast_##TAG ; .set __hexagon_fast_##TAG, __hexagon_##TAG
+#define FAST2_ALIAS(TAG) .global __hexagon_fast2_##TAG ; .set __hexagon_fast2_##TAG, __hexagon_##TAG
+
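+// Hedged sketch of the Newton-Raphson scheme below (informal notation, not
+// part of the original file): r approximates 1/den and q the quotient;
+// sfrecipa seeds r, and each step roughly squares the error:
+//
+//   e = 1 - den*r;  r += r*e;        // refine the reciprocal
+//   q = num*r;                       // initial quotient
+//   rem = num - q*den;  q += rem*r;  // refine the quotient, then scale
+//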
+FUNCTION_BEGIN __hexagon_divsf3
+ {
+ r2,p0 = sfrecipa(r0,r1)
+ r4 = sffixupd(r0,r1)
+ r3 = ##0x3f800000 // 1.0
+ }
+ {
+ r5 = sffixupn(r0,r1)
+ r3 -= sfmpy(r4,r2):lib // error term e0 = 1 - den*recip (Newton-Raphson)
+ r6 = ##0x80000000
+ r7 = r3
+ }
+ {
+ r2 += sfmpy(r3,r2):lib
+ r3 = r7
+ r6 = r5
+ r0 = and(r6,r5)
+ }
+ {
+ r3 -= sfmpy(r4,r2):lib
+ r0 += sfmpy(r5,r2):lib
+ }
+ {
+ r2 += sfmpy(r3,r2):lib
+ r6 -= sfmpy(r0,r4):lib
+ }
+ {
+ r0 += sfmpy(r6,r2):lib
+ }
+ {
+ r5 -= sfmpy(r0,r4):lib
+ }
+ {
+ r0 += sfmpy(r5,r2,p0):scale
+ jumpr r31
+ }
+FUNCTION_END __hexagon_divsf3
+
+Q6_ALIAS(divsf3)
+FAST_ALIAS(divsf3)
+FAST2_ALIAS(divsf3)
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/sfdiv_opt.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/sfsqrt_opt.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/sfsqrt_opt.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/sfsqrt_opt.S (revision 351984)
@@ -0,0 +1,81 @@
+//===----------------------Hexagon builtin routine ------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+ .macro FUNCTION_BEGIN name
+ .text
+ .p2align 5
+ .globl \name
+ .type \name, @function
+\name:
+ .endm
+
+ .macro FUNCTION_END name
+ .size \name, . - \name
+ .endm
+
+#define RIN r0
+#define S r0
+#define H r1
+#define D r2
+#define E r3
+#define HALF r4
+#define R r5
+
+#define Q6_ALIAS(TAG) .global __qdsp_##TAG ; .set __qdsp_##TAG, __hexagon_##TAG
+#define FAST_ALIAS(TAG) .global __hexagon_fast_##TAG ; .set __hexagon_fast_##TAG, __hexagon_##TAG
+#define FAST2_ALIAS(TAG) .global __hexagon_fast2_##TAG ; .set __hexagon_fast2_##TAG, __hexagon_##TAG
+
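+// Hedged sketch of the coupled Newton iteration below (informal notation):
+// E seeds 1/sqrt(R) via sfinvsqrta, S tracks sqrt(R) and H tracks
+// 0.5/sqrt(R), and each step refines both:
+//
+//   S = R*E;  H = 0.5*E;
+//   d = 0.5 - S*H;  S += S*d;  H += H*d;   // repeat
+//   e = R - S*S;    S += H*e;              // final correction, then scale
+//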
+FUNCTION_BEGIN __hexagon_sqrtf
+ {
+ E,p0 = sfinvsqrta(RIN)
+ R = sffixupr(RIN)
+ HALF = ##0x3f000000 // 0.5
+ r1:0 = combine(#0,#0) // clear S/H
+ }
+ {
+ S += sfmpy(E,R):lib // S0
+ H += sfmpy(E,HALF):lib // H0
+ D = HALF
+ E = R
+ }
+ {
+ D -= sfmpy(S,H):lib // d0
+ p1 = sfclass(R,#1) // is zero?
+ //E -= sfmpy(S,S):lib // e0
+ }
+ {
+ S += sfmpy(S,D):lib // S1
+ H += sfmpy(H,D):lib // H1
+ D = HALF
+ E = R
+ }
+ {
+ D -= sfmpy(S,H):lib // d1
+ E -= sfmpy(S,S):lib // e1
+ }
+ {
+ S += sfmpy(H,E):lib // S2
+ H += sfmpy(H,D):lib // H2
+ D = HALF
+ E = R
+ }
+ {
+ //D -= sfmpy(S,H):lib // d2
+ E -= sfmpy(S,S):lib // e2
+ if (p1) r0 = or(r0,R) // sqrt(-0.0) = -0.0
+ }
+ {
+ S += sfmpy(H,E,p0):scale // S3
+ jumpr r31
+ }
+
+FUNCTION_END __hexagon_sqrtf
+
+Q6_ALIAS(sqrtf)
+FAST_ALIAS(sqrtf)
+FAST2_ALIAS(sqrtf)
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/sfsqrt_opt.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/udivdi3.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/udivdi3.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/udivdi3.S (revision 351984)
@@ -0,0 +1,70 @@
+//===----------------------Hexagon builtin routine ------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+
+ .macro FUNCTION_BEGIN name
+ .text
+ .p2align 5
+ .globl \name
+ .type \name, @function
+\name:
+ .endm
+
+ .macro FUNCTION_END name
+ .size \name, . - \name
+ .endm
+
+
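+// Hedged C model of the shift-and-subtract (restoring) division below
+// ('clz64' is an illustrative helper, not defined here):
+//
+// unsigned long long udiv64(unsigned long long a, unsigned long long b) {
+//   if (b > a) return 0;                  // remainder would be a itself
+//   unsigned long long q = 0, bit = 1;
+//   int shift = clz64(b) - clz64(a);      // align the divisor's msb with a's
+//   b <<= shift;  bit <<= shift;
+//   for (int i = 0; i <= shift; ++i) {
+//     if (b <= a) { a -= b; q |= bit; }   // trial subtract, set quotient bit
+//     b >>= 1;  bit >>= 1;
+//   }
+//   return q;                             // the remainder is left in a
+// }
+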
+FUNCTION_BEGIN __hexagon_udivdi3
+ {
+ r6 = cl0(r1:0) // count leading 0's of dividend (numerator)
+ r7 = cl0(r3:2) // count leading 0's of divisor (denominator)
+ r5:4 = r3:2 // divisor moved into working registers
+ r3:2 = r1:0 // dividend is the initial remainder, r3:2 contains remainder
+ }
+ {
+ r10 = sub(r7,r6) // left shift count for bit & divisor
+ r1:0 = #0 // initialize quotient to 0
+ r15:14 = #1 // initialize bit to 1
+ }
+ {
+ r11 = add(r10,#1) // loop count is 1 more than shift count
+ r13:12 = lsl(r5:4,r10) // shift divisor msb into same bit position as dividend msb
+ r15:14 = lsl(r15:14,r10) // shift the bit left by same amount as divisor
+ }
+ {
+ p0 = cmp.gtu(r5:4,r3:2) // check if divisor > dividend
+ loop0(1f,r11) // register loop
+ }
+ {
+ if (p0) jumpr r31 // if divisor > dividend, we're done, so return
+ }
+ .falign
+1:
+ {
+ p0 = cmp.gtu(r13:12,r3:2) // set predicate reg if shifted divisor > current remainder
+ }
+ {
+ r7:6 = sub(r3:2, r13:12) // subtract shifted divisor from current remainder
+ r9:8 = add(r1:0, r15:14) // save current quotient to temp (r9:8)
+ }
+ {
+ r1:0 = vmux(p0, r1:0, r9:8) // choose either current quotient or new quotient (r9:8)
+ r3:2 = vmux(p0, r3:2, r7:6) // choose either current remainder or new remainder (r7:6)
+ }
+ {
+ r15:14 = lsr(r15:14, #1) // shift bit right by 1 for next iteration
+ r13:12 = lsr(r13:12, #1) // shift "shifted divisor" right by 1 for next iteration
+ }:endloop0
+ {
+ jumpr r31 // return
+ }
+FUNCTION_END __hexagon_udivdi3
+
+ .globl __qdsp_udivdi3
+ .set __qdsp_udivdi3, __hexagon_udivdi3
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/udivdi3.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/udivmoddi4.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/udivmoddi4.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/udivmoddi4.S (revision 351984)
@@ -0,0 +1,70 @@
+//===----------------------Hexagon builtin routine ------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+
+ .macro FUNCTION_BEGIN name
+ .text
+ .p2align 5
+ .globl \name
+ .type \name, @function
+\name:
+ .endm
+
+ .macro FUNCTION_END name
+ .size \name, . - \name
+ .endm
+
+
+FUNCTION_BEGIN __hexagon_udivmoddi4
+ {
+ r6 = cl0(r1:0) // count leading 0's of dividend (numerator)
+ r7 = cl0(r3:2) // count leading 0's of divisor (denominator)
+ r5:4 = r3:2 // divisor moved into working registers
+ r3:2 = r1:0 // dividend is the initial remainder, r3:2 contains remainder
+ }
+ {
+ r10 = sub(r7,r6) // left shift count for bit & divisor
+ r1:0 = #0 // initialize quotient to 0
+ r15:14 = #1 // initialize bit to 1
+ }
+ {
+ r11 = add(r10,#1) // loop count is 1 more than shift count
+ r13:12 = lsl(r5:4,r10) // shift divisor msb into same bit position as dividend msb
+ r15:14 = lsl(r15:14,r10) // shift the bit left by same amount as divisor
+ }
+ {
+ p0 = cmp.gtu(r5:4,r3:2) // check if divisor > dividend
+ loop0(1f,r11) // register loop
+ }
+ {
+ if (p0) jumpr r31 // if divisor > dividend, we're done, so return
+ }
+ .falign
+1:
+ {
+ p0 = cmp.gtu(r13:12,r3:2) // set predicate reg if shifted divisor > current remainder
+ }
+ {
+ r7:6 = sub(r3:2, r13:12) // subtract shifted divisor from current remainder
+ r9:8 = add(r1:0, r15:14) // save current quotient to temp (r9:8)
+ }
+ {
+ r1:0 = vmux(p0, r1:0, r9:8) // choose either current quotient or new quotient (r9:8)
+ r3:2 = vmux(p0, r3:2, r7:6) // choose either current remainder or new remainder (r7:6)
+ }
+ {
+ r15:14 = lsr(r15:14, #1) // shift bit right by 1 for next iteration
+ r13:12 = lsr(r13:12, #1) // shift "shifted divisor" right by 1 for next iteration
+ }:endloop0
+ {
+ jumpr r31 // return
+ }
+FUNCTION_END __hexagon_udivmoddi4
+
+ .globl __qdsp_udivmoddi4
+ .set __qdsp_udivmoddi4, __hexagon_udivmoddi4
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/udivmoddi4.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/udivmodsi4.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/udivmodsi4.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/udivmodsi4.S (revision 351984)
@@ -0,0 +1,59 @@
+//===----------------------Hexagon builtin routine ------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+
+ .macro FUNCTION_BEGIN name
+ .text
+ .p2align 5
+ .globl \name
+ .type \name, @function
+\name:
+ .endm
+
+ .macro FUNCTION_END name
+ .size \name, . - \name
+ .endm
+
+
+FUNCTION_BEGIN __hexagon_udivmodsi4
+ {
+ r2 = cl0(r0) // count leading 0's of dividend
+ r3 = cl0(r1) // count leading 0's of divisor
+ r5:4 = combine(#1,#0) // r5 = quotient bit seed (1), r4 = 0
+ p0 = cmp.gtu(r1,r0) // divisor > dividend?
+ }
+ {
+ r6 = sub(r3,r2) // shift count to align divisor msb with dividend msb
+ r4 = r1 // divisor into working register
+ r1:0 = combine(r0,r4) // r1 = dividend (remainder), r0 = 0 (quotient)
+ if (p0) jumpr r31 // divisor > dividend: quotient 0, remainder = dividend
+ }
+ {
+ r3:2 = vlslw(r5:4,r6) // r3 = bit << count, r2 = divisor << count
+ loop0(1f,r6) // one loop pass per shift position
+ p0 = cmp.eq(r6,#0)
+ if (p0.new) r4 = #0 // special-case a shift count of 0
+ }
+ .falign
+1:
+ {
+ p0 = cmp.gtu(r2,r1) // shifted divisor > current remainder?
+ if (!p0.new) r1 = sub(r1,r2) // no: subtract it from the remainder
+ if (!p0.new) r0 = add(r0,r3) // and set this quotient bit
+ r3:2 = vlsrw(r3:2,#1) // move divisor and bit right one position
+ }:endloop0
+ {
+ p0 = cmp.gtu(r2,r1) // final trial subtract for the last position
+ if (!p0.new) r1 = sub(r1,r4)
+ if (!p0.new) r0 = add(r0,r3)
+ jumpr r31 // return quotient in r0, remainder in r1
+ }
+FUNCTION_END __hexagon_udivmodsi4
+
+ .globl __qdsp_udivmodsi4
+ .set __qdsp_udivmodsi4, __hexagon_udivmodsi4
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/udivmodsi4.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/udivsi3.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/udivsi3.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/udivsi3.S (revision 351984)
@@ -0,0 +1,55 @@
+//===----------------------Hexagon builtin routine ------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+ .macro FUNCTION_BEGIN name
+ .text
+ .p2align 5
+ .globl \name
+ .type \name, @function
+\name:
+ .endm
+
+ .macro FUNCTION_END name
+ .size \name, . - \name
+ .endm
+
+
+FUNCTION_BEGIN __hexagon_udivsi3
+ {
+ r2 = cl0(r0)
+ r3 = cl0(r1)
+ r5:4 = combine(#1,#0)
+ p0 = cmp.gtu(r1,r0)
+ }
+ {
+ r6 = sub(r3,r2)
+ r4 = r1
+ r1:0 = combine(r0,r4)
+ if (p0) jumpr r31
+ }
+ {
+ r3:2 = vlslw(r5:4,r6)
+ loop0(1f,r6)
+ }
+ .falign
+1:
+ {
+ p0 = cmp.gtu(r2,r1)
+ if (!p0.new) r1 = sub(r1,r2)
+ if (!p0.new) r0 = add(r0,r3)
+ r3:2 = vlsrw(r3:2,#1)
+ }:endloop0
+ {
+ p0 = cmp.gtu(r2,r1)
+ if (!p0.new) r0 = add(r0,r3)
+ jumpr r31
+ }
+FUNCTION_END __hexagon_udivsi3
+
+ .globl __qdsp_udivsi3
+ .set __qdsp_udivsi3, __hexagon_udivsi3
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/udivsi3.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/umoddi3.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/umoddi3.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/umoddi3.S (revision 351984)
@@ -0,0 +1,73 @@
+//===----------------------Hexagon builtin routine ------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+
+ .macro FUNCTION_BEGIN name
+ .text
+ .p2align 5
+ .globl \name
+ .type \name, @function
+\name:
+ .endm
+
+ .macro FUNCTION_END name
+ .size \name, . - \name
+ .endm
+
+
+FUNCTION_BEGIN __hexagon_umoddi3
+ {
+ r6 = cl0(r1:0) // count leading 0's of dividend (numerator)
+ r7 = cl0(r3:2) // count leading 0's of divisor (denominator)
+ r5:4 = r3:2 // divisor moved into working registers
+ r3:2 = r1:0 // dividend is the initial remainder, r3:2 contains remainder
+ }
+ {
+ r10 = sub(r7,r6) // left shift count for bit & divisor
+ r1:0 = #0 // initialize quotient to 0
+ r15:14 = #1 // initialize bit to 1
+ }
+ {
+ r11 = add(r10,#1) // loop count is 1 more than shift count
+ r13:12 = lsl(r5:4,r10) // shift divisor msb into same bit position as dividend msb
+ r15:14 = lsl(r15:14,r10) // shift the bit left by same amount as divisor
+ }
+ {
+ p0 = cmp.gtu(r5:4,r3:2) // check if divisor > dividend
+ loop0(1f,r11) // register loop
+ }
+ {
+ if (p0) jump .hexagon_umoddi3_return // if divisor > dividend, we're done, so return
+ }
+ .falign
+1:
+ {
+ p0 = cmp.gtu(r13:12,r3:2) // set predicate reg if shifted divisor > current remainder
+ }
+ {
+ r7:6 = sub(r3:2, r13:12) // subtract shifted divisor from current remainder
+ r9:8 = add(r1:0, r15:14) // save current quotient to temp (r9:8)
+ }
+ {
+ r1:0 = vmux(p0, r1:0, r9:8) // choose either current quotient or new quotient (r9:8)
+ r3:2 = vmux(p0, r3:2, r7:6) // choose either current remainder or new remainder (r7:6)
+ }
+ {
+ r15:14 = lsr(r15:14, #1) // shift bit right by 1 for next iteration
+ r13:12 = lsr(r13:12, #1) // shift "shifted divisor" right by 1 for next iteration
+ }:endloop0
+
+.hexagon_umoddi3_return:
+ {
+ r1:0 = r3:2
+ jumpr r31
+ }
+FUNCTION_END __hexagon_umoddi3
+
+ .globl __qdsp_umoddi3
+ .set __qdsp_umoddi3, __hexagon_umoddi3
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/umoddi3.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/umodsi3.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/umodsi3.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/umodsi3.S (revision 351984)
@@ -0,0 +1,54 @@
+//===----------------------Hexagon builtin routine ------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+
+ .macro FUNCTION_BEGIN name
+ .text
+ .p2align 5
+ .globl \name
+ .type \name, @function
+\name:
+ .endm
+
+ .macro FUNCTION_END name
+ .size \name, . - \name
+ .endm
+
+
+FUNCTION_BEGIN __hexagon_umodsi3
+ {
+ r2 = cl0(r0)
+ r3 = cl0(r1)
+ p0 = cmp.gtu(r1,r0)
+ }
+ {
+ r2 = sub(r3,r2)
+ if (p0) jumpr r31
+ }
+ {
+ loop0(1f,r2)
+ p1 = cmp.eq(r2,#0)
+ r2 = lsl(r1,r2)
+ }
+ .falign
+1:
+ {
+ p0 = cmp.gtu(r2,r0)
+ if (!p0.new) r0 = sub(r0,r2)
+ r2 = lsr(r2,#1)
+ if (p1) r1 = #0
+ }:endloop0
+ {
+ p0 = cmp.gtu(r2,r0)
+ if (!p0.new) r0 = sub(r0,r1)
+ jumpr r31
+ }
+FUNCTION_END __hexagon_umodsi3
+
+ .globl __qdsp_umodsi3
+ .set __qdsp_umodsi3, __hexagon_umodsi3
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/hexagon/umodsi3.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/ashldi3.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/ashldi3.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/ashldi3.S (revision 351984)
@@ -0,0 +1,62 @@
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+#include "../assembly.h"
+
+// di_int __ashldi3(di_int input, int count);
+
+// This routine has some extra memory traffic, loading the 64-bit input via two
+// 32-bit loads, then immediately storing it back to the stack via a single 64-bit
+// store. This is to avoid a write-small, read-large stall.
+// However, if callers of this routine can be safely assumed to store the argument
+// via a 64-bit store, this is unnecessary memory traffic, and should be avoided.
+// It can be turned off by defining the TRUST_CALLERS_USE_64_BIT_STORES macro.
+
+#ifdef __i386__
+#ifdef __SSE2__
+
+.text
+.balign 4
+DEFINE_COMPILERRT_FUNCTION(__ashldi3)
+ movd 12(%esp), %xmm2 // Load count
+#ifndef TRUST_CALLERS_USE_64_BIT_STORES
+ movd 4(%esp), %xmm0
+ movd 8(%esp), %xmm1
+ punpckldq %xmm1, %xmm0 // Load input
+#else
+ movq 4(%esp), %xmm0 // Load input
+#endif
+ psllq %xmm2, %xmm0 // shift input by count
+ movd %xmm0, %eax
+ psrlq $32, %xmm0
+ movd %xmm0, %edx
+ ret
+END_COMPILERRT_FUNCTION(__ashldi3)
+
+#else // SSE2 not available: use GPRs instead.
+
+.text
+.balign 4
+DEFINE_COMPILERRT_FUNCTION(__ashldi3)
+ movl 12(%esp), %ecx // Load count
+ movl 8(%esp), %edx // Load high
+ movl 4(%esp), %eax // Load low
+
+ testl $0x20, %ecx // If count >= 32
+ jnz 1f // goto 1
+ shldl %cl, %eax, %edx // left shift high by count
+ shll %cl, %eax // left shift low by count
+ ret
+
+1: movl %eax, %edx // Move low to high
+ xorl %eax, %eax // clear low
+ shll %cl, %edx // shift high by count - 32
+ ret
+END_COMPILERRT_FUNCTION(__ashldi3)
+
+#endif // __SSE2__
+#endif // __i386__
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/ashldi3.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/ashrdi3.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/ashrdi3.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/ashrdi3.S (revision 351984)
@@ -0,0 +1,73 @@
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+#include "../assembly.h"
+
+// di_int __ashrdi3(di_int input, int count);
+
+#ifdef __i386__
+#ifdef __SSE2__
+
+.text
+.balign 4
+DEFINE_COMPILERRT_FUNCTION(__ashrdi3)
+ movd 12(%esp), %xmm2 // Load count
+ movl 8(%esp), %eax // load the high word for the sign test below
+#ifndef TRUST_CALLERS_USE_64_BIT_STORES
+ movd 4(%esp), %xmm0
+ movd 8(%esp), %xmm1
+ punpckldq %xmm1, %xmm0 // Load input
+#else
+ movq 4(%esp), %xmm0 // Load input
+#endif
+
+ psrlq %xmm2, %xmm0 // unsigned shift input by count
+
+ testl %eax, %eax // check the sign-bit of the input
+ jns 1f // early out for positive inputs
+
+ // If the input is negative, we need to construct the shifted sign bit
+ // to OR into the result, as SSE2 has no 64-bit arithmetic right shift.
+ pcmpeqb %xmm1, %xmm1 // -1ULL
+ psrlq $58, %xmm1 // 0x3f
+ pandn %xmm1, %xmm2 // 63 - count
+ pcmpeqb %xmm1, %xmm1 // -1ULL
+ psubq %xmm1, %xmm2 // 64 - count
+ psllq %xmm2, %xmm1 // -1 << (64 - count) = leading sign bits
+ por %xmm1, %xmm0
+
+ // Move the result back to the general purpose registers and return
+1: movd %xmm0, %eax
+ psrlq $32, %xmm0
+ movd %xmm0, %edx
+ ret
+END_COMPILERRT_FUNCTION(__ashrdi3)
+
+#else // SSE2 not available: use GPRs instead.
+
+.text
+.balign 4
+DEFINE_COMPILERRT_FUNCTION(__ashrdi3)
+ movl 12(%esp), %ecx // Load count
+ movl 8(%esp), %edx // Load high
+ movl 4(%esp), %eax // Load low
+
+ testl $0x20, %ecx // If count >= 32
+ jnz 1f // goto 1
+
+ shrdl %cl, %edx, %eax // right shift low by count
+ sarl %cl, %edx // right shift high by count
+ ret
+
+1: movl %edx, %eax // Move high to low
+ sarl $31, %edx // sign-extend high
+ sarl %cl, %eax // shift low by count - 32
+ ret
+END_COMPILERRT_FUNCTION(__ashrdi3)
+
+#endif // __SSE2__
+#endif // __i386__
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/ashrdi3.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/chkstk.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/chkstk.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/chkstk.S (revision 351984)
@@ -0,0 +1,35 @@
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+#include "../assembly.h"
+
+// _chkstk routine
+// This routine is Windows-specific.
+// http://msdn.microsoft.com/en-us/library/ms648426.aspx
+
+#ifdef __i386__
+
+.text
+.balign 4
+DEFINE_COMPILERRT_FUNCTION(__chkstk_ms)
+ push %ecx
+ push %eax
+ cmp $0x1000,%eax // requests of less than one page skip the probe loop
+ lea 12(%esp),%ecx // ecx = esp as it was on entry to this routine
+ jb 1f
+2:
+ sub $0x1000,%ecx // step down one 4 KiB page
+ test %ecx,(%ecx) // and touch it, so the guard page is extended in order
+ sub $0x1000,%eax
+ cmp $0x1000,%eax
+ ja 2b
+1:
+ sub %eax,%ecx // probe the final, partial page
+ test %ecx,(%ecx)
+ pop %eax
+ pop %ecx
+ ret
+END_COMPILERRT_FUNCTION(__chkstk_ms)
+
+#endif // __i386__
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/chkstk.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/chkstk2.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/chkstk2.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/chkstk2.S (revision 351984)
@@ -0,0 +1,41 @@
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+#include "../assembly.h"
+
+#ifdef __i386__
+
+// _chkstk (_alloca) routine - probe stack between %esp and (%esp-%eax) in 4k increments,
+// then decrement %esp by %eax. Preserves all registers except %esp and flags.
+// This routine is Windows-specific.
+// http://msdn.microsoft.com/en-us/library/ms648426.aspx
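+//
+// Rough C view of the probe loop (illustrative only): touch one byte in
+// every 4 KiB page between the old and the new stack pointer, top down, so
+// the OS guard page is extended in order:
+//
+//   char *p = old_esp;
+//   while (size > 0x1000) { p -= 0x1000; *(volatile char *)p; size -= 0x1000; }
+//   p -= size; *(volatile char *)p;       // final, partial page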
+
+.text
+.balign 4
+DEFINE_COMPILERRT_FUNCTION(_alloca) // _chkstk and _alloca are the same function
+DEFINE_COMPILERRT_FUNCTION(__chkstk)
+ push %ecx
+ cmp $0x1000,%eax
+ lea 8(%esp),%ecx // esp before calling this routine -> ecx
+ jb 1f
+2:
+ sub $0x1000,%ecx
+ test %ecx,(%ecx)
+ sub $0x1000,%eax
+ cmp $0x1000,%eax
+ ja 2b
+1:
+ sub %eax,%ecx
+ test %ecx,(%ecx)
+
+ lea 4(%esp),%eax // load pointer to the return address into eax
+ mov %ecx,%esp // install the new top of stack pointer into esp
+ mov -4(%eax),%ecx // restore ecx
+ push (%eax) // push return address onto the stack
+ sub %esp,%eax // restore the original value in eax
+ ret
+END_COMPILERRT_FUNCTION(__chkstk)
+END_COMPILERRT_FUNCTION(_alloca)
+
+#endif // __i386__
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/chkstk2.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/divdi3.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/divdi3.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/divdi3.S (revision 351984)
@@ -0,0 +1,166 @@
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+#include "../assembly.h"
+
+// di_int __divdi3(di_int a, di_int b);
+
+// result = a / b.
+// both inputs and the output are 64-bit signed integers.
+// This will do whatever the underlying hardware is set to do on division by zero.
+// No other exceptions are generated, as the divide cannot overflow.
+//
+// This is targeted at 32-bit x86 *only*, as this can be done directly in hardware
+// on x86_64. The performance goal is ~40 cycles per divide, which is faster than
+// currently possible via simulation of integer divides on the x87 unit.
+//
+// Stephen Canon, December 2008
+
+#ifdef __i386__
+
+.text
+.balign 4
+DEFINE_COMPILERRT_FUNCTION(__divdi3)
+
+// This is currently implemented by wrapping the unsigned divide up in an absolute
+// value, then restoring the correct sign at the end of the computation. This could
+// certainly be improved upon.
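+//
+// A compact C model of that sign fixup (hedged; 'udiv64' is an illustrative
+// helper, and an arithmetic right shift of signed values is assumed):
+//
+//   int64_t sa = a >> 63, sb = b >> 63;         // 0 or -1
+//   uint64_t ua = ((uint64_t)a ^ sa) - sa;      // abs(a)
+//   uint64_t ub = ((uint64_t)b ^ sb) - sb;      // abs(b)
+//   int64_t s = sa ^ sb;                        // sign of the quotient
+//   return (int64_t)((udiv64(ua, ub) + s) ^ s); // conditional negate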
+
+ pushl %esi
+ movl 20(%esp), %edx // high word of b
+ movl 16(%esp), %eax // low word of b
+ movl %edx, %ecx
+ sarl $31, %ecx // (b < 0) ? -1 : 0
+ xorl %ecx, %eax
+ xorl %ecx, %edx // EDX:EAX = (b < 0) ? not(b) : b
+ subl %ecx, %eax
+ sbbl %ecx, %edx // EDX:EAX = abs(b)
+ movl %edx, 20(%esp)
+ movl %eax, 16(%esp) // store abs(b) back to stack
+ movl %ecx, %esi // set aside sign of b
+
+ movl 12(%esp), %edx // high word of a
+ movl 8(%esp), %eax // low word of a
+ movl %edx, %ecx
+ sarl $31, %ecx // (a < 0) ? -1 : 0
+ xorl %ecx, %eax
+ xorl %ecx, %edx // EDX:EAX = (a < 0) ? not(a) : a
+ subl %ecx, %eax
+ sbbl %ecx, %edx // EDX:EAX = abs(a)
+ movl %edx, 12(%esp)
+ movl %eax, 8(%esp) // store abs(a) back to stack
+ xorl %ecx, %esi // sign of result = (sign of a) ^ (sign of b)
+
+ pushl %ebx
+ movl 24(%esp), %ebx // Find the index i of the leading bit in b.
+ bsrl %ebx, %ecx // If the high word of b is zero, jump to
+ jz 9f // the code to handle that special case [9].
+
+ // High word of b is known to be non-zero on this branch
+
+ movl 20(%esp), %eax // Construct bhi, containing bits [1+i:32+i] of b
+
+ shrl %cl, %eax // Practically, this means that bhi is given by:
+ shrl %eax //
+ notl %ecx // bhi = (high word of b) << (31 - i) |
+ shll %cl, %ebx // (low word of b) >> (1 + i)
+ orl %eax, %ebx //
+ movl 16(%esp), %edx // Load the high and low words of a, and jump
+ movl 12(%esp), %eax // to [1] if the high word is at least bhi
+ cmpl %ebx, %edx // to avoid overflowing the upcoming divide.
+ jae 1f
+
+ // High word of a is less than (b >> (1 + i)) on this branch
+
+ divl %ebx // eax <-- qs, edx <-- r such that ahi:alo = bs*qs + r
+
+ pushl %edi
+ notl %ecx
+ shrl %eax
+ shrl %cl, %eax // q = qs >> (1 + i)
+ movl %eax, %edi
+ mull 24(%esp) // q*blo
+ movl 16(%esp), %ebx
+ movl 20(%esp), %ecx // ECX:EBX = a
+ subl %eax, %ebx
+ sbbl %edx, %ecx // ECX:EBX = a - q*blo
+ movl 28(%esp), %eax
+ imull %edi, %eax // q*bhi
+ subl %eax, %ecx // ECX:EBX = a - q*b
+ sbbl $0, %edi // decrement q if remainder is negative
+ xorl %edx, %edx
+ movl %edi, %eax
+
+ addl %esi, %eax // Restore correct sign to result
+ adcl %esi, %edx
+ xorl %esi, %eax
+ xorl %esi, %edx
+ popl %edi // Restore callee-save registers
+ popl %ebx
+ popl %esi
+ retl // Return
+
+
+1: // High word of a is greater than or equal to (b >> (1 + i)) on this branch
+
+ subl %ebx, %edx // subtract bhi from ahi so that divide will not
+ divl %ebx // overflow, and find q and r such that
+ //
+ // ahi:alo = (1:q)*bhi + r
+ //
+ // Note that q is a number in (31-i).(1+i)
+ // fixed-point format.
+
+ pushl %edi
+ notl %ecx
+ shrl %eax
+ orl $0x80000000, %eax
+ shrl %cl, %eax // q = (1:qs) >> (1 + i)
+ movl %eax, %edi
+ mull 24(%esp) // q*blo
+ movl 16(%esp), %ebx
+ movl 20(%esp), %ecx // ECX:EBX = a
+ subl %eax, %ebx
+ sbbl %edx, %ecx // ECX:EBX = a - q*blo
+ movl 28(%esp), %eax
+ imull %edi, %eax // q*bhi
+ subl %eax, %ecx // ECX:EBX = a - q*b
+ sbbl $0, %edi // decrement q if remainder is negative
+ xorl %edx, %edx
+ movl %edi, %eax
+
+ addl %esi, %eax // Restore correct sign to result
+ adcl %esi, %edx
+ xorl %esi, %eax
+ xorl %esi, %edx
+ popl %edi // Restore callee-save registers
+ popl %ebx
+ popl %esi
+ retl // Return
+
+
+9: // High word of b is zero on this branch
+
+ movl 16(%esp), %eax // Find qhi and rhi such that
+ movl 20(%esp), %ecx //
+ xorl %edx, %edx // ahi = qhi*b + rhi with 0 ≤ rhi < b
+ divl %ecx //
+ movl %eax, %ebx //
+ movl 12(%esp), %eax // Find qlo such that
+ divl %ecx //
+ movl %ebx, %edx // rhi:alo = qlo*b + rlo with 0 ≤ rlo < b
+
+ addl %esi, %eax // Restore correct sign to result
+ adcl %esi, %edx
+ xorl %esi, %eax
+ xorl %esi, %edx
+ popl %ebx // Restore callee-save registers
+ popl %esi
+ retl // Return
+END_COMPILERRT_FUNCTION(__divdi3)
+
+#endif // __i386__
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/divdi3.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/floatdidf.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/floatdidf.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/floatdidf.S (revision 351984)
@@ -0,0 +1,43 @@
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+#include "../assembly.h"
+
+// double __floatdidf(di_int a);
+
+#ifdef __i386__
+
+CONST_SECTION
+
+ .balign 16
+twop52:
+ .quad 0x4330000000000000
+
+ .balign 16
+twop32:
+ .quad 0x41f0000000000000
+
+#define REL_ADDR(_a) (_a)-0b(%eax)
+
+.text
+.balign 4
+DEFINE_COMPILERRT_FUNCTION(__floatdidf)
+ cvtsi2sd 8(%esp), %xmm1 // high 32 bits of a, converted to double (signed)
+ movss 4(%esp), %xmm0 // low 32 bits of a
+ calll 0f
+0: popl %eax
+ mulsd REL_ADDR(twop32), %xmm1 // a_hi * 0x1p32 as a double (exact)
+ movsd REL_ADDR(twop52), %xmm2 // 0x1.0p52
+ subsd %xmm2, %xmm1 // a_hi * 0x1p32 - 0x1p52 (no rounding occurs)
+ orpd %xmm2, %xmm0 // 0x1p52 + a_lo (no rounding occurs)
+ addsd %xmm1, %xmm0 // a_hi * 0x1p32 + a_lo (the only rounding step)
+ movsd %xmm0, 4(%esp)
+ fldl 4(%esp)
+ ret
+END_COMPILERRT_FUNCTION(__floatdidf)
+
+#endif // __i386__
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/floatdidf.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/floatdisf.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/floatdisf.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/floatdisf.S (revision 351984)
@@ -0,0 +1,36 @@
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+#include "../assembly.h"
+
+// float __floatdisf(di_int a);
+
+// This routine has some extra memory traffic, loading the 64-bit input via two
+// 32-bit loads, then immediately storing it back to the stack via a single 64-bit
+// store. This is to avoid a write-small, read-large stall.
+// However, if callers of this routine can be safely assumed to store the argument
+// via a 64-bit store, this is unnecessary memory traffic, and should be avoided.
+// It can be turned off by defining the TRUST_CALLERS_USE_64_BIT_STORES macro.
+
+#ifdef __i386__
+
+.text
+.balign 4
+DEFINE_COMPILERRT_FUNCTION(__floatdisf)
+#ifndef TRUST_CALLERS_USE_64_BIT_STORES
+ movd 4(%esp), %xmm0
+ movd 8(%esp), %xmm1
+ punpckldq %xmm1, %xmm0
+ movq %xmm0, 4(%esp)
+#endif
+ fildll 4(%esp)
+ fstps 4(%esp)
+ flds 4(%esp)
+ ret
+END_COMPILERRT_FUNCTION(__floatdisf)
+
+#endif // __i386__
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/floatdisf.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/floatdixf.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/floatdixf.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/floatdixf.S (revision 351984)
@@ -0,0 +1,34 @@
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+#include "../assembly.h"
+
+// long double __floatdixf(di_int a);
+
+#ifdef __i386__
+
+// This routine has some extra memory traffic, loading the 64-bit input via two
+// 32-bit loads, then immediately storing it back to the stack via a single 64-bit
+// store. This is to avoid a write-small, read-large stall.
+// However, if callers of this routine can be safely assumed to store the argument
+// via a 64-bit store, this is unnecessary memory traffic, and should be avoided.
+// It can be turned off by defining the TRUST_CALLERS_USE_64_BIT_STORES macro.
+
+.text
+.balign 4
+DEFINE_COMPILERRT_FUNCTION(__floatdixf)
+#ifndef TRUST_CALLERS_USE_64_BIT_STORES
+ movd 4(%esp), %xmm0
+ movd 8(%esp), %xmm1
+ punpckldq %xmm1, %xmm0
+ movq %xmm0, 4(%esp)
+#endif
+ fildll 4(%esp)
+ ret
+END_COMPILERRT_FUNCTION(__floatdixf)
+
+#endif // __i386__
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/floatdixf.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/floatundidf.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/floatundidf.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/floatundidf.S (revision 351984)
@@ -0,0 +1,54 @@
+//===-- floatundidf.S - Implement __floatundidf for i386 ------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __floatundidf for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+// double __floatundidf(du_int a);
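+//
+// Hedged sketch of the trick below: each 32-bit half of a is planted in the
+// mantissa of a biased double, so every step is exact until the final add:
+//
+//   hi_d = bits(0x4530000000000000 | hi)  // == 0x1p84 + hi * 0x1p32, exactly
+//   lo_d = bits(0x4330000000000000 | lo)  // == 0x1p52 + lo, exactly
+//   result = (hi_d - (0x1p84 + 0x1p52)) + lo_d  // single rounding, here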
+
+#ifdef __i386__
+
+CONST_SECTION
+
+ .balign 16
+twop52:
+ .quad 0x4330000000000000
+
+ .balign 16
+twop84_plus_twop52:
+ .quad 0x4530000000100000
+
+ .balign 16
+twop84:
+ .quad 0x4530000000000000
+
+#define REL_ADDR(_a) (_a)-0b(%eax)
+
+.text
+.balign 4
+DEFINE_COMPILERRT_FUNCTION(__floatundidf)
+ movss 8(%esp), %xmm1 // high 32 bits of a
+ movss 4(%esp), %xmm0 // low 32 bits of a
+ calll 0f
+0: popl %eax
+ orpd REL_ADDR(twop84), %xmm1 // 0x1p84 + a_hi (no rounding occurs)
+ subsd REL_ADDR(twop84_plus_twop52), %xmm1 // a_hi - 0x1p52 (no rounding occurs)
+ orpd REL_ADDR(twop52), %xmm0 // 0x1p52 + a_lo (no rounding occurs)
+ addsd %xmm1, %xmm0 // a_hi + a_lo (round happens here)
+ movsd %xmm0, 4(%esp)
+ fldl 4(%esp)
+ ret
+END_COMPILERRT_FUNCTION(__floatundidf)
+
+#endif // __i386__
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/floatundidf.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/floatundisf.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/floatundisf.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/floatundisf.S (revision 351984)
@@ -0,0 +1,109 @@
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+#include "../assembly.h"
+
+// float __floatundisf(du_int a);
+
+// Note that there is a hardware instruction, fildll, that does most of what
+// this function needs to do. However, because of our ia32 ABI, it will take
+// a write-small read-large stall, so the software implementation here is
+// actually several cycles faster.
+
+// This is a branch-free implementation. A branchy implementation might be
+// faster for the common case if you know something a priori about the input
+// distribution.
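+
+// Hedged sketch of the branch-free idea implemented below: inputs under 2^52
+// convert exactly through a double. For a >= 2^52, the value is shifted right
+// by 12 so it fits in 52 bits, the 12 shifted-out bits are OR-ed back into
+// the low bits as "sticky" information so the rounding to float stays
+// correct, and the exponent is patched by +12 afterwards:
+//
+//   big = (a >= 0x1p52);                     // computed branch-free below
+//   v   = big ? (a >> 12) | (a & 0xfff) : a; // the OR keeps the sticky bits
+//   f   = (float)(double)v;                  // the one live rounding
+//   if (big) f = scale_by_2pow12(f);         // done by adding 12 << 23 to the bits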
+
+/* branch-free x87 implementation - one cycle slower than without x87.
+
+#ifdef __i386__
+
+CONST_SECTION
+.balign 3
+
+ .quad 0x43f0000000000000
+twop64: .quad 0x0000000000000000
+
+#define TWOp64 twop64-0b(%ecx,%eax,8)
+
+.text
+.balign 4
+DEFINE_COMPILERRT_FUNCTION(__floatundisf)
+ movl 8(%esp), %eax
+ movd 8(%esp), %xmm1
+ movd 4(%esp), %xmm0
+ punpckldq %xmm1, %xmm0
+ calll 0f
+0: popl %ecx
+ sarl $31, %eax
+ movq %xmm0, 4(%esp)
+ fildll 4(%esp)
+ faddl TWOp64
+ fstps 4(%esp)
+ flds 4(%esp)
+ ret
+END_COMPILERRT_FUNCTION(__floatundisf)
+
+#endif // __i386__
+
+*/
+
+// branch-free, x87-free implementation - faster at the expense of code size
+
+#ifdef __i386__
+
+CONST_SECTION
+
+ .balign 16
+twop52:
+ .quad 0x4330000000000000
+ .quad 0x0000000000000fff
+
+ .balign 16
+sticky:
+ .quad 0x0000000000000000
+ .long 0x00000012
+
+ .balign 16
+twelve:
+ .long 0x00000000
+
+#define TWOp52 twop52-0b(%ecx)
+#define STICKY sticky-0b(%ecx,%eax,8)
+
+.text
+.balign 4
+DEFINE_COMPILERRT_FUNCTION(__floatundisf)
+ movl 8(%esp), %eax
+ movd 8(%esp), %xmm1
+ movd 4(%esp), %xmm0
+ punpckldq %xmm1, %xmm0
+
+ calll 0f
+0: popl %ecx
+ shrl %eax // high 31 bits of input as sint32
+ addl $0x7ff80000, %eax
+ sarl $31, %eax // (big input) ? -1 : 0
+ movsd STICKY, %xmm1 // (big input) ? 0xfff : 0
+ movl $12, %edx
+ andl %eax, %edx // (big input) ? 12 : 0
+ movd %edx, %xmm3
+ andpd %xmm0, %xmm1 // (big input) ? input & 0xfff : 0
+ movsd TWOp52, %xmm2 // 0x1.0p52
+ psrlq %xmm3, %xmm0 // (big input) ? input >> 12 : input
+ orpd %xmm2, %xmm1 // 0x1.0p52 + ((big input) ? input & 0xfff : input)
+ orpd %xmm1, %xmm0 // 0x1.0p52 + ((big input) ? (input >> 12 | input & 0xfff) : input)
+ subsd %xmm2, %xmm0 // (double)((big input) ? (input >> 12 | input & 0xfff) : input)
+ cvtsd2ss %xmm0, %xmm0 // (float)((big input) ? (input >> 12 | input & 0xfff) : input)
+ pslld $23, %xmm3
+ paddd %xmm3, %xmm0 // (float)input
+ movd %xmm0, 4(%esp)
+ flds 4(%esp)
+ ret
+END_COMPILERRT_FUNCTION(__floatundisf)
+
+#endif // __i386__
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/floatundisf.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/floatundixf.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/floatundixf.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/floatundixf.S (revision 351984)
@@ -0,0 +1,47 @@
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+#include "../assembly.h"
+
+// long double __floatundixf(du_int a);
+
+#ifdef __i386__
+
+CONST_SECTION
+
+ .balign 16
+twop52:
+ .quad 0x4330000000000000
+
+ .balign 16
+twop84_plus_twop52_neg:
+ .quad 0xc530000000100000
+
+ .balign 16
+twop84:
+ .quad 0x4530000000000000
+
+#define REL_ADDR(_a) (_a)-0b(%eax)
+
+.text
+.balign 4
+DEFINE_COMPILERRT_FUNCTION(__floatundixf)
+ calll 0f
+0: popl %eax
+ movss 8(%esp), %xmm0 // hi 32 bits of input
+ movss 4(%esp), %xmm1 // lo 32 bits of input
+ orpd REL_ADDR(twop84), %xmm0 // 2^84 + hi (as a double)
+ orpd REL_ADDR(twop52), %xmm1 // 2^52 + lo (as a double)
+ addsd REL_ADDR(twop84_plus_twop52_neg), %xmm0 // hi - 2^52 (no rounding occurs)
+ movsd %xmm1, 4(%esp)
+ fldl 4(%esp)
+ movsd %xmm0, 4(%esp)
+ faddl 4(%esp)
+ ret
+END_COMPILERRT_FUNCTION(__floatundixf)
+
+#endif // __i386__
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/floatundixf.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/lshrdi3.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/lshrdi3.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/lshrdi3.S (revision 351984)
@@ -0,0 +1,63 @@
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+#include "../assembly.h"
+
+// di_int __lshrdi3(di_int input, int count);
+
+// This routine has some extra memory traffic, loading the 64-bit input via two
+// 32-bit loads, then immediately storing it back to the stack via a single 64-bit
+// store. This is to avoid a write-small, read-large stall.
+// However, if callers of this routine can be safely assumed to store the argument
+// via a 64-bit store, this is unnecessary memory traffic, and should be avoided.
+// It can be turned off by defining the TRUST_CALLERS_USE_64_BIT_STORES macro.
+
+#ifdef __i386__
+#ifdef __SSE2__
+
+.text
+.balign 4
+DEFINE_COMPILERRT_FUNCTION(__lshrdi3)
+ movd 12(%esp), %xmm2 // Load count
+#ifndef TRUST_CALLERS_USE_64_BIT_STORES
+ movd 4(%esp), %xmm0
+ movd 8(%esp), %xmm1
+ punpckldq %xmm1, %xmm0 // Load input
+#else
+ movq 4(%esp), %xmm0 // Load input
+#endif
+ psrlq %xmm2, %xmm0 // shift input by count
+ movd %xmm0, %eax
+ psrlq $32, %xmm0
+ movd %xmm0, %edx
+ ret
+END_COMPILERRT_FUNCTION(__lshrdi3)
+
+#else // SSE2 is unavailable: use GPRs instead.
+
+.text
+.balign 4
+DEFINE_COMPILERRT_FUNCTION(__lshrdi3)
+ movl 12(%esp), %ecx // Load count
+ movl 8(%esp), %edx // Load high
+ movl 4(%esp), %eax // Load low
+
+ testl $0x20, %ecx // If count >= 32
+ jnz 1f // goto 1
+
+ shrdl %cl, %edx, %eax // right shift low by count
+ shrl %cl, %edx // right shift high by count
+ ret
+
+1: movl %edx, %eax // Move high to low
+ xorl %edx, %edx // clear high
+ shrl %cl, %eax // shift low by count - 32
+ ret
+END_COMPILERRT_FUNCTION(__lshrdi3)
+
+#endif // __SSE2__
+#endif // __i386__
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/lshrdi3.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
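The count >= 32 path above works because x86 masks 32-bit shift counts to
their low five bits, so after the high word is moved down, a single shrl by
%cl shifts by count - 32 for free. A C model of the GPR path (the function
name is illustrative):

    #include <stdint.h>

    static uint64_t lshrdi3_model(uint64_t input, unsigned count) {
      uint32_t lo = (uint32_t)input;
      uint32_t hi = (uint32_t)(input >> 32);
      if (count & 0x20)              // 32 <= count < 64
        return hi >> (count & 31);   // high word, shifted by count - 32
      if (count == 0)                // avoid the undefined 32-bit shift below
        return input;
      uint32_t lo_out = (lo >> count) | (hi << (32 - count)); // shrdl
      uint32_t hi_out = hi >> count;                          // shrl
      return ((uint64_t)hi_out << 32) | lo_out;
    }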
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/moddi3.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/moddi3.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/moddi3.S (revision 351984)
@@ -0,0 +1,170 @@
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+#include "../assembly.h"
+
+// di_int __moddi3(di_int a, di_int b);
+
+// result = remainder of a / b.
+// both inputs and the output are 64-bit signed integers.
+// This will do whatever the underlying hardware is set to do on division by zero.
+// No other exceptions are generated, as the divide cannot overflow.
+//
+// This is targeted at 32-bit x86 *only*, as this can be done directly in hardware
+// on x86_64. The performance goal is ~40 cycles per divide, which is faster than
+// currently possible via simulation of integer divides on the x87 unit.
+//
+
+// Stephen Canon, December 2008
+
+#ifdef __i386__
+
+.text
+.balign 4
+DEFINE_COMPILERRT_FUNCTION(__moddi3)
+
+// This is currently implemented by taking absolute values, performing the
+// unsigned modulus, and then restoring the sign. This could certainly be
+// improved upon.
+
+ pushl %esi
+ movl 20(%esp), %edx // high word of b
+ movl 16(%esp), %eax // low word of b
+ movl %edx, %ecx
+ sarl $31, %ecx // (b < 0) ? -1 : 0
+ xorl %ecx, %eax
+ xorl %ecx, %edx // EDX:EAX = (b < 0) ? not(b) : b
+ subl %ecx, %eax
+ sbbl %ecx, %edx // EDX:EAX = abs(b)
+ movl %edx, 20(%esp)
+ movl %eax, 16(%esp) // store abs(b) back to stack
+
+ movl 12(%esp), %edx // high word of a
+ movl 8(%esp), %eax // low word of a
+ movl %edx, %ecx
+ sarl $31, %ecx // (a < 0) ? -1 : 0
+ xorl %ecx, %eax
+ xorl %ecx, %edx // EDX:EAX = (a < 0) ? not(a) : a
+ subl %ecx, %eax
+ sbbl %ecx, %edx // EDX:EAX = abs(a)
+ movl %edx, 12(%esp)
+ movl %eax, 8(%esp) // store abs(a) back to stack
+ movl %ecx, %esi // set aside sign of a
+
+ pushl %ebx
+ movl 24(%esp), %ebx // Find the index i of the leading bit in b.
+ bsrl %ebx, %ecx // If the high word of b is zero, jump to
+ jz 9f // the code to handle that special case [9].
+
+ // High word of b is known to be non-zero on this branch
+
+ movl 20(%esp), %eax // Construct bhi, containing bits [1+i:32+i] of b
+
+ shrl %cl, %eax // Practically, this means that bhi is given by:
+ shrl %eax //
+ notl %ecx // bhi = (high word of b) << (31 - i) |
+ shll %cl, %ebx // (low word of b) >> (1 + i)
+ orl %eax, %ebx //
+ movl 16(%esp), %edx // Load the high and low words of a, and jump
+ movl 12(%esp), %eax // to [2] if the high word is at least bhi
+ cmpl %ebx, %edx // to avoid overflowing the upcoming divide.
+ jae 2f
+
+ // High word of a is less than (b >> (1 + i)) on this branch
+
+ divl %ebx // eax <-- qs, edx <-- r such that ahi:alo = bhi*qs + r
+
+ pushl %edi
+ notl %ecx
+ shrl %eax
+ shrl %cl, %eax // q = qs >> (1 + i)
+ movl %eax, %edi
+ mull 24(%esp) // q*blo
+ movl 16(%esp), %ebx
+ movl 20(%esp), %ecx // ECX:EBX = a
+ subl %eax, %ebx
+ sbbl %edx, %ecx // ECX:EBX = a - q*blo
+ movl 28(%esp), %eax
+ imull %edi, %eax // q*bhi
+ subl %eax, %ecx // ECX:EBX = a - q*b
+
+ jnc 1f // if positive, this is the result.
+ addl 24(%esp), %ebx // otherwise
+ adcl 28(%esp), %ecx // ECX:EBX = a - (q-1)*b = result
+1: movl %ebx, %eax
+ movl %ecx, %edx
+
+ addl %esi, %eax // Restore correct sign to result
+ adcl %esi, %edx
+ xorl %esi, %eax
+ xorl %esi, %edx
+ popl %edi // Restore callee-save registers
+ popl %ebx
+ popl %esi
+ retl // Return
+
+2: // High word of a is greater than or equal to (b >> (1 + i)) on this branch
+
+ subl %ebx, %edx // subtract bhi from ahi so that divide will not
+ divl %ebx // overflow, and find q and r such that
+ //
+ // ahi:alo = (1:q)*bhi + r
+ //
+ // Note that q is a number in (31-i).(1+i)
+ // fixed-point format.
+
+ pushl %edi
+ notl %ecx
+ shrl %eax
+ orl $0x80000000, %eax
+ shrl %cl, %eax // q = (1:qs) >> (1 + i)
+ movl %eax, %edi
+ mull 24(%esp) // q*blo
+ movl 16(%esp), %ebx
+ movl 20(%esp), %ecx // ECX:EBX = a
+ subl %eax, %ebx
+ sbbl %edx, %ecx // ECX:EBX = a - q*blo
+ movl 28(%esp), %eax
+ imull %edi, %eax // q*bhi
+ subl %eax, %ecx // ECX:EBX = a - q*b
+
+ jnc 3f // if positive, this is the result.
+ addl 24(%esp), %ebx // otherwise
+ adcl 28(%esp), %ecx // ECX:EBX = a - (q-1)*b = result
+3: movl %ebx, %eax
+ movl %ecx, %edx
+
+ addl %esi, %eax // Restore correct sign to result
+ adcl %esi, %edx
+ xorl %esi, %eax
+ xorl %esi, %edx
+ popl %edi // Restore callee-save registers
+ popl %ebx
+ popl %esi
+ retl // Return
+
+9: // High word of b is zero on this branch
+
+ movl 16(%esp), %eax // Find qhi and rhi such that
+ movl 20(%esp), %ecx //
+ xorl %edx, %edx // ahi = qhi*b + rhi with 0 ≤ rhi < b
+ divl %ecx //
+ movl %eax, %ebx //
+ movl 12(%esp), %eax // Find rlo such that
+ divl %ecx //
+ movl %edx, %eax // rhi:alo = qlo*b + rlo with 0 ≤ rlo < b
+ popl %ebx //
+ xorl %edx, %edx // and return 0:rlo
+
+ addl %esi, %eax // Restore correct sign to result
+ adcl %esi, %edx
+ xorl %esi, %eax
+ xorl %esi, %edx
+ popl %esi
+ retl // Return
+END_COMPILERRT_FUNCTION(__moddi3)
+
+#endif // __i386__
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/moddi3.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
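The sign handling above never branches: sarl materializes s = -1 (negative)
or s = 0, and two's-complement negation is then applied conditionally, once
with xor-then-subtract to take the absolute values and once with
add-then-xor to restore the sign. Both forms rest on the same identity,
shown here as a C sketch with a hypothetical helper name:

    #include <stdint.h>

    // With s == 0 the value passes through unchanged; with s == -1 both
    //   (x ^ s) - s   and   (x + s) ^ s
    // compute -x, since ~(x - 1) == -x in two's complement.
    static int64_t negate_if(int64_t x, int64_t s) {
      return (x + s) ^ s; // the asm's addl/adcl pair followed by xorl/xorl
    }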
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/muldi3.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/muldi3.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/muldi3.S (revision 351984)
@@ -0,0 +1,34 @@
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+#include "../assembly.h"
+
+// di_int __muldi3(di_int a, di_int b);
+
+#ifdef __i386__
+
+.text
+.balign 4
+DEFINE_COMPILERRT_FUNCTION(__muldi3)
+ pushl %ebx
+ movl 16(%esp), %eax // b.lo
+ movl 12(%esp), %ecx // a.hi
+ imull %eax, %ecx // b.lo * a.hi
+
+ movl 8(%esp), %edx // a.lo
+ movl 20(%esp), %ebx // b.hi
+ imull %edx, %ebx // a.lo * b.hi
+
+ mull %edx // EDX:EAX = a.lo * b.lo
+ addl %ecx, %ebx // EBX = (a.lo*b.hi + a.hi*b.lo)
+ addl %ebx, %edx
+
+ popl %ebx
+ retl
+END_COMPILERRT_FUNCTION(__muldi3)
+
+#endif // __i386__
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/muldi3.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
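Only three multiplies appear above because the product is truncated to 64
bits: the a.hi*b.hi term would land entirely above bit 63, and the two cross
terms only need their low 32 bits (hence imull), since they enter at bit 32.
A C model of the same computation (illustrative name):

    #include <stdint.h>

    static uint64_t muldi3_model(uint64_t a, uint64_t b) {
      uint32_t alo = (uint32_t)a, ahi = (uint32_t)(a >> 32);
      uint32_t blo = (uint32_t)b, bhi = (uint32_t)(b >> 32);
      uint32_t cross = alo * bhi + ahi * blo; // low 32 bits of each product
      return (uint64_t)alo * blo + ((uint64_t)cross << 32); // mull + addl
    }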
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/udivdi3.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/udivdi3.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/udivdi3.S (revision 351984)
@@ -0,0 +1,119 @@
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+#include "../assembly.h"
+
+// du_int __udivdi3(du_int a, du_int b);
+
+// result = a / b.
+// both inputs and the output are 64-bit unsigned integers.
+// This will do whatever the underlying hardware is set to do on division by zero.
+// No other exceptions are generated, as the divide cannot overflow.
+//
+// This is targeted at 32-bit x86 *only*, as this can be done directly in hardware
+// on x86_64. The performance goal is ~40 cycles per divide, which is faster than
+// currently possible via simulation of integer divides on the x87 unit.
+//
+// Stephen Canon, December 2008
+
+#ifdef __i386__
+
+.text
+.balign 4
+DEFINE_COMPILERRT_FUNCTION(__udivdi3)
+
+ pushl %ebx
+ movl 20(%esp), %ebx // Find the index i of the leading bit in b.
+ bsrl %ebx, %ecx // If the high word of b is zero, jump to
+ jz 9f // the code to handle that special case [9].
+
+ // High word of b is known to be non-zero on this branch
+
+ movl 16(%esp), %eax // Construct bhi, containing bits [1+i:32+i] of b
+
+ shrl %cl, %eax // Practically, this means that bhi is given by:
+ shrl %eax //
+ notl %ecx // bhi = (high word of b) << (31 - i) |
+ shll %cl, %ebx // (low word of b) >> (1 + i)
+ orl %eax, %ebx //
+ movl 12(%esp), %edx // Load the high and low words of a, and jump
+ movl 8(%esp), %eax // to [1] if the high word is at least bhi
+ cmpl %ebx, %edx // to avoid overflowing the upcoming divide.
+ jae 1f
+
+ // High word of a is less than (b >> (1 + i)) on this branch
+
+ divl %ebx // eax <-- qs, edx <-- r such that ahi:alo = bhi*qs + r
+
+ pushl %edi
+ notl %ecx
+ shrl %eax
+ shrl %cl, %eax // q = qs >> (1 + i)
+ movl %eax, %edi
+ mull 20(%esp) // q*blo
+ movl 12(%esp), %ebx
+ movl 16(%esp), %ecx // ECX:EBX = a
+ subl %eax, %ebx
+ sbbl %edx, %ecx // ECX:EBX = a - q*blo
+ movl 24(%esp), %eax
+ imull %edi, %eax // q*bhi
+ subl %eax, %ecx // ECX:EBX = a - q*b
+ sbbl $0, %edi // decrement q if remainder is negative
+ xorl %edx, %edx
+ movl %edi, %eax
+ popl %edi
+ popl %ebx
+ retl
+
+
+1: // High word of a is greater than or equal to (b >> (1 + i)) on this branch
+
+ subl %ebx, %edx // subtract bhi from ahi so that divide will not
+ divl %ebx // overflow, and find q and r such that
+ //
+ // ahi:alo = (1:q)*bhi + r
+ //
+ // Note that q is a number in (31-i).(1+i)
+ // fixed-point format.
+
+ pushl %edi
+ notl %ecx
+ shrl %eax
+ orl $0x80000000, %eax
+ shrl %cl, %eax // q = (1:qs) >> (1 + i)
+ movl %eax, %edi
+ mull 20(%esp) // q*blo
+ movl 12(%esp), %ebx
+ movl 16(%esp), %ecx // ECX:EBX = a
+ subl %eax, %ebx
+ sbbl %edx, %ecx // ECX:EBX = a - q*blo
+ movl 24(%esp), %eax
+ imull %edi, %eax // q*bhi
+ subl %eax, %ecx // ECX:EBX = a - q*b
+ sbbl $0, %edi // decrement q if remainder is negative
+ xorl %edx, %edx
+ movl %edi, %eax
+ popl %edi
+ popl %ebx
+ retl
+
+
+9: // High word of b is zero on this branch
+
+ movl 12(%esp), %eax // Find qhi and rhi such that
+ movl 16(%esp), %ecx //
+ xorl %edx, %edx // ahi = qhi*b + rhi with 0 ≤ rhi < b
+ divl %ecx //
+ movl %eax, %ebx //
+ movl 8(%esp), %eax // Find qlo such that
+ divl %ecx //
+ movl %ebx, %edx // rhi:alo = qlo*b + rlo with 0 ≤ rlo < b
+ popl %ebx //
+ retl // and return qhi:qlo
+END_COMPILERRT_FUNCTION(__udivdi3)
+
+#endif // __i386__
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/udivdi3.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
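Both non-trivial branches follow the same estimate-and-correct scheme:
divide by bhi, the top 32 significant bits of b, to get a quotient estimate
that is never too small and at most one too large, then multiply back and
fix up. A C model of the routine under that invariant (a sketch: it uses a
128-bit product for the final check instead of the assembly's carry chain):

    #include <stdint.h>

    static uint64_t udivdi3_model(uint64_t a, uint64_t b) {
      uint32_t b_high = (uint32_t)(b >> 32);
      if (b_high == 0)                         // branch [9]: 32-bit divides
        return a / (uint32_t)b;
      int i = 31 - __builtin_clz(b_high);      // leading bit of b's high word
      uint32_t bhi = (uint32_t)(b >> (1 + i)); // bits [1+i : 32+i] of b
      uint64_t q;
      if ((a >> 32) < bhi)                     // divl cannot overflow here
        q = (a / bhi) >> (1 + i);              // q = qs >> (1 + i)
      else                                     // branch [1]: pre-subtract bhi:0
        q = (((a - ((uint64_t)bhi << 32)) / bhi) | (1ull << 32)) >> (1 + i);
      if ((unsigned __int128)q * b > a)        // remainder would be negative,
        q -= 1;                                // so the estimate was one too big
      return q;
    }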
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/umoddi3.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/umoddi3.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/umoddi3.S (revision 351984)
@@ -0,0 +1,130 @@
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+#include "../assembly.h"
+
+// du_int __umoddi3(du_int a, du_int b);
+
+// result = remainder of a / b.
+// both inputs and the output are 64-bit unsigned integers.
+// This will do whatever the underlying hardware is set to do on division by zero.
+// No other exceptions are generated, as the divide cannot overflow.
+//
+// This is targeted at 32-bit x86 *only*, as this can be done directly in hardware
+// on x86_64. The performance goal is ~40 cycles per divide, which is faster than
+// currently possible via simulation of integer divides on the x87 unit.
+//
+
+// Stephen Canon, December 2008
+
+#ifdef __i386__
+
+.text
+.balign 4
+DEFINE_COMPILERRT_FUNCTION(__umoddi3)
+
+ pushl %ebx
+ movl 20(%esp), %ebx // Find the index i of the leading bit in b.
+ bsrl %ebx, %ecx // If the high word of b is zero, jump to
+ jz 9f // the code to handle that special case [9].
+
+ // High word of b is known to be non-zero on this branch
+
+ movl 16(%esp), %eax // Construct bhi, containing bits [1+i:32+i] of b
+
+ shrl %cl, %eax // Practically, this means that bhi is given by:
+ shrl %eax //
+ notl %ecx // bhi = (high word of b) << (31 - i) |
+ shll %cl, %ebx // (low word of b) >> (1 + i)
+ orl %eax, %ebx //
+ movl 12(%esp), %edx // Load the high and low words of a, and jump
+ movl 8(%esp), %eax // to [2] if the high word is at least bhi
+ cmpl %ebx, %edx // to avoid overflowing the upcoming divide.
+ jae 2f
+
+ // High word of a is less than (b >> (1 + i)) on this branch
+
+ divl %ebx // eax <-- qs, edx <-- r such that ahi:alo = bhi*qs + r
+
+ pushl %edi
+ notl %ecx
+ shrl %eax
+ shrl %cl, %eax // q = qs >> (1 + i)
+ movl %eax, %edi
+ mull 20(%esp) // q*blo
+ movl 12(%esp), %ebx
+ movl 16(%esp), %ecx // ECX:EBX = a
+ subl %eax, %ebx
+ sbbl %edx, %ecx // ECX:EBX = a - q*blo
+ movl 24(%esp), %eax
+ imull %edi, %eax // q*bhi
+ subl %eax, %ecx // ECX:EBX = a - q*b
+
+ jnc 1f // if positive, this is the result.
+ addl 20(%esp), %ebx // otherwise
+ adcl 24(%esp), %ecx // ECX:EBX = a - (q-1)*b = result
+1: movl %ebx, %eax
+ movl %ecx, %edx
+
+ popl %edi
+ popl %ebx
+ retl
+
+
+2: // High word of a is greater than or equal to (b >> (1 + i)) on this branch
+
+ subl %ebx, %edx // subtract bhi from ahi so that divide will not
+ divl %ebx // overflow, and find q and r such that
+ //
+ // ahi:alo = (1:q)*bhi + r
+ //
+ // Note that q is a number in (31-i).(1+i)
+ // fixed-point format.
+
+ pushl %edi
+ notl %ecx
+ shrl %eax
+ orl $0x80000000, %eax
+ shrl %cl, %eax // q = (1:qs) >> (1 + i)
+ movl %eax, %edi
+ mull 20(%esp) // q*blo
+ movl 12(%esp), %ebx
+ movl 16(%esp), %ecx // ECX:EBX = a
+ subl %eax, %ebx
+ sbbl %edx, %ecx // ECX:EBX = a - q*blo
+ movl 24(%esp), %eax
+ imull %edi, %eax // q*bhi
+ subl %eax, %ecx // ECX:EBX = a - q*b
+
+ jnc 3f // if positive, this is the result.
+ addl 20(%esp), %ebx // otherwise
+ adcl 24(%esp), %ecx // ECX:EBX = a - (q-1)*b = result
+3: movl %ebx, %eax
+ movl %ecx, %edx
+
+ popl %edi
+ popl %ebx
+ retl
+
+
+
+9: // High word of b is zero on this branch
+
+ movl 12(%esp), %eax // Find qhi and rhi such that
+ movl 16(%esp), %ecx //
+ xorl %edx, %edx // ahi = qhi*b + rhi with 0 ≤ rhi < b
+ divl %ecx //
+ movl %eax, %ebx //
+ movl 8(%esp), %eax // Find rlo such that
+ divl %ecx //
+ movl %edx, %eax // rhi:alo = qlo*b + rlo with 0 ≤ rlo < b
+ popl %ebx //
+ xorl %edx, %edx // and return 0:rlo
+ retl //
+END_COMPILERRT_FUNCTION(__umoddi3)
+
+#endif // __i386__
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/i386/umoddi3.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/int_endianness.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/int_endianness.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/int_endianness.h (revision 351984)
@@ -0,0 +1,114 @@
+//===-- int_endianness.h - configuration header for compiler-rt -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a configuration header for compiler-rt.
+// This file is not part of the interface of this library.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef INT_ENDIANNESS_H
+#define INT_ENDIANNESS_H
+
+#if defined(__BYTE_ORDER__) && defined(__ORDER_BIG_ENDIAN__) && \
+ defined(__ORDER_LITTLE_ENDIAN__)
+
+// Clang and GCC provide built-in endianness definitions.
+#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+#define _YUGA_LITTLE_ENDIAN 0
+#define _YUGA_BIG_ENDIAN 1
+#elif __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+#define _YUGA_LITTLE_ENDIAN 1
+#define _YUGA_BIG_ENDIAN 0
+#endif // __BYTE_ORDER__
+
+#else // Compilers other than Clang or GCC.
+
+#if defined(__SVR4) && defined(__sun)
+#include <sys/byteorder.h>
+
+#if defined(_BIG_ENDIAN)
+#define _YUGA_LITTLE_ENDIAN 0
+#define _YUGA_BIG_ENDIAN 1
+#elif defined(_LITTLE_ENDIAN)
+#define _YUGA_LITTLE_ENDIAN 1
+#define _YUGA_BIG_ENDIAN 0
+#else // !_LITTLE_ENDIAN
+#error "unknown endianness"
+#endif // !_LITTLE_ENDIAN
+
+#endif // Solaris and AuroraUX.
+
+// ..
+
+#if defined(__FreeBSD__) || defined(__NetBSD__) || defined(__DragonFly__) || \
+ defined(__minix)
+#include <sys/endian.h>
+
+#if _BYTE_ORDER == _BIG_ENDIAN
+#define _YUGA_LITTLE_ENDIAN 0
+#define _YUGA_BIG_ENDIAN 1
+#elif _BYTE_ORDER == _LITTLE_ENDIAN
+#define _YUGA_LITTLE_ENDIAN 1
+#define _YUGA_BIG_ENDIAN 0
+#endif // _BYTE_ORDER
+
+#endif // *BSD
+
+#if defined(__OpenBSD__)
+#include <machine/endian.h>
+
+#if _BYTE_ORDER == _BIG_ENDIAN
+#define _YUGA_LITTLE_ENDIAN 0
+#define _YUGA_BIG_ENDIAN 1
+#elif _BYTE_ORDER == _LITTLE_ENDIAN
+#define _YUGA_LITTLE_ENDIAN 1
+#define _YUGA_BIG_ENDIAN 0
+#endif // _BYTE_ORDER
+
+#endif // OpenBSD
+
+// ..
+
+// Mac OSX has __BIG_ENDIAN__ or __LITTLE_ENDIAN__ automatically set by the
+// compiler (at least with GCC).
+#if defined(__APPLE__) || defined(__ellcc__)
+
+#ifdef __BIG_ENDIAN__
+#if __BIG_ENDIAN__
+#define _YUGA_LITTLE_ENDIAN 0
+#define _YUGA_BIG_ENDIAN 1
+#endif
+#endif // __BIG_ENDIAN__
+
+#ifdef __LITTLE_ENDIAN__
+#if __LITTLE_ENDIAN__
+#define _YUGA_LITTLE_ENDIAN 1
+#define _YUGA_BIG_ENDIAN 0
+#endif
+#endif // __LITTLE_ENDIAN__
+
+#endif // Mac OSX
+
+// ..
+
+#if defined(_WIN32)
+
+#define _YUGA_LITTLE_ENDIAN 1
+#define _YUGA_BIG_ENDIAN 0
+
+#endif // Windows
+
+#endif // Clang or GCC.
+
+// .
+
+#if !defined(_YUGA_LITTLE_ENDIAN) || !defined(_YUGA_BIG_ENDIAN)
+#error Unable to determine endian
+#endif // Check we found an endianness correctly.
+
+#endif // INT_ENDIANNESS_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/int_endianness.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
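The _YUGA_* macros are compile-time constants, so consumers pay nothing at
run time. A quick self-check that the detection above agrees with the byte
order actually observed in memory might look like this (hypothetical helper,
assuming the header is on the include path):

    #include <stdint.h>
    #include <string.h>
    #include "int_endianness.h"

    // Returns 1 when the compile-time detection matches the machine.
    static int endianness_self_check(void) {
      uint32_t probe = 1;
      unsigned char first_byte;
      memcpy(&first_byte, &probe, 1); // lowest-addressed byte of probe
      return (first_byte == 1) == (_YUGA_LITTLE_ENDIAN == 1);
    }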
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/int_lib.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/int_lib.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/int_lib.h (revision 351984)
@@ -0,0 +1,141 @@
+//===-- int_lib.h - configuration header for compiler-rt -----------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a configuration header for compiler-rt.
+// This file is not part of the interface of this library.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef INT_LIB_H
+#define INT_LIB_H
+
+// Assumption: Signed integral is 2's complement.
+// Assumption: Right shift of signed negative is arithmetic shift.
+// Assumption: Endianness is little or big (not mixed).
+
+// ABI macro definitions
+
+#if __ARM_EABI__
+#ifdef COMPILER_RT_ARMHF_TARGET
+#define COMPILER_RT_ABI
+#else
+#define COMPILER_RT_ABI __attribute__((__pcs__("aapcs")))
+#endif
+#else
+#define COMPILER_RT_ABI
+#endif
+
+#define AEABI_RTABI __attribute__((__pcs__("aapcs")))
+
+#if defined(_MSC_VER) && !defined(__clang__)
+#define ALWAYS_INLINE __forceinline
+#define NOINLINE __declspec(noinline)
+#define NORETURN __declspec(noreturn)
+#define UNUSED
+#else
+#define ALWAYS_INLINE __attribute__((always_inline))
+#define NOINLINE __attribute__((noinline))
+#define NORETURN __attribute__((noreturn))
+#define UNUSED __attribute__((unused))
+#endif
+
+#define STR(a) #a
+#define XSTR(a) STR(a)
+#define SYMBOL_NAME(name) XSTR(__USER_LABEL_PREFIX__) #name
+
+#if defined(__ELF__) || defined(__MINGW32__) || defined(__wasm__)
+#define COMPILER_RT_ALIAS(name, aliasname) \
+ COMPILER_RT_ABI __typeof(name) aliasname __attribute__((__alias__(#name)));
+#elif defined(__APPLE__)
+#define COMPILER_RT_ALIAS(name, aliasname) \
+ __asm__(".globl " SYMBOL_NAME(aliasname)); \
+ __asm__(SYMBOL_NAME(aliasname) " = " SYMBOL_NAME(name)); \
+ COMPILER_RT_ABI __typeof(name) aliasname;
+#elif defined(_WIN32)
+#define COMPILER_RT_ALIAS(name, aliasname)
+#else
+#error Unsupported target
+#endif
+
+#if defined(__NetBSD__) && (defined(_KERNEL) || defined(_STANDALONE))
+//
+// Kernel and boot environment can't use normal headers,
+// so use the equivalent system headers.
+//
+#include <machine/limits.h>
+#include <sys/stdint.h>
+#include <sys/types.h>
+#else
+// Include the standard compiler builtin headers we use functionality from.
+#include <float.h>
+#include <limits.h>
+#include <stdbool.h>
+#include <stdint.h>
+#endif
+
+// Include the commonly used internal type definitions.
+#include "int_types.h"
+
+// Include internal utility function declarations.
+#include "int_util.h"
+
+COMPILER_RT_ABI si_int __paritysi2(si_int a);
+COMPILER_RT_ABI si_int __paritydi2(di_int a);
+
+COMPILER_RT_ABI di_int __divdi3(di_int a, di_int b);
+COMPILER_RT_ABI si_int __divsi3(si_int a, si_int b);
+COMPILER_RT_ABI su_int __udivsi3(su_int n, su_int d);
+
+COMPILER_RT_ABI su_int __udivmodsi4(su_int a, su_int b, su_int *rem);
+COMPILER_RT_ABI du_int __udivmoddi4(du_int a, du_int b, du_int *rem);
+#ifdef CRT_HAS_128BIT
+COMPILER_RT_ABI si_int __clzti2(ti_int a);
+COMPILER_RT_ABI tu_int __udivmodti4(tu_int a, tu_int b, tu_int *rem);
+#endif
+
+// Definitions for builtins unavailable on MSVC
+#if defined(_MSC_VER) && !defined(__clang__)
+#include <intrin.h>
+
+uint32_t __inline __builtin_ctz(uint32_t value) {
+ unsigned long trailing_zero = 0;
+ if (_BitScanForward(&trailing_zero, value))
+ return trailing_zero;
+ return 32;
+}
+
+uint32_t __inline __builtin_clz(uint32_t value) {
+ unsigned long leading_zero = 0;
+ if (_BitScanReverse(&leading_zero, value))
+ return 31 - leading_zero;
+ return 32;
+}
+
+#if defined(_M_ARM) || defined(_M_X64)
+uint32_t __inline __builtin_clzll(uint64_t value) {
+ unsigned long leading_zero = 0;
+ if (_BitScanReverse64(&leading_zero, value))
+ return 63 - leading_zero;
+ return 64;
+}
+#else
+uint32_t __inline __builtin_clzll(uint64_t value) {
+ if (value == 0)
+ return 64;
+ uint32_t msh = (uint32_t)(value >> 32);
+ uint32_t lsh = (uint32_t)(value & 0xFFFFFFFF);
+ if (msh != 0)
+ return __builtin_clz(msh);
+ return 32 + __builtin_clz(lsh);
+}
+#endif
+
+#define __builtin_clzl __builtin_clzll
+#endif // defined(_MSC_VER) && !defined(__clang__)
+
+#endif // INT_LIB_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/int_lib.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
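The MSVC shims above deliberately differ from the GCC builtins on one point:
a zero input yields the operand width instead of being undefined, which is
what falls out of _BitScanForward/_BitScanReverse reporting "no bit found".
A few spot checks (a sketch; the zero-input lines hold only under these
shims, since GCC leaves that case undefined):

    #include <assert.h>
    #include <stdint.h>

    static void bit_scan_spot_checks(void) {
      assert(__builtin_ctz(8u) == 3);            // lowest set bit of 0b1000
      assert(__builtin_clz(1u) == 31);           // 31 zeros above bit 0
      assert(__builtin_clzll(1ull << 40) == 23); // 63 - 40
      assert(__builtin_ctz(0u) == 32);           // shim-specific behavior
      assert(__builtin_clzll(0ull) == 64);       // shim-specific behavior
    }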
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/int_math.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/int_math.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/int_math.h (revision 351984)
@@ -0,0 +1,106 @@
+//===-- int_math.h - internal math inlines --------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is not part of the interface of this library.
+//
+// This file defines substitutes for the libm functions used in some of the
+// compiler-rt implementations, defined in such a way that there is not a direct
+// dependency on libm or math.h. Instead, we use the compiler builtin versions
+// where available. This reduces our dependencies on the system SDK by foisting
+// the responsibility onto the compiler.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef INT_MATH_H
+#define INT_MATH_H
+
+#ifndef __has_builtin
+#define __has_builtin(x) 0
+#endif
+
+#if defined(_MSC_VER) && !defined(__clang__)
+#include <math.h>
+#include <stdlib.h>
+#endif
+
+#if defined(_MSC_VER) && !defined(__clang__)
+#define CRT_INFINITY INFINITY
+#else
+#define CRT_INFINITY __builtin_huge_valf()
+#endif
+
+#if defined(_MSC_VER) && !defined(__clang__)
+#define crt_isfinite(x) _finite((x))
+#define crt_isinf(x) !_finite((x))
+#define crt_isnan(x) _isnan((x))
+#else
+// Define crt_isfinite in terms of the builtin if available, otherwise provide
+// an alternate version in terms of our other functions. This supports some
+// versions of GCC which didn't have __builtin_isfinite.
+#if __has_builtin(__builtin_isfinite)
+#define crt_isfinite(x) __builtin_isfinite((x))
+#elif defined(__GNUC__)
+#define crt_isfinite(x) \
+ __extension__(({ \
+ __typeof((x)) x_ = (x); \
+ !crt_isinf(x_) && !crt_isnan(x_); \
+ }))
+#else
+#error "Do not know how to check for infinity"
+#endif // __has_builtin(__builtin_isfinite)
+#define crt_isinf(x) __builtin_isinf((x))
+#define crt_isnan(x) __builtin_isnan((x))
+#endif // _MSC_VER
+
+#if defined(_MSC_VER) && !defined(__clang__)
+#define crt_copysign(x, y) copysign((x), (y))
+#define crt_copysignf(x, y) copysignf((x), (y))
+#define crt_copysignl(x, y) copysignl((x), (y))
+#else
+#define crt_copysign(x, y) __builtin_copysign((x), (y))
+#define crt_copysignf(x, y) __builtin_copysignf((x), (y))
+#define crt_copysignl(x, y) __builtin_copysignl((x), (y))
+#endif
+
+#if defined(_MSC_VER) && !defined(__clang__)
+#define crt_fabs(x) fabs((x))
+#define crt_fabsf(x) fabsf((x))
+#define crt_fabsl(x) fabs((x))
+#else
+#define crt_fabs(x) __builtin_fabs((x))
+#define crt_fabsf(x) __builtin_fabsf((x))
+#define crt_fabsl(x) __builtin_fabsl((x))
+#endif
+
+#if defined(_MSC_VER) && !defined(__clang__)
+#define crt_fmax(x, y) __max((x), (y))
+#define crt_fmaxf(x, y) __max((x), (y))
+#define crt_fmaxl(x, y) __max((x), (y))
+#else
+#define crt_fmax(x, y) __builtin_fmax((x), (y))
+#define crt_fmaxf(x, y) __builtin_fmaxf((x), (y))
+#define crt_fmaxl(x, y) __builtin_fmaxl((x), (y))
+#endif
+
+#if defined(_MSC_VER) && !defined(__clang__)
+#define crt_logbl(x) logbl((x))
+#else
+#define crt_logbl(x) __builtin_logbl((x))
+#endif
+
+#if defined(_MSC_VER) && !defined(__clang__)
+#define crt_scalbn(x, y) scalbn((x), (y))
+#define crt_scalbnf(x, y) scalbnf((x), (y))
+#define crt_scalbnl(x, y) scalbnl((x), (y))
+#else
+#define crt_scalbn(x, y) __builtin_scalbn((x), (y))
+#define crt_scalbnf(x, y) __builtin_scalbnf((x), (y))
+#define crt_scalbnl(x, y) __builtin_scalbnl((x), (y))
+#endif
+
+#endif // INT_MATH_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/int_math.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
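Library code calls these crt_* wrappers instead of including math.h, keeping
the only dependency on compiler builtins. A hypothetical use, clamping a
value to the finite range (DBL_MAX comes from float.h; the function is
illustrative, not part of the library):

    #include <float.h>
    #include "int_math.h"

    // Map NaN to zero and infinities to the largest finite double of the
    // same sign, using only the wrappers defined above.
    static double clamp_to_finite(double x) {
      if (crt_isnan(x))
        return 0.0;
      if (!crt_isfinite(x))
        return crt_copysign(DBL_MAX, x);
      return x;
    }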
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/int_types.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/int_types.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/int_types.h (revision 351984)
@@ -0,0 +1,174 @@
+//===-- int_types.h - type definitions for compiler-rt -------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is not part of the interface of this library.
+//
+// This file defines various standard types, most importantly a number of unions
+// used to access parts of larger types.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef INT_TYPES_H
+#define INT_TYPES_H
+
+#include "int_endianness.h"
+
+// si_int is defined in Linux sysroot's asm-generic/siginfo.h
+#ifdef si_int
+#undef si_int
+#endif
+typedef int si_int;
+typedef unsigned su_int;
+
+typedef long long di_int;
+typedef unsigned long long du_int;
+
+typedef union {
+ di_int all;
+ struct {
+#if _YUGA_LITTLE_ENDIAN
+ su_int low;
+ si_int high;
+#else
+ si_int high;
+ su_int low;
+#endif // _YUGA_LITTLE_ENDIAN
+ } s;
+} dwords;
+
+typedef union {
+ du_int all;
+ struct {
+#if _YUGA_LITTLE_ENDIAN
+ su_int low;
+ su_int high;
+#else
+ su_int high;
+ su_int low;
+#endif // _YUGA_LITTLE_ENDIAN
+ } s;
+} udwords;
+
+#if defined(__LP64__) || defined(__wasm__) || defined(__mips64) || \
+ defined(__riscv) || defined(_WIN64)
+#define CRT_HAS_128BIT
+#endif
+
+// MSVC doesn't have a working 128bit integer type. Users should really compile
+// compiler-rt with clang, but if they happen to be doing a standalone build for
+// asan or something else, disable the 128 bit parts so things sort of work.
+#if defined(_MSC_VER) && !defined(__clang__)
+#undef CRT_HAS_128BIT
+#endif
+
+#ifdef CRT_HAS_128BIT
+typedef int ti_int __attribute__((mode(TI)));
+typedef unsigned tu_int __attribute__((mode(TI)));
+
+typedef union {
+ ti_int all;
+ struct {
+#if _YUGA_LITTLE_ENDIAN
+ du_int low;
+ di_int high;
+#else
+ di_int high;
+ du_int low;
+#endif // _YUGA_LITTLE_ENDIAN
+ } s;
+} twords;
+
+typedef union {
+ tu_int all;
+ struct {
+#if _YUGA_LITTLE_ENDIAN
+ du_int low;
+ du_int high;
+#else
+ du_int high;
+ du_int low;
+#endif // _YUGA_LITTLE_ENDIAN
+ } s;
+} utwords;
+
+static __inline ti_int make_ti(di_int h, di_int l) {
+ twords r;
+ r.s.high = h;
+ r.s.low = l;
+ return r.all;
+}
+
+static __inline tu_int make_tu(du_int h, du_int l) {
+ utwords r;
+ r.s.high = h;
+ r.s.low = l;
+ return r.all;
+}
+
+#endif // CRT_HAS_128BIT
+
+typedef union {
+ su_int u;
+ float f;
+} float_bits;
+
+typedef union {
+ udwords u;
+ double f;
+} double_bits;
+
+typedef struct {
+#if _YUGA_LITTLE_ENDIAN
+ udwords low;
+ udwords high;
+#else
+ udwords high;
+ udwords low;
+#endif // _YUGA_LITTLE_ENDIAN
+} uqwords;
+
+// Check if the target supports 80 bit extended precision long doubles.
+// Notably, on x86 Windows, MSVC only provides a 64-bit long double, but GCC
+// still makes it 80 bits. Clang will match whatever compiler it is trying to
+// be compatible with.
+#if ((defined(__i386__) || defined(__x86_64__)) && !defined(_MSC_VER)) || \
+ defined(__m68k__) || defined(__ia64__)
+#define HAS_80_BIT_LONG_DOUBLE 1
+#else
+#define HAS_80_BIT_LONG_DOUBLE 0
+#endif
+
+typedef union {
+ uqwords u;
+ long double f;
+} long_double_bits;
+
+#if __STDC_VERSION__ >= 199901L
+typedef float _Complex Fcomplex;
+typedef double _Complex Dcomplex;
+typedef long double _Complex Lcomplex;
+
+#define COMPLEX_REAL(x) __real__(x)
+#define COMPLEX_IMAGINARY(x) __imag__(x)
+#else
+typedef struct {
+ float real, imaginary;
+} Fcomplex;
+
+typedef struct {
+ double real, imaginary;
+} Dcomplex;
+
+typedef struct {
+ long double real, imaginary;
+} Lcomplex;
+
+#define COMPLEX_REAL(x) (x).real
+#define COMPLEX_IMAGINARY(x) (x).imaginary
+#endif
+#endif // INT_TYPES_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/int_types.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
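These unions are how the builtins split wide integers into halves without
shifting: code assigns .all and reads .s.low/.s.high, and the struct layout
flips with _YUGA_LITTLE_ENDIAN so the names stay correct on either byte
order. A small illustration (hypothetical helper):

    #include "int_types.h"

    // Endian-safe access to the high half of a 64-bit value.
    static su_int high_half(di_int x) {
      dwords w;
      w.all = x;
      return (su_int)w.s.high; // correct on big- and little-endian targets
    }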
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/int_util.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/int_util.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/int_util.c (revision 351984)
@@ -0,0 +1,67 @@
+//===-- int_util.c - Implement internal utilities -------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+// NOTE: The definitions in this file are declared weak because we want clients
+// to be able to arbitrarily package individual functions into separate .a
+// files. If we did not declare these weak, some link situations might end up
+// seeing duplicate strong definitions of the same symbol.
+//
+// We can't use weak symbols for kernel use (the kernel may not support them),
+// but we currently expect that when built for kernel use all the functionality
+// is packaged into a single library.
+
+#ifdef KERNEL_USE
+
+NORETURN extern void panic(const char *, ...);
+#ifndef _WIN32
+__attribute__((visibility("hidden")))
+#endif
+void __compilerrt_abort_impl(const char *file, int line, const char *function) {
+ panic("%s:%d: abort in %s", file, line, function);
+}
+
+#elif __APPLE__
+
+// from libSystem.dylib
+NORETURN extern void __assert_rtn(const char *func, const char *file, int line,
+ const char *message);
+
+#ifndef _WIN32
+__attribute__((weak))
+__attribute__((visibility("hidden")))
+#endif
+void __compilerrt_abort_impl(const char *file, int line, const char *function) {
+ __assert_rtn(function, file, line, "libcompiler_rt abort");
+}
+
+#elif __Fuchsia__
+
+#ifndef _WIN32
+__attribute__((weak))
+__attribute__((visibility("hidden")))
+#endif
+void __compilerrt_abort_impl(const char *file, int line, const char *function) {
+ __builtin_trap();
+}
+
+#else
+
+// Get the system definition of abort()
+#include <stdlib.h>
+
+#ifndef _WIN32
+__attribute__((weak))
+__attribute__((visibility("hidden")))
+#endif
+void __compilerrt_abort_impl(const char *file, int line, const char *function) {
+ abort();
+}
+
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/int_util.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/int_util.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/int_util.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/int_util.h (revision 351984)
@@ -0,0 +1,31 @@
+//===-- int_util.h - internal utility functions ---------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is not part of the interface of this library.
+//
+// This file defines non-inline utilities which are available for use in the
+// library. The function definitions themselves are all contained in int_util.c
+// which will always be compiled into any compiler-rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef INT_UTIL_H
+#define INT_UTIL_H
+
+/// \brief Trigger a program abort (or panic for kernel code).
+#define compilerrt_abort() __compilerrt_abort_impl(__FILE__, __LINE__, __func__)
+
+NORETURN void __compilerrt_abort_impl(const char *file, int line,
+ const char *function);
+
+#define COMPILE_TIME_ASSERT(expr) COMPILE_TIME_ASSERT1(expr, __COUNTER__)
+#define COMPILE_TIME_ASSERT1(expr, cnt) COMPILE_TIME_ASSERT2(expr, cnt)
+#define COMPILE_TIME_ASSERT2(expr, cnt) \
+ typedef char ct_assert_##cnt[(expr) ? 1 : -1] UNUSED
+
+#endif // INT_UTIL_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/int_util.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
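COMPILE_TIME_ASSERT is a pre-C11 static assertion: a false predicate makes
the array size negative, which fails to compile, and __COUNTER__ keeps the
typedef names unique across uses. For instance:

    #include "int_lib.h" // brings in int_types.h and this header

    // Passes: di_int really is two si_ints wide.
    COMPILE_TIME_ASSERT(sizeof(di_int) == 2 * sizeof(si_int));

    // For some counter value N, this expands to roughly:
    //   typedef char ct_assert_N[(sizeof(di_int) == 2 * sizeof(si_int))
    //                            ? 1 : -1] UNUSED;
    // A failing predicate would declare a char[-1] and stop compilation.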
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/lshrdi3.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/lshrdi3.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/lshrdi3.c (revision 351984)
@@ -0,0 +1,38 @@
+//===-- lshrdi3.c - Implement __lshrdi3 -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __lshrdi3 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+// Returns: logical a >> b
+
+// Precondition: 0 <= b < bits_in_dword
+
+COMPILER_RT_ABI di_int __lshrdi3(di_int a, si_int b) {
+ const int bits_in_word = (int)(sizeof(si_int) * CHAR_BIT);
+ udwords input;
+ udwords result;
+ input.all = a;
+ if (b & bits_in_word) /* bits_in_word <= b < bits_in_dword */ {
+ result.s.high = 0;
+ result.s.low = input.s.high >> (b - bits_in_word);
+ } else /* 0 <= b < bits_in_word */ {
+ if (b == 0)
+ return a;
+ result.s.high = input.s.high >> b;
+ result.s.low = (input.s.high << (bits_in_word - b)) | (input.s.low >> b);
+ }
+ return result.all;
+}
+
+#if defined(__ARM_EABI__)
+COMPILER_RT_ALIAS(__lshrdi3, __aeabi_llsr)
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/lshrdi3.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
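A worked value for the upper branch: shifting bit 63 down by 33 takes the
b & bits_in_word path, so the result is input.s.high >> (33 - 32). A sketch
(the prototype is restated here, as mingw_fixfloat.c below does for its
imports):

    #include <assert.h>
    #include "int_lib.h"

    COMPILER_RT_ABI di_int __lshrdi3(di_int a, si_int b);

    static void lshrdi3_example(void) {
      assert(__lshrdi3((di_int)((du_int)1 << 63), 33) == (di_int)1 << 30);
    }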
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/lshrti3.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/lshrti3.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/lshrti3.c (revision 351984)
@@ -0,0 +1,38 @@
+//===-- lshrti3.c - Implement __lshrti3 -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __lshrti3 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+#ifdef CRT_HAS_128BIT
+
+// Returns: logical a >> b
+
+// Precondition: 0 <= b < bits_in_tword
+
+COMPILER_RT_ABI ti_int __lshrti3(ti_int a, si_int b) {
+ const int bits_in_dword = (int)(sizeof(di_int) * CHAR_BIT);
+ utwords input;
+ utwords result;
+ input.all = a;
+ if (b & bits_in_dword) /* bits_in_dword <= b < bits_in_tword */ {
+ result.s.high = 0;
+ result.s.low = input.s.high >> (b - bits_in_dword);
+ } else /* 0 <= b < bits_in_dword */ {
+ if (b == 0)
+ return a;
+ result.s.high = input.s.high >> b;
+ result.s.low = (input.s.high << (bits_in_dword - b)) | (input.s.low >> b);
+ }
+ return result.all;
+}
+
+#endif // CRT_HAS_128BIT
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/lshrti3.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/mingw_fixfloat.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/mingw_fixfloat.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/mingw_fixfloat.c (revision 351984)
@@ -0,0 +1,34 @@
+//===-- mingw_fixfloat.c - Wrap int/float conversions for arm/windows -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+COMPILER_RT_ABI di_int __fixdfdi(double a);
+COMPILER_RT_ABI di_int __fixsfdi(float a);
+COMPILER_RT_ABI du_int __fixunsdfdi(double a);
+COMPILER_RT_ABI du_int __fixunssfdi(float a);
+COMPILER_RT_ABI double __floatdidf(di_int a);
+COMPILER_RT_ABI float __floatdisf(di_int a);
+COMPILER_RT_ABI double __floatundidf(du_int a);
+COMPILER_RT_ABI float __floatundisf(du_int a);
+
+COMPILER_RT_ABI di_int __dtoi64(double a) { return __fixdfdi(a); }
+
+COMPILER_RT_ABI di_int __stoi64(float a) { return __fixsfdi(a); }
+
+COMPILER_RT_ABI du_int __dtou64(double a) { return __fixunsdfdi(a); }
+
+COMPILER_RT_ABI du_int __stou64(float a) { return __fixunssfdi(a); }
+
+COMPILER_RT_ABI double __i64tod(di_int a) { return __floatdidf(a); }
+
+COMPILER_RT_ABI float __i64tos(di_int a) { return __floatdisf(a); }
+
+COMPILER_RT_ABI double __u64tod(du_int a) { return __floatundidf(a); }
+
+COMPILER_RT_ABI float __u64tos(du_int a) { return __floatundisf(a); }
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/mingw_fixfloat.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/moddi3.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/moddi3.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/moddi3.c (revision 351984)
@@ -0,0 +1,26 @@
+//===-- moddi3.c - Implement __moddi3 -------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __moddi3 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+// Returns: a % b
+
+COMPILER_RT_ABI di_int __moddi3(di_int a, di_int b) {
+ const int bits_in_dword_m1 = (int)(sizeof(di_int) * CHAR_BIT) - 1;
+ di_int s = b >> bits_in_dword_m1; // s = b < 0 ? -1 : 0
+ b = (b ^ s) - s; // negate if s == -1
+ s = a >> bits_in_dword_m1; // s = a < 0 ? -1 : 0
+ a = (a ^ s) - s; // negate if s == -1
+ du_int r;
+ __udivmoddi4(a, b, &r);
+ return ((di_int)r ^ s) - s; // negate if s == -1
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/moddi3.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/modsi3.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/modsi3.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/modsi3.c (revision 351984)
@@ -0,0 +1,19 @@
+//===-- modsi3.c - Implement __modsi3 -------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __modsi3 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+// Returns: a % b
+
+COMPILER_RT_ABI si_int __modsi3(si_int a, si_int b) {
+ return a - __divsi3(a, b) * b;
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/modsi3.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
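Because __divsi3 truncates toward zero, the remainder computed this way
always takes the sign of the dividend, matching C's % operator. Two worked
values (a sketch; the prototype is restated for clarity):

    #include <assert.h>
    #include "int_lib.h"

    COMPILER_RT_ABI si_int __modsi3(si_int a, si_int b);

    static void modsi3_examples(void) {
      assert(__modsi3(-7, 3) == -1); // -7 - (-2)*3: sign follows the dividend
      assert(__modsi3(7, -3) == 1);  //  7 - (-2)*(-3)
    }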
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/modti3.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/modti3.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/modti3.c (revision 351984)
@@ -0,0 +1,30 @@
+//===-- modti3.c - Implement __modti3 -------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __modti3 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+#ifdef CRT_HAS_128BIT
+
+// Returns: a % b
+
+COMPILER_RT_ABI ti_int __modti3(ti_int a, ti_int b) {
+ const int bits_in_tword_m1 = (int)(sizeof(ti_int) * CHAR_BIT) - 1;
+ ti_int s = b >> bits_in_tword_m1; // s = b < 0 ? -1 : 0
+ b = (b ^ s) - s; // negate if s == -1
+ s = a >> bits_in_tword_m1; // s = a < 0 ? -1 : 0
+ a = (a ^ s) - s; // negate if s == -1
+ tu_int r;
+ __udivmodti4(a, b, &r);
+ return ((ti_int)r ^ s) - s; // negate if s == -1
+}
+
+#endif // CRT_HAS_128BIT
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/modti3.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/muldc3.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/muldc3.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/muldc3.c (revision 351984)
@@ -0,0 +1,65 @@
+//===-- muldc3.c - Implement __muldc3 -------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __muldc3 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+#include "int_math.h"
+
+// Returns: the product of a + ib and c + id
+
+COMPILER_RT_ABI Dcomplex __muldc3(double __a, double __b, double __c,
+ double __d) {
+ double __ac = __a * __c;
+ double __bd = __b * __d;
+ double __ad = __a * __d;
+ double __bc = __b * __c;
+ Dcomplex z;
+ COMPLEX_REAL(z) = __ac - __bd;
+ COMPLEX_IMAGINARY(z) = __ad + __bc;
+ if (crt_isnan(COMPLEX_REAL(z)) && crt_isnan(COMPLEX_IMAGINARY(z))) {
+ int __recalc = 0;
+ if (crt_isinf(__a) || crt_isinf(__b)) {
+ __a = crt_copysign(crt_isinf(__a) ? 1 : 0, __a);
+ __b = crt_copysign(crt_isinf(__b) ? 1 : 0, __b);
+ if (crt_isnan(__c))
+ __c = crt_copysign(0, __c);
+ if (crt_isnan(__d))
+ __d = crt_copysign(0, __d);
+ __recalc = 1;
+ }
+ if (crt_isinf(__c) || crt_isinf(__d)) {
+ __c = crt_copysign(crt_isinf(__c) ? 1 : 0, __c);
+ __d = crt_copysign(crt_isinf(__d) ? 1 : 0, __d);
+ if (crt_isnan(__a))
+ __a = crt_copysign(0, __a);
+ if (crt_isnan(__b))
+ __b = crt_copysign(0, __b);
+ __recalc = 1;
+ }
+ if (!__recalc && (crt_isinf(__ac) || crt_isinf(__bd) || crt_isinf(__ad) ||
+ crt_isinf(__bc))) {
+ if (crt_isnan(__a))
+ __a = crt_copysign(0, __a);
+ if (crt_isnan(__b))
+ __b = crt_copysign(0, __b);
+ if (crt_isnan(__c))
+ __c = crt_copysign(0, __c);
+ if (crt_isnan(__d))
+ __d = crt_copysign(0, __d);
+ __recalc = 1;
+ }
+ if (__recalc) {
+ COMPLEX_REAL(z) = CRT_INFINITY * (__a * __c - __b * __d);
+ COMPLEX_IMAGINARY(z) = CRT_INFINITY * (__a * __d + __b * __c);
+ }
+ }
+ return z;
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/muldc3.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
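The recalculation pass implements C Annex G: a product with an infinite
operand must stay infinite even when the naive partial products poison both
parts with NaN. A sketch of a case that needs it (INFINITY from math.h; the
prototype is restated for clarity):

    #include <math.h>
    #include "int_lib.h"
    #include "int_math.h"

    COMPILER_RT_ABI Dcomplex __muldc3(double a, double b, double c, double d);

    // (inf + inf*i) * (1 + 0*i): naively bd = inf*0 and ad = inf*0 are NaN,
    // so both ac - bd and ad + bc come out NaN. The fixup rescales the
    // infinite operand to (1 + 1*i) and returns inf + inf*i as required.
    static Dcomplex annex_g_example(void) {
      return __muldc3(INFINITY, INFINITY, 1.0, 0.0);
    }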
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/muldf3.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/muldf3.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/muldf3.c (revision 351984)
@@ -0,0 +1,25 @@
+//===-- lib/muldf3.c - Double-precision multiplication ------------*- C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements double-precision soft-float multiplication
+// with the IEEE-754 default rounding (to nearest, ties to even).
+//
+//===----------------------------------------------------------------------===//
+
+#define DOUBLE_PRECISION
+#include "fp_mul_impl.inc"
+
+COMPILER_RT_ABI fp_t __muldf3(fp_t a, fp_t b) { return __mulXf3__(a, b); }
+
+#if defined(__ARM_EABI__)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+AEABI_RTABI fp_t __aeabi_dmul(fp_t a, fp_t b) { return __muldf3(a, b); }
+#else
+COMPILER_RT_ALIAS(__muldf3, __aeabi_dmul)
+#endif
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/muldf3.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/muldi3.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/muldi3.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/muldi3.c (revision 351984)
@@ -0,0 +1,51 @@
+//===-- muldi3.c - Implement __muldi3 -------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __muldi3 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+// Returns: a * b
+
+static di_int __muldsi3(su_int a, su_int b) {
+ dwords r;
+ const int bits_in_word_2 = (int)(sizeof(si_int) * CHAR_BIT) / 2;
+ const su_int lower_mask = (su_int)~0 >> bits_in_word_2;
+ r.s.low = (a & lower_mask) * (b & lower_mask);
+ su_int t = r.s.low >> bits_in_word_2;
+ r.s.low &= lower_mask;
+ t += (a >> bits_in_word_2) * (b & lower_mask);
+ r.s.low += (t & lower_mask) << bits_in_word_2;
+ r.s.high = t >> bits_in_word_2;
+ t = r.s.low >> bits_in_word_2;
+ r.s.low &= lower_mask;
+ t += (b >> bits_in_word_2) * (a & lower_mask);
+ r.s.low += (t & lower_mask) << bits_in_word_2;
+ r.s.high += t >> bits_in_word_2;
+ r.s.high += (a >> bits_in_word_2) * (b >> bits_in_word_2);
+ return r.all;
+}
+
+// Returns: a * b
+
+COMPILER_RT_ABI di_int __muldi3(di_int a, di_int b) {
+ dwords x;
+ x.all = a;
+ dwords y;
+ y.all = b;
+ dwords r;
+ r.all = __muldsi3(x.s.low, y.s.low);
+ r.s.high += x.s.high * y.s.low + x.s.low * y.s.high;
+ return r.all;
+}
+
+#if defined(__ARM_EABI__)
+COMPILER_RT_ALIAS(__muldi3, __aeabi_lmul)
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/muldi3.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
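__muldsi3 above is the widening 32x32 -> 64 step, built from 16-bit partial
products so no individual multiply can overflow; __muldi3 then folds in the
truncated cross terms, mirroring the i386 assembly version earlier in this
import. The underlying identity, as a sketch:

    #include <stdint.h>

    // a = aH*2^16 + aL and b = bH*2^16 + bL, so
    // a*b = aH*bH*2^32 + (aH*bL + aL*bH)*2^16 + aL*bL, exact in 64 bits.
    static uint64_t widening_mul_model(uint32_t a, uint32_t b) {
      uint32_t aL = a & 0xFFFF, aH = a >> 16;
      uint32_t bL = b & 0xFFFF, bH = b >> 16;
      uint64_t mid = (uint64_t)aH * bL + (uint64_t)aL * bH; // cannot overflow
      return (uint64_t)aL * bL + (mid << 16) + ((uint64_t)aH * bH << 32);
    }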
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/mulodi4.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/mulodi4.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/mulodi4.c (revision 351984)
@@ -0,0 +1,49 @@
+//===-- mulodi4.c - Implement __mulodi4 -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __mulodi4 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+// Returns: a * b
+
+// Effects: sets *overflow to 1 if a * b overflows
+
+COMPILER_RT_ABI di_int __mulodi4(di_int a, di_int b, int *overflow) {
+ const int N = (int)(sizeof(di_int) * CHAR_BIT);
+ const di_int MIN = (di_int)1 << (N - 1);
+ const di_int MAX = ~MIN;
+ *overflow = 0;
+ di_int result = a * b;
+ if (a == MIN) {
+ if (b != 0 && b != 1)
+ *overflow = 1;
+ return result;
+ }
+ if (b == MIN) {
+ if (a != 0 && a != 1)
+ *overflow = 1;
+ return result;
+ }
+ di_int sa = a >> (N - 1);
+ di_int abs_a = (a ^ sa) - sa;
+ di_int sb = b >> (N - 1);
+ di_int abs_b = (b ^ sb) - sb;
+ if (abs_a < 2 || abs_b < 2)
+ return result;
+ if (sa == sb) {
+ if (abs_a > MAX / abs_b)
+ *overflow = 1;
+ } else {
+ if (abs_a > MIN / -abs_b)
+ *overflow = 1;
+ }
+ return result;
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/mulodi4.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
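Unlike the __mulv* routines further down, __mulodi4 never aborts: it always returns the (wrapped) product and reports overflow through the out parameter, which is the shape checked-arithmetic builtins such as __builtin_mul_overflow want on targets lacking a hardware widening multiply. A caller sketch, assuming the link step pulls in a builtins archive that actually exports the symbol:

#include <assert.h>
#include <stdint.h>

int64_t __mulodi4(int64_t a, int64_t b, int *overflow);  // compiler-rt export

int main(void) {
  int of = -1;
  (void)__mulodi4(INT64_MAX, 2, &of);  // wraps; overflow reported out-of-band
  assert(of == 1);
  of = -1;
  assert(__mulodi4(1000000, 1000000, &of) == 1000000000000LL && of == 0);
  return 0;
}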
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/mulosi4.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/mulosi4.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/mulosi4.c (revision 351984)
@@ -0,0 +1,49 @@
+//===-- mulosi4.c - Implement __mulosi4 -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __mulosi4 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+// Returns: a * b
+
+// Effects: sets *overflow to 1 if a * b overflows
+
+COMPILER_RT_ABI si_int __mulosi4(si_int a, si_int b, int *overflow) {
+ const int N = (int)(sizeof(si_int) * CHAR_BIT);
+ const si_int MIN = (si_int)1 << (N - 1);
+ const si_int MAX = ~MIN;
+ *overflow = 0;
+ si_int result = a * b;
+ if (a == MIN) {
+ if (b != 0 && b != 1)
+ *overflow = 1;
+ return result;
+ }
+ if (b == MIN) {
+ if (a != 0 && a != 1)
+ *overflow = 1;
+ return result;
+ }
+ si_int sa = a >> (N - 1);
+ si_int abs_a = (a ^ sa) - sa;
+ si_int sb = b >> (N - 1);
+ si_int abs_b = (b ^ sb) - sb;
+ if (abs_a < 2 || abs_b < 2)
+ return result;
+ if (sa == sb) {
+ if (abs_a > MAX / abs_b)
+ *overflow = 1;
+ } else {
+ if (abs_a > MIN / -abs_b)
+ *overflow = 1;
+ }
+ return result;
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/mulosi4.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/muloti4.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/muloti4.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/muloti4.c (revision 351984)
@@ -0,0 +1,53 @@
+//===-- muloti4.c - Implement __muloti4 -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __muloti4 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+#ifdef CRT_HAS_128BIT
+
+// Returns: a * b
+
+// Effects: sets *overflow to 1 if a * b overflows
+
+COMPILER_RT_ABI ti_int __muloti4(ti_int a, ti_int b, int *overflow) {
+ const int N = (int)(sizeof(ti_int) * CHAR_BIT);
+ const ti_int MIN = (ti_int)1 << (N - 1);
+ const ti_int MAX = ~MIN;
+ *overflow = 0;
+ ti_int result = a * b;
+ if (a == MIN) {
+ if (b != 0 && b != 1)
+ *overflow = 1;
+ return result;
+ }
+ if (b == MIN) {
+ if (a != 0 && a != 1)
+ *overflow = 1;
+ return result;
+ }
+ ti_int sa = a >> (N - 1);
+ ti_int abs_a = (a ^ sa) - sa;
+ ti_int sb = b >> (N - 1);
+ ti_int abs_b = (b ^ sb) - sb;
+ if (abs_a < 2 || abs_b < 2)
+ return result;
+ if (sa == sb) {
+ if (abs_a > MAX / abs_b)
+ *overflow = 1;
+ } else {
+ if (abs_a > MIN / -abs_b)
+ *overflow = 1;
+ }
+ return result;
+}
+
+#endif // CRT_HAS_128BIT
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/muloti4.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/mulsc3.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/mulsc3.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/mulsc3.c (revision 351984)
@@ -0,0 +1,64 @@
+//===-- mulsc3.c - Implement __mulsc3 -------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __mulsc3 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+#include "int_math.h"
+
+// Returns: the product of a + ib and c + id
+
+COMPILER_RT_ABI Fcomplex __mulsc3(float __a, float __b, float __c, float __d) {
+ float __ac = __a * __c;
+ float __bd = __b * __d;
+ float __ad = __a * __d;
+ float __bc = __b * __c;
+ Fcomplex z;
+ COMPLEX_REAL(z) = __ac - __bd;
+ COMPLEX_IMAGINARY(z) = __ad + __bc;
+ if (crt_isnan(COMPLEX_REAL(z)) && crt_isnan(COMPLEX_IMAGINARY(z))) {
+ int __recalc = 0;
+ if (crt_isinf(__a) || crt_isinf(__b)) {
+ __a = crt_copysignf(crt_isinf(__a) ? 1 : 0, __a);
+ __b = crt_copysignf(crt_isinf(__b) ? 1 : 0, __b);
+ if (crt_isnan(__c))
+ __c = crt_copysignf(0, __c);
+ if (crt_isnan(__d))
+ __d = crt_copysignf(0, __d);
+ __recalc = 1;
+ }
+ if (crt_isinf(__c) || crt_isinf(__d)) {
+ __c = crt_copysignf(crt_isinf(__c) ? 1 : 0, __c);
+ __d = crt_copysignf(crt_isinf(__d) ? 1 : 0, __d);
+ if (crt_isnan(__a))
+ __a = crt_copysignf(0, __a);
+ if (crt_isnan(__b))
+ __b = crt_copysignf(0, __b);
+ __recalc = 1;
+ }
+ if (!__recalc && (crt_isinf(__ac) || crt_isinf(__bd) || crt_isinf(__ad) ||
+ crt_isinf(__bc))) {
+ if (crt_isnan(__a))
+ __a = crt_copysignf(0, __a);
+ if (crt_isnan(__b))
+ __b = crt_copysignf(0, __b);
+ if (crt_isnan(__c))
+ __c = crt_copysignf(0, __c);
+ if (crt_isnan(__d))
+ __d = crt_copysignf(0, __d);
+ __recalc = 1;
+ }
+ if (__recalc) {
+ COMPLEX_REAL(z) = CRT_INFINITY * (__a * __c - __b * __d);
+ COMPLEX_IMAGINARY(z) = CRT_INFINITY * (__a * __d + __b * __c);
+ }
+ }
+ return z;
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/mulsc3.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
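The NaN branch is the C Annex G recovery path: when the textbook formula (ac - bd) + i(ad + bc) produces NaN in both components, infinite operands are rescaled to +/-1, NaN components are zeroed, and the product is recomputed with an explicit infinity, so an infinite operand still yields an infinite product. Clang normally lowers a full float _Complex multiply to this __mulsc3 call, so the effect is observable from plain C. A sketch (CMPLXF is C11; availability varies by libc):

#include <assert.h>
#include <complex.h>
#include <math.h>

int main(void) {
  // The textbook formula gives NaN in both parts here; the Annex G
  // recovery in __mulsc3 rescales and returns an infinite result.
  float complex a = CMPLXF(INFINITY, NAN);
  float complex z = a * CMPLXF(2.0f, 0.0f);
  assert(isinf(crealf(z)) || isinf(cimagf(z)));
  return 0;
}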
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/mulsf3.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/mulsf3.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/mulsf3.c (revision 351984)
@@ -0,0 +1,25 @@
+//===-- lib/mulsf3.c - Single-precision multiplication ------------*- C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements single-precision soft-float multiplication
+// with the IEEE-754 default rounding (to nearest, ties to even).
+//
+//===----------------------------------------------------------------------===//
+
+#define SINGLE_PRECISION
+#include "fp_mul_impl.inc"
+
+COMPILER_RT_ABI fp_t __mulsf3(fp_t a, fp_t b) { return __mulXf3__(a, b); }
+
+#if defined(__ARM_EABI__)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+AEABI_RTABI fp_t __aeabi_fmul(fp_t a, fp_t b) { return __mulsf3(a, b); }
+#else
+COMPILER_RT_ALIAS(__mulsf3, __aeabi_fmul)
+#endif
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/mulsf3.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
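The actual rounding logic lives in fp_mul_impl.inc, shared with the double- and quad-precision variants; this file only instantiates it at single precision and adds the AEABI spelling. On a hard-float host the symbol is still callable directly, which allows a quick cross-check against the FPU, since both round to nearest, ties to even (this assumes libclang_rt.builtins or an equivalent archive is on the link line):

#include <assert.h>

float __mulsf3(float a, float b);  // soft-float multiply from the builtins lib

int main(void) {
  assert(__mulsf3(1.5f, -2.0f) == -3.0f);
  assert(__mulsf3(0.1f, 10.0f) == 0.1f * 10.0f);  // identical rounding
  return 0;
}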
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/multc3.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/multc3.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/multc3.c (revision 351984)
@@ -0,0 +1,65 @@
+//===-- multc3.c - Implement __multc3 -------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __multc3 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+#include "int_math.h"
+
+// Returns: the product of a + ib and c + id
+
+COMPILER_RT_ABI long double _Complex __multc3(long double a, long double b,
+ long double c, long double d) {
+ long double ac = a * c;
+ long double bd = b * d;
+ long double ad = a * d;
+ long double bc = b * c;
+ long double _Complex z;
+ __real__ z = ac - bd;
+ __imag__ z = ad + bc;
+ if (crt_isnan(__real__ z) && crt_isnan(__imag__ z)) {
+ int recalc = 0;
+ if (crt_isinf(a) || crt_isinf(b)) {
+ a = crt_copysignl(crt_isinf(a) ? 1 : 0, a);
+ b = crt_copysignl(crt_isinf(b) ? 1 : 0, b);
+ if (crt_isnan(c))
+ c = crt_copysignl(0, c);
+ if (crt_isnan(d))
+ d = crt_copysignl(0, d);
+ recalc = 1;
+ }
+ if (crt_isinf(c) || crt_isinf(d)) {
+ c = crt_copysignl(crt_isinf(c) ? 1 : 0, c);
+ d = crt_copysignl(crt_isinf(d) ? 1 : 0, d);
+ if (crt_isnan(a))
+ a = crt_copysignl(0, a);
+ if (crt_isnan(b))
+ b = crt_copysignl(0, b);
+ recalc = 1;
+ }
+ if (!recalc &&
+ (crt_isinf(ac) || crt_isinf(bd) || crt_isinf(ad) || crt_isinf(bc))) {
+ if (crt_isnan(a))
+ a = crt_copysignl(0, a);
+ if (crt_isnan(b))
+ b = crt_copysignl(0, b);
+ if (crt_isnan(c))
+ c = crt_copysignl(0, c);
+ if (crt_isnan(d))
+ d = crt_copysignl(0, d);
+ recalc = 1;
+ }
+ if (recalc) {
+ __real__ z = CRT_INFINITY * (a * c - b * d);
+ __imag__ z = CRT_INFINITY * (a * d + b * c);
+ }
+ }
+ return z;
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/multc3.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/multf3.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/multf3.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/multf3.c (revision 351984)
@@ -0,0 +1,22 @@
+//===-- lib/multf3.c - Quad-precision multiplication --------------*- C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements quad-precision soft-float multiplication
+// with the IEEE-754 default rounding (to nearest, ties to even).
+//
+//===----------------------------------------------------------------------===//
+
+#define QUAD_PRECISION
+#include "fp_lib.h"
+
+#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
+#include "fp_mul_impl.inc"
+
+COMPILER_RT_ABI fp_t __multf3(fp_t a, fp_t b) { return __mulXf3__(a, b); }
+
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/multf3.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/multi3.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/multi3.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/multi3.c (revision 351984)
@@ -0,0 +1,51 @@
+//===-- multi3.c - Implement __multi3 -------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __multi3 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+#ifdef CRT_HAS_128BIT
+
+// Returns: a * b
+
+static ti_int __mulddi3(du_int a, du_int b) {
+ twords r;
+ const int bits_in_dword_2 = (int)(sizeof(di_int) * CHAR_BIT) / 2;
+ const du_int lower_mask = (du_int)~0 >> bits_in_dword_2;
+ r.s.low = (a & lower_mask) * (b & lower_mask);
+ du_int t = r.s.low >> bits_in_dword_2;
+ r.s.low &= lower_mask;
+ t += (a >> bits_in_dword_2) * (b & lower_mask);
+ r.s.low += (t & lower_mask) << bits_in_dword_2;
+ r.s.high = t >> bits_in_dword_2;
+ t = r.s.low >> bits_in_dword_2;
+ r.s.low &= lower_mask;
+ t += (b >> bits_in_dword_2) * (a & lower_mask);
+ r.s.low += (t & lower_mask) << bits_in_dword_2;
+ r.s.high += t >> bits_in_dword_2;
+ r.s.high += (a >> bits_in_dword_2) * (b >> bits_in_dword_2);
+ return r.all;
+}
+
+// Returns: a * b
+
+COMPILER_RT_ABI ti_int __multi3(ti_int a, ti_int b) {
+ twords x;
+ x.all = a;
+ twords y;
+ y.all = b;
+ twords r;
+ r.all = __mulddi3(x.s.low, y.s.low);
+ r.s.high += x.s.high * y.s.low + x.s.low * y.s.high;
+ return r.all;
+}
+
+#endif // CRT_HAS_128BIT
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/multi3.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/mulvdi3.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/mulvdi3.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/mulvdi3.c (revision 351984)
@@ -0,0 +1,47 @@
+//===-- mulvdi3.c - Implement __mulvdi3 -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __mulvdi3 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+// Returns: a * b
+
+// Effects: aborts if a * b overflows
+
+COMPILER_RT_ABI di_int __mulvdi3(di_int a, di_int b) {
+ const int N = (int)(sizeof(di_int) * CHAR_BIT);
+ const di_int MIN = (di_int)1 << (N - 1);
+ const di_int MAX = ~MIN;
+ if (a == MIN) {
+ if (b == 0 || b == 1)
+ return a * b;
+ compilerrt_abort();
+ }
+ if (b == MIN) {
+ if (a == 0 || a == 1)
+ return a * b;
+ compilerrt_abort();
+ }
+ di_int sa = a >> (N - 1);
+ di_int abs_a = (a ^ sa) - sa;
+ di_int sb = b >> (N - 1);
+ di_int abs_b = (b ^ sb) - sb;
+ if (abs_a < 2 || abs_b < 2)
+ return a * b;
+ if (sa == sb) {
+ if (abs_a > MAX / abs_b)
+ compilerrt_abort();
+ } else {
+ if (abs_a > MIN / -abs_b)
+ compilerrt_abort();
+ }
+ return a * b;
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/mulvdi3.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
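__mulvdi3 is the trapping twin of __mulodi4: the same MIN/MAX screening and the same division-based magnitude test, but overflow ends in compilerrt_abort() instead of a flag. That is the contract -ftrapv-style checked arithmetic wants. A hypothetical demo of how the routine is reached (GCC lowers -ftrapv multiplies to the __mulv* routines; Clang's lowering differs):

// Build sketch:  cc -ftrapv trapv_demo.c
#include <stdint.h>
#include <stdio.h>

int main(void) {
  volatile int64_t a = INT64_MAX, b = 2;
  int64_t c = a * b;  // with -ftrapv this becomes a checked multiply
                      // (e.g. a __mulvdi3 call) that aborts instead of wrapping
  printf("%lld\n", (long long)c);  // not reached when the product overflows
  return 0;
}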
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/mulvsi3.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/mulvsi3.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/mulvsi3.c (revision 351984)
@@ -0,0 +1,47 @@
+//===-- mulvsi3.c - Implement __mulvsi3 -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __mulvsi3 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+// Returns: a * b
+
+// Effects: aborts if a * b overflows
+
+COMPILER_RT_ABI si_int __mulvsi3(si_int a, si_int b) {
+ const int N = (int)(sizeof(si_int) * CHAR_BIT);
+ const si_int MIN = (si_int)1 << (N - 1);
+ const si_int MAX = ~MIN;
+ if (a == MIN) {
+ if (b == 0 || b == 1)
+ return a * b;
+ compilerrt_abort();
+ }
+ if (b == MIN) {
+ if (a == 0 || a == 1)
+ return a * b;
+ compilerrt_abort();
+ }
+ si_int sa = a >> (N - 1);
+ si_int abs_a = (a ^ sa) - sa;
+ si_int sb = b >> (N - 1);
+ si_int abs_b = (b ^ sb) - sb;
+ if (abs_a < 2 || abs_b < 2)
+ return a * b;
+ if (sa == sb) {
+ if (abs_a > MAX / abs_b)
+ compilerrt_abort();
+ } else {
+ if (abs_a > MIN / -abs_b)
+ compilerrt_abort();
+ }
+ return a * b;
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/mulvsi3.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/mulvti3.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/mulvti3.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/mulvti3.c (revision 351984)
@@ -0,0 +1,51 @@
+//===-- mulvti3.c - Implement __mulvti3 -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __mulvti3 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+#ifdef CRT_HAS_128BIT
+
+// Returns: a * b
+
+// Effects: aborts if a * b overflows
+
+COMPILER_RT_ABI ti_int __mulvti3(ti_int a, ti_int b) {
+ const int N = (int)(sizeof(ti_int) * CHAR_BIT);
+ const ti_int MIN = (ti_int)1 << (N - 1);
+ const ti_int MAX = ~MIN;
+ if (a == MIN) {
+ if (b == 0 || b == 1)
+ return a * b;
+ compilerrt_abort();
+ }
+ if (b == MIN) {
+ if (a == 0 || a == 1)
+ return a * b;
+ compilerrt_abort();
+ }
+ ti_int sa = a >> (N - 1);
+ ti_int abs_a = (a ^ sa) - sa;
+ ti_int sb = b >> (N - 1);
+ ti_int abs_b = (b ^ sb) - sb;
+ if (abs_a < 2 || abs_b < 2)
+ return a * b;
+ if (sa == sb) {
+ if (abs_a > MAX / abs_b)
+ compilerrt_abort();
+ } else {
+ if (abs_a > MIN / -abs_b)
+ compilerrt_abort();
+ }
+ return a * b;
+}
+
+#endif // CRT_HAS_128BIT
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/mulvti3.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/mulxc3.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/mulxc3.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/mulxc3.c (revision 351984)
@@ -0,0 +1,69 @@
+//===-- mulxc3.c - Implement __mulxc3 -------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __mulxc3 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#if !_ARCH_PPC
+
+#include "int_lib.h"
+#include "int_math.h"
+
+// Returns: the product of a + ib and c + id
+
+COMPILER_RT_ABI Lcomplex __mulxc3(long double __a, long double __b,
+ long double __c, long double __d) {
+ long double __ac = __a * __c;
+ long double __bd = __b * __d;
+ long double __ad = __a * __d;
+ long double __bc = __b * __c;
+ Lcomplex z;
+ COMPLEX_REAL(z) = __ac - __bd;
+ COMPLEX_IMAGINARY(z) = __ad + __bc;
+ if (crt_isnan(COMPLEX_REAL(z)) && crt_isnan(COMPLEX_IMAGINARY(z))) {
+ int __recalc = 0;
+ if (crt_isinf(__a) || crt_isinf(__b)) {
+ __a = crt_copysignl(crt_isinf(__a) ? 1 : 0, __a);
+ __b = crt_copysignl(crt_isinf(__b) ? 1 : 0, __b);
+ if (crt_isnan(__c))
+ __c = crt_copysignl(0, __c);
+ if (crt_isnan(__d))
+ __d = crt_copysignl(0, __d);
+ __recalc = 1;
+ }
+ if (crt_isinf(__c) || crt_isinf(__d)) {
+ __c = crt_copysignl(crt_isinf(__c) ? 1 : 0, __c);
+ __d = crt_copysignl(crt_isinf(__d) ? 1 : 0, __d);
+ if (crt_isnan(__a))
+ __a = crt_copysignl(0, __a);
+ if (crt_isnan(__b))
+ __b = crt_copysignl(0, __b);
+ __recalc = 1;
+ }
+ if (!__recalc && (crt_isinf(__ac) || crt_isinf(__bd) || crt_isinf(__ad) ||
+ crt_isinf(__bc))) {
+ if (crt_isnan(__a))
+ __a = crt_copysignl(0, __a);
+ if (crt_isnan(__b))
+ __b = crt_copysignl(0, __b);
+ if (crt_isnan(__c))
+ __c = crt_copysignl(0, __c);
+ if (crt_isnan(__d))
+ __d = crt_copysignl(0, __d);
+ __recalc = 1;
+ }
+ if (__recalc) {
+ COMPLEX_REAL(z) = CRT_INFINITY * (__a * __c - __b * __d);
+ COMPLEX_IMAGINARY(z) = CRT_INFINITY * (__a * __d + __b * __c);
+ }
+ }
+ return z;
+}
+
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/mulxc3.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/negdf2.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/negdf2.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/negdf2.c (revision 351984)
@@ -0,0 +1,24 @@
+//===-- lib/negdf2.c - double-precision negation ------------------*- C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements double-precision soft-float negation.
+//
+//===----------------------------------------------------------------------===//
+
+#define DOUBLE_PRECISION
+#include "fp_lib.h"
+
+COMPILER_RT_ABI fp_t __negdf2(fp_t a) { return fromRep(toRep(a) ^ signBit); }
+
+#if defined(__ARM_EABI__)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+AEABI_RTABI fp_t __aeabi_dneg(fp_t a) { return __negdf2(a); }
+#else
+COMPILER_RT_ALIAS(__negdf2, __aeabi_dneg)
+#endif
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/negdf2.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
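Negation here is a pure sign-bit flip, so it is exact for zeros, infinities and NaNs and raises no floating-point exception. The same operation spelled out with memcpy instead of the runtime's toRep/fromRep helpers (illustrative only):

#include <assert.h>
#include <math.h>
#include <stdint.h>
#include <string.h>

// What __negdf2 does: flip only the sign bit of the IEEE-754 representation.
static double neg_bits(double x) {
  uint64_t rep;
  memcpy(&rep, &x, sizeof rep);
  rep ^= UINT64_C(1) << 63;  // flip the sign bit, leave the rest untouched
  memcpy(&x, &rep, sizeof x);
  return x;
}

int main(void) {
  assert(neg_bits(1.5) == -1.5);
  assert(signbit(neg_bits(0.0)));  // +0.0 -> -0.0
  return 0;
}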
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/negdi2.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/negdi2.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/negdi2.c (revision 351984)
@@ -0,0 +1,21 @@
+//===-- negdi2.c - Implement __negdi2 -------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __negdi2 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+// Returns: -a
+
+COMPILER_RT_ABI di_int __negdi2(di_int a) {
+ // Note: this routine is here for API compatibility; any sane compiler
+ // should expand it inline.
+ return -a;
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/negdi2.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/negsf2.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/negsf2.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/negsf2.c (revision 351984)
@@ -0,0 +1,24 @@
+//===-- lib/negsf2.c - single-precision negation ------------------*- C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements single-precision soft-float negation.
+//
+//===----------------------------------------------------------------------===//
+
+#define SINGLE_PRECISION
+#include "fp_lib.h"
+
+COMPILER_RT_ABI fp_t __negsf2(fp_t a) { return fromRep(toRep(a) ^ signBit); }
+
+#if defined(__ARM_EABI__)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+AEABI_RTABI fp_t __aeabi_fneg(fp_t a) { return __negsf2(a); }
+#else
+COMPILER_RT_ALIAS(__negsf2, __aeabi_fneg)
+#endif
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/negsf2.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/negti2.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/negti2.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/negti2.c (revision 351984)
@@ -0,0 +1,25 @@
+//===-- negti2.c - Implement __negti2 -------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __negti2 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+#ifdef CRT_HAS_128BIT
+
+// Returns: -a
+
+COMPILER_RT_ABI ti_int __negti2(ti_int a) {
+ // Note: this routine is here for API compatibility; any sane compiler
+ // should expand it inline.
+ return -a;
+}
+
+#endif // CRT_HAS_128BIT
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/negti2.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/negvdi2.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/negvdi2.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/negvdi2.c (revision 351984)
@@ -0,0 +1,24 @@
+//===-- negvdi2.c - Implement __negvdi2 -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __negvdi2 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+// Returns: -a
+
+// Effects: aborts if -a overflows
+
+COMPILER_RT_ABI di_int __negvdi2(di_int a) {
+ const di_int MIN = (di_int)1 << ((int)(sizeof(di_int) * CHAR_BIT) - 1);
+ if (a == MIN)
+ compilerrt_abort();
+ return -a;
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/negvdi2.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
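Exactly one 64-bit input overflows two's-complement negation, INT64_MIN, so the check is a single comparison. A caller sketch (again assuming a builtins archive provides the symbol):

#include <assert.h>
#include <stdint.h>

int64_t __negvdi2(int64_t a);  // aborts if -a is not representable

int main(void) {
  assert(__negvdi2(42) == -42);
  // __negvdi2(INT64_MIN) would call compilerrt_abort(): in two's complement
  // -INT64_MIN == INT64_MAX + 1 has no 64-bit representation.
  return 0;
}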
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/negvsi2.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/negvsi2.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/negvsi2.c (revision 351984)
@@ -0,0 +1,24 @@
+//===-- negvsi2.c - Implement __negvsi2 -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __negvsi2 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+// Returns: -a
+
+// Effects: aborts if -a overflows
+
+COMPILER_RT_ABI si_int __negvsi2(si_int a) {
+ const si_int MIN = (si_int)1 << ((int)(sizeof(si_int) * CHAR_BIT) - 1);
+ if (a == MIN)
+ compilerrt_abort();
+ return -a;
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/negvsi2.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/negvti2.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/negvti2.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/negvti2.c (revision 351984)
@@ -0,0 +1,28 @@
+//===-- negvti2.c - Implement __negvti2 -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __negvti2 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+#ifdef CRT_HAS_128BIT
+
+// Returns: -a
+
+// Effects: aborts if -a overflows
+
+COMPILER_RT_ABI ti_int __negvti2(ti_int a) {
+ const ti_int MIN = (ti_int)1 << ((int)(sizeof(ti_int) * CHAR_BIT) - 1);
+ if (a == MIN)
+ compilerrt_abort();
+ return -a;
+}
+
+#endif // CRT_HAS_128BIT
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/negvti2.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/os_version_check.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/os_version_check.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/os_version_check.c (revision 351984)
@@ -0,0 +1,224 @@
+//===-- os_version_check.c - OS version checking -------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the function __isOSVersionAtLeast, used by
+// Objective-C's @available
+//
+//===----------------------------------------------------------------------===//
+
+#ifdef __APPLE__
+
+#include <TargetConditionals.h>
+#include <dispatch/dispatch.h>
+#include <dlfcn.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+// These three variables hold the host's OS version.
+static int32_t GlobalMajor, GlobalMinor, GlobalSubminor;
+static dispatch_once_t DispatchOnceCounter;
+
+// We can't include <CoreFoundation/CoreFoundation.h> directly from here, so
+// just forward declare everything that we need from it.
+
+typedef const void *CFDataRef, *CFAllocatorRef, *CFPropertyListRef,
+ *CFStringRef, *CFDictionaryRef, *CFTypeRef, *CFErrorRef;
+
+#if __LLP64__
+typedef unsigned long long CFTypeID;
+typedef unsigned long long CFOptionFlags;
+typedef signed long long CFIndex;
+#else
+typedef unsigned long CFTypeID;
+typedef unsigned long CFOptionFlags;
+typedef signed long CFIndex;
+#endif
+
+typedef unsigned char UInt8;
+typedef _Bool Boolean;
+typedef CFIndex CFPropertyListFormat;
+typedef uint32_t CFStringEncoding;
+
+// kCFStringEncodingASCII analog.
+#define CF_STRING_ENCODING_ASCII 0x0600
+// kCFStringEncodingUTF8 analog.
+#define CF_STRING_ENCODING_UTF8 0x08000100
+#define CF_PROPERTY_LIST_IMMUTABLE 0
+
+typedef CFDataRef (*CFDataCreateWithBytesNoCopyFuncTy)(CFAllocatorRef,
+ const UInt8 *, CFIndex,
+ CFAllocatorRef);
+typedef CFPropertyListRef (*CFPropertyListCreateWithDataFuncTy)(
+ CFAllocatorRef, CFDataRef, CFOptionFlags, CFPropertyListFormat *,
+ CFErrorRef *);
+typedef CFPropertyListRef (*CFPropertyListCreateFromXMLDataFuncTy)(
+ CFAllocatorRef, CFDataRef, CFOptionFlags, CFStringRef *);
+typedef CFStringRef (*CFStringCreateWithCStringNoCopyFuncTy)(CFAllocatorRef,
+ const char *,
+ CFStringEncoding,
+ CFAllocatorRef);
+typedef const void *(*CFDictionaryGetValueFuncTy)(CFDictionaryRef,
+ const void *);
+typedef CFTypeID (*CFGetTypeIDFuncTy)(CFTypeRef);
+typedef CFTypeID (*CFStringGetTypeIDFuncTy)(void);
+typedef Boolean (*CFStringGetCStringFuncTy)(CFStringRef, char *, CFIndex,
+ CFStringEncoding);
+typedef void (*CFReleaseFuncTy)(CFTypeRef);
+
+// Find and parse the SystemVersion.plist file.
+static void parseSystemVersionPList(void *Unused) {
+ (void)Unused;
+ // Load CoreFoundation dynamically
+ const void *NullAllocator = dlsym(RTLD_DEFAULT, "kCFAllocatorNull");
+ if (!NullAllocator)
+ return;
+ const CFAllocatorRef AllocatorNull = *(const CFAllocatorRef *)NullAllocator;
+ CFDataCreateWithBytesNoCopyFuncTy CFDataCreateWithBytesNoCopyFunc =
+ (CFDataCreateWithBytesNoCopyFuncTy)dlsym(RTLD_DEFAULT,
+ "CFDataCreateWithBytesNoCopy");
+ if (!CFDataCreateWithBytesNoCopyFunc)
+ return;
+ CFPropertyListCreateWithDataFuncTy CFPropertyListCreateWithDataFunc =
+ (CFPropertyListCreateWithDataFuncTy)dlsym(RTLD_DEFAULT,
+ "CFPropertyListCreateWithData");
+// CFPropertyListCreateWithData was introduced only in macOS 10.6+, so it
+// will be NULL on earlier OS versions.
+#pragma clang diagnostic push
+#pragma clang diagnostic ignored "-Wdeprecated-declarations"
+ CFPropertyListCreateFromXMLDataFuncTy CFPropertyListCreateFromXMLDataFunc =
+ (CFPropertyListCreateFromXMLDataFuncTy)dlsym(
+ RTLD_DEFAULT, "CFPropertyListCreateFromXMLData");
+#pragma clang diagnostic pop
+ // CFPropertyListCreateFromXMLDataFunc is deprecated in macOS 10.10, so it
+ // might be NULL in future OS versions.
+ if (!CFPropertyListCreateWithDataFunc && !CFPropertyListCreateFromXMLDataFunc)
+ return;
+ CFStringCreateWithCStringNoCopyFuncTy CFStringCreateWithCStringNoCopyFunc =
+ (CFStringCreateWithCStringNoCopyFuncTy)dlsym(
+ RTLD_DEFAULT, "CFStringCreateWithCStringNoCopy");
+ if (!CFStringCreateWithCStringNoCopyFunc)
+ return;
+ CFDictionaryGetValueFuncTy CFDictionaryGetValueFunc =
+ (CFDictionaryGetValueFuncTy)dlsym(RTLD_DEFAULT, "CFDictionaryGetValue");
+ if (!CFDictionaryGetValueFunc)
+ return;
+ CFGetTypeIDFuncTy CFGetTypeIDFunc =
+ (CFGetTypeIDFuncTy)dlsym(RTLD_DEFAULT, "CFGetTypeID");
+ if (!CFGetTypeIDFunc)
+ return;
+ CFStringGetTypeIDFuncTy CFStringGetTypeIDFunc =
+ (CFStringGetTypeIDFuncTy)dlsym(RTLD_DEFAULT, "CFStringGetTypeID");
+ if (!CFStringGetTypeIDFunc)
+ return;
+ CFStringGetCStringFuncTy CFStringGetCStringFunc =
+ (CFStringGetCStringFuncTy)dlsym(RTLD_DEFAULT, "CFStringGetCString");
+ if (!CFStringGetCStringFunc)
+ return;
+ CFReleaseFuncTy CFReleaseFunc =
+ (CFReleaseFuncTy)dlsym(RTLD_DEFAULT, "CFRelease");
+ if (!CFReleaseFunc)
+ return;
+
+ char *PListPath = "/System/Library/CoreServices/SystemVersion.plist";
+
+#if TARGET_OS_SIMULATOR
+ char *PListPathPrefix = getenv("IPHONE_SIMULATOR_ROOT");
+ if (!PListPathPrefix)
+ return;
+ char FullPath[strlen(PListPathPrefix) + strlen(PListPath) + 1];
+ strcpy(FullPath, PListPathPrefix);
+ strcat(FullPath, PListPath);
+ PListPath = FullPath;
+#endif
+ FILE *PropertyList = fopen(PListPath, "r");
+ if (!PropertyList)
+ return;
+
+ // Dynamically allocated stuff.
+ CFDictionaryRef PListRef = NULL;
+ CFDataRef FileContentsRef = NULL;
+ UInt8 *PListBuf = NULL;
+
+ fseek(PropertyList, 0, SEEK_END);
+ long PListFileSize = ftell(PropertyList);
+ if (PListFileSize < 0)
+ goto Fail;
+ rewind(PropertyList);
+
+ PListBuf = malloc((size_t)PListFileSize);
+ if (!PListBuf)
+ goto Fail;
+
+ size_t NumRead = fread(PListBuf, 1, (size_t)PListFileSize, PropertyList);
+ if (NumRead != (size_t)PListFileSize)
+ goto Fail;
+
+ // Get the file buffer into CF's format. We pass in a null allocator here
+ // because we free PListBuf ourselves.
+ FileContentsRef = (*CFDataCreateWithBytesNoCopyFunc)(
+ NULL, PListBuf, (CFIndex)NumRead, AllocatorNull);
+ if (!FileContentsRef)
+ goto Fail;
+
+ if (CFPropertyListCreateWithDataFunc)
+ PListRef = (*CFPropertyListCreateWithDataFunc)(
+ NULL, FileContentsRef, CF_PROPERTY_LIST_IMMUTABLE, NULL, NULL);
+ else
+ PListRef = (*CFPropertyListCreateFromXMLDataFunc)(
+ NULL, FileContentsRef, CF_PROPERTY_LIST_IMMUTABLE, NULL);
+ if (!PListRef)
+ goto Fail;
+
+ CFStringRef ProductVersion = (*CFStringCreateWithCStringNoCopyFunc)(
+ NULL, "ProductVersion", CF_STRING_ENCODING_ASCII, AllocatorNull);
+ if (!ProductVersion)
+ goto Fail;
+ CFTypeRef OpaqueValue = (*CFDictionaryGetValueFunc)(PListRef, ProductVersion);
+ (*CFReleaseFunc)(ProductVersion);
+ if (!OpaqueValue ||
+ (*CFGetTypeIDFunc)(OpaqueValue) != (*CFStringGetTypeIDFunc)())
+ goto Fail;
+
+ char VersionStr[32];
+ if (!(*CFStringGetCStringFunc)((CFStringRef)OpaqueValue, VersionStr,
+ sizeof(VersionStr), CF_STRING_ENCODING_UTF8))
+ goto Fail;
+ sscanf(VersionStr, "%d.%d.%d", &GlobalMajor, &GlobalMinor, &GlobalSubminor);
+
+Fail:
+ if (PListRef)
+ (*CFReleaseFunc)(PListRef);
+ if (FileContentsRef)
+ (*CFReleaseFunc)(FileContentsRef);
+ free(PListBuf);
+ fclose(PropertyList);
+}
+
+int32_t __isOSVersionAtLeast(int32_t Major, int32_t Minor, int32_t Subminor) {
+ // Populate the global version variables, if they haven't been already.
+ dispatch_once_f(&DispatchOnceCounter, NULL, parseSystemVersionPList);
+
+ if (Major < GlobalMajor)
+ return 1;
+ if (Major > GlobalMajor)
+ return 0;
+ if (Minor < GlobalMinor)
+ return 1;
+ if (Minor > GlobalMinor)
+ return 0;
+ return Subminor <= GlobalSubminor;
+}
+
+#else
+
+// Silence an empty translation unit warning.
+typedef int unused;
+
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/os_version_check.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
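This is the runtime half of Objective-C's @available / __builtin_available: the parsed SystemVersion.plist values are cached once via dispatch_once_f, and the comparison is lexicographic on major, then minor, then subminor. Roughly what an availability check compiles into (Apple platforms only; the wrapper function here is hypothetical):

#include <stdint.h>

int32_t __isOSVersionAtLeast(int32_t Major, int32_t Minor, int32_t Subminor);

// Approximately what  if (@available(macOS 10.13, *)) { ... }  becomes.
void use_api_if_available(void) {
  if (__isOSVersionAtLeast(10, 13, 0)) {
    // safe to use 10.13-or-newer API here
  }
}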
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/paritydi2.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/paritydi2.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/paritydi2.c (revision 351984)
@@ -0,0 +1,21 @@
+//===-- paritydi2.c - Implement __paritydi2 -------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __paritydi2 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+// Returns: 1 if the number of set bits in a is odd, else 0
+
+COMPILER_RT_ABI si_int __paritydi2(di_int a) {
+ dwords x;
+ x.all = a;
+ return __paritysi2(x.s.high ^ x.s.low);
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/paritydi2.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/paritysi2.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/paritysi2.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/paritysi2.c (revision 351984)
@@ -0,0 +1,23 @@
+//===-- paritysi2.c - Implement __paritysi2 -------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __paritysi2 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+// Returns: 1 if the number of set bits in a is odd, else 0
+
+COMPILER_RT_ABI si_int __paritysi2(si_int a) {
+ su_int x = (su_int)a;
+ x ^= x >> 16;
+ x ^= x >> 8;
+ x ^= x >> 4;
+ return (0x6996 >> (x & 0xF)) & 1;
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/paritysi2.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
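The three XOR folds collapse the word's parity into its low nibble, and 0x6996 then serves as a 16-entry, one-bit-wide lookup table: bit i of 0x6996 is the parity of i. A self-contained re-derivation, checked exhaustively against the compiler builtin:

#include <assert.h>
#include <stdint.h>

// Fold-then-lookup parity, mirroring __paritysi2.
static int parity32(uint32_t x) {
  x ^= x >> 16;  // parity of the word == parity of the folded half-word
  x ^= x >> 8;
  x ^= x >> 4;
  return (0x6996 >> (x & 0xF)) & 1;  // 0x6996 = nibble-parity table
}

int main(void) {
  for (uint32_t i = 0; i < 1u << 16; ++i)
    assert(parity32(i) == __builtin_parity(i));
  return 0;
}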
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/parityti2.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/parityti2.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/parityti2.c (revision 351984)
@@ -0,0 +1,25 @@
+//===-- parityti2.c - Implement __parityti2 -------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __parityti2 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+#ifdef CRT_HAS_128BIT
+
+// Returns: 1 if the number of set bits in a is odd, else 0
+
+COMPILER_RT_ABI si_int __parityti2(ti_int a) {
+ twords x;
+ x.all = a;
+ return __paritydi2(x.s.high ^ x.s.low);
+}
+
+#endif // CRT_HAS_128BIT
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/parityti2.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/popcountdi2.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/popcountdi2.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/popcountdi2.c (revision 351984)
@@ -0,0 +1,32 @@
+//===-- popcountdi2.c - Implement __popcountdi2 ---------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __popcountdi2 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+// Returns: count of 1 bits
+
+COMPILER_RT_ABI si_int __popcountdi2(di_int a) {
+ du_int x2 = (du_int)a;
+ x2 = x2 - ((x2 >> 1) & 0x5555555555555555uLL);
+ // Every 2 bits holds the sum of every pair of bits (32)
+ x2 = ((x2 >> 2) & 0x3333333333333333uLL) + (x2 & 0x3333333333333333uLL);
+ // Every 4 bits holds the sum of every 4-set of bits (3 significant bits) (16)
+ x2 = (x2 + (x2 >> 4)) & 0x0F0F0F0F0F0F0F0FuLL;
+ // Every 8 bits holds the sum of every 8-set of bits (4 significant bits) (8)
+ su_int x = (su_int)(x2 + (x2 >> 32));
+ // The lower 32 bits hold four 16 bit sums (5 significant bits).
+ // Upper 32 bits are garbage
+ x = x + (x >> 16);
+ // The lower 16 bits hold two 32 bit sums (6 significant bits).
+ // Upper 16 bits are garbage
+ return (x + (x >> 8)) & 0x0000007F; // (7 significant bits)
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/popcountdi2.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
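This is the standard SWAR bit count: pairwise 2-bit sums, then 4-bit sums, then byte sums, after which the halves are folded together. A sketch of the same accumulation kept in 64 bits throughout, using the common multiply-by-0x0101... final fold in place of the library's shift-add tail, verified against the compiler builtin:

#include <assert.h>
#include <stdint.h>

// SWAR population count, equivalent in result to __popcountdi2.
static int popcount64(uint64_t x) {
  x -= (x >> 1) & 0x5555555555555555ull;                // 2-bit field sums
  x = (x & 0x3333333333333333ull) + ((x >> 2) & 0x3333333333333333ull);
  x = (x + (x >> 4)) & 0x0F0F0F0F0F0F0F0Full;           // per-byte sums
  return (int)((x * 0x0101010101010101ull) >> 56);      // add all eight bytes
}

int main(void) {
  assert(popcount64(0) == 0);
  assert(popcount64(~0ull) == 64);
  assert(popcount64(0xF0F0F0F0F0F0F0F0ull) ==
         __builtin_popcountll(0xF0F0F0F0F0F0F0F0ull));
  return 0;
}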
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/popcountsi2.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/popcountsi2.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/popcountsi2.c (revision 351984)
@@ -0,0 +1,29 @@
+//===-- popcountsi2.c - Implement __popcountsi2 ---------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __popcountsi2 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+// Returns: count of 1 bits
+
+COMPILER_RT_ABI si_int __popcountsi2(si_int a) {
+ su_int x = (su_int)a;
+ x = x - ((x >> 1) & 0x55555555);
+ // Every 2 bits holds the sum of every pair of bits
+ x = ((x >> 2) & 0x33333333) + (x & 0x33333333);
+ // Every 4 bits holds the sum of every 4-set of bits (3 significant bits)
+ x = (x + (x >> 4)) & 0x0F0F0F0F;
+ // Every 8 bits holds the sum of every 8-set of bits (4 significant bits)
+ x = (x + (x >> 16));
+ // The lower 16 bits hold two 8 bit sums (5 significant bits).
+ // Upper 16 bits are garbage
+ return (x + (x >> 8)) & 0x0000003F; // (6 significant bits)
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/popcountsi2.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/popcountti2.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/popcountti2.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/popcountti2.c (revision 351984)
@@ -0,0 +1,42 @@
+//===-- popcountti2.c - Implement __popcountti2 ---------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __popcountti2 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+#ifdef CRT_HAS_128BIT
+
+// Returns: count of 1 bits
+
+COMPILER_RT_ABI si_int __popcountti2(ti_int a) {
+ tu_int x3 = (tu_int)a;
+ x3 = x3 - ((x3 >> 1) &
+ (((tu_int)0x5555555555555555uLL << 64) | 0x5555555555555555uLL));
+ // Every 2 bits holds the sum of every pair of bits (64)
+ x3 = ((x3 >> 2) &
+ (((tu_int)0x3333333333333333uLL << 64) | 0x3333333333333333uLL)) +
+ (x3 & (((tu_int)0x3333333333333333uLL << 64) | 0x3333333333333333uLL));
+ // Every 4 bits holds the sum of every 4-set of bits (3 significant bits) (32)
+ x3 = (x3 + (x3 >> 4)) &
+ (((tu_int)0x0F0F0F0F0F0F0F0FuLL << 64) | 0x0F0F0F0F0F0F0F0FuLL);
+ // Every 8 bits holds the sum of every 8-set of bits (4 significant bits) (16)
+ du_int x2 = (du_int)(x3 + (x3 >> 64));
+ // Every 8 bits holds the sum of every 8-set of bits (5 significant bits) (8)
+ su_int x = (su_int)(x2 + (x2 >> 32));
+ // Every 8 bits holds the sum of every 8-set of bits (6 significant bits) (4)
+ x = x + (x >> 16);
+ // Every 8 bits holds the sum of every 8-set of bits (7 significant bits) (2)
+ //
+ // Upper 16 bits are garbage
+ return (x + (x >> 8)) & 0xFF; // (8 significant bits)
+}
+
+#endif // CRT_HAS_128BIT
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/popcountti2.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/powidf2.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/powidf2.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/powidf2.c (revision 351984)
@@ -0,0 +1,29 @@
+//===-- powidf2.c - Implement __powidf2 -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __powidf2 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+// Returns: a raised to the power b
+
+COMPILER_RT_ABI double __powidf2(double a, si_int b) {
+ const int recip = b < 0;
+ double r = 1;
+ while (1) {
+ if (b & 1)
+ r *= a;
+ b /= 2;
+ if (b == 0)
+ break;
+ a *= a;
+ }
+ return recip ? 1 / r : r;
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/powidf2.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
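Square-and-multiply: each trip through the loop halves b (C's truncating division walks negative exponents toward zero as well) and squares a, so only O(log |b|) multiplies are needed, with a single reciprocal at the end for negative b. A few spot checks (the si_int exponent parameter is a 32-bit int here):

#include <assert.h>

double __powidf2(double a, int b);  // compiler-rt export

int main(void) {
  assert(__powidf2(2.0, 10) == 1024.0);
  assert(__powidf2(2.0, -3) == 0.125);  // reciprocal applied once at the end
  assert(__powidf2(-3.0, 3) == -27.0);
  return 0;
}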
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/powisf2.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/powisf2.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/powisf2.c (revision 351984)
@@ -0,0 +1,29 @@
+//===-- powisf2.c - Implement __powisf2 -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __powisf2 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+// Returns: a raised to the power b
+
+COMPILER_RT_ABI float __powisf2(float a, si_int b) {
+ const int recip = b < 0;
+ float r = 1;
+ while (1) {
+ if (b & 1)
+ r *= a;
+ b /= 2;
+ if (b == 0)
+ break;
+ a *= a;
+ }
+ return recip ? 1 / r : r;
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/powisf2.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/powitf2.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/powitf2.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/powitf2.c (revision 351984)
@@ -0,0 +1,33 @@
+//===-- powitf2.c - Implement __powitf2 -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __powitf2 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+#if _ARCH_PPC
+
+// Returns: a ^ b
+
+COMPILER_RT_ABI long double __powitf2(long double a, si_int b) {
+ const int recip = b < 0;
+ long double r = 1;
+ while (1) {
+ if (b & 1)
+ r *= a;
+ b /= 2;
+ if (b == 0)
+ break;
+ a *= a;
+ }
+ return recip ? 1 / r : r;
+}
+
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/powitf2.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/powixf2.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/powixf2.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/powixf2.c (revision 351984)
@@ -0,0 +1,33 @@
+//===-- powixf2.c - Implement __powixf2 -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __powixf2 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#if !_ARCH_PPC
+
+#include "int_lib.h"
+
+// Returns: a ^ b
+
+COMPILER_RT_ABI long double __powixf2(long double a, si_int b) {
+ const int recip = b < 0;
+ long double r = 1;
+ while (1) {
+ if (b & 1)
+ r *= a;
+ b /= 2;
+ if (b == 0)
+ break;
+ a *= a;
+ }
+ return recip ? 1 / r : r;
+}
+
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/powixf2.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ppc/DD.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ppc/DD.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ppc/DD.h (revision 351984)
@@ -0,0 +1,45 @@
+#ifndef COMPILERRT_DD_HEADER
+#define COMPILERRT_DD_HEADER
+
+#include "../int_lib.h"
+
+typedef union {
+ long double ld;
+ struct {
+ double hi;
+ double lo;
+ } s;
+} DD;
+
+typedef union {
+ double d;
+ uint64_t x;
+} doublebits;
+
+#define LOWORDER(xy, xHi, xLo, yHi, yLo) \
+ (((((xHi) * (yHi) - (xy)) + (xHi) * (yLo)) + (xLo) * (yHi)) + (xLo) * (yLo))
+
+static __inline ALWAYS_INLINE double local_fabs(double x) {
+ doublebits result = {.d = x};
+ result.x &= UINT64_C(0x7fffffffffffffff);
+ return result.d;
+}
+
+static __inline ALWAYS_INLINE double high26bits(double x) {
+ doublebits result = {.d = x};
+ result.x &= UINT64_C(0xfffffffff8000000);
+ return result.d;
+}
+
+static __inline ALWAYS_INLINE int different_sign(double x, double y) {
+ doublebits xsignbit = {.d = x}, ysignbit = {.d = y};
+ int result = (int)(xsignbit.x >> 63) ^ (int)(ysignbit.x >> 63);
+ return result;
+}
+
+long double __gcc_qadd(long double, long double);
+long double __gcc_qsub(long double, long double);
+long double __gcc_qmul(long double, long double);
+long double __gcc_qdiv(long double, long double);
+
+#endif // COMPILERRT_DD_HEADER
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ppc/DD.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
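DD.h's doublebits union is the pattern everything below builds on: type-pun a double to its 64-bit IEEE-754 representation and edit bits directly (well-defined through a union in C99). A self-contained sketch of the same helpers, assuming 64-bit IEEE doubles; demo_* names are illustrative:

#include <assert.h>
#include <stdint.h>

typedef union {
  double d;
  uint64_t x;
} doublebits;

// Clear the sign bit: branch-free fabs, as in local_fabs() above.
static double demo_fabs(double v) {
  doublebits b = {.d = v};
  b.x &= UINT64_C(0x7fffffffffffffff);
  return b.d;
}

// Compare sign bits: nonzero iff x and y differ in sign, as in different_sign().
static int demo_different_sign(double x, double y) {
  doublebits xb = {.d = x}, yb = {.d = y};
  return (int)(xb.x >> 63) ^ (int)(yb.x >> 63);
}

int main(void) {
  assert(demo_fabs(-3.5) == 3.5);
  assert(demo_different_sign(-1.0, 2.0) == 1);
  assert(demo_different_sign(-0.0, 1.0) == 1); // -0.0 carries a sign bit too
  return 0;
}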
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ppc/divtc3.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ppc/divtc3.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ppc/divtc3.c (revision 351984)
@@ -0,0 +1,96 @@
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+#include "../int_math.h"
+#include "DD.h"
+// Use DOUBLE_PRECISION because the soft-float method we rely on is logb,
+// applied to the upper half of each long double, even though this file
+// defines complex division for 128-bit floats.
+#define DOUBLE_PRECISION
+#include "../fp_lib.h"
+
+#if !defined(CRT_INFINITY) && defined(HUGE_VAL)
+#define CRT_INFINITY HUGE_VAL
+#endif // CRT_INFINITY
+
+#define makeFinite(x) \
+ { \
+ (x).s.hi = crt_copysign(crt_isinf((x).s.hi) ? 1.0 : 0.0, (x).s.hi); \
+ (x).s.lo = 0.0; \
+ }
+
+long double _Complex __divtc3(long double a, long double b, long double c,
+ long double d) {
+ DD cDD = {.ld = c};
+ DD dDD = {.ld = d};
+
+ int ilogbw = 0;
+ const double logbw =
+ __compiler_rt_logb(crt_fmax(crt_fabs(cDD.s.hi), crt_fabs(dDD.s.hi)));
+
+ if (crt_isfinite(logbw)) {
+ ilogbw = (int)logbw;
+
+ cDD.s.hi = crt_scalbn(cDD.s.hi, -ilogbw);
+ cDD.s.lo = crt_scalbn(cDD.s.lo, -ilogbw);
+ dDD.s.hi = crt_scalbn(dDD.s.hi, -ilogbw);
+ dDD.s.lo = crt_scalbn(dDD.s.lo, -ilogbw);
+ }
+
+ const long double denom =
+ __gcc_qadd(__gcc_qmul(cDD.ld, cDD.ld), __gcc_qmul(dDD.ld, dDD.ld));
+ const long double realNumerator =
+ __gcc_qadd(__gcc_qmul(a, cDD.ld), __gcc_qmul(b, dDD.ld));
+ const long double imagNumerator =
+ __gcc_qsub(__gcc_qmul(b, cDD.ld), __gcc_qmul(a, dDD.ld));
+
+ DD real = {.ld = __gcc_qdiv(realNumerator, denom)};
+ DD imag = {.ld = __gcc_qdiv(imagNumerator, denom)};
+
+ real.s.hi = crt_scalbn(real.s.hi, -ilogbw);
+ real.s.lo = crt_scalbn(real.s.lo, -ilogbw);
+ imag.s.hi = crt_scalbn(imag.s.hi, -ilogbw);
+ imag.s.lo = crt_scalbn(imag.s.lo, -ilogbw);
+
+ if (crt_isnan(real.s.hi) && crt_isnan(imag.s.hi)) {
+ DD aDD = {.ld = a};
+ DD bDD = {.ld = b};
+ DD rDD = {.ld = denom};
+
+ if ((rDD.s.hi == 0.0) && (!crt_isnan(aDD.s.hi) || !crt_isnan(bDD.s.hi))) {
+ real.s.hi = crt_copysign(CRT_INFINITY, cDD.s.hi) * aDD.s.hi;
+ real.s.lo = 0.0;
+ imag.s.hi = crt_copysign(CRT_INFINITY, cDD.s.hi) * bDD.s.hi;
+ imag.s.lo = 0.0;
+ }
+
+ else if ((crt_isinf(aDD.s.hi) || crt_isinf(bDD.s.hi)) &&
+ crt_isfinite(cDD.s.hi) && crt_isfinite(dDD.s.hi)) {
+ makeFinite(aDD);
+ makeFinite(bDD);
+ real.s.hi = CRT_INFINITY * (aDD.s.hi * cDD.s.hi + bDD.s.hi * dDD.s.hi);
+ real.s.lo = 0.0;
+ imag.s.hi = CRT_INFINITY * (bDD.s.hi * cDD.s.hi - aDD.s.hi * dDD.s.hi);
+ imag.s.lo = 0.0;
+ }
+
+ else if ((crt_isinf(cDD.s.hi) || crt_isinf(dDD.s.hi)) &&
+ crt_isfinite(aDD.s.hi) && crt_isfinite(bDD.s.hi)) {
+ makeFinite(cDD);
+ makeFinite(dDD);
+ real.s.hi =
+ crt_copysign(0.0, (aDD.s.hi * cDD.s.hi + bDD.s.hi * dDD.s.hi));
+ real.s.lo = 0.0;
+ imag.s.hi =
+ crt_copysign(0.0, (bDD.s.hi * cDD.s.hi - aDD.s.hi * dDD.s.hi));
+ imag.s.lo = 0.0;
+ }
+ }
+
+ long double _Complex z;
+ __real__ z = real.ld;
+ __imag__ z = imag.ld;
+
+ return z;
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ppc/divtc3.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
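__divtc3 uses the standard Annex-G-friendly shape for complex division: pre-scale the divisor by 2^-ilogbw so that c^2 + d^2 cannot overflow or underflow, divide, then apply the same scale again to the quotient. The skeleton is easier to see in plain double arithmetic; the following is a sketch of the technique only, with div_complex an illustrative name:

#include <math.h>
#include <stdio.h>

// Divide (a + ib) / (c + id) with exponent rescaling of the divisor.
static void div_complex(double a, double b, double c, double d,
                        double *re, double *im) {
  int ilogbw = 0;
  double logbw = logb(fmax(fabs(c), fabs(d)));  // exponent of the divisor
  if (isfinite(logbw)) {
    ilogbw = (int)logbw;
    c = scalbn(c, -ilogbw);                     // bring the divisor near 1.0
    d = scalbn(d, -ilogbw);
  }
  double denom = c * c + d * d;                 // now safe from overflow
  *re = scalbn((a * c + b * d) / denom, -ilogbw);
  *im = scalbn((b * c - a * d) / denom, -ilogbw);
}

int main(void) {
  double re, im;
  div_complex(1.0, 0.0, 0.0, 1e300, &re, &im);  // 1 / (i * 1e300)
  printf("%g + %gi\n", re, im);                 // ~0 + -1e-300i, no overflow
  return 0;
}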
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ppc/fixtfdi.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ppc/fixtfdi.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ppc/fixtfdi.c (revision 351984)
@@ -0,0 +1,98 @@
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+// int64_t __fixtfdi(long double x);
+// This file implements the PowerPC 128-bit double-double -> int64_t conversion
+
+#include "../int_math.h"
+#include "DD.h"
+
+uint64_t __fixtfdi(long double input) {
+ const DD x = {.ld = input};
+ const doublebits hibits = {.d = x.s.hi};
+
+ const uint32_t absHighWord =
+ (uint32_t)(hibits.x >> 32) & UINT32_C(0x7fffffff);
+ const uint32_t absHighWordMinusOne = absHighWord - UINT32_C(0x3ff00000);
+
+ // If (1.0 - tiny) <= input < 0x1.0p63:
+ if (UINT32_C(0x03f00000) > absHighWordMinusOne) {
+ // Do an unsigned conversion of the absolute value, then restore the sign.
+ const int unbiasedHeadExponent = absHighWordMinusOne >> 20;
+
+ int64_t result = hibits.x & INT64_C(0x000fffffffffffff); // mantissa(hi)
+ result |= INT64_C(0x0010000000000000); // mantissa(hi) with implicit bit
+ result <<= 10; // mantissa(hi) with one zero preceding bit.
+
+ const int64_t hiNegationMask = ((int64_t)(hibits.x)) >> 63;
+
+ // If the tail is non-zero, we need to patch in the tail bits.
+ if (0.0 != x.s.lo) {
+ const doublebits lobits = {.d = x.s.lo};
+ int64_t tailMantissa = lobits.x & INT64_C(0x000fffffffffffff);
+ tailMantissa |= INT64_C(0x0010000000000000);
+
+ // At this point we have the mantissa of |tail|
+ // We need to negate it if head and tail have different signs.
+ const int64_t loNegationMask = ((int64_t)(lobits.x)) >> 63;
+ const int64_t negationMask = loNegationMask ^ hiNegationMask;
+ tailMantissa = (tailMantissa ^ negationMask) - negationMask;
+
+ // Now we have the mantissa of tail as a signed 2s-complement integer
+
+ const int biasedTailExponent = (int)(lobits.x >> 52) & 0x7ff;
+
+ // Shift the tail mantissa into the right position, accounting for the
+ // bias of 10 that we shifted the head mantissa by.
+ tailMantissa >>=
+ (unbiasedHeadExponent - (biasedTailExponent - (1023 - 10)));
+
+ result += tailMantissa;
+ }
+
+ result >>= (62 - unbiasedHeadExponent);
+
+ // Restore the sign of the result and return
+ result = (result ^ hiNegationMask) - hiNegationMask;
+ return result;
+ }
+
+ // Edge cases handled here:
+
+ // |x| < 1, result is zero.
+ if (1.0 > crt_fabs(x.s.hi))
+ return INT64_C(0);
+
+ // x very close to INT64_MIN, care must be taken to see which side we are on.
+ if (x.s.hi == -0x1.0p63) {
+
+ int64_t result = INT64_MIN;
+
+ if (0.0 < x.s.lo) {
+ // If the tail is positive, the correct result is something other than
+ // INT64_MIN. We'll need to figure out what it is.
+
+ const doublebits lobits = {.d = x.s.lo};
+ int64_t tailMantissa = lobits.x & INT64_C(0x000fffffffffffff);
+ tailMantissa |= INT64_C(0x0010000000000000);
+
+ // Now we negate the tailMantissa
+ tailMantissa = (tailMantissa ^ INT64_C(-1)) + INT64_C(1);
+
+ // And shift it by the appropriate amount
+ const int biasedTailExponent = (int)(lobits.x >> 52) & 0x7ff;
+ tailMantissa >>= 1075 - biasedTailExponent;
+
+ result -= tailMantissa;
+ }
+
+ return result;
+ }
+
+ // Signed overflows, infinities, and NaNs
+ if (x.s.hi > 0.0)
+ return INT64_MAX;
+ else
+ return INT64_MIN;
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ppc/fixtfdi.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
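A recurring idiom in this file is branch-free conditional negation: an arithmetic right shift smears the sign bit into a mask of 0 or all-ones, and (v ^ mask) - mask then either leaves v untouched or two's-complement negates it. In isolation, relying on the same arithmetic-shift and two's-complement behavior the library already assumes:

#include <assert.h>
#include <stdint.h>

// Negate v iff mask is -1 (all ones); leave it alone iff mask is 0.
// (v ^ 0) - 0 == v, and (v ^ -1) - (-1) == ~v + 1 == -v.
static int64_t cond_negate(int64_t v, int64_t mask) {
  return (v ^ mask) - mask;
}

int main(void) {
  uint64_t bits = UINT64_C(0x8000000000000123); // a "negative" bit pattern
  int64_t mask = (int64_t)bits >> 63;           // arithmetic shift smears: -1
  assert(mask == -1);
  assert(cond_negate(42, mask) == -42);
  assert(cond_negate(42, 0) == 42);
  return 0;
}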
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ppc/fixunstfdi.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ppc/fixunstfdi.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ppc/fixunstfdi.c (revision 351984)
@@ -0,0 +1,57 @@
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+// uint64_t __fixunstfdi(long double x);
+// This file implements the PowerPC 128-bit double-double -> uint64_t conversion
+
+#include "DD.h"
+
+uint64_t __fixunstfdi(long double input) {
+ const DD x = {.ld = input};
+ const doublebits hibits = {.d = x.s.hi};
+
+ const uint32_t highWordMinusOne =
+ (uint32_t)(hibits.x >> 32) - UINT32_C(0x3ff00000);
+
+ // If (1.0 - tiny) <= input < 0x1.0p64:
+ if (UINT32_C(0x04000000) > highWordMinusOne) {
+ const int unbiasedHeadExponent = highWordMinusOne >> 20;
+
+ uint64_t result = hibits.x & UINT64_C(0x000fffffffffffff); // mantissa(hi)
+ result |= UINT64_C(0x0010000000000000); // mantissa(hi) with implicit bit
+ result <<= 11; // mantissa(hi) left aligned in the int64 field.
+
+ // If the tail is non-zero, we need to patch in the tail bits.
+ if (0.0 != x.s.lo) {
+ const doublebits lobits = {.d = x.s.lo};
+ int64_t tailMantissa = lobits.x & INT64_C(0x000fffffffffffff);
+ tailMantissa |= INT64_C(0x0010000000000000);
+
+ // At this point we have the mantissa of |tail|
+
+ const int64_t negationMask = ((int64_t)(lobits.x)) >> 63;
+ tailMantissa = (tailMantissa ^ negationMask) - negationMask;
+
+ // Now we have the mantissa of tail as a signed 2s-complement integer
+
+ const int biasedTailExponent = (int)(lobits.x >> 52) & 0x7ff;
+
+ // Shift the tail mantissa into the right position, accounting for the
+ // bias of 11 that we shifted the head mantissa by.
+ tailMantissa >>=
+ (unbiasedHeadExponent - (biasedTailExponent - (1023 - 11)));
+
+ result += tailMantissa;
+ }
+
+ result >>= (63 - unbiasedHeadExponent);
+ return result;
+ }
+
+ // Edge cases are handled here, with saturation.
+ if (1.0 > x.s.hi)
+ return UINT64_C(0);
+ else
+ return UINT64_MAX;
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ppc/fixunstfdi.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ppc/fixunstfti.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ppc/fixunstfti.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ppc/fixunstfti.c (revision 351984)
@@ -0,0 +1,105 @@
+//===-- lib/builtins/ppc/fixunstfti.c - Convert long double->int128 *-C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements converting the 128-bit IBM/PowerPC long double
+// (double-double) data type to an unsigned 128-bit integer.
+//
+//===----------------------------------------------------------------------===//
+
+#include "../int_math.h"
+#define BIAS 1023
+
+// Convert long double into an unsigned 128-bit integer.
+__uint128_t __fixunstfti(long double input) {
+
+ // If we are trying to convert a NaN, return the NaN bit pattern.
+ if (crt_isnan(input)) {
+ return ((__uint128_t)0x7FF8000000000000ll) << 64 |
+ (__uint128_t)0x0000000000000000ll;
+ }
+
+ __uint128_t result, hiResult, loResult;
+ int hiExponent, loExponent, shift;
+ // The long double representation, with the high and low portions of
+ // the long double, and the corresponding bit patterns of each double.
+ union {
+ long double ld;
+ double d[2]; // [0] is the high double, [1] is the low double.
+ unsigned long long ull[2]; // High and low doubles as 64-bit integers.
+ } ldUnion;
+
+ // If the long double is less than 1.0 or negative,
+ // return 0.0.
+ if (input < 1.0)
+ return 0.0;
+
+ // Retrieve the 64-bit patterns of high and low doubles.
+ // Compute the unbiased exponent of both high and low doubles by
+ // removing the signs, isolating the exponent, and subtracting
+ // the bias from it.
+ ldUnion.ld = input;
+ hiExponent = ((ldUnion.ull[0] & 0x7FFFFFFFFFFFFFFFll) >> 52) - BIAS;
+ loExponent = ((ldUnion.ull[1] & 0x7FFFFFFFFFFFFFFFll) >> 52) - BIAS;
+
+ // Convert each double into int64; they will be added to the int128 result.
+ // CASE 1: High or low double fits in int64
+ // - Convert each double normally into int64.
+ //
+ // CASE 2: High or low double does not fit in int64
+ // - Scale the double to fit within a 64-bit integer
+ // - Calculate the shift (amount to scale the double by in the int128)
+ // - Clear all the bits of the exponent (with 0x800FFFFFFFFFFFFF)
+ // - Add BIAS+54 (0x4350000000000000) to the exponent to correct the value
+ // - Scale (move) the double to the correct place in the int128
+ // (Move it by 2^53 places)
+ //
+ // Note: If the high double is assumed to be positive, an unsigned conversion
+ // from long double to 64-bit integer is needed. The low double can be either
+ // positive or negative, so a signed conversion is needed to retain the result
+ // of the low double and to ensure it does not simply get converted to 0.
+
+ // CASE 1 - High double fits in int64.
+ if (hiExponent < 63) {
+ hiResult = (unsigned long long)ldUnion.d[0];
+ } else if (hiExponent < 128) {
+ // CASE 2 - High double does not fit in int64, scale and convert it.
+ shift = hiExponent - 54;
+ ldUnion.ull[0] &= 0x800FFFFFFFFFFFFFll;
+ ldUnion.ull[0] |= 0x4350000000000000ll;
+ hiResult = (unsigned long long)ldUnion.d[0];
+ hiResult <<= shift;
+ } else {
+ // Detect cases for overflow. When the exponent of the high
+ // double is greater than 128 bits and when the long double
+ // input is positive, return the max 128-bit integer.
+ // For negative inputs with exponents > 128, return 1, like gcc.
+ if (ldUnion.d[0] > 0) {
+ return ((__uint128_t)0xFFFFFFFFFFFFFFFFll) << 64 |
+ (__uint128_t)0xFFFFFFFFFFFFFFFFll;
+ } else {
+ return ((__uint128_t)0x0000000000000000ll) << 64 |
+ (__uint128_t)0x0000000000000001ll;
+ }
+ }
+
+ // CASE 1 - Low double fits in int64.
+ if (loExponent < 63) {
+ loResult = (long long)ldUnion.d[1];
+ } else {
+ // CASE 2 - Low double does not fit in int64, scale and convert it.
+ shift = loExponent - 54;
+ ldUnion.ull[1] &= 0x800FFFFFFFFFFFFFll;
+ ldUnion.ull[1] |= 0x4350000000000000ll;
+ loResult = (long long)ldUnion.d[1];
+ loResult <<= shift;
+ }
+
+ // Add the high and low doublewords together to form a 128 bit integer.
+ result = loResult + hiResult;
+ return result;
+}
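The CASE 2 branches above make an out-of-int64-range double convertible by rewriting its exponent field in place, converting, and shifting the integer result back to its true magnitude. A sketch of that exponent-rewrite trick for a single positive double, assuming IEEE-754 doubles and a compiler providing the __uint128_t extension (big_double_to_u128 is an illustrative name):

#include <assert.h>
#include <stdint.h>

// Convert a positive double with unbiased exponent in [63, 128) to uint128
// by rewriting the exponent field, as in __fixunstfti's CASE 2.
static __uint128_t big_double_to_u128(double d) {
  union { double d; uint64_t u; } bits = {.d = d};
  int exponent = (int)((bits.u >> 52) & 0x7ff) - 1023; // unbiased exponent
  int shift = exponent - 54;
  bits.u &= UINT64_C(0x800FFFFFFFFFFFFF);  // clear the exponent field
  bits.u |= UINT64_C(0x4350000000000000);  // re-expose the 53-bit mantissa
  __uint128_t result = (unsigned long long)bits.d; // mantissa, pre-scaled
  return result << shift;                  // move it to its true magnitude
}

int main(void) {
  // 2^100 is exactly representable, so the round trip must be exact.
  __uint128_t r = big_double_to_u128(0x1.0p100);
  assert((uint64_t)(r >> 64) == (UINT64_C(1) << 36)); // 2^100 == 2^36 * 2^64
  assert((uint64_t)r == 0);
  return 0;
}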
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ppc/floatditf.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ppc/floatditf.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ppc/floatditf.c (revision 351984)
@@ -0,0 +1,33 @@
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+// long double __floatditf(long long x);
+// This file implements the PowerPC long long -> long double conversion
+
+#include "DD.h"
+
+long double __floatditf(int64_t a) {
+
+ static const double twop32 = 0x1.0p32;
+ static const double twop52 = 0x1.0p52;
+
+ doublebits low = {.d = twop52};
+ low.x |= a & UINT64_C(0x00000000ffffffff); // 0x1.0p52 + low 32 bits of a.
+
+ const double high_addend = (double)((int32_t)(a >> 32)) * twop32 - twop52;
+
+ // At this point, we have two double precision numbers
+ // high_addend and low.d, and we wish to return their sum
+ // as a canonicalized long double:
+
+ // This implementation sets the inexact flag spuriously.
+ // This could be avoided, but at some substantial cost.
+
+ DD result;
+
+ result.s.hi = high_addend + low.d;
+ result.s.lo = (high_addend - result.s.hi) + low.d;
+
+ return result.ld;
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ppc/floatditf.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ppc/floattitf.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ppc/floattitf.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ppc/floattitf.c (revision 351984)
@@ -0,0 +1,46 @@
+//===-- lib/builtins/ppc/floattitf.c - Convert int128->long double -*-C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements converting a signed 128-bit integer to a 128-bit
+// IBM/PowerPC long double (double-double) value.
+//
+//===----------------------------------------------------------------------===//
+
+#include <stdint.h>
+
+// Conversions from signed and unsigned 64-bit int to long double.
+long double __floatditf(int64_t);
+long double __floatunditf(uint64_t);
+
+// Convert a signed 128-bit integer to long double.
+// This uses the following property: Let hi and lo be 64-bits each,
+// and let signed_val_k() and unsigned_val_k() be the value of the
+// argument interpreted as a signed or unsigned k-bit integer. Then,
+//
+// signed_val_128(hi,lo) = signed_val_64(hi) * 2^64 + unsigned_val_64(lo)
+// = (long double)hi * 2^64 + (long double)lo,
+//
+// where (long double)hi and (long double)lo are signed and
+// unsigned 64-bit integer to long double conversions, respectively.
+long double __floattitf(__int128_t arg) {
+ // Split the int128 argument into 64-bit high and low int64 parts.
+ int64_t ArgHiPart = (int64_t)(arg >> 64);
+ uint64_t ArgLoPart = (uint64_t)arg;
+
+ // Convert each 64-bit part into long double. The high part
+ // must be a signed conversion and the low part an unsigned conversion
+ // to ensure the correct result.
+ long double ConvertedHiPart = __floatditf(ArgHiPart);
+ long double ConvertedLoPart = __floatunditf(ArgLoPart);
+
+ // The low bit of ArgHiPart corresponds to the 2^64 bit in arg.
+ // Multiply the high part by 2^64 to undo the right shift by 64-bits
+ // done in the splitting. Then, add to the low part to obtain the
+ // final result.
+ return ((ConvertedHiPart * 0x1.0p64) + ConvertedLoPart);
+}
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ppc/floatunditf.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ppc/floatunditf.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ppc/floatunditf.c (revision 351984)
@@ -0,0 +1,39 @@
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+// long double __floatunditf(unsigned long long x);
+// This file implements the PowerPC unsigned long long -> long double conversion
+
+#include "DD.h"
+
+long double __floatunditf(uint64_t a) {
+
+ // Begins with an exact copy of the code from __floatundidf
+
+ static const double twop52 = 0x1.0p52;
+ static const double twop84 = 0x1.0p84;
+ static const double twop84_plus_twop52 = 0x1.00000001p84;
+
+ doublebits high = {.d = twop84};
+ doublebits low = {.d = twop52};
+
+ high.x |= a >> 32; // 0x1.0p84 + high 32 bits of a
+ low.x |= a & UINT64_C(0x00000000ffffffff); // 0x1.0p52 + low 32 bits of a
+
+ const double high_addend = high.d - twop84_plus_twop52;
+
+ // At this point, we have two double precision numbers
+ // high_addend and low.d, and we wish to return their sum
+ // as a canonicalized long double:
+
+ // This implementation sets the inexact flag spuriously.
+ // This could be avoided, but at some substantial cost.
+
+ DD result;
+
+ result.s.hi = high_addend + low.d;
+ result.s.lo = (high_addend - result.s.hi) + low.d;
+
+ return result.ld;
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ppc/floatunditf.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
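Both conversions above rest on the classic exact-integer trick: OR the low 32 bits of the integer into the zero mantissa of 0x1.0p52, and a single subtraction then recovers those bits as a double with no rounding at any step. Isolated, assuming IEEE-754 doubles (u32_to_double is an illustrative name):

#include <assert.h>
#include <stdint.h>

// Exact uint32 -> double without an integer-to-FP instruction:
// 0x1.0p52 has an all-zero mantissa, so OR-ing 32 bits into it yields
// exactly 2^52 + lo, and subtracting 2^52 leaves lo, error-free.
static double u32_to_double(uint32_t lo) {
  union { double d; uint64_t x; } u = {.d = 0x1.0p52};
  u.x |= lo;
  return u.d - 0x1.0p52;
}

int main(void) {
  assert(u32_to_double(0) == 0.0);
  assert(u32_to_double(0xffffffffu) == 4294967295.0);
  assert(u32_to_double(12345) == 12345.0);
  return 0;
}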
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ppc/gcc_qadd.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ppc/gcc_qadd.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ppc/gcc_qadd.c (revision 351984)
@@ -0,0 +1,74 @@
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+// long double __gcc_qadd(long double x, long double y);
+// This file implements the PowerPC 128-bit double-double add operation.
+// This implementation is shamelessly cribbed from Apple's DDRT, circa 1993(!)
+
+#include "DD.h"
+
+long double __gcc_qadd(long double x, long double y) {
+ static const uint32_t infinityHi = UINT32_C(0x7ff00000);
+
+ DD dst = {.ld = x}, src = {.ld = y};
+
+ register double A = dst.s.hi, a = dst.s.lo, B = src.s.hi, b = src.s.lo;
+
+ // If both operands are zero:
+ if ((A == 0.0) && (B == 0.0)) {
+ dst.s.hi = A + B;
+ dst.s.lo = 0.0;
+ return dst.ld;
+ }
+
+ // If either operand is NaN or infinity:
+ const doublebits abits = {.d = A};
+ const doublebits bbits = {.d = B};
+ if ((((uint32_t)(abits.x >> 32) & infinityHi) == infinityHi) ||
+ (((uint32_t)(bbits.x >> 32) & infinityHi) == infinityHi)) {
+ dst.s.hi = A + B;
+ dst.s.lo = 0.0;
+ return dst.ld;
+ }
+
+ // If the computation overflows:
+ // This may be playing things a little bit fast and loose, but it will do for
+ // a start.
+ const double testForOverflow = A + (B + (a + b));
+ const doublebits testbits = {.d = testForOverflow};
+ if (((uint32_t)(testbits.x >> 32) & infinityHi) == infinityHi) {
+ dst.s.hi = testForOverflow;
+ dst.s.lo = 0.0;
+ return dst.ld;
+ }
+
+ double H, h;
+ double T, t;
+ double W, w;
+ double Y;
+
+ H = B + (A - (A + B));
+ T = b + (a - (a + b));
+ h = A + (B - (A + B));
+ t = a + (b - (a + b));
+
+ if (local_fabs(A) <= local_fabs(B))
+ w = (a + b) + h;
+ else
+ w = (a + b) + H;
+
+ W = (A + B) + w;
+ Y = (A + B) - W;
+ Y += w;
+
+ if (local_fabs(a) <= local_fabs(b))
+ w = t + Y;
+ else
+ w = T + Y;
+
+ dst.s.hi = Y = W + w;
+ dst.s.lo = (W - Y) + w;
+
+ return dst.ld;
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ppc/gcc_qadd.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
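The H/h/T/t computations in __gcc_qadd recover the exact rounding error of each double addition; that is what makes the double-double representation work. The underlying identity is Knuth's TwoSum, shown here in its branch-free form (a sketch: it requires round-to-nearest IEEE arithmetic and no reassociation, so compile without -ffast-math):

#include <assert.h>

// Knuth TwoSum: s + err == a + b exactly, where s = fl(a + b).
static double two_sum(double a, double b, double *err) {
  double s = a + b;
  double bv = s - a;                // the part of s that came from b
  *err = (a - (s - bv)) + (b - bv); // what rounding discarded
  return s;
}

int main(void) {
  double err;
  double s = two_sum(1.0, 0x1.0p-60, &err); // 2^-60 is lost in 1.0 + tiny...
  assert(s == 1.0);
  assert(err == 0x1.0p-60);                 // ...but TwoSum recovers it
  return 0;
}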
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ppc/gcc_qdiv.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ppc/gcc_qdiv.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ppc/gcc_qdiv.c (revision 351984)
@@ -0,0 +1,52 @@
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+// long double __gcc_qdiv(long double x, long double y);
+// This file implements the PowerPC 128-bit double-double division operation.
+// This implementation is shamelessly cribbed from Apple's DDRT, circa 1993(!)
+
+#include "DD.h"
+
+long double __gcc_qdiv(long double a, long double b) {
+ static const uint32_t infinityHi = UINT32_C(0x7ff00000);
+ DD dst = {.ld = a}, src = {.ld = b};
+
+ register double x = dst.s.hi, x1 = dst.s.lo, y = src.s.hi, y1 = src.s.lo;
+
+ double yHi, yLo, qHi, qLo;
+ double yq, tmp, q;
+
+ q = x / y;
+
+ // Detect special cases
+ if (q == 0.0) {
+ dst.s.hi = q;
+ dst.s.lo = 0.0;
+ return dst.ld;
+ }
+
+ const doublebits qBits = {.d = q};
+ if (((uint32_t)(qBits.x >> 32) & infinityHi) == infinityHi) {
+ dst.s.hi = q;
+ dst.s.lo = 0.0;
+ return dst.ld;
+ }
+
+ yHi = high26bits(y);
+ qHi = high26bits(q);
+
+ yq = y * q;
+ yLo = y - yHi;
+ qLo = q - qHi;
+
+ tmp = LOWORDER(yq, yHi, yLo, qHi, qLo);
+ tmp = (x - yq) - tmp;
+ tmp = ((tmp + x1) - y1 * q) / y;
+ x = q + tmp;
+
+ dst.s.lo = (q - x) + tmp;
+ dst.s.hi = x;
+
+ return dst.ld;
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ppc/gcc_qdiv.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ppc/gcc_qmul.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ppc/gcc_qmul.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ppc/gcc_qmul.c (revision 351984)
@@ -0,0 +1,50 @@
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+// long double __gcc_qmul(long double x, long double y);
+// This file implements the PowerPC 128-bit double-double multiply operation.
+// This implementation is shamelessly cribbed from Apple's DDRT, circa 1993(!)
+
+#include "DD.h"
+
+long double __gcc_qmul(long double x, long double y) {
+ static const uint32_t infinityHi = UINT32_C(0x7ff00000);
+ DD dst = {.ld = x}, src = {.ld = y};
+
+ register double A = dst.s.hi, a = dst.s.lo, B = src.s.hi, b = src.s.lo;
+
+ double aHi, aLo, bHi, bLo;
+ double ab, tmp, tau;
+
+ ab = A * B;
+
+ // Detect special cases
+ if (ab == 0.0) {
+ dst.s.hi = ab;
+ dst.s.lo = 0.0;
+ return dst.ld;
+ }
+
+ const doublebits abBits = {.d = ab};
+ if (((uint32_t)(abBits.x >> 32) & infinityHi) == infinityHi) {
+ dst.s.hi = ab;
+ dst.s.lo = 0.0;
+ return dst.ld;
+ }
+
+ // Generic cases handled here.
+ aHi = high26bits(A);
+ bHi = high26bits(B);
+ aLo = A - aHi;
+ bLo = B - bHi;
+
+ tmp = LOWORDER(ab, aHi, aLo, bHi, bLo);
+ tmp += (A * b + a * B);
+ tau = ab + tmp;
+
+ dst.s.lo = (ab - tau) + tmp;
+ dst.s.hi = tau;
+
+ return dst.ld;
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ppc/gcc_qmul.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
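high26bits() plus the LOWORDER macro implement Dekker's exact product error: split each factor into a high part with at most 26 significant bits and a low remainder, and the four partial products then recover exactly what rounding dropped from a*b. Standalone, with the same mask and the same grouping as the macro (two_prod is an illustrative name; requires round-to-nearest and no FMA contraction):

#include <assert.h>
#include <stdint.h>

// Keep only the upper 26 bits of the mantissa, as high26bits() does.
static double high26(double v) {
  union { double d; uint64_t x; } u = {.d = v};
  u.x &= UINT64_C(0xfffffffff8000000);
  return u.d;
}

// Dekker two-product: p + err == a * b exactly.
static double two_prod(double a, double b, double *err) {
  double p = a * b;
  double ah = high26(a), al = a - ah;
  double bh = high26(b), bl = b - bh;
  // Same grouping as the LOWORDER macro above.
  *err = (((ah * bh - p) + ah * bl) + al * bh) + al * bl;
  return p;
}

int main(void) {
  double err;
  double p = two_prod(1.0 + 0x1.0p-30, 1.0 + 0x1.0p-30, &err);
  // (1 + 2^-30)^2 == 1 + 2^-29 + 2^-60; the 2^-60 term is the error.
  assert(p == 1.0 + 0x1.0p-29);
  assert(err == 0x1.0p-60);
  return 0;
}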
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ppc/gcc_qsub.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ppc/gcc_qsub.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ppc/gcc_qsub.c (revision 351984)
@@ -0,0 +1,74 @@
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+// long double __gcc_qsub(long double x, long double y);
+// This file implements the PowerPC 128-bit double-double add operation.
+// This implementation is shamelessly cribbed from Apple's DDRT, circa 1993(!)
+
+#include "DD.h"
+
+long double __gcc_qsub(long double x, long double y) {
+ static const uint32_t infinityHi = UINT32_C(0x7ff00000);
+
+ DD dst = {.ld = x}, src = {.ld = y};
+
+ register double A = dst.s.hi, a = dst.s.lo, B = -src.s.hi, b = -src.s.lo;
+
+ // If both operands are zero:
+ if ((A == 0.0) && (B == 0.0)) {
+ dst.s.hi = A + B;
+ dst.s.lo = 0.0;
+ return dst.ld;
+ }
+
+ // If either operand is NaN or infinity:
+ const doublebits abits = {.d = A};
+ const doublebits bbits = {.d = B};
+ if ((((uint32_t)(abits.x >> 32) & infinityHi) == infinityHi) ||
+ (((uint32_t)(bbits.x >> 32) & infinityHi) == infinityHi)) {
+ dst.s.hi = A + B;
+ dst.s.lo = 0.0;
+ return dst.ld;
+ }
+
+ // If the computation overflows:
+ // This may be playing things a little bit fast and loose, but it will do for
+ // a start.
+ const double testForOverflow = A + (B + (a + b));
+ const doublebits testbits = {.d = testForOverflow};
+ if (((uint32_t)(testbits.x >> 32) & infinityHi) == infinityHi) {
+ dst.s.hi = testForOverflow;
+ dst.s.lo = 0.0;
+ return dst.ld;
+ }
+
+ double H, h;
+ double T, t;
+ double W, w;
+ double Y;
+
+ H = B + (A - (A + B));
+ T = b + (a - (a + b));
+ h = A + (B - (A + B));
+ t = a + (b - (a + b));
+
+ if (local_fabs(A) <= local_fabs(B))
+ w = (a + b) + h;
+ else
+ w = (a + b) + H;
+
+ W = (A + B) + w;
+ Y = (A + B) - W;
+ Y += w;
+
+ if (local_fabs(a) <= local_fabs(b))
+ w = t + Y;
+ else
+ w = T + Y;
+
+ dst.s.hi = Y = W + w;
+ dst.s.lo = (W - Y) + w;
+
+ return dst.ld;
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ppc/gcc_qsub.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ppc/multc3.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ppc/multc3.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ppc/multc3.c (revision 351984)
@@ -0,0 +1,85 @@
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+#include "../int_math.h"
+#include "DD.h"
+
+#define makeFinite(x) \
+ { \
+ (x).s.hi = crt_copysign(crt_isinf((x).s.hi) ? 1.0 : 0.0, (x).s.hi); \
+ (x).s.lo = 0.0; \
+ }
+
+#define zeroNaN(x) \
+ { \
+ if (crt_isnan((x).s.hi)) { \
+ (x).s.hi = crt_copysign(0.0, (x).s.hi); \
+ (x).s.lo = 0.0; \
+ } \
+ }
+
+long double _Complex __multc3(long double a, long double b, long double c,
+ long double d) {
+ long double ac = __gcc_qmul(a, c);
+ long double bd = __gcc_qmul(b, d);
+ long double ad = __gcc_qmul(a, d);
+ long double bc = __gcc_qmul(b, c);
+
+ DD real = {.ld = __gcc_qsub(ac, bd)};
+ DD imag = {.ld = __gcc_qadd(ad, bc)};
+
+ if (crt_isnan(real.s.hi) && crt_isnan(imag.s.hi)) {
+ int recalc = 0;
+
+ DD aDD = {.ld = a};
+ DD bDD = {.ld = b};
+ DD cDD = {.ld = c};
+ DD dDD = {.ld = d};
+
+ if (crt_isinf(aDD.s.hi) || crt_isinf(bDD.s.hi)) {
+ makeFinite(aDD);
+ makeFinite(bDD);
+ zeroNaN(cDD);
+ zeroNaN(dDD);
+ recalc = 1;
+ }
+
+ if (crt_isinf(cDD.s.hi) || crt_isinf(dDD.s.hi)) {
+ makeFinite(cDD);
+ makeFinite(dDD);
+ zeroNaN(aDD);
+ zeroNaN(bDD);
+ recalc = 1;
+ }
+
+ if (!recalc) {
+ DD acDD = {.ld = ac};
+ DD bdDD = {.ld = bd};
+ DD adDD = {.ld = ad};
+ DD bcDD = {.ld = bc};
+
+ if (crt_isinf(acDD.s.hi) || crt_isinf(bdDD.s.hi) ||
+ crt_isinf(adDD.s.hi) || crt_isinf(bcDD.s.hi)) {
+ zeroNaN(aDD);
+ zeroNaN(bDD);
+ zeroNaN(cDD);
+ zeroNaN(dDD);
+ recalc = 1;
+ }
+ }
+
+ if (recalc) {
+ real.s.hi = CRT_INFINITY * (aDD.s.hi * cDD.s.hi - bDD.s.hi * dDD.s.hi);
+ real.s.lo = 0.0;
+ imag.s.hi = CRT_INFINITY * (aDD.s.hi * dDD.s.hi + bDD.s.hi * cDD.s.hi);
+ imag.s.lo = 0.0;
+ }
+ }
+
+ long double _Complex z;
+ __real__ z = real.ld;
+ __imag__ z = imag.ld;
+
+ return z;
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ppc/multc3.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
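__multc3 enforces the C Annex G recovery rules: when the textbook (ac - bd, ad + bc) evaluates to NaN in both parts but an operand was infinite, the product must still come out infinite. Whether a host toolchain honors this is observable from ordinary complex multiplication, since the compiler routes it through its own __mulXc3 routine. A small check, assuming a C11 <complex.h> with the CMPLX macro and an Annex-G-conforming implementation:

#include <assert.h>
#include <complex.h>
#include <math.h>

int main(void) {
  // (inf + i*NaN) * (2 + i*0): the naive partial products give NaN for
  // both parts, but Annex G requires an infinite result because one
  // factor is infinite and the other is finite and nonzero.
  double complex a = CMPLX(INFINITY, NAN);
  double complex b = CMPLX(2.0, 0.0);
  double complex z = a * b;
  assert(isinf(creal(z)) || isinf(cimag(z)));
  return 0;
}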
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ppc/restFP.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ppc/restFP.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ppc/restFP.S (revision 351984)
@@ -0,0 +1,45 @@
+//===-- restFP.S - Implement restFP ---------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+//
+// Helper function used by compiler to restore ppc floating point registers at
+// the end of the function epilog. This function returns to the address
+// in the LR slot, so a function epilog must branch (b), not branch and link
+// (bl), to this function.
+// If the compiler wants to restore f27..f31, it does a "b restFP+52"
+//
+// This function should never be exported by a shared library. Each linkage
+// unit carries its own copy of this function.
+//
+DEFINE_COMPILERRT_PRIVATE_FUNCTION_UNMANGLED(restFP)
+ lfd f14,-144(r1)
+ lfd f15,-136(r1)
+ lfd f16,-128(r1)
+ lfd f17,-120(r1)
+ lfd f18,-112(r1)
+ lfd f19,-104(r1)
+ lfd f20,-96(r1)
+ lfd f21,-88(r1)
+ lfd f22,-80(r1)
+ lfd f23,-72(r1)
+ lfd f24,-64(r1)
+ lfd f25,-56(r1)
+ lfd f26,-48(r1)
+ lfd f27,-40(r1)
+ lfd f28,-32(r1)
+ lfd f29,-24(r1)
+ lfd f30,-16(r1)
+ lfd f31,-8(r1)
+ lwz r0,8(r1)
+ mtlr r0
+ blr
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ppc/restFP.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ppc/saveFP.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ppc/saveFP.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ppc/saveFP.S (revision 351984)
@@ -0,0 +1,42 @@
+//===-- saveFP.S - Implement saveFP ---------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+//
+// Helper function used by compiler to save ppc floating point registers in
+// function prologs. This routine also saves r0 in the LR slot.
+// If the compiler wants to save f27..f31, it does a "bl saveFP+52"
+//
+// This function should never be exported by a shared library. Each linkage
+// unit carries its own copy of this function.
+//
+DEFINE_COMPILERRT_PRIVATE_FUNCTION_UNMANGLED(saveFP)
+ stfd f14,-144(r1)
+ stfd f15,-136(r1)
+ stfd f16,-128(r1)
+ stfd f17,-120(r1)
+ stfd f18,-112(r1)
+ stfd f19,-104(r1)
+ stfd f20,-96(r1)
+ stfd f21,-88(r1)
+ stfd f22,-80(r1)
+ stfd f23,-72(r1)
+ stfd f24,-64(r1)
+ stfd f25,-56(r1)
+ stfd f26,-48(r1)
+ stfd f27,-40(r1)
+ stfd f28,-32(r1)
+ stfd f29,-24(r1)
+ stfd f30,-16(r1)
+ stfd f31,-8(r1)
+ stw r0,8(r1)
+ blr
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ppc/saveFP.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/riscv/mulsi3.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/riscv/mulsi3.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/riscv/mulsi3.S (revision 351984)
@@ -0,0 +1,27 @@
+//===--- mulsi3.S - Integer multiplication routines ---===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#if !defined(__riscv_mul) && __riscv_xlen == 32
+ .text
+ .align 2
+
+ .globl __mulsi3
+ .type __mulsi3, @function
+__mulsi3:
+ mv a2, a0
+ mv a0, zero
+.L1:
+ andi a3, a1, 1
+ beqz a3, .L2
+ add a0, a0, a2
+.L2:
+ srli a1, a1, 1
+ slli a2, a2, 1
+ bnez a1, .L1
+ ret
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/riscv/mulsi3.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
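The routine above is plain shift-and-add multiplication for RV32 targets without the M extension: for each set bit of the multiplier, add the correspondingly shifted multiplicand. The same loop in C, using unsigned arithmetic so wrap-around matches 32-bit register behavior (mulsi3_demo is an illustrative name):

#include <assert.h>
#include <stdint.h>

// Shift-and-add multiply, mirroring the a0/a1/a2 register roles above:
// a holds the shifted multiplicand, b the remaining multiplier bits.
static uint32_t mulsi3_demo(uint32_t a, uint32_t b) {
  uint32_t result = 0;
  while (b != 0) {
    if (b & 1)       // beqz a3, .L2 skips the add when the bit is clear
      result += a;   // add a0, a0, a2
    b >>= 1;         // srli a1, a1, 1
    a <<= 1;         // slli a2, a2, 1
  }
  return result;
}

int main(void) {
  assert(mulsi3_demo(7, 9) == 63);
  assert(mulsi3_demo(0xffffffffu, 2) == 0xfffffffeu); // wraps mod 2^32
  return 0;
}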
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/subdf3.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/subdf3.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/subdf3.c (revision 351984)
@@ -0,0 +1,28 @@
+//===-- lib/subdf3.c - Double-precision subtraction ---------------*- C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements double-precision soft-float subtraction with the
+// IEEE-754 default rounding (to nearest, ties to even).
+//
+//===----------------------------------------------------------------------===//
+
+#define DOUBLE_PRECISION
+#include "fp_lib.h"
+
+// Subtraction; flip the sign bit of b and add.
+COMPILER_RT_ABI fp_t __subdf3(fp_t a, fp_t b) {
+ return __adddf3(a, fromRep(toRep(b) ^ signBit));
+}
+
+#if defined(__ARM_EABI__)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+AEABI_RTABI fp_t __aeabi_dsub(fp_t a, fp_t b) { return __subdf3(a, b); }
+#else
+COMPILER_RT_ALIAS(__subdf3, __aeabi_dsub)
+#endif
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/subdf3.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
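__subdf3 can be one line because IEEE-754 subtraction is exactly addition of the sign-flipped operand, zeros and infinities included. The toRep/fromRep/signBit machinery reduces to the following sketch, substituting native addition for the soft-float __adddf3:

#include <assert.h>
#include <stdint.h>

// Flip bit 63 of b's representation, then add: exactly a - b.
static double sub_via_signflip(double a, double b) {
  union { double d; uint64_t x; } rep = {.d = b};
  rep.x ^= UINT64_C(0x8000000000000000);  // the signBit of fp_lib.h
  return a + rep.d;
}

int main(void) {
  assert(sub_via_signflip(5.0, 3.0) == 2.0);
  assert(sub_via_signflip(0.0, 0.0) == 0.0);  // +0 - +0 == +0, not -0
  assert(sub_via_signflip(1.0, -2.0) == 3.0);
  return 0;
}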
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/subsf3.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/subsf3.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/subsf3.c (revision 351984)
@@ -0,0 +1,28 @@
+//===-- lib/subsf3.c - Single-precision subtraction ---------------*- C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements single-precision soft-float subtraction with the
+// IEEE-754 default rounding (to nearest, ties to even).
+//
+//===----------------------------------------------------------------------===//
+
+#define SINGLE_PRECISION
+#include "fp_lib.h"
+
+// Subtraction; flip the sign bit of b and add.
+COMPILER_RT_ABI fp_t __subsf3(fp_t a, fp_t b) {
+ return __addsf3(a, fromRep(toRep(b) ^ signBit));
+}
+
+#if defined(__ARM_EABI__)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+AEABI_RTABI fp_t __aeabi_fsub(fp_t a, fp_t b) { return __subsf3(a, b); }
+#else
+COMPILER_RT_ALIAS(__subsf3, __aeabi_fsub)
+#endif
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/subsf3.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/subtf3.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/subtf3.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/subtf3.c (revision 351984)
@@ -0,0 +1,25 @@
+//===-- lib/subtf3.c - Quad-precision subtraction -----------------*- C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements quad-precision soft-float subtraction with the
+// IEEE-754 default rounding (to nearest, ties to even).
+//
+//===----------------------------------------------------------------------===//
+
+#define QUAD_PRECISION
+#include "fp_lib.h"
+
+#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
+COMPILER_RT_ABI fp_t __addtf3(fp_t a, fp_t b);
+
+// Subtraction; flip the sign bit of b and add.
+COMPILER_RT_ABI fp_t __subtf3(fp_t a, fp_t b) {
+ return __addtf3(a, fromRep(toRep(b) ^ signBit));
+}
+
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/subtf3.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/subvdi3.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/subvdi3.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/subvdi3.c (revision 351984)
@@ -0,0 +1,29 @@
+//===-- subvdi3.c - Implement __subvdi3 -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __subvdi3 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+// Returns: a - b
+
+// Effects: aborts if a - b overflows
+
+COMPILER_RT_ABI di_int __subvdi3(di_int a, di_int b) {
+ di_int s = (du_int)a - (du_int)b;
+ if (b >= 0) {
+ if (s > a)
+ compilerrt_abort();
+ } else {
+ if (s <= a)
+ compilerrt_abort();
+ }
+ return s;
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/subvdi3.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
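All three __subv* routines share one predicate: form the wrapped difference in unsigned arithmetic, then observe that with b >= 0 the true result can only be <= a (so s > a means it wrapped), and with b < 0 it can only be > a (so s <= a means it wrapped). The same check as a non-aborting helper, an illustrative sketch rather than the library interface:

#include <assert.h>
#include <stdint.h>

// Returns 1 and stores a - b if it fits in int64_t, else returns 0.
// The unsigned-to-signed narrowing matches the two's-complement
// behavior the library already assumes.
static int checked_sub64(int64_t a, int64_t b, int64_t *out) {
  int64_t s = (int64_t)((uint64_t)a - (uint64_t)b); // wraps, never traps
  if (b >= 0 ? s > a : s <= a)  // wrapped past the representable range
    return 0;
  *out = s;
  return 1;
}

int main(void) {
  int64_t r;
  assert(checked_sub64(5, 7, &r) && r == -2);
  assert(!checked_sub64(INT64_MIN, 1, &r));   // would be INT64_MIN - 1
  assert(!checked_sub64(INT64_MAX, -1, &r));  // would be INT64_MAX + 1
  return 0;
}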
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/subvsi3.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/subvsi3.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/subvsi3.c (revision 351984)
@@ -0,0 +1,29 @@
+//===-- subvsi3.c - Implement __subvsi3 -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __subvsi3 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+// Returns: a - b
+
+// Effects: aborts if a - b overflows
+
+COMPILER_RT_ABI si_int __subvsi3(si_int a, si_int b) {
+ si_int s = (su_int)a - (su_int)b;
+ if (b >= 0) {
+ if (s > a)
+ compilerrt_abort();
+ } else {
+ if (s <= a)
+ compilerrt_abort();
+ }
+ return s;
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/subvsi3.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/subvti3.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/subvti3.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/subvti3.c (revision 351984)
@@ -0,0 +1,33 @@
+//===-- subvti3.c - Implement __subvti3 -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __subvti3 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+#ifdef CRT_HAS_128BIT
+
+// Returns: a - b
+
+// Effects: aborts if a - b overflows
+
+COMPILER_RT_ABI ti_int __subvti3(ti_int a, ti_int b) {
+ ti_int s = (tu_int)a - (tu_int)b;
+ if (b >= 0) {
+ if (s > a)
+ compilerrt_abort();
+ } else {
+ if (s <= a)
+ compilerrt_abort();
+ }
+ return s;
+}
+
+#endif // CRT_HAS_128BIT
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/subvti3.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/trampoline_setup.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/trampoline_setup.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/trampoline_setup.c (revision 351984)
@@ -0,0 +1,43 @@
+//===----- trampoline_setup.c - Implement __trampoline_setup -------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+extern void __clear_cache(void *start, void *end);
+
+// The ppc compiler generates calls to __trampoline_setup() when creating
+// trampoline functions on the stack for use with nested functions.
+// This function creates a custom 40-byte trampoline function on the stack
+// which loads r11 with a pointer to the outer function's locals
+// and then jumps to the target nested function.
+
+#if __ppc__ && !defined(__powerpc64__)
+COMPILER_RT_ABI void __trampoline_setup(uint32_t *trampOnStack,
+ int trampSizeAllocated,
+ const void *realFunc, void *localsPtr) {
+ // should never happen, but if compiler did not allocate
+ // enough space on stack for the trampoline, abort
+ if (trampSizeAllocated < 40)
+ compilerrt_abort();
+
+ // create trampoline
+ trampOnStack[0] = 0x7c0802a6; // mflr r0
+ trampOnStack[1] = 0x4800000d; // bl Lbase
+ trampOnStack[2] = (uint32_t)realFunc;
+ trampOnStack[3] = (uint32_t)localsPtr;
+ trampOnStack[4] = 0x7d6802a6; // Lbase: mflr r11
+ trampOnStack[5] = 0x818b0000; // lwz r12,0(r11)
+ trampOnStack[6] = 0x7c0803a6; // mtlr r0
+ trampOnStack[7] = 0x7d8903a6; // mtctr r12
+ trampOnStack[8] = 0x816b0004; // lwz r11,4(r11)
+ trampOnStack[9] = 0x4e800420; // bctr
+
+ // clear instruction cache
+ __clear_cache(trampOnStack, &trampOnStack[10]);
+}
+#endif // __ppc__ && !defined(__powerpc64__)
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/trampoline_setup.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/truncdfhf2.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/truncdfhf2.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/truncdfhf2.c (revision 351984)
@@ -0,0 +1,21 @@
+//===-- lib/truncdfhf2.c - double -> half conversion --------------*- C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#define SRC_DOUBLE
+#define DST_HALF
+#include "fp_trunc_impl.inc"
+
+COMPILER_RT_ABI uint16_t __truncdfhf2(double a) { return __truncXfYf2__(a); }
+
+#if defined(__ARM_EABI__)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+AEABI_RTABI uint16_t __aeabi_d2h(double a) { return __truncdfhf2(a); }
+#else
+COMPILER_RT_ALIAS(__truncdfhf2, __aeabi_d2h)
+#endif
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/truncdfhf2.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/truncdfsf2.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/truncdfsf2.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/truncdfsf2.c (revision 351984)
@@ -0,0 +1,21 @@
+//===-- lib/truncdfsf2.c - double -> single conversion ------------*- C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#define SRC_DOUBLE
+#define DST_SINGLE
+#include "fp_trunc_impl.inc"
+
+COMPILER_RT_ABI float __truncdfsf2(double a) { return __truncXfYf2__(a); }
+
+#if defined(__ARM_EABI__)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+AEABI_RTABI float __aeabi_d2f(double a) { return __truncdfsf2(a); }
+#else
+COMPILER_RT_ALIAS(__truncdfsf2, __aeabi_d2f)
+#endif
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/truncdfsf2.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/truncsfhf2.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/truncsfhf2.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/truncsfhf2.c (revision 351984)
@@ -0,0 +1,27 @@
+//===-- lib/truncsfhf2.c - single -> half conversion --------------*- C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#define SRC_SINGLE
+#define DST_HALF
+#include "fp_trunc_impl.inc"
+
+// Use a forwarding definition and noinline to implement a poor man's alias,
+// as there isn't a good cross-platform way of defining one.
+COMPILER_RT_ABI NOINLINE uint16_t __truncsfhf2(float a) {
+ return __truncXfYf2__(a);
+}
+
+COMPILER_RT_ABI uint16_t __gnu_f2h_ieee(float a) { return __truncsfhf2(a); }
+
+#if defined(__ARM_EABI__)
+#if defined(COMPILER_RT_ARMHF_TARGET)
+AEABI_RTABI uint16_t __aeabi_f2h(float a) { return __truncsfhf2(a); }
+#else
+COMPILER_RT_ALIAS(__truncsfhf2, __aeabi_f2h)
+#endif
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/truncsfhf2.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
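Since C had no portable half-precision type when this was written, __truncsfhf2 returns the raw IEEE-754 binary16 bit pattern in a uint16_t. A hedged usage sketch (the declaration mirrors the definition above, and linking against the compiler-rt builtins is assumed):

#include <assert.h>
#include <stdint.h>

uint16_t __truncsfhf2(float a);

int main(void) {
  assert(__truncsfhf2(1.0f) == 0x3C00);     // 1.0 in binary16
  assert(__truncsfhf2(-2.0f) == 0xC000);    // -2.0 in binary16
  assert(__truncsfhf2(65504.0f) == 0x7BFF); // largest finite half
  return 0;
}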
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/trunctfdf2.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/trunctfdf2.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/trunctfdf2.c (revision 351984)
@@ -0,0 +1,19 @@
+//===-- lib/truncdfsf2.c - quad -> double conversion --------------*- C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#define QUAD_PRECISION
+#include "fp_lib.h"
+
+#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
+#define SRC_QUAD
+#define DST_DOUBLE
+#include "fp_trunc_impl.inc"
+
+COMPILER_RT_ABI double __trunctfdf2(long double a) { return __truncXfYf2__(a); }
+
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/trunctfdf2.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/trunctfsf2.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/trunctfsf2.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/trunctfsf2.c (revision 351984)
@@ -0,0 +1,19 @@
+//===-- lib/trunctfsf2.c - quad -> single conversion --------------*- C -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#define QUAD_PRECISION
+#include "fp_lib.h"
+
+#if defined(CRT_HAS_128BIT) && defined(CRT_LDBL_128BIT)
+#define SRC_QUAD
+#define DST_SINGLE
+#include "fp_trunc_impl.inc"
+
+COMPILER_RT_ABI float __trunctfsf2(long double a) { return __truncXfYf2__(a); }
+
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/trunctfsf2.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ucmpdi2.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ucmpdi2.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ucmpdi2.c (revision 351984)
@@ -0,0 +1,42 @@
+//===-- ucmpdi2.c - Implement __ucmpdi2 -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __ucmpdi2 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+// Returns: if (a < b) returns 0
+// if (a == b) returns 1
+// if (a > b) returns 2
+
+COMPILER_RT_ABI si_int __ucmpdi2(du_int a, du_int b) {
+ udwords x;
+ x.all = a;
+ udwords y;
+ y.all = b;
+ if (x.s.high < y.s.high)
+ return 0;
+ if (x.s.high > y.s.high)
+ return 2;
+ if (x.s.low < y.s.low)
+ return 0;
+ if (x.s.low > y.s.low)
+ return 2;
+ return 1;
+}
+
+#ifdef __ARM_EABI__
+// Returns: if (a < b) returns -1
+// if (a == b) returns 0
+// if (a > b) returns 1
+COMPILER_RT_ABI si_int __aeabi_ulcmp(di_int a, di_int b) {
+ return __ucmpdi2(a, b) - 1;
+}
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ucmpdi2.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
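The 0/1/2 return convention above is libgcc's; the AEABI wrapper subtracts 1 to get the -1/0/1 form. A hedged sketch of both conventions, with plain C types standing in for su_int/du_int and compiler-rt assumed to be linked in:

#include <assert.h>

int __ucmpdi2(unsigned long long a, unsigned long long b);

int main(void) {
  assert(__ucmpdi2(1, 2) == 0); // a < b
  assert(__ucmpdi2(2, 2) == 1); // a == b
  assert(__ucmpdi2(3, 2) == 2); // a > b
  // On ARM EABI targets, __aeabi_ulcmp(a, b) == __ucmpdi2(a, b) - 1,
  // i.e. -1/0/1 instead of 0/1/2.
  return 0;
}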
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ucmpti2.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ucmpti2.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ucmpti2.c (revision 351984)
@@ -0,0 +1,37 @@
+//===-- ucmpti2.c - Implement __ucmpti2 -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __ucmpti2 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+#ifdef CRT_HAS_128BIT
+
+// Returns: if (a < b) returns 0
+// if (a == b) returns 1
+// if (a > b) returns 2
+
+COMPILER_RT_ABI si_int __ucmpti2(tu_int a, tu_int b) {
+ utwords x;
+ x.all = a;
+ utwords y;
+ y.all = b;
+ if (x.s.high < y.s.high)
+ return 0;
+ if (x.s.high > y.s.high)
+ return 2;
+ if (x.s.low < y.s.low)
+ return 0;
+ if (x.s.low > y.s.low)
+ return 2;
+ return 1;
+}
+
+#endif // CRT_HAS_128BIT
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/ucmpti2.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/udivdi3.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/udivdi3.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/udivdi3.c (revision 351984)
@@ -0,0 +1,19 @@
+//===-- udivdi3.c - Implement __udivdi3 -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __udivdi3 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+// Returns: a / b
+
+COMPILER_RT_ABI du_int __udivdi3(du_int a, du_int b) {
+ return __udivmoddi4(a, b, 0);
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/udivdi3.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/udivmoddi4.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/udivmoddi4.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/udivmoddi4.c (revision 351984)
@@ -0,0 +1,189 @@
+//===-- udivmoddi4.c - Implement __udivmoddi4 -----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __udivmoddi4 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+// Effects: if rem != 0, *rem = a % b
+// Returns: a / b
+
+// Translated from Figure 3-40 of The PowerPC Compiler Writer's Guide
+
+COMPILER_RT_ABI du_int __udivmoddi4(du_int a, du_int b, du_int *rem) {
+ const unsigned n_uword_bits = sizeof(su_int) * CHAR_BIT;
+ const unsigned n_udword_bits = sizeof(du_int) * CHAR_BIT;
+ udwords n;
+ n.all = a;
+ udwords d;
+ d.all = b;
+ udwords q;
+ udwords r;
+ unsigned sr;
+ // special cases, X is unknown, K != 0
+ if (n.s.high == 0) {
+ if (d.s.high == 0) {
+ // 0 X
+ // ---
+ // 0 X
+ if (rem)
+ *rem = n.s.low % d.s.low;
+ return n.s.low / d.s.low;
+ }
+ // 0 X
+ // ---
+ // K X
+ if (rem)
+ *rem = n.s.low;
+ return 0;
+ }
+ // n.s.high != 0
+ if (d.s.low == 0) {
+ if (d.s.high == 0) {
+ // K X
+ // ---
+ // 0 0
+ if (rem)
+ *rem = n.s.high % d.s.low;
+ return n.s.high / d.s.low;
+ }
+ // d.s.high != 0
+ if (n.s.low == 0) {
+ // K 0
+ // ---
+ // K 0
+ if (rem) {
+ r.s.high = n.s.high % d.s.high;
+ r.s.low = 0;
+ *rem = r.all;
+ }
+ return n.s.high / d.s.high;
+ }
+ // K K
+ // ---
+ // K 0
+ if ((d.s.high & (d.s.high - 1)) == 0) /* if d is a power of 2 */ {
+ if (rem) {
+ r.s.low = n.s.low;
+ r.s.high = n.s.high & (d.s.high - 1);
+ *rem = r.all;
+ }
+ return n.s.high >> __builtin_ctz(d.s.high);
+ }
+ // K K
+ // ---
+ // K 0
+ sr = __builtin_clz(d.s.high) - __builtin_clz(n.s.high);
+ // 0 <= sr <= n_uword_bits - 2 or sr large
+ if (sr > n_uword_bits - 2) {
+ if (rem)
+ *rem = n.all;
+ return 0;
+ }
+ ++sr;
+ // 1 <= sr <= n_uword_bits - 1
+ // q.all = n.all << (n_udword_bits - sr);
+ q.s.low = 0;
+ q.s.high = n.s.low << (n_uword_bits - sr);
+ // r.all = n.all >> sr;
+ r.s.high = n.s.high >> sr;
+ r.s.low = (n.s.high << (n_uword_bits - sr)) | (n.s.low >> sr);
+ } else /* d.s.low != 0 */ {
+ if (d.s.high == 0) {
+ // K X
+ // ---
+ // 0 K
+ if ((d.s.low & (d.s.low - 1)) == 0) /* if d is a power of 2 */ {
+ if (rem)
+ *rem = n.s.low & (d.s.low - 1);
+ if (d.s.low == 1)
+ return n.all;
+ sr = __builtin_ctz(d.s.low);
+ q.s.high = n.s.high >> sr;
+ q.s.low = (n.s.high << (n_uword_bits - sr)) | (n.s.low >> sr);
+ return q.all;
+ }
+ // K X
+ // ---
+ // 0 K
+ sr = 1 + n_uword_bits + __builtin_clz(d.s.low) - __builtin_clz(n.s.high);
+ // 2 <= sr <= n_udword_bits - 1
+ // q.all = n.all << (n_udword_bits - sr);
+ // r.all = n.all >> sr;
+ if (sr == n_uword_bits) {
+ q.s.low = 0;
+ q.s.high = n.s.low;
+ r.s.high = 0;
+ r.s.low = n.s.high;
+ } else if (sr < n_uword_bits) /* 2 <= sr <= n_uword_bits - 1 */ {
+ q.s.low = 0;
+ q.s.high = n.s.low << (n_uword_bits - sr);
+ r.s.high = n.s.high >> sr;
+ r.s.low = (n.s.high << (n_uword_bits - sr)) | (n.s.low >> sr);
+ } else /* n_uword_bits + 1 <= sr <= n_udword_bits - 1 */ {
+ q.s.low = n.s.low << (n_udword_bits - sr);
+ q.s.high = (n.s.high << (n_udword_bits - sr)) |
+ (n.s.low >> (sr - n_uword_bits));
+ r.s.high = 0;
+ r.s.low = n.s.high >> (sr - n_uword_bits);
+ }
+ } else {
+ // K X
+ // ---
+ // K K
+ sr = __builtin_clz(d.s.high) - __builtin_clz(n.s.high);
+ // 0 <= sr <= n_uword_bits - 1 or sr large
+ if (sr > n_uword_bits - 1) {
+ if (rem)
+ *rem = n.all;
+ return 0;
+ }
+ ++sr;
+ // 1 <= sr <= n_uword_bits
+ // q.all = n.all << (n_udword_bits - sr);
+ q.s.low = 0;
+ if (sr == n_uword_bits) {
+ q.s.high = n.s.low;
+ r.s.high = 0;
+ r.s.low = n.s.high;
+ } else {
+ q.s.high = n.s.low << (n_uword_bits - sr);
+ r.s.high = n.s.high >> sr;
+ r.s.low = (n.s.high << (n_uword_bits - sr)) | (n.s.low >> sr);
+ }
+ }
+ }
+ // Not a special case
+ // q and r are initialized with:
+ // q.all = n.all << (n_udword_bits - sr);
+ // r.all = n.all >> sr;
+ // 1 <= sr <= n_udword_bits - 1
+ su_int carry = 0;
+ for (; sr > 0; --sr) {
+ // r:q = ((r:q) << 1) | carry
+ r.s.high = (r.s.high << 1) | (r.s.low >> (n_uword_bits - 1));
+ r.s.low = (r.s.low << 1) | (q.s.high >> (n_uword_bits - 1));
+ q.s.high = (q.s.high << 1) | (q.s.low >> (n_uword_bits - 1));
+ q.s.low = (q.s.low << 1) | carry;
+ // carry = 0;
+ // if (r.all >= d.all)
+ // {
+ // r.all -= d.all;
+ // carry = 1;
+ // }
+ const di_int s = (di_int)(d.all - r.all - 1) >> (n_udword_bits - 1);
+ carry = s & 1;
+ r.all -= d.all & s;
+ }
+ q.all = (q.all << 1) | carry;
+ if (rem)
+ *rem = r.all;
+ return q.all;
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/udivmoddi4.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
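The main loop above avoids branching on r >= d: the expression d.all - r.all - 1 goes negative exactly when r >= d, so its arithmetic right shift by 63 produces an all-ones mask. A minimal sketch of one loop step under that reading (arithmetic shift of a negative value is implementation-defined in C, but it is the same assumption the code above makes):

#include <assert.h>
#include <stdint.h>

// One conditional-subtract step: returns the quotient bit and subtracts
// d from *r only when *r >= d, with no branch.
static unsigned divstep(uint64_t *r, uint64_t d) {
  const int64_t s = (int64_t)(d - *r - 1) >> 63; // all-ones iff *r >= d
  *r -= d & (uint64_t)s;
  return (unsigned)(s & 1);
}

int main(void) {
  uint64_t r = 7;
  assert(divstep(&r, 5) == 1 && r == 2); // 7 >= 5: subtract, bit = 1
  assert(divstep(&r, 5) == 0 && r == 2); // 2 <  5: no subtract, bit = 0
  return 0;
}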
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/udivmodsi4.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/udivmodsi4.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/udivmodsi4.c (revision 351984)
@@ -0,0 +1,21 @@
+//===-- udivmodsi4.c - Implement __udivmodsi4 -----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __udivmodsi4 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+// Returns: a / b, *rem = a % b
+
+COMPILER_RT_ABI su_int __udivmodsi4(su_int a, su_int b, su_int *rem) {
+ si_int d = __udivsi3(a, b);
+ *rem = a - (d * b);
+ return d;
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/udivmodsi4.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/udivmodti4.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/udivmodti4.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/udivmodti4.c (revision 351984)
@@ -0,0 +1,195 @@
+//===-- udivmodti4.c - Implement __udivmodti4 -----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __udivmodti4 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+#ifdef CRT_HAS_128BIT
+
+// Effects: if rem != 0, *rem = a % b
+// Returns: a / b
+
+// Translated from Figure 3-40 of The PowerPC Compiler Writer's Guide
+
+COMPILER_RT_ABI tu_int __udivmodti4(tu_int a, tu_int b, tu_int *rem) {
+ const unsigned n_udword_bits = sizeof(du_int) * CHAR_BIT;
+ const unsigned n_utword_bits = sizeof(tu_int) * CHAR_BIT;
+ utwords n;
+ n.all = a;
+ utwords d;
+ d.all = b;
+ utwords q;
+ utwords r;
+ unsigned sr;
+ // special cases, X is unknown, K != 0
+ if (n.s.high == 0) {
+ if (d.s.high == 0) {
+ // 0 X
+ // ---
+ // 0 X
+ if (rem)
+ *rem = n.s.low % d.s.low;
+ return n.s.low / d.s.low;
+ }
+ // 0 X
+ // ---
+ // K X
+ if (rem)
+ *rem = n.s.low;
+ return 0;
+ }
+ // n.s.high != 0
+ if (d.s.low == 0) {
+ if (d.s.high == 0) {
+ // K X
+ // ---
+ // 0 0
+ if (rem)
+ *rem = n.s.high % d.s.low;
+ return n.s.high / d.s.low;
+ }
+ // d.s.high != 0
+ if (n.s.low == 0) {
+ // K 0
+ // ---
+ // K 0
+ if (rem) {
+ r.s.high = n.s.high % d.s.high;
+ r.s.low = 0;
+ *rem = r.all;
+ }
+ return n.s.high / d.s.high;
+ }
+ // K K
+ // ---
+ // K 0
+ if ((d.s.high & (d.s.high - 1)) == 0) /* if d is a power of 2 */ {
+ if (rem) {
+ r.s.low = n.s.low;
+ r.s.high = n.s.high & (d.s.high - 1);
+ *rem = r.all;
+ }
+ return n.s.high >> __builtin_ctzll(d.s.high);
+ }
+ // K K
+ // ---
+ // K 0
+ sr = __builtin_clzll(d.s.high) - __builtin_clzll(n.s.high);
+ // 0 <= sr <= n_udword_bits - 2 or sr large
+ if (sr > n_udword_bits - 2) {
+ if (rem)
+ *rem = n.all;
+ return 0;
+ }
+ ++sr;
+ // 1 <= sr <= n_udword_bits - 1
+ // q.all = n.all << (n_utword_bits - sr);
+ q.s.low = 0;
+ q.s.high = n.s.low << (n_udword_bits - sr);
+ // r.all = n.all >> sr;
+ r.s.high = n.s.high >> sr;
+ r.s.low = (n.s.high << (n_udword_bits - sr)) | (n.s.low >> sr);
+ } else /* d.s.low != 0 */ {
+ if (d.s.high == 0) {
+ // K X
+ // ---
+ // 0 K
+ if ((d.s.low & (d.s.low - 1)) == 0) /* if d is a power of 2 */ {
+ if (rem)
+ *rem = n.s.low & (d.s.low - 1);
+ if (d.s.low == 1)
+ return n.all;
+ sr = __builtin_ctzll(d.s.low);
+ q.s.high = n.s.high >> sr;
+ q.s.low = (n.s.high << (n_udword_bits - sr)) | (n.s.low >> sr);
+ return q.all;
+ }
+ // K X
+ // ---
+ // 0 K
+ sr = 1 + n_udword_bits + __builtin_clzll(d.s.low) -
+ __builtin_clzll(n.s.high);
+ // 2 <= sr <= n_utword_bits - 1
+ // q.all = n.all << (n_utword_bits - sr);
+ // r.all = n.all >> sr;
+ if (sr == n_udword_bits) {
+ q.s.low = 0;
+ q.s.high = n.s.low;
+ r.s.high = 0;
+ r.s.low = n.s.high;
+ } else if (sr < n_udword_bits) /* 2 <= sr <= n_udword_bits - 1 */ {
+ q.s.low = 0;
+ q.s.high = n.s.low << (n_udword_bits - sr);
+ r.s.high = n.s.high >> sr;
+ r.s.low = (n.s.high << (n_udword_bits - sr)) | (n.s.low >> sr);
+ } else /* n_udword_bits + 1 <= sr <= n_utword_bits - 1 */ {
+ q.s.low = n.s.low << (n_utword_bits - sr);
+ q.s.high = (n.s.high << (n_utword_bits - sr)) |
+ (n.s.low >> (sr - n_udword_bits));
+ r.s.high = 0;
+ r.s.low = n.s.high >> (sr - n_udword_bits);
+ }
+ } else {
+ // K X
+ // ---
+ // K K
+ sr = __builtin_clzll(d.s.high) - __builtin_clzll(n.s.high);
+ // 0 <= sr <= n_udword_bits - 1 or sr large
+ if (sr > n_udword_bits - 1) {
+ if (rem)
+ *rem = n.all;
+ return 0;
+ }
+ ++sr;
+ // 1 <= sr <= n_udword_bits
+ // q.all = n.all << (n_utword_bits - sr);
+ // r.all = n.all >> sr;
+ q.s.low = 0;
+ if (sr == n_udword_bits) {
+ q.s.high = n.s.low;
+ r.s.high = 0;
+ r.s.low = n.s.high;
+ } else {
+ r.s.high = n.s.high >> sr;
+ r.s.low = (n.s.high << (n_udword_bits - sr)) | (n.s.low >> sr);
+ q.s.high = n.s.low << (n_udword_bits - sr);
+ }
+ }
+ }
+ // Not a special case
+ // q and r are initialized with:
+ // q.all = n.all << (n_utword_bits - sr);
+ // r.all = n.all >> sr;
+ // 1 <= sr <= n_utword_bits - 1
+ su_int carry = 0;
+ for (; sr > 0; --sr) {
+ // r:q = ((r:q) << 1) | carry
+ r.s.high = (r.s.high << 1) | (r.s.low >> (n_udword_bits - 1));
+ r.s.low = (r.s.low << 1) | (q.s.high >> (n_udword_bits - 1));
+ q.s.high = (q.s.high << 1) | (q.s.low >> (n_udword_bits - 1));
+ q.s.low = (q.s.low << 1) | carry;
+ // carry = 0;
+ // if (r.all >= d.all)
+ // {
+ // r.all -= d.all;
+ // carry = 1;
+ // }
+ const ti_int s = (ti_int)(d.all - r.all - 1) >> (n_utword_bits - 1);
+ carry = s & 1;
+ r.all -= d.all & s;
+ }
+ q.all = (q.all << 1) | carry;
+ if (rem)
+ *rem = r.all;
+ return q.all;
+}
+
+#endif // CRT_HAS_128BIT
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/udivmodti4.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/udivsi3.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/udivsi3.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/udivsi3.c (revision 351984)
@@ -0,0 +1,62 @@
+//===-- udivsi3.c - Implement __udivsi3 -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __udivsi3 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+// Returns: a / b
+
+// Translated from Figure 3-40 of The PowerPC Compiler Writer's Guide
+
+// This function should not call __divsi3!
+COMPILER_RT_ABI su_int __udivsi3(su_int n, su_int d) {
+ const unsigned n_uword_bits = sizeof(su_int) * CHAR_BIT;
+ su_int q;
+ su_int r;
+ unsigned sr;
+ // special cases
+ if (d == 0)
+ return 0; // ?!
+ if (n == 0)
+ return 0;
+ sr = __builtin_clz(d) - __builtin_clz(n);
+ // 0 <= sr <= n_uword_bits - 1 or sr large
+ if (sr > n_uword_bits - 1) // d > n
+ return 0;
+ if (sr == n_uword_bits - 1) // d == 1
+ return n;
+ ++sr;
+ // 1 <= sr <= n_uword_bits - 1
+ // Not a special case
+ q = n << (n_uword_bits - sr);
+ r = n >> sr;
+ su_int carry = 0;
+ for (; sr > 0; --sr) {
+ // r:q = ((r:q) << 1) | carry
+ r = (r << 1) | (q >> (n_uword_bits - 1));
+ q = (q << 1) | carry;
+ // carry = 0;
+ // if (r.all >= d.all)
+ // {
+ // r.all -= d.all;
+ // carry = 1;
+ // }
+ const si_int s = (si_int)(d - r - 1) >> (n_uword_bits - 1);
+ carry = s & 1;
+ r -= d & s;
+ }
+ q = (q << 1) | carry;
+ return q;
+}
+
+#if defined(__ARM_EABI__)
+COMPILER_RT_ALIAS(__udivsi3, __aeabi_uidiv)
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/udivsi3.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
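A quick self-check of the shift-and-subtract loop against the hardware divider; this assumes the compiler-rt object is linked in and that the symbol is not resolved to an identically named libgcc routine:

#include <assert.h>

unsigned int __udivsi3(unsigned int n, unsigned int d);

int main(void) {
  const unsigned int v[] = {1u, 2u, 3u, 7u, 0x1000u, 0x80000000u, 0xFFFFFFFFu};
  const unsigned int count = sizeof(v) / sizeof(v[0]);
  for (unsigned int i = 0; i < count; ++i)
    for (unsigned int j = 0; j < count; ++j) // all divisors are nonzero
      assert(__udivsi3(v[i], v[j]) == v[i] / v[j]);
  return 0;
}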
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/udivti3.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/udivti3.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/udivti3.c (revision 351984)
@@ -0,0 +1,23 @@
+//===-- udivti3.c - Implement __udivti3 -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __udivti3 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+#ifdef CRT_HAS_128BIT
+
+// Returns: a / b
+
+COMPILER_RT_ABI tu_int __udivti3(tu_int a, tu_int b) {
+ return __udivmodti4(a, b, 0);
+}
+
+#endif // CRT_HAS_128BIT
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/udivti3.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/umoddi3.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/umoddi3.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/umoddi3.c (revision 351984)
@@ -0,0 +1,21 @@
+//===-- umoddi3.c - Implement __umoddi3 -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __umoddi3 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+// Returns: a % b
+
+COMPILER_RT_ABI du_int __umoddi3(du_int a, du_int b) {
+ du_int r;
+ __udivmoddi4(a, b, &r);
+ return r;
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/umoddi3.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/umodsi3.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/umodsi3.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/umodsi3.c (revision 351984)
@@ -0,0 +1,19 @@
+//===-- umodsi3.c - Implement __umodsi3 -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __umodsi3 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+// Returns: a % b
+
+COMPILER_RT_ABI su_int __umodsi3(su_int a, su_int b) {
+ return a - __udivsi3(a, b) * b;
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/umodsi3.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
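The definition above relies on the identity a % b == a - (a / b) * b, which holds for all unsigned a and nonzero b; a one-line sanity check:

#include <assert.h>

int main(void) {
  const unsigned int a = 123456789u, b = 1009u;
  assert(a % b == a - (a / b) * b); // the identity __umodsi3 uses
  return 0;
}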
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/umodti3.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/umodti3.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/umodti3.c (revision 351984)
@@ -0,0 +1,25 @@
+//===-- umodti3.c - Implement __umodti3 -----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __umodti3 for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "int_lib.h"
+
+#ifdef CRT_HAS_128BIT
+
+// Returns: a % b
+
+COMPILER_RT_ABI tu_int __umodti3(tu_int a, tu_int b) {
+ tu_int r;
+ __udivmodti4(a, b, &r);
+ return r;
+}
+
+#endif // CRT_HAS_128BIT
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/umodti3.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/unwind-ehabi-helpers.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/unwind-ehabi-helpers.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/unwind-ehabi-helpers.h (revision 351984)
@@ -0,0 +1,51 @@
+//===-- arm-ehabi-helpers.h - Supplementary ARM EHABI declarations --------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===--------------------------------------------------------------------===//
+
+#ifndef UNWIND_EHABI_HELPERS_H
+#define UNWIND_EHABI_HELPERS_H
+
+#include <stdint.h>
+// NOTE: see reasoning for this inclusion below
+#include <unwind.h>
+
+#if !defined(__ARM_EABI_UNWINDER__)
+
+// NOTE: _URC_OK, _URC_FAILURE must be present as preprocessor tokens. This
+// allows for a substitution of a constant which can be cast into the
+// appropriate enumerated type. This header is expected to always be included
+// AFTER unwind.h (which is why it is forcefully included above). This ensures
+// that we do not overwrite the token for the enumeration. Subsequent uses of
+// the token then simply expand to the constant values.
+//
+// The typedef redeclaration should be safe. Due to the protection granted to
+// us by the `__ARM_EABI_UNWINDER__` above, we are guaranteed that we are in a
+// header not vended by gcc. The HP unwinder (being an Itanium unwinder) does
+// not support EHABI, and the GNU unwinder, derived from the HP unwinder, also
+// does not support EHABI as of the introduction of this header. As such, we
+// are fairly certain that we are in the LLVM case. Here, _Unwind_State is a
+// typedef, and so we can get away with a redeclaration.
+//
+// The guarded definitions below avoid redefining unwind states that the
+// header may already provide.
+
+#define _URC_OK 0
+#define _URC_FAILURE 9
+
+typedef uint32_t _Unwind_State;
+
+#if !defined(_US_UNWIND_FRAME_STARTING)
+#define _US_UNWIND_FRAME_STARTING ((_Unwind_State)1)
+#endif
+
+#if !defined(_US_ACTION_MASK)
+#define _US_ACTION_MASK ((_Unwind_State)3)
+#endif
+
+#endif
+
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/unwind-ehabi-helpers.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
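With the fallback definitions above in place, phase-checking code can be written the same way whether or not the host unwind.h is EHABI-aware. A hedged sketch (the helper function is hypothetical):

#include "unwind-ehabi-helpers.h"

// Hypothetical helper: true when the unwinder is starting a new frame,
// using the tokens the header guarantees are defined.
static int is_frame_starting(_Unwind_State state) {
  return (state & _US_ACTION_MASK) == _US_UNWIND_FRAME_STARTING;
}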
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/x86_64/chkstk.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/x86_64/chkstk.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/x86_64/chkstk.S (revision 351984)
@@ -0,0 +1,40 @@
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+#include "../assembly.h"
+
+// _chkstk routine
+// This routine is windows specific
+// http://msdn.microsoft.com/en-us/library/ms648426.aspx
+
+// Notes from r227519
+// MSVC x64's __chkstk and cygming's ___chkstk_ms do not adjust %rsp
+// themselves. They also do not clobber %rax, so we can reuse it when
+// adjusting %rsp.
+
+#ifdef __x86_64__
+
+.text
+.balign 4
+DEFINE_COMPILERRT_FUNCTION(___chkstk_ms)
+ push %rcx
+ push %rax
+ cmp $0x1000,%rax
+ lea 24(%rsp),%rcx
+ jb 1f
+2:
+ sub $0x1000,%rcx
+ test %rcx,(%rcx)
+ sub $0x1000,%rax
+ cmp $0x1000,%rax
+ ja 2b
+1:
+ sub %rax,%rcx
+ test %rcx,(%rcx)
+ pop %rax
+ pop %rcx
+ ret
+END_COMPILERRT_FUNCTION(___chkstk_ms)
+
+#endif // __x86_64__
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/x86_64/chkstk.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
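The probe loop exists because Windows grows the stack one guard page at a time, so every 4 KiB step between the old and new stack pointer must be touched in order. An illustrative C rendering of the logic (not a drop-in replacement: the real routine must leave %rsp and all other registers untouched):

#include <stddef.h>

static void probe_stack(char *sp, size_t size) {
  while (size > 0x1000) {
    sp -= 0x1000;
    size -= 0x1000;
    *(volatile char *)sp; // the "test %rcx,(%rcx)" above: touch the page
  }
  *(volatile char *)(sp - size); // final, possibly partial, step
}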
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/x86_64/chkstk2.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/x86_64/chkstk2.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/x86_64/chkstk2.S (revision 351984)
@@ -0,0 +1,43 @@
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+#include "../assembly.h"
+
+#ifdef __x86_64__
+
+// _chkstk (_alloca) routine - probe stack between %rsp and (%rsp-%rax) in 4k increments,
+// then decrement %rsp by %rax. Preserves all registers except %rsp and flags.
+// This routine is windows specific
+// http://msdn.microsoft.com/en-us/library/ms648426.aspx
+
+.text
+.balign 4
+DEFINE_COMPILERRT_FUNCTION(__alloca)
+ mov %rcx,%rax // x64 _alloca is a normal function with parameter in rcx
+ // fallthrough
+DEFINE_COMPILERRT_FUNCTION(___chkstk)
+ push %rcx
+ cmp $0x1000,%rax
+ lea 16(%rsp),%rcx // rsp before calling this routine -> rcx
+ jb 1f
+2:
+ sub $0x1000,%rcx
+ test %rcx,(%rcx)
+ sub $0x1000,%rax
+ cmp $0x1000,%rax
+ ja 2b
+1:
+ sub %rax,%rcx
+ test %rcx,(%rcx)
+
+ lea 8(%rsp),%rax // load pointer to the return address into rax
+ mov %rcx,%rsp // install the new top of stack pointer into rsp
+ mov -8(%rax),%rcx // restore rcx
+ push (%rax) // push return address onto the stack
+ sub %rsp,%rax // restore the original value in rax
+ ret
+END_COMPILERRT_FUNCTION(___chkstk)
+END_COMPILERRT_FUNCTION(__alloca)
+
+#endif // __x86_64__
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/x86_64/chkstk2.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/x86_64/floatdidf.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/x86_64/floatdidf.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/x86_64/floatdidf.c (revision 351984)
@@ -0,0 +1,13 @@
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+// double __floatdidf(di_int a);
+
+#if defined(__x86_64__) || defined(_M_X64)
+
+#include "../int_lib.h"
+
+double __floatdidf(int64_t a) { return (double)a; }
+
+#endif // __x86_64__
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/x86_64/floatdidf.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/x86_64/floatdisf.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/x86_64/floatdisf.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/x86_64/floatdisf.c (revision 351984)
@@ -0,0 +1,11 @@
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+#if defined(__x86_64__) || defined(_M_X64)
+
+#include "../int_lib.h"
+
+float __floatdisf(int64_t a) { return (float)a; }
+
+#endif // __x86_64__
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/x86_64/floatdisf.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/x86_64/floatdixf.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/x86_64/floatdixf.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/x86_64/floatdixf.c (revision 351984)
@@ -0,0 +1,13 @@
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+// long double __floatdixf(di_int a);
+
+#ifdef __x86_64__
+
+#include "../int_lib.h"
+
+long double __floatdixf(int64_t a) { return (long double)a; }
+
+#endif // __x86_64__
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/x86_64/floatdixf.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/x86_64/floatundidf.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/x86_64/floatundidf.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/x86_64/floatundidf.S (revision 351984)
@@ -0,0 +1,51 @@
+//===-- floatundidf.S - Implement __floatundidf for x86_64 ----------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements __floatundidf for the compiler_rt library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "../assembly.h"
+
+// double __floatundidf(du_int a);
+
+#ifdef __x86_64__
+
+CONST_SECTION
+
+ .balign 16
+twop52:
+ .quad 0x4330000000000000
+
+ .balign 16
+twop84_plus_twop52:
+ .quad 0x4530000000100000
+
+ .balign 16
+twop84:
+ .quad 0x4530000000000000
+
+#define REL_ADDR(_a) (_a)(%rip)
+
+.text
+.balign 4
+DEFINE_COMPILERRT_FUNCTION(__floatundidf)
+ movd %edi, %xmm0 // low 32 bits of a
+ shrq $32, %rdi // high 32 bits of a
+ orq REL_ADDR(twop84), %rdi // 0x1p84 + a_hi (no rounding occurs)
+ orpd REL_ADDR(twop52), %xmm0 // 0x1p52 + a_lo (no rounding occurs)
+ movd %rdi, %xmm1
+ subsd REL_ADDR(twop84_plus_twop52), %xmm1 // a_hi - 0x1p52 (no rounding occurs)
+ addsd %xmm1, %xmm0 // a_hi + a_lo (round happens here)
+ ret
+END_COMPILERRT_FUNCTION(__floatundidf)
+
+#endif // __x86_64__
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/x86_64/floatundidf.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
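The constants above implement a classic bit trick: OR the two 32-bit halves of the input into the mantissas of 2^52 and 2^84, which yields exact doubles, then recombine them so rounding happens exactly once, in the final addition. A hedged C rendering:

#include <assert.h>
#include <stdint.h>
#include <string.h>

static double floatundidf_sketch(uint64_t a) {
  const uint64_t k52 = 0x4330000000000000ull;   // bits of 0x1p52
  const uint64_t k84 = 0x4530000000000000ull;   // bits of 0x1p84
  const uint64_t kbias = 0x4530000000100000ull; // bits of 0x1p84 + 0x1p52
  uint64_t lo_bits = k52 | (a & 0xFFFFFFFFu); // 0x1p52 + a_lo, exact
  uint64_t hi_bits = k84 | (a >> 32);         // 0x1p84 + a_hi * 0x1p32, exact
  double lo, hi, bias;
  memcpy(&lo, &lo_bits, sizeof lo);
  memcpy(&hi, &hi_bits, sizeof hi);
  memcpy(&bias, &kbias, sizeof bias);
  return (hi - bias) + lo; // the only step that can round
}

int main(void) {
  assert(floatundidf_sketch(0) == 0.0);
  assert(floatundidf_sketch(1) == 1.0);
  assert(floatundidf_sketch(~0ull) == 18446744073709551616.0); // rounds to 2^64
  return 0;
}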
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/x86_64/floatundisf.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/x86_64/floatundisf.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/x86_64/floatundisf.S (revision 351984)
@@ -0,0 +1,39 @@
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+#include "../assembly.h"
+
+// float __floatundisf(du_int a);
+
+#ifdef __x86_64__
+
+CONST_SECTION
+
+ .balign 16
+two:
+ .single 2.0
+
+#define REL_ADDR(_a) (_a)(%rip)
+
+.text
+.balign 4
+DEFINE_COMPILERRT_FUNCTION(__floatundisf)
+ movq $1, %rsi
+ testq %rdi, %rdi
+ js 1f
+ cvtsi2ssq %rdi, %xmm0
+ ret
+
+1: andq %rdi, %rsi
+ shrq %rdi
+ orq %rsi, %rdi
+ cvtsi2ssq %rdi, %xmm0
+ mulss REL_ADDR(two), %xmm0
+ ret
+END_COMPILERRT_FUNCTION(__floatundisf)
+
+#endif // __x86_64__
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/x86_64/floatundisf.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
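cvtsi2ssq converts signed values only, so when bit 63 is set the code halves the input, folding the discarded low bit back in so the final rounding is unchanged (the round-to-odd trick), and doubles the converted result. The same logic as a hedged C sketch:

#include <assert.h>
#include <stdint.h>

static float floatundisf_sketch(uint64_t a) {
  if ((int64_t)a >= 0)
    return (float)(int64_t)a; // fits the signed convert directly
  // Halve with a sticky low bit, convert as signed, then double.
  uint64_t halved = (a >> 1) | (a & 1);
  return (float)(int64_t)halved * 2.0f;
}

int main(void) {
  assert(floatundisf_sketch(1) == 1.0f);
  assert(floatundisf_sketch(0x8000000000000000ull) == 0x1p63f);
  return 0;
}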
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/x86_64/floatundixf.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/x86_64/floatundixf.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/x86_64/floatundixf.S (revision 351984)
@@ -0,0 +1,72 @@
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+
+#include "../assembly.h"
+
+// long double __floatundixf(du_int a);
+
+#ifdef __x86_64__
+
+CONST_SECTION
+
+ .balign 16
+twop64:
+ .quad 0x43f0000000000000
+
+#define REL_ADDR(_a) (_a)(%rip)
+
+ .text
+
+ .balign 4
+DEFINE_COMPILERRT_FUNCTION(__floatundixf)
+ movq %rdi, -8(%rsp)
+ fildq -8(%rsp)
+ test %rdi, %rdi
+ js 1f
+ ret
+1: faddl REL_ADDR(twop64)
+ ret
+END_COMPILERRT_FUNCTION(__floatundixf)
+
+#endif // __x86_64__
+
+
+/* Branch-free implementation is ever so slightly slower, but more beautiful.
+ It is likely superior for inlining, so I kept it around for future reference.
+
+#ifdef __x86_64__
+
+CONST_SECTION
+
+ .balign 4
+twop52:
+ .quad 0x4330000000000000
+twop84_plus_twop52_neg:
+ .quad 0xc530000000100000
+twop84:
+ .quad 0x4530000000000000
+
+#define REL_ADDR(_a) (_a)(%rip)
+
+.text
+.balign 4
+DEFINE_COMPILERRT_FUNCTION(__floatundixf)
+ movl %edi, %esi // low 32 bits of input
+ shrq $32, %rdi // hi 32 bits of input
+ orq REL_ADDR(twop84), %rdi // 2^84 + hi (as a double)
+ orq REL_ADDR(twop52), %rsi // 2^52 + lo (as a double)
+ movq %rdi, -8(%rsp)
+ movq %rsi, -16(%rsp)
+ fldl REL_ADDR(twop84_plus_twop52_neg)
+ faddl -8(%rsp) // hi - 2^52 (as double extended, no rounding occurs)
+ faddl -16(%rsp) // hi + lo (as double extended)
+ ret
+END_COMPILERRT_FUNCTION(__floatundixf)
+
+#endif // __x86_64__
+
+*/
+
+NO_EXEC_STACK_DIRECTIVE
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/x86_64/floatundixf.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/README.txt
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/README.txt (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/README.txt (revision 351984)
@@ -0,0 +1,346 @@
+Compiler-RT
+================================
+
+This directory and its subdirectories contain source code for the compiler
+support routines.
+
+Compiler-RT is open source software. You may freely distribute it under the
+terms of the license agreement found in LICENSE.txt.
+
+================================
+
+This is a replacement library for libgcc. Each function is contained
+in its own file. Each function has a corresponding unit test under
+test/Unit.
+
+A rudimentary script to test each file is in the file called
+test/Unit/test.
+
+Here is the specification for this library:
+
+http://gcc.gnu.org/onlinedocs/gccint/Libgcc.html#Libgcc
+
+Here is a synopsis of the contents of this library:
+
+typedef int si_int;
+typedef unsigned su_int;
+
+typedef long long di_int;
+typedef unsigned long long du_int;
+
+// Integral bit manipulation
+
+di_int __ashldi3(di_int a, si_int b); // a << b
+ti_int __ashlti3(ti_int a, si_int b); // a << b
+
+di_int __ashrdi3(di_int a, si_int b); // a >> b arithmetic (sign fill)
+ti_int __ashrti3(ti_int a, si_int b); // a >> b arithmetic (sign fill)
+di_int __lshrdi3(di_int a, si_int b); // a >> b logical (zero fill)
+ti_int __lshrti3(ti_int a, si_int b); // a >> b logical (zero fill)
+
+si_int __clzsi2(si_int a); // count leading zeros
+si_int __clzdi2(di_int a); // count leading zeros
+si_int __clzti2(ti_int a); // count leading zeros
+si_int __ctzsi2(si_int a); // count trailing zeros
+si_int __ctzdi2(di_int a); // count trailing zeros
+si_int __ctzti2(ti_int a); // count trailing zeros
+
+si_int __ffssi2(si_int a); // find least significant 1 bit
+si_int __ffsdi2(di_int a); // find least significant 1 bit
+si_int __ffsti2(ti_int a); // find least significant 1 bit
+
+si_int __paritysi2(si_int a); // bit parity
+si_int __paritydi2(di_int a); // bit parity
+si_int __parityti2(ti_int a); // bit parity
+
+si_int __popcountsi2(si_int a); // bit population
+si_int __popcountdi2(di_int a); // bit population
+si_int __popcountti2(ti_int a); // bit population
+
+uint32_t __bswapsi2(uint32_t a); // a byteswapped
+uint64_t __bswapdi2(uint64_t a); // a byteswapped
+
+// Integral arithmetic
+
+di_int __negdi2 (di_int a); // -a
+ti_int __negti2 (ti_int a); // -a
+di_int __muldi3 (di_int a, di_int b); // a * b
+ti_int __multi3 (ti_int a, ti_int b); // a * b
+si_int __divsi3 (si_int a, si_int b); // a / b signed
+di_int __divdi3 (di_int a, di_int b); // a / b signed
+ti_int __divti3 (ti_int a, ti_int b); // a / b signed
+su_int __udivsi3 (su_int n, su_int d); // a / b unsigned
+du_int __udivdi3 (du_int a, du_int b); // a / b unsigned
+tu_int __udivti3 (tu_int a, tu_int b); // a / b unsigned
+si_int __modsi3 (si_int a, si_int b); // a % b signed
+di_int __moddi3 (di_int a, di_int b); // a % b signed
+ti_int __modti3 (ti_int a, ti_int b); // a % b signed
+su_int __umodsi3 (su_int a, su_int b); // a % b unsigned
+du_int __umoddi3 (du_int a, du_int b); // a % b unsigned
+tu_int __umodti3 (tu_int a, tu_int b); // a % b unsigned
+du_int __udivmoddi4(du_int a, du_int b, du_int* rem); // a / b, *rem = a % b unsigned
+tu_int __udivmodti4(tu_int a, tu_int b, tu_int* rem); // a / b, *rem = a % b unsigned
+su_int __udivmodsi4(su_int a, su_int b, su_int* rem); // a / b, *rem = a % b unsigned
+si_int __divmodsi4(si_int a, si_int b, si_int* rem); // a / b, *rem = a % b signed
+
+
+
+// Integral arithmetic with trapping overflow
+
+si_int __absvsi2(si_int a); // abs(a)
+di_int __absvdi2(di_int a); // abs(a)
+ti_int __absvti2(ti_int a); // abs(a)
+
+si_int __negvsi2(si_int a); // -a
+di_int __negvdi2(di_int a); // -a
+ti_int __negvti2(ti_int a); // -a
+
+si_int __addvsi3(si_int a, si_int b); // a + b
+di_int __addvdi3(di_int a, di_int b); // a + b
+ti_int __addvti3(ti_int a, ti_int b); // a + b
+
+si_int __subvsi3(si_int a, si_int b); // a - b
+di_int __subvdi3(di_int a, di_int b); // a - b
+ti_int __subvti3(ti_int a, ti_int b); // a - b
+
+si_int __mulvsi3(si_int a, si_int b); // a * b
+di_int __mulvdi3(di_int a, di_int b); // a * b
+ti_int __mulvti3(ti_int a, ti_int b); // a * b
+
+
+// Integral arithmetic which reports whether overflow occurred
+
+si_int __mulosi4(si_int a, si_int b, int* overflow); // a * b, overflow set to one if result not in signed range
+di_int __mulodi4(di_int a, di_int b, int* overflow); // a * b, overflow set to one if result not in signed range
+ti_int __muloti4(ti_int a, ti_int b, int* overflow); // a * b, overflow set to one if result not in signed range
+
+
+// Integral comparison: a < b -> 0
+// a == b -> 1
+// a > b -> 2
+
+si_int __cmpdi2 (di_int a, di_int b);
+si_int __cmpti2 (ti_int a, ti_int b);
+si_int __ucmpdi2(du_int a, du_int b);
+si_int __ucmpti2(tu_int a, tu_int b);
+
+// Integral / floating point conversion
+
+di_int __fixsfdi( float a);
+di_int __fixdfdi( double a);
+di_int __fixxfdi(long double a);
+
+ti_int __fixsfti( float a);
+ti_int __fixdfti( double a);
+ti_int __fixxfti(long double a);
+uint64_t __fixtfdi(long double input); // ppc only, doesn't match documentation
+
+su_int __fixunssfsi( float a);
+su_int __fixunsdfsi( double a);
+su_int __fixunsxfsi(long double a);
+
+du_int __fixunssfdi( float a);
+du_int __fixunsdfdi( double a);
+du_int __fixunsxfdi(long double a);
+
+tu_int __fixunssfti( float a);
+tu_int __fixunsdfti( double a);
+tu_int __fixunsxfti(long double a);
+uint64_t __fixunstfdi(long double input); // ppc only
+
+float __floatdisf(di_int a);
+double __floatdidf(di_int a);
+long double __floatdixf(di_int a);
+long double __floatditf(int64_t a); // ppc only
+
+float __floattisf(ti_int a);
+double __floattidf(ti_int a);
+long double __floattixf(ti_int a);
+
+float __floatundisf(du_int a);
+double __floatundidf(du_int a);
+long double __floatundixf(du_int a);
+long double __floatunditf(uint64_t a); // ppc only
+
+float __floatuntisf(tu_int a);
+double __floatuntidf(tu_int a);
+long double __floatuntixf(tu_int a);
+
+// Floating point raised to integer power
+
+float __powisf2( float a, si_int b); // a ^ b
+double __powidf2( double a, si_int b); // a ^ b
+long double __powixf2(long double a, si_int b); // a ^ b
+long double __powitf2(long double a, si_int b); // ppc only, a ^ b
+
+// Complex arithmetic
+
+// (a + ib) * (c + id)
+
+ float _Complex __mulsc3( float a, float b, float c, float d);
+ double _Complex __muldc3(double a, double b, double c, double d);
+long double _Complex __mulxc3(long double a, long double b,
+ long double c, long double d);
+long double _Complex __multc3(long double a, long double b,
+ long double c, long double d); // ppc only
+
+// (a + ib) / (c + id)
+
+ float _Complex __divsc3( float a, float b, float c, float d);
+ double _Complex __divdc3(double a, double b, double c, double d);
+long double _Complex __divxc3(long double a, long double b,
+ long double c, long double d);
+long double _Complex __divtc3(long double a, long double b,
+ long double c, long double d); // ppc only
+
+
+// Runtime support
+
+// __clear_cache() is used to tell the process that new instructions have been
+// written to an address range. Necessary on processors that do not have
+// a unified instruction and data cache.
+void __clear_cache(void* start, void* end);
+
+// __enable_execute_stack() is used with nested functions when a trampoline
+// function is written onto the stack and that page range needs to be made
+// executable.
+void __enable_execute_stack(void* addr);
+
+// __gcc_personality_v0() is normally only called by the system unwinder.
+// C code (as opposed to C++) normally does not need a personality function
+// because there are no catch clauses or destructors to be run. But there
+// is a C language extension __attribute__((cleanup(func))) which marks local
+// variables as needing the cleanup function "func" to be run when the
+// variable goes out of scope. That includes when an exception is thrown,
+// so a personality handler is needed.
+_Unwind_Reason_Code __gcc_personality_v0(int version, _Unwind_Action actions,
+ uint64_t exceptionClass, struct _Unwind_Exception* exceptionObject,
+ _Unwind_Context_t context);
+
+// for use with some implementations of assert() in <assert.h>
+void __eprintf(const char* format, const char* assertion_expression,
+ const char* line, const char* file);
+
+// for systems with emulated thread local storage
+void* __emutls_get_address(struct __emutls_control*);
+
+
+// Power PC specific functions
+
+// There is no C interface to the saveFP/restFP functions. They are helper
+// functions called by the prolog and epilog of functions that need to save
+// a number of non-volatile float point registers.
+saveFP
+restFP
+
+// PowerPC has a standard template for trampoline functions. This function
+// generates a custom trampoline function with the specific realFunc
+// and localsPtr values.
+void __trampoline_setup(uint32_t* trampOnStack, int trampSizeAllocated,
+ const void* realFunc, void* localsPtr);
+
+// adds two 128-bit double-double precision values ( x + y )
+long double __gcc_qadd(long double x, long double y);
+
+// subtracts two 128-bit double-double precision values ( x - y )
+long double __gcc_qsub(long double x, long double y);
+
+// multiplies two 128-bit double-double precision values ( x * y )
+long double __gcc_qmul(long double x, long double y);
+
+// divides two 128-bit double-double precision values ( x / y )
+long double __gcc_qdiv(long double a, long double b);
+
+
+// ARM specific functions
+
+// There is no C interface to the switch* functions. These helper functions
+// are only needed by Thumb1 code for efficient switch table generation.
+switch16
+switch32
+switch8
+switchu8
+
+// There is no C interface to the *_vfp_d8_d15_regs functions. They are
+// called in the prolog and epilog of Thumb1 functions. When the C++ ABI uses
+// SJLJ for exceptions, each function with a catch clause or destructors needs
+// to save and restore all registers in its prolog and epilog. But there is
+// no way to access vector and high float registers from Thumb1 code, so the
+// compiler must add call-outs to these helper functions in the prolog and
+// epilog.
+restore_vfp_d8_d15_regs
+save_vfp_d8_d15_regs
+
+
+// Note: long ago ARM processors did not have floating point hardware support.
+// Floating point was done in software and floating point parameters were
+// passed in integer registers. When hardware support was added for floating
+// point, new *vfp functions were added to do the same operations but with
+// floating point parameters in floating point registers.
+
+// Undocumented functions
+
+float __addsf3vfp(float a, float b); // Appears to return a + b
+double __adddf3vfp(double a, double b); // Appears to return a + b
+float __divsf3vfp(float a, float b); // Appears to return a / b
+double __divdf3vfp(double a, double b); // Appears to return a / b
+int __eqsf2vfp(float a, float b); // Appears to return one
+ // iff a == b and neither is NaN.
+int __eqdf2vfp(double a, double b); // Appears to return one
+ // iff a == b and neither is NaN.
+double __extendsfdf2vfp(float a); // Appears to convert from
+ // float to double.
+int __fixdfsivfp(double a); // Appears to convert from
+ // double to int.
+int __fixsfsivfp(float a); // Appears to convert from
+ // float to int.
+unsigned int __fixunssfsivfp(float a); // Appears to convert from
+ // float to unsigned int.
+unsigned int __fixunsdfsivfp(double a); // Appears to convert from
+ // double to unsigned int.
+double __floatsidfvfp(int a); // Appears to convert from
+ // int to double.
+float __floatsisfvfp(int a); // Appears to convert from
+ // int to float.
+double __floatunssidfvfp(unsigned int a); // Appears to convert from
+ // unsigned int to double.
+float __floatunssisfvfp(unsigned int a); // Appears to convert from
+ // unsigned int to float.
+int __gedf2vfp(double a, double b); // Appears to return __gedf2
+ // (a >= b)
+int __gesf2vfp(float a, float b); // Appears to return __gesf2
+ // (a >= b)
+int __gtdf2vfp(double a, double b); // Appears to return __gtdf2
+ // (a > b)
+int __gtsf2vfp(float a, float b); // Appears to return __gtsf2
+ // (a > b)
+int __ledf2vfp(double a, double b); // Appears to return __ledf2
+ // (a <= b)
+int __lesf2vfp(float a, float b); // Appears to return __lesf2
+ // (a <= b)
+int __ltdf2vfp(double a, double b); // Appears to return __ltdf2
+ // (a < b)
+int __ltsf2vfp(float a, float b); // Appears to return __ltsf2
+ // (a < b)
+double __muldf3vfp(double a, double b); // Appears to return a * b
+float __mulsf3vfp(float a, float b); // Appears to return a * b
+int __nedf2vfp(double a, double b); // Appears to return __nedf2
+ // (a != b)
+double __negdf2vfp(double a); // Appears to return -a
+float __negsf2vfp(float a); // Appears to return -a
+double __subdf3vfp(double a, double b); // Appears to return a - b
+float __subsf3vfp(float a, float b); // Appears to return a - b
+float __truncdfsf2vfp(double a); // Appears to convert from
+ // double to float.
+int __unorddf2vfp(double a, double b); // Appears to return __unorddf2
+int __unordsf2vfp(float a, float b); // Appears to return __unordsf2
+
+
+Preconditions, where they exist, are listed at each function's definition and
+reflect the specification at
+http://gcc.gnu.org/onlinedocs/gccint/Libgcc.html#Libgcc.
+
+Assumptions are listed in "int_lib.h", and in individual files. Where possible
+assumptions are checked at compile time.
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/builtins/README.txt
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/interception/interception_linux.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/interception/interception_linux.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/interception/interception_linux.cc (revision 351984)
@@ -0,0 +1,78 @@
+//===-- interception_linux.cc -----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Linux-specific interception methods.
+//===----------------------------------------------------------------------===//
+
+#include "interception.h"
+
+#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD || \
+ SANITIZER_OPENBSD || SANITIZER_SOLARIS
+
+#include <dlfcn.h> // for dlsym() and dlvsym()
+
+namespace __interception {
+
+#if SANITIZER_NETBSD
+// Returns true iff the two strings are equal.
+static bool StrCmp(const char *s1, const char *s2) {
+ while (true) {
+ if (*s1 != *s2)
+ return false;
+ if (*s1 == 0)
+ return true;
+ s1++;
+ s2++;
+ }
+}
+#endif
+
+static void *GetFuncAddr(const char *name) {
+#if SANITIZER_NETBSD
+ // FIXME: Find a better way to handle renames
+ if (StrCmp(name, "sigaction"))
+ name = "__sigaction14";
+#endif
+ void *addr = dlsym(RTLD_NEXT, name);
+ if (!addr) {
+ // If the lookup using RTLD_NEXT failed, the sanitizer runtime library is
+ // later in the library search order than the DSO that we are trying to
+ // intercept, which means that we cannot intercept this function. We still
+ // want the address of the real definition, though, so look it up using
+ // RTLD_DEFAULT.
+ addr = dlsym(RTLD_DEFAULT, name);
+ }
+ return addr;
+}
+
+bool InterceptFunction(const char *name, uptr *ptr_to_real, uptr func,
+ uptr wrapper) {
+ void *addr = GetFuncAddr(name);
+ *ptr_to_real = (uptr)addr;
+ return addr && (func == wrapper);
+}
+
+// Android, Solaris and OpenBSD do not have dlvsym
+#if !SANITIZER_ANDROID && !SANITIZER_SOLARIS && !SANITIZER_OPENBSD
+static void *GetFuncAddr(const char *name, const char *ver) {
+ return dlvsym(RTLD_NEXT, name, ver);
+}
+
+bool InterceptFunction(const char *name, const char *ver, uptr *ptr_to_real,
+ uptr func, uptr wrapper) {
+ void *addr = GetFuncAddr(name, ver);
+ *ptr_to_real = (uptr)addr;
+ return addr && (func == wrapper);
+}
+#endif // !SANITIZER_ANDROID && !SANITIZER_SOLARIS && !SANITIZER_OPENBSD
+
+} // namespace __interception
+
+#endif // SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD ||
+ // SANITIZER_OPENBSD || SANITIZER_SOLARIS
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/interception/interception.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/interception/interception.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/interception/interception.h (revision 351984)
@@ -0,0 +1,304 @@
+//===-- interception.h ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Machinery for providing replacements/wrappers for system functions.
+//===----------------------------------------------------------------------===//
+
+#ifndef INTERCEPTION_H
+#define INTERCEPTION_H
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+
+#if !SANITIZER_LINUX && !SANITIZER_FREEBSD && !SANITIZER_MAC && \
+ !SANITIZER_NETBSD && !SANITIZER_OPENBSD && !SANITIZER_WINDOWS && \
+ !SANITIZER_FUCHSIA && !SANITIZER_RTEMS && !SANITIZER_SOLARIS
+# error "Interception doesn't work on this operating system."
+#endif
+
+// These typedefs should be used only in the interceptor definitions to replace
+// the standard system types (e.g. SSIZE_T instead of ssize_t)
+typedef __sanitizer::uptr SIZE_T;
+typedef __sanitizer::sptr SSIZE_T;
+typedef __sanitizer::sptr PTRDIFF_T;
+typedef __sanitizer::s64 INTMAX_T;
+typedef __sanitizer::u64 UINTMAX_T;
+typedef __sanitizer::OFF_T OFF_T;
+typedef __sanitizer::OFF64_T OFF64_T;
+
+// How to add an interceptor:
+// Suppose you need to wrap/replace a system function (generally from libc):
+// int foo(const char *bar, double baz);
+// You'll need to:
+// 1) define INTERCEPTOR(int, foo, const char *bar, double baz) { ... } in
+// your source file. See the notes below for cases when
+// INTERCEPTOR_WITH_SUFFIX(...) should be used instead.
+// 2) Call "INTERCEPT_FUNCTION(foo)" prior to the first call of "foo".
+// INTERCEPT_FUNCTION(foo) evaluates to "true" iff the function was
+// intercepted successfully.
+// You can access the original function by calling REAL(foo)(bar, baz).
+// By default, REAL(foo) will be visible only inside your interceptor, and if
+// you want to use it in other parts of the RTL, you'll need to:
+// 3a) add DECLARE_REAL(int, foo, const char*, double) to a
+// header file.
+// However, if the call "INTERCEPT_FUNCTION(foo)" and definition for
+// INTERCEPTOR(..., foo, ...) are in different files, you'll instead need to:
+// 3b) add DECLARE_REAL_AND_INTERCEPTOR(int, foo, const char*, double)
+// to a header file.
+
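+// As a concrete sketch (foo is a hypothetical libc function, for
+// illustration only):
+//
+//   INTERCEPTOR(int, foo, const char *bar, double baz) {
+//     // ... extra checks or bookkeeping ...
+//     return REAL(foo)(bar, baz);  // forward to the original foo
+//   }
+//   ...
+//   INTERCEPT_FUNCTION(foo);  // once, before the first call to foo
+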
+// Notes: 1. Things may not work properly if the INTERCEPTOR(...) {...} or
+// DECLARE_REAL(...) macros are placed inside namespaces.
+// 2. On Mac you can also use: "OVERRIDE_FUNCTION(foo, zoo)" to
+// effectively redirect calls from "foo" to "zoo". In this case
+// you aren't required to implement
+// INTERCEPTOR(int, foo, const char *bar, double baz) {...}
+// but instead you'll have to add
+// DECLARE_REAL(int, foo, const char *bar, double baz) in your
+// source file (to define a pointer to the overridden function).
+// 3. Some Mac functions have symbol variants discriminated by
+// additional suffixes, e.g. _$UNIX2003 (see
+// https://developer.apple.com/library/mac/#releasenotes/Darwin/SymbolVariantsRelNotes/index.html
+// for more details). To intercept such functions you need to use the
+// INTERCEPTOR_WITH_SUFFIX(...) macro.
+
+// How it works:
+// To replace system functions on Linux we just need to declare functions
+// with the same names in our library and then obtain the real function
+// pointers using dlsym().
+// There is one complication. A user may also intercept some of the functions
+// we intercept. To resolve this we declare our interceptors with __interceptor_
+// prefix, and then make actual interceptors weak aliases to __interceptor_
+// functions.
+//
+// This is not so on Mac OS, where the two-level namespace makes
+// our replacement functions invisible to other libraries. This may be
+// overcome using DYLD_FORCE_FLAT_NAMESPACE, but doing so caused errors
+// loading shared libraries in Chromium.
+// Instead we create a dylib containing a __DATA,__interpose section that
+// associates library functions with their wrappers. When this dylib is
+// preloaded before an executable using DYLD_INSERT_LIBRARIES, it routes all
+// the calls to interposed functions done through stubs to the wrapper
+// functions.
+// As it's decided at compile time which functions are to be intercepted on Mac,
+// INTERCEPT_FUNCTION() is effectively a no-op on this system.
+
+#if SANITIZER_MAC
+#include <sys/cdefs.h> // For __DARWIN_ALIAS_C().
+
+// Just a pair of pointers.
+struct interpose_substitution {
+ const __sanitizer::uptr replacement;
+ const __sanitizer::uptr original;
+};
+
+// For a function foo() create a global pair of pointers { wrap_foo, foo } in
+// the __DATA,__interpose section.
+// As a result all the calls to foo() will be routed to wrap_foo() at runtime.
+#define INTERPOSER(func_name) __attribute__((used)) \
+const interpose_substitution substitution_##func_name[] \
+ __attribute__((section("__DATA, __interpose"))) = { \
+ { reinterpret_cast<const uptr>(WRAP(func_name)), \
+ reinterpret_cast<const uptr>(func_name) } \
+}
+
+// For a function foo() and a wrapper function bar() create a global pair
+// of pointers { bar, foo } in the __DATA,__interpose section.
+// As a result all the calls to foo() will be routed to bar() at runtime.
+#define INTERPOSER_2(func_name, wrapper_name) __attribute__((used)) \
+const interpose_substitution substitution_##func_name[] \
+ __attribute__((section("__DATA, __interpose"))) = { \
+ { reinterpret_cast<const uptr>(wrapper_name), \
+ reinterpret_cast<const uptr>(func_name) } \
+}
+
+# define WRAP(x) wrap_##x
+# define WRAPPER_NAME(x) "wrap_"#x
+# define INTERCEPTOR_ATTRIBUTE
+# define DECLARE_WRAPPER(ret_type, func, ...)
+
+#elif SANITIZER_WINDOWS
+# define WRAP(x) __asan_wrap_##x
+# define WRAPPER_NAME(x) "__asan_wrap_"#x
+# define INTERCEPTOR_ATTRIBUTE __declspec(dllexport)
+# define DECLARE_WRAPPER(ret_type, func, ...) \
+ extern "C" ret_type func(__VA_ARGS__);
+# define DECLARE_WRAPPER_WINAPI(ret_type, func, ...) \
+ extern "C" __declspec(dllimport) ret_type __stdcall func(__VA_ARGS__);
+#elif SANITIZER_RTEMS
+# define WRAP(x) x
+# define WRAPPER_NAME(x) #x
+# define INTERCEPTOR_ATTRIBUTE
+# define DECLARE_WRAPPER(ret_type, func, ...)
+#elif SANITIZER_FREEBSD || SANITIZER_NETBSD
+# define WRAP(x) __interceptor_ ## x
+# define WRAPPER_NAME(x) "__interceptor_" #x
+# define INTERCEPTOR_ATTRIBUTE __attribute__((visibility("default")))
+// FreeBSD's dynamic linker (non-compliantly) gives non-weak symbols higher
+// priority than weak ones so weak aliases won't work for indirect calls
+// in position-independent (-fPIC / -fPIE) mode.
+# define DECLARE_WRAPPER(ret_type, func, ...) \
+ extern "C" ret_type func(__VA_ARGS__) \
+ __attribute__((alias("__interceptor_" #func), visibility("default")));
+#elif !SANITIZER_FUCHSIA
+# define WRAP(x) __interceptor_ ## x
+# define WRAPPER_NAME(x) "__interceptor_" #x
+# define INTERCEPTOR_ATTRIBUTE __attribute__((visibility("default")))
+# define DECLARE_WRAPPER(ret_type, func, ...) \
+ extern "C" ret_type func(__VA_ARGS__) \
+ __attribute__((weak, alias("__interceptor_" #func), visibility("default")));
+#endif
+
+#if SANITIZER_FUCHSIA
+// There is no general interception at all on Fuchsia.
+// Sanitizer runtimes just define functions directly to preempt them,
+// and have bespoke ways to access the underlying libc functions.
+# include <zircon/sanitizer.h>
+# define INTERCEPTOR_ATTRIBUTE __attribute__((visibility("default")))
+# define REAL(x) __unsanitized_##x
+# define DECLARE_REAL(ret_type, func, ...)
+#elif SANITIZER_RTEMS
+# define REAL(x) __real_ ## x
+# define DECLARE_REAL(ret_type, func, ...) \
+ extern "C" ret_type REAL(func)(__VA_ARGS__);
+#elif !SANITIZER_MAC
+# define PTR_TO_REAL(x) real_##x
+# define REAL(x) __interception::PTR_TO_REAL(x)
+# define FUNC_TYPE(x) x##_type
+
+# define DECLARE_REAL(ret_type, func, ...) \
+ typedef ret_type (*FUNC_TYPE(func))(__VA_ARGS__); \
+ namespace __interception { \
+ extern FUNC_TYPE(func) PTR_TO_REAL(func); \
+ }
+# define ASSIGN_REAL(dst, src) REAL(dst) = REAL(src)
+#else // SANITIZER_MAC
+# define REAL(x) x
+# define DECLARE_REAL(ret_type, func, ...) \
+ extern "C" ret_type func(__VA_ARGS__);
+# define ASSIGN_REAL(x, y)
+#endif // SANITIZER_MAC
+
+#if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
+# define DECLARE_REAL_AND_INTERCEPTOR(ret_type, func, ...) \
+ DECLARE_REAL(ret_type, func, __VA_ARGS__) \
+ extern "C" ret_type WRAP(func)(__VA_ARGS__);
+// Declare an interceptor and its wrapper defined in a different translation
+// unit (ex. asm).
+# define DECLARE_EXTERN_INTERCEPTOR_AND_WRAPPER(ret_type, func, ...) \
+ extern "C" ret_type WRAP(func)(__VA_ARGS__); \
+ extern "C" ret_type func(__VA_ARGS__);
+#else
+# define DECLARE_REAL_AND_INTERCEPTOR(ret_type, func, ...)
+# define DECLARE_EXTERN_INTERCEPTOR_AND_WRAPPER(ret_type, func, ...)
+#endif
+
+// Generally, you don't need to use DEFINE_REAL by itself, as the INTERCEPTOR
+// macro does its job. In exceptional cases you may need to call REAL(foo)
+// without defining INTERCEPTOR(..., foo, ...), for example, if you override
+// foo with an interceptor for another function.
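+//
+// A sketch under that assumption (foo and foo2 are hypothetical):
+//
+//   DEFINE_REAL(int, foo, const char *bar, double baz)
+//   ...
+//   ASSIGN_REAL(foo, foo2);   // reuse foo2's looked-up pointer, then
+//   REAL(foo)(bar, baz);      // call the original function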
+#if !SANITIZER_MAC && !SANITIZER_FUCHSIA && !SANITIZER_RTEMS
+# define DEFINE_REAL(ret_type, func, ...) \
+ typedef ret_type (*FUNC_TYPE(func))(__VA_ARGS__); \
+ namespace __interception { \
+ FUNC_TYPE(func) PTR_TO_REAL(func); \
+ }
+#else
+# define DEFINE_REAL(ret_type, func, ...)
+#endif
+
+#if SANITIZER_FUCHSIA
+
+// We need to define the __interceptor_func name just to get
+// sanitizer_common/scripts/gen_dynamic_list.py to export func.
+// But we don't need to export __interceptor_func to get that.
+#define INTERCEPTOR(ret_type, func, ...) \
+ extern "C"[[ gnu::alias(#func), gnu::visibility("hidden") ]] ret_type \
+ __interceptor_##func(__VA_ARGS__); \
+ extern "C" INTERCEPTOR_ATTRIBUTE ret_type func(__VA_ARGS__)
+
+#elif !SANITIZER_MAC
+
+#define INTERCEPTOR(ret_type, func, ...) \
+ DEFINE_REAL(ret_type, func, __VA_ARGS__) \
+ DECLARE_WRAPPER(ret_type, func, __VA_ARGS__) \
+ extern "C" \
+ INTERCEPTOR_ATTRIBUTE \
+ ret_type WRAP(func)(__VA_ARGS__)
+
+// We don't need INTERCEPTOR_WITH_SUFFIX on non-Darwin for now.
+#define INTERCEPTOR_WITH_SUFFIX(ret_type, func, ...) \
+ INTERCEPTOR(ret_type, func, __VA_ARGS__)
+
+#else // SANITIZER_MAC
+
+#define INTERCEPTOR_ZZZ(suffix, ret_type, func, ...) \
+ extern "C" ret_type func(__VA_ARGS__) suffix; \
+ extern "C" ret_type WRAP(func)(__VA_ARGS__); \
+ INTERPOSER(func); \
+ extern "C" INTERCEPTOR_ATTRIBUTE ret_type WRAP(func)(__VA_ARGS__)
+
+#define INTERCEPTOR(ret_type, func, ...) \
+ INTERCEPTOR_ZZZ(/*no symbol variants*/, ret_type, func, __VA_ARGS__)
+
+#define INTERCEPTOR_WITH_SUFFIX(ret_type, func, ...) \
+ INTERCEPTOR_ZZZ(__DARWIN_ALIAS_C(func), ret_type, func, __VA_ARGS__)
+
+// Override |overridee| with |overrider|.
+#define OVERRIDE_FUNCTION(overridee, overrider) \
+ INTERPOSER_2(overridee, WRAP(overrider))
+#endif
+
+#if SANITIZER_WINDOWS
+# define INTERCEPTOR_WINAPI(ret_type, func, ...) \
+ typedef ret_type (__stdcall *FUNC_TYPE(func))(__VA_ARGS__); \
+ namespace __interception { \
+ FUNC_TYPE(func) PTR_TO_REAL(func); \
+ } \
+ extern "C" \
+ INTERCEPTOR_ATTRIBUTE \
+ ret_type __stdcall WRAP(func)(__VA_ARGS__)
+#endif
+
+// ISO C++ forbids casting between pointer-to-function and pointer-to-object,
+// so we use casting via an integral type __interception::uptr,
+// assuming that the system is POSIX-compliant. Other hacks seem
+// challenging, as we don't even pass the function type to the
+// INTERCEPT_FUNCTION macro, only its name.
+namespace __interception {
+#if defined(_WIN64)
+typedef unsigned long long uptr; // NOLINT
+#else
+typedef unsigned long uptr; // NOLINT
+#endif // _WIN64
+} // namespace __interception
+
+#define INCLUDED_FROM_INTERCEPTION_LIB
+
+#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD || \
+ SANITIZER_OPENBSD || SANITIZER_SOLARIS
+
+# include "interception_linux.h"
+# define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func)
+# define INTERCEPT_FUNCTION_VER(func, symver) \
+ INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver)
+#elif SANITIZER_MAC
+# include "interception_mac.h"
+# define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_MAC(func)
+# define INTERCEPT_FUNCTION_VER(func, symver) \
+ INTERCEPT_FUNCTION_VER_MAC(func, symver)
+#elif SANITIZER_WINDOWS
+# include "interception_win.h"
+# define INTERCEPT_FUNCTION(func) INTERCEPT_FUNCTION_WIN(func)
+# define INTERCEPT_FUNCTION_VER(func, symver) \
+ INTERCEPT_FUNCTION_VER_WIN(func, symver)
+#endif
+
+#undef INCLUDED_FROM_INTERCEPTION_LIB
+
+#endif // INTERCEPTION_H
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/interception/interception_linux.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/interception/interception_linux.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/interception/interception_linux.h (revision 351984)
@@ -0,0 +1,53 @@
+//===-- interception_linux.h ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Linux-specific interception methods.
+//===----------------------------------------------------------------------===//
+
+#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD || \
+ SANITIZER_OPENBSD || SANITIZER_SOLARIS
+
+#if !defined(INCLUDED_FROM_INTERCEPTION_LIB)
+# error "interception_linux.h should be included from interception library only"
+#endif
+
+#ifndef INTERCEPTION_LINUX_H
+#define INTERCEPTION_LINUX_H
+
+namespace __interception {
+bool InterceptFunction(const char *name, uptr *ptr_to_real, uptr func,
+ uptr wrapper);
+bool InterceptFunction(const char *name, const char *ver, uptr *ptr_to_real,
+ uptr func, uptr wrapper);
+} // namespace __interception
+
+#define INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func) \
+ ::__interception::InterceptFunction( \
+ #func, \
+ (::__interception::uptr *) & REAL(func), \
+ (::__interception::uptr) & (func), \
+ (::__interception::uptr) & WRAP(func))
+
+// Android, Solaris and OpenBSD do not have dlvsym
+#if !SANITIZER_ANDROID && !SANITIZER_SOLARIS && !SANITIZER_OPENBSD
+#define INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver) \
+ ::__interception::InterceptFunction( \
+ #func, symver, \
+ (::__interception::uptr *) & REAL(func), \
+ (::__interception::uptr) & (func), \
+ (::__interception::uptr) & WRAP(func))
+#else
+#define INTERCEPT_FUNCTION_VER_LINUX_OR_FREEBSD(func, symver) \
+ INTERCEPT_FUNCTION_LINUX_OR_FREEBSD(func)
+#endif // !SANITIZER_ANDROID && !SANITIZER_SOLARIS && !SANITIZER_OPENBSD
+
+#endif // INTERCEPTION_LINUX_H
+#endif // SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD ||
+ // SANITIZER_OPENBSD || SANITIZER_SOLARIS
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/interception/interception_mac.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/interception/interception_mac.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/interception/interception_mac.cc (revision 351984)
@@ -0,0 +1,18 @@
+//===-- interception_mac.cc -------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Mac-specific interception methods.
+//===----------------------------------------------------------------------===//
+
+#include "interception.h"
+
+#if SANITIZER_MAC
+
+#endif // SANITIZER_MAC
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/interception/interception_mac.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/interception/interception_mac.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/interception/interception_mac.h (revision 351984)
@@ -0,0 +1,27 @@
+//===-- interception_mac.h --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Mac-specific interception methods.
+//===----------------------------------------------------------------------===//
+
+#if SANITIZER_MAC
+
+#if !defined(INCLUDED_FROM_INTERCEPTION_LIB)
+# error "interception_mac.h should be included from interception.h only"
+#endif
+
+#ifndef INTERCEPTION_MAC_H
+#define INTERCEPTION_MAC_H
+
+#define INTERCEPT_FUNCTION_MAC(func)
+#define INTERCEPT_FUNCTION_VER_MAC(func, symver)
+
+#endif // INTERCEPTION_MAC_H
+#endif // SANITIZER_MAC
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/interception/interception_type_test.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/interception/interception_type_test.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/interception/interception_type_test.cc (revision 351984)
@@ -0,0 +1,39 @@
+//===-- interception_type_test.cc -------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Compile-time tests of the internal type definitions.
+//===----------------------------------------------------------------------===//
+
+#include "interception.h"
+
+#if SANITIZER_LINUX || SANITIZER_MAC
+
+#include <sys/types.h>
+#include <stddef.h>
+#include <stdint.h>
+
+COMPILER_CHECK(sizeof(::SIZE_T) == sizeof(size_t));
+COMPILER_CHECK(sizeof(::SSIZE_T) == sizeof(ssize_t));
+COMPILER_CHECK(sizeof(::PTRDIFF_T) == sizeof(ptrdiff_t));
+COMPILER_CHECK(sizeof(::INTMAX_T) == sizeof(intmax_t));
+
+#if !SANITIZER_MAC
+COMPILER_CHECK(sizeof(::OFF64_T) == sizeof(off64_t));
+#endif
+
+// The following are the cases when pread (and friends) is used instead of
+// pread64. In those cases we need OFF_T to match off_t. We don't care about the
+// rest (they depend on _FILE_OFFSET_BITS setting when building an application).
+# if SANITIZER_ANDROID || !defined _FILE_OFFSET_BITS || \
+ _FILE_OFFSET_BITS != 64
+COMPILER_CHECK(sizeof(::OFF_T) == sizeof(off_t));
+# endif
+
+#endif
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/interception/interception_win.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/interception/interception_win.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/interception/interception_win.cc (revision 351984)
@@ -0,0 +1,1022 @@
+//===-- interception_win.cc -------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Windows-specific interception methods.
+//
+// This file implements several hooking techniques used to intercept calls
+// to functions. The hooks are installed dynamically by modifying the
+// assembly code.
+//
+// The hooking techniques make assumptions about the way the code is
+// generated and are safe only under these assumptions.
+//
+// On 64-bit architectures there is no direct 64-bit jump instruction. To
+// allow arbitrary branching over the whole address space, the notion of a
+// trampoline region is used: a memory region within a 2 GB boundary where it
+// is safe to add custom assembly code to build 64-bit jumps.
+//
+// Hooking techniques
+// ==================
+//
+// 1) Detour
+//
+// The Detour hooking technique assumes the presence of a header with
+// padding and an overridable 2-byte nop instruction (mov edi, edi). The
+// nop instruction can safely be replaced by a 2-byte jump without any need
+// to save the instruction. A jump to the target is encoded in the function
+// header and the nop instruction is replaced by a short jump to the header.
+//
+// head: 5 x nop head: jmp <hook>
+// func: mov edi, edi --> func: jmp short <head>
+// [...] real: [...]
+//
+// This technique is only implemented on 32-bit architectures.
+// Most of the time, Windows API functions are hookable with the detour
+// technique.
+//
+// 2) Redirect Jump
+//
+// The redirect jump is applicable when the first instruction is a direct
+// jump. The instruction is replaced by a jump to the hook.
+//
+// func: jmp <label> --> func: jmp <hook>
+//
+// On a 64-bit architecture, a trampoline is inserted.
+//
+// func: jmp <label> --> func: jmp <tramp>
+// [...]
+//
+// [trampoline]
+// tramp: jmp QWORD [addr]
+// addr: .bytes <hook>
+//
+// Note: <real> is equivalent to <label>.
+//
+// 3) HotPatch
+//
+// The HotPatch hooking technique assumes the presence of a header with
+// padding and a first instruction of at least 2 bytes.
+//
+// The 2-byte requirement provides the minimal space needed to encode a
+// short jump. The HotPatch technique rewrites only one instruction, to
+// avoid breaking a sequence of instructions containing a branch target.
+//
+// These assumptions are enforced by the MSVC compiler when the /HOTPATCH
+// flag is used; see https://msdn.microsoft.com/en-us/library/ms173507.aspx.
+// The default padding length is 5 bytes on 32-bit and 6 bytes on 64-bit.
+//
+// head: 5 x nop head: jmp <hook>
+// func: <instr> --> func: jmp short <head>
+// [...] body: [...]
+//
+// [trampoline]
+// real: <instr>
+// jmp <body>
+//
+// On a 64-bit architecture:
+//
+// head: 6 x nop head: jmp QWORD [addr1]
+// func: <instr> --> func: jmp short <head>
+// [...] body: [...]
+//
+// [trampoline]
+// addr1: .bytes <hook>
+// real: <instr>
+// jmp QWORD [addr2]
+// addr2: .bytes <body>
+//
+// 4) Trampoline
+//
+// The Trampoline hooking technique is the most aggressive one. It assumes
+// that there is a sequence of instructions that can be safely replaced by
+// a jump (enough room and no incoming branches).
+//
+// Unfortunately, these assumptions can't be verified up front, and code may
+// be broken after hooking.
+//
+// func: <instr> --> func: jmp <hook>
+// <instr>
+// [...] body: [...]
+//
+// [trampoline]
+// real: <instr>
+// <instr>
+// jmp <body>
+//
+// On a 64-bit architecture:
+//
+// func: <instr> --> func: jmp QWORD [addr1]
+// <instr>
+// [...] body: [...]
+//
+// [trampoline]
+// addr1: .bytes <hook>
+// real: <instr>
+// <instr>
+// jmp QWORD [addr2]
+// addr2: .bytes <body>
+//===----------------------------------------------------------------------===//
+
+#include "interception.h"
+
+#if SANITIZER_WINDOWS
+#include "sanitizer_common/sanitizer_platform.h"
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+
+namespace __interception {
+
+static const int kAddressLength = FIRST_32_SECOND_64(4, 8);
+static const int kJumpInstructionLength = 5;
+static const int kShortJumpInstructionLength = 2;
+static const int kIndirectJumpInstructionLength = 6;
+static const int kBranchLength =
+ FIRST_32_SECOND_64(kJumpInstructionLength, kIndirectJumpInstructionLength);
+static const int kDirectBranchLength = kBranchLength + kAddressLength;
+
+static void InterceptionFailed() {
+ // Do we have a good way to abort with an error message here?
+ __debugbreak();
+}
+
+static bool DistanceIsWithin2Gig(uptr from, uptr target) {
+#if SANITIZER_WINDOWS64
+ if (from < target)
+ return target - from <= (uptr)0x7FFFFFFFU;
+ else
+ return from - target <= (uptr)0x80000000U;
+#else
+ // In a 32-bit address space, the address calculation will wrap, so this check
+ // is unnecessary.
+ return true;
+#endif
+}
+
+static uptr GetMmapGranularity() {
+ SYSTEM_INFO si;
+ GetSystemInfo(&si);
+ return si.dwAllocationGranularity;
+}
+
+static uptr RoundUpTo(uptr size, uptr boundary) {
+ return (size + boundary - 1) & ~(boundary - 1);
+}
+
+// FIXME: internal_str* and internal_mem* functions should be moved from the
+// ASan sources into interception/.
+
+static size_t _strlen(const char *str) {
+ const char* p = str;
+ while (*p != '\0') ++p;
+ return p - str;
+}
+
+static char* _strchr(char* str, char c) {
+ while (*str) {
+ if (*str == c)
+ return str;
+ ++str;
+ }
+ return nullptr;
+}
+
+static void _memset(void *p, int value, size_t sz) {
+ for (size_t i = 0; i < sz; ++i)
+ ((char*)p)[i] = (char)value;
+}
+
+static void _memcpy(void *dst, void *src, size_t sz) {
+ char *dst_c = (char*)dst,
+ *src_c = (char*)src;
+ for (size_t i = 0; i < sz; ++i)
+ dst_c[i] = src_c[i];
+}
+
+static bool ChangeMemoryProtection(
+ uptr address, uptr size, DWORD *old_protection) {
+ return ::VirtualProtect((void*)address, size,
+ PAGE_EXECUTE_READWRITE,
+ old_protection) != FALSE;
+}
+
+static bool RestoreMemoryProtection(
+ uptr address, uptr size, DWORD old_protection) {
+ DWORD unused;
+ return ::VirtualProtect((void*)address, size,
+ old_protection,
+ &unused) != FALSE;
+}
+
+static bool IsMemoryPadding(uptr address, uptr size) {
+ u8* function = (u8*)address;
+ for (size_t i = 0; i < size; ++i)
+ if (function[i] != 0x90 && function[i] != 0xCC)
+ return false;
+ return true;
+}
+
+static const u8 kHintNop8Bytes[] = {
+ 0x0F, 0x1F, 0x84, 0x00, 0x00, 0x00, 0x00, 0x00
+};
+
+template<class T>
+static bool FunctionHasPrefix(uptr address, const T &pattern) {
+ u8* function = (u8*)address - sizeof(pattern);
+ for (size_t i = 0; i < sizeof(pattern); ++i)
+ if (function[i] != pattern[i])
+ return false;
+ return true;
+}
+
+static bool FunctionHasPadding(uptr address, uptr size) {
+ if (IsMemoryPadding(address - size, size))
+ return true;
+ if (size <= sizeof(kHintNop8Bytes) &&
+ FunctionHasPrefix(address, kHintNop8Bytes))
+ return true;
+ return false;
+}
+
+static void WritePadding(uptr from, uptr size) {
+ _memset((void*)from, 0xCC, (size_t)size);
+}
+
+static void WriteJumpInstruction(uptr from, uptr target) {
+ if (!DistanceIsWithin2Gig(from + kJumpInstructionLength, target))
+ InterceptionFailed();
+ ptrdiff_t offset = target - from - kJumpInstructionLength;
+ *(u8*)from = 0xE9;
+ *(u32*)(from + 1) = offset;
+}
+
+static void WriteShortJumpInstruction(uptr from, uptr target) {
+ sptr offset = target - from - kShortJumpInstructionLength;
+ if (offset < -128 || offset > 127)
+ InterceptionFailed();
+ *(u8*)from = 0xEB;
+ *(u8*)(from + 1) = (u8)offset;
+}
+
+#if SANITIZER_WINDOWS64
+static void WriteIndirectJumpInstruction(uptr from, uptr indirect_target) {
+ // jmp [rip + <offset>] = FF 25 <offset> where <offset> is a relative
+ // offset.
+ // The offset is the distance from the end of the jump instruction to the
+ // memory location containing the targeted address. The displacement is still
+ // 32-bit in x64, so indirect_target must be located within +/- 2GB range.
+ int offset = indirect_target - from - kIndirectJumpInstructionLength;
+ if (!DistanceIsWithin2Gig(from + kIndirectJumpInstructionLength,
+ indirect_target)) {
+ InterceptionFailed();
+ }
+ *(u16*)from = 0x25FF;
+ *(u32*)(from + 2) = offset;
+}
+#endif
+
+static void WriteBranch(
+ uptr from, uptr indirect_target, uptr target) {
+#if SANITIZER_WINDOWS64
+ WriteIndirectJumpInstruction(from, indirect_target);
+ *(u64*)indirect_target = target;
+#else
+ (void)indirect_target;
+ WriteJumpInstruction(from, target);
+#endif
+}
+
+static void WriteDirectBranch(uptr from, uptr target) {
+#if SANITIZER_WINDOWS64
+ // Emit an indirect jump through immediately following bytes:
+ // jmp [rip + kBranchLength]
+ // .quad <target>
+ WriteBranch(from, from + kBranchLength, target);
+#else
+ WriteJumpInstruction(from, target);
+#endif
+}
+
+struct TrampolineMemoryRegion {
+ uptr content;
+ uptr allocated_size;
+ uptr max_size;
+};
+
+static const uptr kTrampolineScanLimitRange = 1 << 31; // 2 gig
+static const int kMaxTrampolineRegion = 1024;
+static TrampolineMemoryRegion TrampolineRegions[kMaxTrampolineRegion];
+
+static void *AllocateTrampolineRegion(uptr image_address, size_t granularity) {
+#if SANITIZER_WINDOWS64
+ uptr address = image_address;
+ uptr scanned = 0;
+ while (scanned < kTrampolineScanLimitRange) {
+ MEMORY_BASIC_INFORMATION info;
+ if (!::VirtualQuery((void*)address, &info, sizeof(info)))
+ return nullptr;
+
+ // Check whether a region can be allocated at |address|.
+ if (info.State == MEM_FREE && info.RegionSize >= granularity) {
+ void *page = ::VirtualAlloc((void*)RoundUpTo(address, granularity),
+ granularity,
+ MEM_RESERVE | MEM_COMMIT,
+ PAGE_EXECUTE_READWRITE);
+ return page;
+ }
+
+ // Move to the next region.
+ address = (uptr)info.BaseAddress + info.RegionSize;
+ scanned += info.RegionSize;
+ }
+ return nullptr;
+#else
+ return ::VirtualAlloc(nullptr,
+ granularity,
+ MEM_RESERVE | MEM_COMMIT,
+ PAGE_EXECUTE_READWRITE);
+#endif
+}
+
+// Used by unittests to release mapped memory space.
+void TestOnlyReleaseTrampolineRegions() {
+ for (size_t bucket = 0; bucket < kMaxTrampolineRegion; ++bucket) {
+ TrampolineMemoryRegion *current = &TrampolineRegions[bucket];
+ if (current->content == 0)
+ return;
+ ::VirtualFree((void*)current->content, 0, MEM_RELEASE);
+ current->content = 0;
+ }
+}
+
+static uptr AllocateMemoryForTrampoline(uptr image_address, size_t size) {
+ // Find a region within 2G with enough space to allocate |size| bytes.
+ TrampolineMemoryRegion *region = nullptr;
+ for (size_t bucket = 0; bucket < kMaxTrampolineRegion; ++bucket) {
+ TrampolineMemoryRegion* current = &TrampolineRegions[bucket];
+ if (current->content == 0) {
+ // No valid region found, allocate a new region.
+ size_t bucket_size = GetMmapGranularity();
+ void *content = AllocateTrampolineRegion(image_address, bucket_size);
+ if (content == nullptr)
+ return 0U;
+
+ current->content = (uptr)content;
+ current->allocated_size = 0;
+ current->max_size = bucket_size;
+ region = current;
+ break;
+ } else if (current->max_size - current->allocated_size > size) {
+#if SANITIZER_WINDOWS64
+ // On 64-bit, the memory must be allocated within a 2 GB boundary.
+ uptr next_address = current->content + current->allocated_size;
+ if (next_address < image_address ||
+ next_address - image_address >= 0x7FFF0000)
+ continue;
+#endif
+ // The space can be allocated in the current region.
+ region = current;
+ break;
+ }
+ }
+
+ // Failed to find a region.
+ if (region == nullptr)
+ return 0U;
+
+ // Allocate the space in the current region.
+ uptr allocated_space = region->content + region->allocated_size;
+ region->allocated_size += size;
+ WritePadding(allocated_space, size);
+
+ return allocated_space;
+}
+
+// Returns 0 on error.
+static size_t GetInstructionSize(uptr address, size_t* rel_offset = nullptr) {
+ switch (*(u64*)address) {
+ case 0x90909090909006EB: // stub: jmp over 6 x nop.
+ return 8;
+ }
+
+ switch (*(u8*)address) {
+ case 0x90: // 90 : nop
+ return 1;
+
+ case 0x50: // push eax / rax
+ case 0x51: // push ecx / rcx
+ case 0x52: // push edx / rdx
+ case 0x53: // push ebx / rbx
+ case 0x54: // push esp / rsp
+ case 0x55: // push ebp / rbp
+ case 0x56: // push esi / rsi
+ case 0x57: // push edi / rdi
+ case 0x5D: // pop ebp / rbp
+ return 1;
+
+ case 0x6A: // 6A XX = push XX
+ return 2;
+
+ case 0xb8: // b8 XX XX XX XX : mov eax, XX XX XX XX
+ case 0xB9: // b9 XX XX XX XX : mov ecx, XX XX XX XX
+ return 5;
+
+ // Cannot overwrite a control instruction. Return 0 to indicate failure.
+ case 0xE9: // E9 XX XX XX XX : jmp <label>
+ case 0xE8: // E8 XX XX XX XX : call <func>
+ case 0xC3: // C3 : ret
+ case 0xEB: // EB XX : jmp XX (short jump)
+ case 0x70: // 7Y YY : jy XX (short conditional jump)
+ case 0x71:
+ case 0x72:
+ case 0x73:
+ case 0x74:
+ case 0x75:
+ case 0x76:
+ case 0x77:
+ case 0x78:
+ case 0x79:
+ case 0x7A:
+ case 0x7B:
+ case 0x7C:
+ case 0x7D:
+ case 0x7E:
+ case 0x7F:
+ return 0;
+ }
+
+ switch (*(u16*)(address)) {
+ case 0x018A: // 8A 01 : mov al, byte ptr [ecx]
+ case 0xFF8B: // 8B FF : mov edi, edi
+ case 0xEC8B: // 8B EC : mov ebp, esp
+ case 0xc889: // 89 C8 : mov eax, ecx
+ case 0xC18B: // 8B C1 : mov eax, ecx
+ case 0xC033: // 33 C0 : xor eax, eax
+ case 0xC933: // 33 C9 : xor ecx, ecx
+ case 0xD233: // 33 D2 : xor edx, edx
+ return 2;
+
+ // Cannot overwrite a control instruction. Return 0 to indicate failure.
+ case 0x25FF: // FF 25 XX XX XX XX : jmp [XXXXXXXX]
+ return 0;
+ }
+
+ switch (0x00FFFFFF & *(u32*)address) {
+ case 0x24A48D: // 8D A4 24 XX XX XX XX : lea esp, [esp + XX XX XX XX]
+ return 7;
+ }
+
+#if SANITIZER_WINDOWS64
+ switch (*(u8*)address) {
+ case 0xA1: // A1 XX XX XX XX XX XX XX XX :
+ // movabs eax, dword ptr ds:[XXXXXXXX]
+ return 9;
+ }
+
+ switch (*(u16*)address) {
+ case 0x5040: // push rax
+ case 0x5140: // push rcx
+ case 0x5240: // push rdx
+ case 0x5340: // push rbx
+ case 0x5440: // push rsp
+ case 0x5540: // push rbp
+ case 0x5640: // push rsi
+ case 0x5740: // push rdi
+ case 0x5441: // push r12
+ case 0x5541: // push r13
+ case 0x5641: // push r14
+ case 0x5741: // push r15
+ case 0x9066: // Two-byte NOP
+ return 2;
+
+ case 0x058B: // 8B 05 XX XX XX XX : mov eax, dword ptr [XX XX XX XX]
+ if (rel_offset)
+ *rel_offset = 2;
+ return 6;
+ }
+
+ switch (0x00FFFFFF & *(u32*)address) {
+ case 0xe58948: // 48 89 e5 : mov rbp, rsp
+ case 0xc18b48: // 48 8b c1 : mov rax, rcx
+ case 0xc48b48: // 48 8b c4 : mov rax, rsp
+ case 0xd9f748: // 48 f7 d9 : neg rcx
+ case 0xd12b48: // 48 2b d1 : sub rdx, rcx
+ case 0x07c1f6: // f6 c1 07 : test cl, 0x7
+ case 0xc98548: // 48 85 C9 : test rcx, rcx
+ case 0xc0854d: // 4d 85 c0 : test r8, r8
+ case 0xc2b60f: // 0f b6 c2 : movzx eax, dl
+ case 0xc03345: // 45 33 c0 : xor r8d, r8d
+ case 0xc93345: // 45 33 c9 : xor r9d, r9d
+ case 0xdb3345: // 45 33 DB : xor r11d, r11d
+ case 0xd98b4c: // 4c 8b d9 : mov r11, rcx
+ case 0xd28b4c: // 4c 8b d2 : mov r10, rdx
+ case 0xc98b4c: // 4C 8B C9 : mov r9, rcx
+ case 0xc18b4c: // 4C 8B C1 : mov r8, rcx
+ case 0xd2b60f: // 0f b6 d2 : movzx edx, dl
+ case 0xca2b48: // 48 2b ca : sub rcx, rdx
+ case 0x10b70f: // 0f b7 10 : movzx edx, WORD PTR [rax]
+ case 0xc00b4d: // 4d 0b c0 : or r8, r8
+ case 0xd18b48: // 48 8b d1 : mov rdx, rcx
+ case 0xdc8b4c: // 4c 8b dc : mov r11, rsp
+ case 0xd18b4c: // 4c 8b d1 : mov r10, rcx
+ case 0xE0E483: // 83 E4 E0 : and esp, 0xFFFFFFE0
+ return 3;
+
+ case 0xec8348: // 48 83 ec XX : sub rsp, XX
+ case 0xf88349: // 49 83 f8 XX : cmp r8, XX
+ case 0x588948: // 48 89 58 XX : mov QWORD PTR[rax + XX], rbx
+ return 4;
+
+ case 0xec8148: // 48 81 EC XX XX XX XX : sub rsp, XXXXXXXX
+ return 7;
+
+ case 0x058b48: // 48 8b 05 XX XX XX XX :
+ // mov rax, QWORD PTR [rip + XXXXXXXX]
+ case 0x25ff48: // 48 ff 25 XX XX XX XX :
+ // rex.W jmp QWORD PTR [rip + XXXXXXXX]
+
+ // Instructions having offset relative to 'rip' need offset adjustment.
+ if (rel_offset)
+ *rel_offset = 3;
+ return 7;
+
+ case 0x2444c7: // C7 44 24 XX YY YY YY YY
+ // mov dword ptr [rsp + XX], YYYYYYYY
+ return 8;
+ }
+
+ switch (*(u32*)(address)) {
+ case 0x24448b48: // 48 8b 44 24 XX : mov rax, QWORD ptr [rsp + XX]
+ case 0x246c8948: // 48 89 6C 24 XX : mov QWORD ptr [rsp + XX], rbp
+ case 0x245c8948: // 48 89 5c 24 XX : mov QWORD PTR [rsp + XX], rbx
+ case 0x24748948: // 48 89 74 24 XX : mov QWORD PTR [rsp + XX], rsi
+ case 0x244C8948: // 48 89 4C 24 XX : mov QWORD PTR [rsp + XX], rcx
+ case 0x24548948: // 48 89 54 24 XX : mov QWORD PTR [rsp + XX], rdx
+ case 0x244c894c: // 4c 89 4c 24 XX : mov QWORD PTR [rsp + XX], r9
+ case 0x2444894c: // 4c 89 44 24 XX : mov QWORD PTR [rsp + XX], r8
+ return 5;
+ case 0x24648348: // 48 83 64 24 XX : and QWORD PTR [rsp + XX], YY
+ return 6;
+ }
+
+#else
+
+ switch (*(u8*)address) {
+ case 0xA1: // A1 XX XX XX XX : mov eax, dword ptr ds:[XXXXXXXX]
+ return 5;
+ }
+ switch (*(u16*)address) {
+ case 0x458B: // 8B 45 XX : mov eax, dword ptr [ebp + XX]
+ case 0x5D8B: // 8B 5D XX : mov ebx, dword ptr [ebp + XX]
+ case 0x7D8B: // 8B 7D XX : mov edi, dword ptr [ebp + XX]
+ case 0xEC83: // 83 EC XX : sub esp, XX
+ case 0x75FF: // FF 75 XX : push dword ptr [ebp + XX]
+ return 3;
+ case 0xC1F7: // F7 C1 XX YY ZZ WW : test ecx, WWZZYYXX
+ case 0x25FF: // FF 25 XX YY ZZ WW : jmp dword ptr ds:[WWZZYYXX]
+ return 6;
+ case 0x3D83: // 83 3D XX YY ZZ WW TT : cmp dword ptr [WWZZYYXX], TT
+ return 7;
+ case 0x7D83: // 83 7D XX YY : cmp dword ptr [ebp + XX], YY
+ return 4;
+ }
+
+ switch (0x00FFFFFF & *(u32*)address) {
+ case 0x24448A: // 8A 44 24 XX : mov al, byte ptr [esp + XX]
+ case 0x24448B: // 8B 44 24 XX : mov eax, dword ptr [esp + XX]
+ case 0x244C8B: // 8B 4C 24 XX : mov ecx, dword ptr [esp + XX]
+ case 0x24548B: // 8B 54 24 XX : mov edx, dword ptr [esp + XX]
+ case 0x24748B: // 8B 74 24 XX : mov esi, dword ptr [esp + XX]
+ case 0x247C8B: // 8B 7C 24 XX : mov edi, dword ptr [esp + XX]
+ return 4;
+ }
+
+ switch (*(u32*)address) {
+ case 0x2444B60F: // 0F B6 44 24 XX : movzx eax, byte ptr [esp + XX]
+ return 5;
+ }
+#endif
+
+ // Unknown instruction!
+ // FIXME: Unknown instruction failures might happen when we add a new
+ // interceptor or a new compiler version. In either case, they should result
+ // in visible and readable error messages. However, merely calling abort()
+ // leads to an infinite recursion in CheckFailed.
+ InterceptionFailed();
+ return 0;
+}
+
+// Returns 0 on error.
+static size_t RoundUpToInstrBoundary(size_t size, uptr address) {
+ size_t cursor = 0;
+ while (cursor < size) {
+ size_t instruction_size = GetInstructionSize(address + cursor);
+ if (!instruction_size)
+ return 0;
+ cursor += instruction_size;
+ }
+ return cursor;
+}
+
+static bool CopyInstructions(uptr to, uptr from, size_t size) {
+ size_t cursor = 0;
+ while (cursor != size) {
+ size_t rel_offset = 0;
+ size_t instruction_size = GetInstructionSize(from + cursor, &rel_offset);
+ _memcpy((void*)(to + cursor), (void*)(from + cursor),
+ (size_t)instruction_size);
+ if (rel_offset) {
+ uptr delta = to - from;
+ uptr relocated_offset = *(u32*)(to + cursor + rel_offset) - delta;
+#if SANITIZER_WINDOWS64
+ if (relocated_offset + 0x80000000U >= 0xFFFFFFFFU)
+ return false;
+#endif
+ *(u32*)(to + cursor + rel_offset) = relocated_offset;
+ }
+ cursor += instruction_size;
+ }
+ return true;
+}
+
+
+#if !SANITIZER_WINDOWS64
+bool OverrideFunctionWithDetour(
+ uptr old_func, uptr new_func, uptr *orig_old_func) {
+ const int kDetourHeaderLen = 5;
+ const u16 kDetourInstruction = 0xFF8B;
+
+ uptr header = (uptr)old_func - kDetourHeaderLen;
+ uptr patch_length = kDetourHeaderLen + kShortJumpInstructionLength;
+
+ // Validate that the function is hookable.
+ if (*(u16*)old_func != kDetourInstruction ||
+ !IsMemoryPadding(header, kDetourHeaderLen))
+ return false;
+
+ // Change memory protection to writable.
+ DWORD protection = 0;
+ if (!ChangeMemoryProtection(header, patch_length, &protection))
+ return false;
+
+ // Write a relative jump to the redirected function.
+ WriteJumpInstruction(header, new_func);
+
+ // Write the short jump to the function prefix.
+ WriteShortJumpInstruction(old_func, header);
+
+ // Restore previous memory protection.
+ if (!RestoreMemoryProtection(header, patch_length, protection))
+ return false;
+
+ if (orig_old_func)
+ *orig_old_func = old_func + kShortJumpInstructionLength;
+
+ return true;
+}
+#endif
+
+bool OverrideFunctionWithRedirectJump(
+ uptr old_func, uptr new_func, uptr *orig_old_func) {
+ // Check whether the first instruction is a relative jump.
+ if (*(u8*)old_func != 0xE9)
+ return false;
+
+ if (orig_old_func) {
+ uptr relative_offset = *(u32*)(old_func + 1);
+ uptr absolute_target = old_func + relative_offset + kJumpInstructionLength;
+ *orig_old_func = absolute_target;
+ }
+
+#if SANITIZER_WINDOWS64
+ // If needed, get memory space for a trampoline jump.
+ uptr trampoline = AllocateMemoryForTrampoline(old_func, kDirectBranchLength);
+ if (!trampoline)
+ return false;
+ WriteDirectBranch(trampoline, new_func);
+#endif
+
+ // Change memory protection to writable.
+ DWORD protection = 0;
+ if (!ChangeMemoryProtection(old_func, kJumpInstructionLength, &protection))
+ return false;
+
+ // Write a relative jump to the redirected function.
+ WriteJumpInstruction(old_func, FIRST_32_SECOND_64(new_func, trampoline));
+
+ // Restore previous memory protection.
+ if (!RestoreMemoryProtection(old_func, kJumpInstructionLength, protection))
+ return false;
+
+ return true;
+}
+
+bool OverrideFunctionWithHotPatch(
+ uptr old_func, uptr new_func, uptr *orig_old_func) {
+ const int kHotPatchHeaderLen = kBranchLength;
+
+ uptr header = (uptr)old_func - kHotPatchHeaderLen;
+ uptr patch_length = kHotPatchHeaderLen + kShortJumpInstructionLength;
+
+ // Validate that the function is hot patchable.
+ size_t instruction_size = GetInstructionSize(old_func);
+ if (instruction_size < kShortJumpInstructionLength ||
+ !FunctionHasPadding(old_func, kHotPatchHeaderLen))
+ return false;
+
+ if (orig_old_func) {
+ // Put the needed instructions into the trampoline bytes.
+ uptr trampoline_length = instruction_size + kDirectBranchLength;
+ uptr trampoline = AllocateMemoryForTrampoline(old_func, trampoline_length);
+ if (!trampoline)
+ return false;
+ if (!CopyInstructions(trampoline, old_func, instruction_size))
+ return false;
+ WriteDirectBranch(trampoline + instruction_size,
+ old_func + instruction_size);
+ *orig_old_func = trampoline;
+ }
+
+ // If needed, get memory space for indirect address.
+ uptr indirect_address = 0;
+#if SANITIZER_WINDOWS64
+ indirect_address = AllocateMemoryForTrampoline(old_func, kAddressLength);
+ if (!indirect_address)
+ return false;
+#endif
+
+ // Change memory protection to writable.
+ DWORD protection = 0;
+ if (!ChangeMemoryProtection(header, patch_length, &protection))
+ return false;
+
+ // Write jumps to the redirected function.
+ WriteBranch(header, indirect_address, new_func);
+ WriteShortJumpInstruction(old_func, header);
+
+ // Restore previous memory protection.
+ if (!RestoreMemoryProtection(header, patch_length, protection))
+ return false;
+
+ return true;
+}
+
+bool OverrideFunctionWithTrampoline(
+ uptr old_func, uptr new_func, uptr *orig_old_func) {
+
+ size_t instructions_length = kBranchLength;
+ size_t padding_length = 0;
+ uptr indirect_address = 0;
+
+ if (orig_old_func) {
+ // Find out the number of bytes of the instructions we need to copy
+ // to the trampoline.
+ instructions_length = RoundUpToInstrBoundary(kBranchLength, old_func);
+ if (!instructions_length)
+ return false;
+
+ // Put the needed instructions into the trampoline bytes.
+ uptr trampoline_length = instructions_length + kDirectBranchLength;
+ uptr trampoline = AllocateMemoryForTrampoline(old_func, trampoline_length);
+ if (!trampoline)
+ return false;
+ if (!CopyInstructions(trampoline, old_func, instructions_length))
+ return false;
+ WriteDirectBranch(trampoline + instructions_length,
+ old_func + instructions_length);
+ *orig_old_func = trampoline;
+ }
+
+#if SANITIZER_WINDOWS64
+ // Check if the targeted address can be encoded in the function padding.
+ // Otherwise, allocate it in the trampoline region.
+ if (IsMemoryPadding(old_func - kAddressLength, kAddressLength)) {
+ indirect_address = old_func - kAddressLength;
+ padding_length = kAddressLength;
+ } else {
+ indirect_address = AllocateMemoryForTrampoline(old_func, kAddressLength);
+ if (!indirect_address)
+ return false;
+ }
+#endif
+
+ // Change memory protection to writable.
+ uptr patch_address = old_func - padding_length;
+ uptr patch_length = instructions_length + padding_length;
+ DWORD protection = 0;
+ if (!ChangeMemoryProtection(patch_address, patch_length, &protection))
+ return false;
+
+ // Patch the original function.
+ WriteBranch(old_func, indirect_address, new_func);
+
+ // Restore previous memory protection.
+ if (!RestoreMemoryProtection(patch_address, patch_length, protection))
+ return false;
+
+ return true;
+}
+
+bool OverrideFunction(
+ uptr old_func, uptr new_func, uptr *orig_old_func) {
+#if !SANITIZER_WINDOWS64
+ if (OverrideFunctionWithDetour(old_func, new_func, orig_old_func))
+ return true;
+#endif
+ if (OverrideFunctionWithRedirectJump(old_func, new_func, orig_old_func))
+ return true;
+ if (OverrideFunctionWithHotPatch(old_func, new_func, orig_old_func))
+ return true;
+ if (OverrideFunctionWithTrampoline(old_func, new_func, orig_old_func))
+ return true;
+ return false;
+}
+
+static void **InterestingDLLsAvailable() {
+ static const char *InterestingDLLs[] = {
+ "kernel32.dll",
+ "msvcr100.dll", // VS2010
+ "msvcr110.dll", // VS2012
+ "msvcr120.dll", // VS2013
+ "vcruntime140.dll", // VS2015
+ "ucrtbase.dll", // Universal CRT
+ // NTDLL should go last as it exports some functions that we should
+ // override in the CRT [presumably only used internally].
+ "ntdll.dll", NULL};
+ static void *result[ARRAY_SIZE(InterestingDLLs)] = { 0 };
+ if (!result[0]) {
+ for (size_t i = 0, j = 0; InterestingDLLs[i]; ++i) {
+ if (HMODULE h = GetModuleHandleA(InterestingDLLs[i]))
+ result[j++] = (void *)h;
+ }
+ }
+ return &result[0];
+}
+
+namespace {
+// Utility for reading loaded PE images.
+template <typename T> class RVAPtr {
+ public:
+ RVAPtr(void *module, uptr rva)
+ : ptr_(reinterpret_cast<T *>(reinterpret_cast<char *>(module) + rva)) {}
+ operator T *() { return ptr_; }
+ T *operator->() { return ptr_; }
+ T *operator++() { return ++ptr_; }
+
+ private:
+ T *ptr_;
+};
+} // namespace
+
+// Internal implementation of GetProcAddress. At least since Windows 8,
+// GetProcAddress appears to initialize DLLs before returning function pointers
+// into them. This is problematic for the sanitizers, because they typically
+// want to intercept malloc *before* MSVCRT initializes. Our internal
+// implementation walks the export list manually without doing initialization.
+uptr InternalGetProcAddress(void *module, const char *func_name) {
+ // Check that the module header is full and present.
+ RVAPtr<IMAGE_DOS_HEADER> dos_stub(module, 0);
+ RVAPtr<IMAGE_NT_HEADERS> headers(module, dos_stub->e_lfanew);
+ if (!module || dos_stub->e_magic != IMAGE_DOS_SIGNATURE || // "MZ"
+ headers->Signature != IMAGE_NT_SIGNATURE || // "PE\0\0"
+ headers->FileHeader.SizeOfOptionalHeader <
+ sizeof(IMAGE_OPTIONAL_HEADER)) {
+ return 0;
+ }
+
+ IMAGE_DATA_DIRECTORY *export_directory =
+ &headers->OptionalHeader.DataDirectory[IMAGE_DIRECTORY_ENTRY_EXPORT];
+ if (export_directory->Size == 0)
+ return 0;
+ RVAPtr<IMAGE_EXPORT_DIRECTORY> exports(module,
+ export_directory->VirtualAddress);
+ RVAPtr<DWORD> functions(module, exports->AddressOfFunctions);
+ RVAPtr<DWORD> names(module, exports->AddressOfNames);
+ RVAPtr<WORD> ordinals(module, exports->AddressOfNameOrdinals);
+
+ for (DWORD i = 0; i < exports->NumberOfNames; i++) {
+ RVAPtr<char> name(module, names[i]);
+ if (!strcmp(func_name, name)) {
+ DWORD index = ordinals[i];
+ RVAPtr<char> func(module, functions[index]);
+
+ // Handle forwarded functions.
+ DWORD offset = functions[index];
+ if (offset >= export_directory->VirtualAddress &&
+ offset < export_directory->VirtualAddress + export_directory->Size) {
+ // An entry for a forwarded function is a string of the form
+ // "<module>.<function_name>" stored in the export directory.
+ char function_name[256];
+ size_t function_name_length = _strlen(func);
+ if (function_name_length >= sizeof(function_name) - 1)
+ InterceptionFailed();
+
+ _memcpy(function_name, func, function_name_length);
+ function_name[function_name_length] = '\0';
+ char* separator = _strchr(function_name, '.');
+ if (!separator)
+ InterceptionFailed();
+ *separator = '\0';
+
+ void* redirected_module = GetModuleHandleA(function_name);
+ if (!redirected_module)
+ InterceptionFailed();
+ return InternalGetProcAddress(redirected_module, separator + 1);
+ }
+
+ return (uptr)(char *)func;
+ }
+ }
+
+ return 0;
+}
+
+bool OverrideFunction(
+ const char *func_name, uptr new_func, uptr *orig_old_func) {
+ bool hooked = false;
+ void **DLLs = InterestingDLLsAvailable();
+ for (size_t i = 0; DLLs[i]; ++i) {
+ uptr func_addr = InternalGetProcAddress(DLLs[i], func_name);
+ if (func_addr &&
+ OverrideFunction(func_addr, new_func, orig_old_func)) {
+ hooked = true;
+ }
+ }
+ return hooked;
+}
+
+bool OverrideImportedFunction(const char *module_to_patch,
+ const char *imported_module,
+ const char *function_name, uptr new_function,
+ uptr *orig_old_func) {
+ HMODULE module = GetModuleHandleA(module_to_patch);
+ if (!module)
+ return false;
+
+ // Check that the module header is full and present.
+ RVAPtr<IMAGE_DOS_HEADER> dos_stub(module, 0);
+ RVAPtr<IMAGE_NT_HEADERS> headers(module, dos_stub->e_lfanew);
+ if (!module || dos_stub->e_magic != IMAGE_DOS_SIGNATURE || // "MZ"
+ headers->Signature != IMAGE_NT_SIGNATURE || // "PE\0\0"
+ headers->FileHeader.SizeOfOptionalHeader <
+ sizeof(IMAGE_OPTIONAL_HEADER)) {
+ return false;
+ }
+
+ IMAGE_DATA_DIRECTORY *import_directory =
+ &headers->OptionalHeader.DataDirectory[IMAGE_DIRECTORY_ENTRY_IMPORT];
+
+ // Iterate the list of imported DLLs. FirstThunk will be null for the last
+ // entry.
+ RVAPtr<IMAGE_IMPORT_DESCRIPTOR> imports(module,
+ import_directory->VirtualAddress);
+ for (; imports->FirstThunk != 0; ++imports) {
+ RVAPtr<const char> modname(module, imports->Name);
+ if (_stricmp(&*modname, imported_module) == 0)
+ break;
+ }
+ if (imports->FirstThunk == 0)
+ return false;
+
+ // We have two parallel arrays: the import address table (IAT) and the table
+ // of names. They start out containing the same data, but the loader rewrites
+ // the IAT to hold imported addresses and leaves the name table in
+ // OriginalFirstThunk alone.
+ RVAPtr<IMAGE_THUNK_DATA> name_table(module, imports->OriginalFirstThunk);
+ RVAPtr<IMAGE_THUNK_DATA> iat(module, imports->FirstThunk);
+ for (; name_table->u1.Ordinal != 0; ++name_table, ++iat) {
+ if (!IMAGE_SNAP_BY_ORDINAL(name_table->u1.Ordinal)) {
+ RVAPtr<IMAGE_IMPORT_BY_NAME> import_by_name(
+ module, name_table->u1.ForwarderString);
+ const char *funcname = &import_by_name->Name[0];
+ if (strcmp(funcname, function_name) == 0)
+ break;
+ }
+ }
+ if (name_table->u1.Ordinal == 0)
+ return false;
+
+ // Now we have the correct IAT entry. Do the swap. We have to make the page
+ // read/write first.
+ if (orig_old_func)
+ *orig_old_func = iat->u1.AddressOfData;
+ DWORD old_prot, unused_prot;
+ if (!VirtualProtect(&iat->u1.AddressOfData, 4, PAGE_EXECUTE_READWRITE,
+ &old_prot))
+ return false;
+ iat->u1.AddressOfData = new_function;
+ if (!VirtualProtect(&iat->u1.AddressOfData, 4, old_prot, &unused_prot))
+ return false; // Not clear if this failure bothers us.
+ return true;
+}
+
+} // namespace __interception
+
+#endif // SANITIZER_WINDOWS
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/interception/interception_win.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/interception/interception_win.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/interception/interception_win.h (revision 351984)
@@ -0,0 +1,83 @@
+//===-- interception_win.h --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer, an address sanity checker.
+//
+// Windows-specific interception methods.
+//===----------------------------------------------------------------------===//
+
+#if SANITIZER_WINDOWS
+
+#if !defined(INCLUDED_FROM_INTERCEPTION_LIB)
+# error "interception_win.h should be included from interception library only"
+#endif
+
+#ifndef INTERCEPTION_WIN_H
+#define INTERCEPTION_WIN_H
+
+namespace __interception {
+// All the functions in the OverrideFunction() family return true on success,
+// false on failure (including "couldn't find the function").
+
+// Overrides a function by its address.
+bool OverrideFunction(uptr old_func, uptr new_func, uptr *orig_old_func = 0);
+
+// Overrides a function in a system DLL or in the CRT DLL by its exported name.
+bool OverrideFunction(const char *name, uptr new_func, uptr *orig_old_func = 0);
+
+// Windows-only replacement for GetProcAddress. Useful for some sanitizers.
+uptr InternalGetProcAddress(void *module, const char *func_name);
+
+// Overrides a function only when it is called from a specific DLL. For example,
+// this is used to override calls to HeapAlloc/HeapFree from ucrtbase without
+// affecting other third party libraries.
+bool OverrideImportedFunction(const char *module_to_patch,
+ const char *imported_module,
+ const char *function_name, uptr new_function,
+ uptr *orig_old_func);
+
+#if !SANITIZER_WINDOWS64
+// Exposed for unittests
+bool OverrideFunctionWithDetour(
+ uptr old_func, uptr new_func, uptr *orig_old_func);
+#endif
+
+// Exposed for unittests
+bool OverrideFunctionWithRedirectJump(
+ uptr old_func, uptr new_func, uptr *orig_old_func);
+bool OverrideFunctionWithHotPatch(
+ uptr old_func, uptr new_func, uptr *orig_old_func);
+bool OverrideFunctionWithTrampoline(
+ uptr old_func, uptr new_func, uptr *orig_old_func);
+
+// Exposed for unittests
+void TestOnlyReleaseTrampolineRegions();
+
+} // namespace __interception
+
+#if defined(INTERCEPTION_DYNAMIC_CRT)
+#define INTERCEPT_FUNCTION_WIN(func) \
+ ::__interception::OverrideFunction(#func, \
+ (::__interception::uptr)WRAP(func), \
+ (::__interception::uptr *)&REAL(func))
+#else
+#define INTERCEPT_FUNCTION_WIN(func) \
+ ::__interception::OverrideFunction((::__interception::uptr)func, \
+ (::__interception::uptr)WRAP(func), \
+ (::__interception::uptr *)&REAL(func))
+#endif
+
+#define INTERCEPT_FUNCTION_VER_WIN(func, symver) INTERCEPT_FUNCTION_WIN(func)
+
+#define INTERCEPT_FUNCTION_DLLIMPORT(user_dll, provider_dll, func) \
+ ::__interception::OverrideImportedFunction( \
+ user_dll, provider_dll, #func, (::__interception::uptr)WRAP(func), \
+ (::__interception::uptr *)&REAL(func))
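+
+// For illustration, an interceptor would typically hook a CRT function via
+//   INTERCEPT_FUNCTION_WIN(memset);
+// which, under INTERCEPTION_DYNAMIC_CRT, expands to roughly:
+//   ::__interception::OverrideFunction("memset",
+//       (::__interception::uptr)WRAP(memset),
+//       (::__interception::uptr *)&REAL(memset));
+// (memset is used here only as an example function name).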
+
+#endif // INTERCEPTION_WIN_H
+#endif // SANITIZER_WINDOWS
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_rtl.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_rtl.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_rtl.cc (revision 351984)
@@ -0,0 +1,1100 @@
+//===-- tsan_rtl.cc -------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Main file (entry points) for the TSan run-time.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_atomic.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_file.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_symbolizer.h"
+#include "tsan_defs.h"
+#include "tsan_platform.h"
+#include "tsan_rtl.h"
+#include "tsan_mman.h"
+#include "tsan_suppressions.h"
+#include "tsan_symbolize.h"
+#include "ubsan/ubsan_init.h"
+
+#ifdef __SSE3__
+// <emmintrin.h> transitively includes <stdlib.h>,
+// and it's prohibited to include std headers into tsan runtime.
+// So we do this dirty trick.
+#define _MM_MALLOC_H_INCLUDED
+#define __MM_MALLOC_H
+#include <emmintrin.h>
+typedef __m128i m128;
+#endif
+
+volatile int __tsan_resumed = 0;
+
+extern "C" void __tsan_resume() {
+ __tsan_resumed = 1;
+}
+
+namespace __tsan {
+
+#if !SANITIZER_GO && !SANITIZER_MAC
+__attribute__((tls_model("initial-exec")))
+THREADLOCAL char cur_thread_placeholder[sizeof(ThreadState)] ALIGNED(64);
+#endif
+static char ctx_placeholder[sizeof(Context)] ALIGNED(64);
+Context *ctx;
+
+// Can be overridden by a front-end.
+#ifdef TSAN_EXTERNAL_HOOKS
+bool OnFinalize(bool failed);
+void OnInitialize();
+#else
+SANITIZER_WEAK_CXX_DEFAULT_IMPL
+bool OnFinalize(bool failed) {
+ return failed;
+}
+SANITIZER_WEAK_CXX_DEFAULT_IMPL
+void OnInitialize() {}
+#endif
+
+static char thread_registry_placeholder[sizeof(ThreadRegistry)];
+
+static ThreadContextBase *CreateThreadContext(u32 tid) {
+ // Map thread trace when context is created.
+ char name[50];
+ internal_snprintf(name, sizeof(name), "trace %u", tid);
+ MapThreadTrace(GetThreadTrace(tid), TraceSize() * sizeof(Event), name);
+ const uptr hdr = GetThreadTraceHeader(tid);
+ internal_snprintf(name, sizeof(name), "trace header %u", tid);
+ MapThreadTrace(hdr, sizeof(Trace), name);
+ new((void*)hdr) Trace();
+ // We are going to use only a small part of the trace with the default
+ // value of history_size. However, the constructor writes to the whole trace.
+ // Unmap the unused part.
+ uptr hdr_end = hdr + sizeof(Trace);
+ hdr_end -= sizeof(TraceHeader) * (kTraceParts - TraceParts());
+ hdr_end = RoundUp(hdr_end, GetPageSizeCached());
+ if (hdr_end < hdr + sizeof(Trace))
+ UnmapOrDie((void*)hdr_end, hdr + sizeof(Trace) - hdr_end);
+ void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadContext));
+ return new(mem) ThreadContext(tid);
+}
+
+#if !SANITIZER_GO
+static const u32 kThreadQuarantineSize = 16;
+#else
+static const u32 kThreadQuarantineSize = 64;
+#endif
+
+Context::Context()
+ : initialized()
+ , report_mtx(MutexTypeReport, StatMtxReport)
+ , nreported()
+ , nmissed_expected()
+ , thread_registry(new(thread_registry_placeholder) ThreadRegistry(
+ CreateThreadContext, kMaxTid, kThreadQuarantineSize, kMaxTidReuse))
+ , racy_mtx(MutexTypeRacy, StatMtxRacy)
+ , racy_stacks()
+ , racy_addresses()
+ , fired_suppressions_mtx(MutexTypeFired, StatMtxFired)
+ , clock_alloc("clock allocator") {
+ fired_suppressions.reserve(8);
+}
+
+// The objects are allocated in TLS, so one may rely on zero-initialization.
+ThreadState::ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
+ unsigned reuse_count,
+ uptr stk_addr, uptr stk_size,
+ uptr tls_addr, uptr tls_size)
+ : fast_state(tid, epoch)
+ // Do not touch these, rely on zero initialization,
+ // they may be accessed before the ctor.
+ // , ignore_reads_and_writes()
+ // , ignore_interceptors()
+ , clock(tid, reuse_count)
+#if !SANITIZER_GO
+ , jmp_bufs()
+#endif
+ , tid(tid)
+ , unique_id(unique_id)
+ , stk_addr(stk_addr)
+ , stk_size(stk_size)
+ , tls_addr(tls_addr)
+ , tls_size(tls_size)
+#if !SANITIZER_GO
+ , last_sleep_clock(tid)
+#endif
+{
+}
+
+#if !SANITIZER_GO
+static void MemoryProfiler(Context *ctx, fd_t fd, int i) {
+ uptr n_threads;
+ uptr n_running_threads;
+ ctx->thread_registry->GetNumberOfThreads(&n_threads, &n_running_threads);
+ InternalMmapVector<char> buf(4096);
+ WriteMemoryProfile(buf.data(), buf.size(), n_threads, n_running_threads);
+ WriteToFile(fd, buf.data(), internal_strlen(buf.data()));
+}
+
+static void BackgroundThread(void *arg) {
+ // This is a non-initialized non-user thread, nothing to see here.
+ // We don't use ScopedIgnoreInterceptors, because we want ignores to be
+ // enabled even when the thread function exits (e.g. during pthread thread
+ // shutdown code).
+ cur_thread_init();
+ cur_thread()->ignore_interceptors++;
+ const u64 kMs2Ns = 1000 * 1000;
+
+ fd_t mprof_fd = kInvalidFd;
+ if (flags()->profile_memory && flags()->profile_memory[0]) {
+ if (internal_strcmp(flags()->profile_memory, "stdout") == 0) {
+ mprof_fd = 1;
+ } else if (internal_strcmp(flags()->profile_memory, "stderr") == 0) {
+ mprof_fd = 2;
+ } else {
+ InternalScopedString filename(kMaxPathLength);
+ filename.append("%s.%d", flags()->profile_memory, (int)internal_getpid());
+ fd_t fd = OpenFile(filename.data(), WrOnly);
+ if (fd == kInvalidFd) {
+ Printf("ThreadSanitizer: failed to open memory profile file '%s'\n",
+ &filename[0]);
+ } else {
+ mprof_fd = fd;
+ }
+ }
+ }
+
+ u64 last_flush = NanoTime();
+ uptr last_rss = 0;
+ for (int i = 0;
+ atomic_load(&ctx->stop_background_thread, memory_order_relaxed) == 0;
+ i++) {
+ SleepForMillis(100);
+ u64 now = NanoTime();
+
+ // Flush memory if requested.
+ if (flags()->flush_memory_ms > 0) {
+ if (last_flush + flags()->flush_memory_ms * kMs2Ns < now) {
+ VPrintf(1, "ThreadSanitizer: periodic memory flush\n");
+ FlushShadowMemory();
+ last_flush = NanoTime();
+ }
+ }
+ // GetRSS can be expensive on huge programs, so don't do it every 100ms.
+ if (flags()->memory_limit_mb > 0) {
+ uptr rss = GetRSS();
+ uptr limit = uptr(flags()->memory_limit_mb) << 20;
+ VPrintf(1, "ThreadSanitizer: memory flush check"
+ " RSS=%llu LAST=%llu LIMIT=%llu\n",
+ (u64)rss >> 20, (u64)last_rss >> 20, (u64)limit >> 20);
+ if (2 * rss > limit + last_rss) {
+ VPrintf(1, "ThreadSanitizer: flushing memory due to RSS\n");
+ FlushShadowMemory();
+ rss = GetRSS();
+ VPrintf(1, "ThreadSanitizer: memory flushed RSS=%llu\n", (u64)rss>>20);
+ }
+ last_rss = rss;
+ }
+
+ // Write memory profile if requested.
+ if (mprof_fd != kInvalidFd)
+ MemoryProfiler(ctx, mprof_fd, i);
+
+ // Flush symbolizer cache if requested.
+ if (flags()->flush_symbolizer_ms > 0) {
+ u64 last = atomic_load(&ctx->last_symbolize_time_ns,
+ memory_order_relaxed);
+ if (last != 0 && last + flags()->flush_symbolizer_ms * kMs2Ns < now) {
+ Lock l(&ctx->report_mtx);
+ ScopedErrorReportLock l2;
+ SymbolizeFlush();
+ atomic_store(&ctx->last_symbolize_time_ns, 0, memory_order_relaxed);
+ }
+ }
+ }
+}
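+
+// The background thread is driven entirely by flags. For example, a run with
+//   TSAN_OPTIONS="profile_memory=stdout flush_memory_ms=1000"
+// (illustrative values) writes a memory profile on every 100ms iteration and
+// flushes shadow memory roughly once per second.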
+
+static void StartBackgroundThread() {
+ ctx->background_thread = internal_start_thread(&BackgroundThread, 0);
+}
+
+#ifndef __mips__
+static void StopBackgroundThread() {
+ atomic_store(&ctx->stop_background_thread, 1, memory_order_relaxed);
+ internal_join_thread(ctx->background_thread);
+ ctx->background_thread = 0;
+}
+#endif
+#endif
+
+void DontNeedShadowFor(uptr addr, uptr size) {
+ ReleaseMemoryPagesToOS(MemToShadow(addr), MemToShadow(addr + size));
+}
+
+void MapShadow(uptr addr, uptr size) {
+ // Global data is not 64K aligned, but there are no adjacent mappings,
+ // so we can get away with unaligned mapping.
+ // CHECK_EQ(addr, addr & ~((64 << 10) - 1)); // windows wants 64K alignment
+ const uptr kPageSize = GetPageSizeCached();
+ uptr shadow_begin = RoundDownTo((uptr)MemToShadow(addr), kPageSize);
+ uptr shadow_end = RoundUpTo((uptr)MemToShadow(addr + size), kPageSize);
+ if (!MmapFixedNoReserve(shadow_begin, shadow_end - shadow_begin, "shadow"))
+ Die();
+
+ // Meta shadow is 2:1, so tread carefully.
+ static bool data_mapped = false;
+ static uptr mapped_meta_end = 0;
+ uptr meta_begin = (uptr)MemToMeta(addr);
+ uptr meta_end = (uptr)MemToMeta(addr + size);
+ meta_begin = RoundDownTo(meta_begin, 64 << 10);
+ meta_end = RoundUpTo(meta_end, 64 << 10);
+ if (!data_mapped) {
+ // First call maps data+bss.
+ data_mapped = true;
+ if (!MmapFixedNoReserve(meta_begin, meta_end - meta_begin, "meta shadow"))
+ Die();
+ } else {
+ // Mapping continuous heap.
+ // Windows wants 64K alignment.
+ meta_begin = RoundDownTo(meta_begin, 64 << 10);
+ meta_end = RoundUpTo(meta_end, 64 << 10);
+ if (meta_end <= mapped_meta_end)
+ return;
+ if (meta_begin < mapped_meta_end)
+ meta_begin = mapped_meta_end;
+ if (!MmapFixedNoReserve(meta_begin, meta_end - meta_begin, "meta shadow"))
+ Die();
+ mapped_meta_end = meta_end;
+ }
+ VPrintf(2, "mapped meta shadow for (%p-%p) at (%p-%p)\n",
+ addr, addr+size, meta_begin, meta_end);
+}
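+
+// A worked example of the rounding above (addresses are illustrative): with a
+// 4K page size, a shadow begin of 0x12345 is rounded down to 0x12000 and a
+// shadow end of 0x12abc is rounded up to 0x13000 before mapping; the meta
+// shadow uses a fixed 64K unit to satisfy the Windows allocation granularity.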
+
+void MapThreadTrace(uptr addr, uptr size, const char *name) {
+ DPrintf("#0: Mapping trace at %p-%p(0x%zx)\n", addr, addr + size, size);
+ CHECK_GE(addr, TraceMemBeg());
+ CHECK_LE(addr + size, TraceMemEnd());
+ CHECK_EQ(addr, addr & ~((64 << 10) - 1)); // windows wants 64K alignment
+ if (!MmapFixedNoReserve(addr, size, name)) {
+ Printf("FATAL: ThreadSanitizer can not mmap thread trace (%p/%p)\n",
+ addr, size);
+ Die();
+ }
+}
+
+static void CheckShadowMapping() {
+ uptr beg, end;
+ for (int i = 0; GetUserRegion(i, &beg, &end); i++) {
+ // Skip cases for empty regions (heap definition for architectures that
+ // do not use 64-bit allocator).
+ if (beg == end)
+ continue;
+ VPrintf(3, "checking shadow region %p-%p\n", beg, end);
+ uptr prev = 0;
+ for (uptr p0 = beg; p0 <= end; p0 += (end - beg) / 4) {
+ for (int x = -(int)kShadowCell; x <= (int)kShadowCell; x += kShadowCell) {
+ const uptr p = RoundDown(p0 + x, kShadowCell);
+ if (p < beg || p >= end)
+ continue;
+ const uptr s = MemToShadow(p);
+ const uptr m = (uptr)MemToMeta(p);
+ VPrintf(3, " checking pointer %p: shadow=%p meta=%p\n", p, s, m);
+ CHECK(IsAppMem(p));
+ CHECK(IsShadowMem(s));
+ CHECK_EQ(p, ShadowToMem(s));
+ CHECK(IsMetaMem(m));
+ if (prev) {
+ // Ensure that shadow and meta mappings are linear within a single
+ // user range. Lots of code that processes memory ranges assumes it.
+ const uptr prev_s = MemToShadow(prev);
+ const uptr prev_m = (uptr)MemToMeta(prev);
+ CHECK_EQ(s - prev_s, (p - prev) * kShadowMultiplier);
+ CHECK_EQ((m - prev_m) / kMetaShadowSize,
+ (p - prev) / kMetaShadowCell);
+ }
+ prev = p;
+ }
+ }
+ }
+}
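+
+// Concretely, the linearity invariant verified above means that for any two
+// app addresses p and q in the same user region,
+//   MemToShadow(q) - MemToShadow(p) == (q - p) * kShadowMultiplier,
+// so with the usual kShadowMultiplier of 4, app addresses 8 bytes apart map
+// to shadow cells 32 bytes apart.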
+
+#if !SANITIZER_GO
+static void OnStackUnwind(const SignalContext &sig, const void *,
+ BufferedStackTrace *stack) {
+ stack->Unwind(sig.pc, sig.bp, sig.context,
+ common_flags()->fast_unwind_on_fatal);
+}
+
+static void TsanOnDeadlySignal(int signo, void *siginfo, void *context) {
+ HandleDeadlySignal(siginfo, context, GetTid(), &OnStackUnwind, nullptr);
+}
+#endif
+
+void Initialize(ThreadState *thr) {
+ // Thread safe because done before all threads exist.
+ static bool is_initialized = false;
+ if (is_initialized)
+ return;
+ is_initialized = true;
+ // We are not ready to handle interceptors yet.
+ ScopedIgnoreInterceptors ignore;
+ SanitizerToolName = "ThreadSanitizer";
+ // Install tool-specific callbacks in sanitizer_common.
+ SetCheckFailedCallback(TsanCheckFailed);
+
+ ctx = new(ctx_placeholder) Context;
+ const char *env_name = SANITIZER_GO ? "GORACE" : "TSAN_OPTIONS";
+ const char *options = GetEnv(env_name);
+ CacheBinaryName();
+ CheckASLR();
+ InitializeFlags(&ctx->flags, options, env_name);
+ AvoidCVE_2016_2143();
+ __sanitizer::InitializePlatformEarly();
+ __tsan::InitializePlatformEarly();
+
+#if !SANITIZER_GO
+ // Re-exec ourselves if we need to set additional env or command line args.
+ MaybeReexec();
+
+ InitializeAllocator();
+ ReplaceSystemMalloc();
+#endif
+ if (common_flags()->detect_deadlocks)
+ ctx->dd = DDetector::Create(flags());
+ Processor *proc = ProcCreate();
+ ProcWire(proc, thr);
+ InitializeInterceptors();
+ CheckShadowMapping();
+ InitializePlatform();
+ InitializeMutex();
+ InitializeDynamicAnnotations();
+#if !SANITIZER_GO
+ InitializeShadowMemory();
+ InitializeAllocatorLate();
+ InstallDeadlySignalHandlers(TsanOnDeadlySignal);
+#endif
+ // Setup correct file descriptor for error reports.
+ __sanitizer_set_report_path(common_flags()->log_path);
+ InitializeSuppressions();
+#if !SANITIZER_GO
+ InitializeLibIgnore();
+ Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer);
+#endif
+
+ VPrintf(1, "***** Running under ThreadSanitizer v2 (pid %d) *****\n",
+ (int)internal_getpid());
+
+ // Initialize thread 0.
+ int tid = ThreadCreate(thr, 0, 0, true);
+ CHECK_EQ(tid, 0);
+ ThreadStart(thr, tid, GetTid(), ThreadType::Regular);
+#if TSAN_CONTAINS_UBSAN
+ __ubsan::InitAsPlugin();
+#endif
+ ctx->initialized = true;
+
+#if !SANITIZER_GO
+ Symbolizer::LateInitialize();
+#endif
+
+ if (flags()->stop_on_start) {
+ Printf("ThreadSanitizer is suspended at startup (pid %d)."
+ " Call __tsan_resume().\n",
+ (int)internal_getpid());
+ while (__tsan_resumed == 0) {}
+ }
+
+ OnInitialize();
+}
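+
+// A sketch of how stop_on_start is typically used: start the program with
+//   TSAN_OPTIONS=stop_on_start=1
+// attach a debugger to the printed pid, and then resume the runtime with
+//   (gdb) call __tsan_resume()
+// which sets __tsan_resumed and lets the busy-wait loop above exit.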
+
+void MaybeSpawnBackgroundThread() {
+ // On MIPS, TSan initialization runs before
+ // __pthread_initialize_minimal_internal() has finished, so we cannot spawn
+ // new threads.
+#if !SANITIZER_GO && !defined(__mips__)
+ static atomic_uint32_t bg_thread = {};
+ if (atomic_load(&bg_thread, memory_order_relaxed) == 0 &&
+ atomic_exchange(&bg_thread, 1, memory_order_relaxed) == 0) {
+ StartBackgroundThread();
+ SetSandboxingCallback(StopBackgroundThread);
+ }
+#endif
+}
+
+int Finalize(ThreadState *thr) {
+ bool failed = false;
+
+ if (common_flags()->print_module_map == 1) PrintModuleMap();
+
+ if (flags()->atexit_sleep_ms > 0 && ThreadCount(thr) > 1)
+ SleepForMillis(flags()->atexit_sleep_ms);
+
+ // Wait for pending reports.
+ ctx->report_mtx.Lock();
+ { ScopedErrorReportLock l; }
+ ctx->report_mtx.Unlock();
+
+#if !SANITIZER_GO
+ if (Verbosity()) AllocatorPrintStats();
+#endif
+
+ ThreadFinalize(thr);
+
+ if (ctx->nreported) {
+ failed = true;
+#if !SANITIZER_GO
+ Printf("ThreadSanitizer: reported %d warnings\n", ctx->nreported);
+#else
+ Printf("Found %d data race(s)\n", ctx->nreported);
+#endif
+ }
+
+ if (ctx->nmissed_expected) {
+ failed = true;
+ Printf("ThreadSanitizer: missed %d expected races\n",
+ ctx->nmissed_expected);
+ }
+
+ if (common_flags()->print_suppressions)
+ PrintMatchedSuppressions();
+#if !SANITIZER_GO
+ if (flags()->print_benign)
+ PrintMatchedBenignRaces();
+#endif
+
+ failed = OnFinalize(failed);
+
+#if TSAN_COLLECT_STATS
+ StatAggregate(ctx->stat, thr->stat);
+ StatOutput(ctx->stat);
+#endif
+
+ return failed ? common_flags()->exitcode : 0;
+}
+
+#if !SANITIZER_GO
+void ForkBefore(ThreadState *thr, uptr pc) {
+ ctx->thread_registry->Lock();
+ ctx->report_mtx.Lock();
+}
+
+void ForkParentAfter(ThreadState *thr, uptr pc) {
+ ctx->report_mtx.Unlock();
+ ctx->thread_registry->Unlock();
+}
+
+void ForkChildAfter(ThreadState *thr, uptr pc) {
+ ctx->report_mtx.Unlock();
+ ctx->thread_registry->Unlock();
+
+ uptr nthread = 0;
+ ctx->thread_registry->GetNumberOfThreads(0, 0, &nthread /* alive threads */);
+ VPrintf(1, "ThreadSanitizer: forked new process with pid %d,"
+ " parent had %d threads\n", (int)internal_getpid(), (int)nthread);
+ if (nthread == 1) {
+ StartBackgroundThread();
+ } else {
+ // We've just forked a multi-threaded process. We cannot reasonably function
+ // after that (some mutexes may be locked before fork). So just enable
+ // ignores for everything in the hope that we will exec soon.
+ ctx->after_multithreaded_fork = true;
+ thr->ignore_interceptors++;
+ ThreadIgnoreBegin(thr, pc);
+ ThreadIgnoreSyncBegin(thr, pc);
+ }
+}
+#endif
+
+#if SANITIZER_GO
+NOINLINE
+void GrowShadowStack(ThreadState *thr) {
+ const int sz = thr->shadow_stack_end - thr->shadow_stack;
+ const int newsz = 2 * sz;
+ uptr *newstack = (uptr*)internal_alloc(MBlockShadowStack,
+ newsz * sizeof(uptr));
+ internal_memcpy(newstack, thr->shadow_stack, sz * sizeof(uptr));
+ internal_free(thr->shadow_stack);
+ thr->shadow_stack = newstack;
+ thr->shadow_stack_pos = newstack + sz;
+ thr->shadow_stack_end = newstack + newsz;
+}
+#endif
+
+u32 CurrentStackId(ThreadState *thr, uptr pc) {
+ if (!thr->is_inited) // May happen during bootstrap.
+ return 0;
+ if (pc != 0) {
+#if !SANITIZER_GO
+ DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
+#else
+ if (thr->shadow_stack_pos == thr->shadow_stack_end)
+ GrowShadowStack(thr);
+#endif
+ thr->shadow_stack_pos[0] = pc;
+ thr->shadow_stack_pos++;
+ }
+ u32 id = StackDepotPut(
+ StackTrace(thr->shadow_stack, thr->shadow_stack_pos - thr->shadow_stack));
+ if (pc != 0)
+ thr->shadow_stack_pos--;
+ return id;
+}
+
+void TraceSwitch(ThreadState *thr) {
+#if !SANITIZER_GO
+ if (ctx->after_multithreaded_fork)
+ return;
+#endif
+ thr->nomalloc++;
+ Trace *thr_trace = ThreadTrace(thr->tid);
+ Lock l(&thr_trace->mtx);
+ unsigned trace = (thr->fast_state.epoch() / kTracePartSize) % TraceParts();
+ TraceHeader *hdr = &thr_trace->headers[trace];
+ hdr->epoch0 = thr->fast_state.epoch();
+ ObtainCurrentStack(thr, 0, &hdr->stack0);
+ hdr->mset0 = thr->mset;
+ thr->nomalloc--;
+}
+
+Trace *ThreadTrace(int tid) {
+ return (Trace*)GetThreadTraceHeader(tid);
+}
+
+uptr TraceTopPC(ThreadState *thr) {
+ Event *events = (Event*)GetThreadTrace(thr->tid);
+ uptr pc = events[thr->fast_state.GetTracePos()];
+ return pc;
+}
+
+uptr TraceSize() {
+ return (uptr)(1ull << (kTracePartSizeBits + flags()->history_size + 1));
+}
+
+uptr TraceParts() {
+ return TraceSize() / kTracePartSize;
+}
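+
+// For example, with the default history_size of 2 (and assuming
+// kTracePartSize == 1 << kTracePartSizeBits):
+//   TraceSize() == 1 << (kTracePartSizeBits + 3) events
+//   TraceParts() == TraceSize() / kTracePartSize == 8 parts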
+
+#if !SANITIZER_GO
+extern "C" void __tsan_trace_switch() {
+ TraceSwitch(cur_thread());
+}
+
+extern "C" void __tsan_report_race() {
+ ReportRace(cur_thread());
+}
+#endif
+
+ALWAYS_INLINE
+Shadow LoadShadow(u64 *p) {
+ u64 raw = atomic_load((atomic_uint64_t*)p, memory_order_relaxed);
+ return Shadow(raw);
+}
+
+ALWAYS_INLINE
+void StoreShadow(u64 *sp, u64 s) {
+ atomic_store((atomic_uint64_t*)sp, s, memory_order_relaxed);
+}
+
+ALWAYS_INLINE
+void StoreIfNotYetStored(u64 *sp, u64 *s) {
+ StoreShadow(sp, *s);
+ *s = 0;
+}
+
+ALWAYS_INLINE
+void HandleRace(ThreadState *thr, u64 *shadow_mem,
+ Shadow cur, Shadow old) {
+ thr->racy_state[0] = cur.raw();
+ thr->racy_state[1] = old.raw();
+ thr->racy_shadow_addr = shadow_mem;
+#if !SANITIZER_GO
+ HACKY_CALL(__tsan_report_race);
+#else
+ ReportRace(thr);
+#endif
+}
+
+static inline bool HappensBefore(Shadow old, ThreadState *thr) {
+ return thr->clock.get(old.TidWithIgnore()) >= old.epoch();
+}
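+
+// Example: if the old shadow value was written by thread 3 at epoch 10, the
+// current access sees it as happened-before iff this thread's vector clock
+// entry for thread 3 is >= 10, i.e. it has synchronized with thread 3 at or
+// after that write.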
+
+ALWAYS_INLINE
+void MemoryAccessImpl1(ThreadState *thr, uptr addr,
+ int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
+ u64 *shadow_mem, Shadow cur) {
+ StatInc(thr, StatMop);
+ StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
+ StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
+
+ // This potentially can live in an MMX/SSE scratch register.
+ // The required intrinsics are:
+ // __m128i _mm_move_epi64(__m128i*);
+ // _mm_storel_epi64(u64*, __m128i);
+ u64 store_word = cur.raw();
+ bool stored = false;
+
+ // Scan all the shadow values and dispatch to 4 categories:
+ // same, replace, candidate and race (see comments below).
+ // We consider only 3 cases regarding access sizes:
+ // equal, intersect and not intersect. Initially we also considered
+ // larger and smaller cases; that allowed replacing some
+ // 'candidates' with 'same' or 'replace', but it's just not worth it
+ // (performance- and complexity-wise).
+
+ Shadow old(0);
+
+ // In release mode we manually unroll the loop,
+ // because empirically gcc generates better code this way.
+ // However, we can't afford unrolling in debug mode, because the function
+ // consumes almost 4K of stack. Gtest gives only 4K of stack to death test
+ // threads, which is not enough for the unrolled loop.
+#if SANITIZER_DEBUG
+ for (int idx = 0; idx < 4; idx++) {
+#include "tsan_update_shadow_word_inl.h"
+ }
+#else
+ int idx = 0;
+#include "tsan_update_shadow_word_inl.h"
+ idx = 1;
+ if (stored) {
+#include "tsan_update_shadow_word_inl.h"
+ } else {
+#include "tsan_update_shadow_word_inl.h"
+ }
+ idx = 2;
+ if (stored) {
+#include "tsan_update_shadow_word_inl.h"
+ } else {
+#include "tsan_update_shadow_word_inl.h"
+ }
+ idx = 3;
+ if (stored) {
+#include "tsan_update_shadow_word_inl.h"
+ } else {
+#include "tsan_update_shadow_word_inl.h"
+ }
+#endif
+
+ // we did not find any races and had already stored
+ // the current access info, so we are done
+ if (LIKELY(stored))
+ return;
+ // choose a random candidate slot and replace it
+ StoreShadow(shadow_mem + (cur.epoch() % kShadowCnt), store_word);
+ StatInc(thr, StatShadowReplace);
+ return;
+ RACE:
+ HandleRace(thr, shadow_mem, cur, old);
+ return;
+}
+
+void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
+ int size, bool kAccessIsWrite, bool kIsAtomic) {
+ while (size) {
+ int size1 = 1;
+ int kAccessSizeLog = kSizeLog1;
+ if (size >= 8 && (addr & ~7) == ((addr + 7) & ~7)) {
+ size1 = 8;
+ kAccessSizeLog = kSizeLog8;
+ } else if (size >= 4 && (addr & ~7) == ((addr + 3) & ~7)) {
+ size1 = 4;
+ kAccessSizeLog = kSizeLog4;
+ } else if (size >= 2 && (addr & ~7) == ((addr + 1) & ~7)) {
+ size1 = 2;
+ kAccessSizeLog = kSizeLog2;
+ }
+ MemoryAccess(thr, pc, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic);
+ addr += size1;
+ size -= size1;
+ }
+}
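+
+// Example split (illustrative): a 6-byte access at address 0x1003 stays
+// within one 8-byte cell for its first 4 bytes, so it is recorded as a 4-byte
+// access at 0x1003 followed by 1-byte accesses at 0x1007 and 0x1008 (a 2-byte
+// access at 0x1007 would cross the 0x1008 cell boundary and is rejected).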
+
+ALWAYS_INLINE
+bool ContainsSameAccessSlow(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
+ Shadow cur(a);
+ for (uptr i = 0; i < kShadowCnt; i++) {
+ Shadow old(LoadShadow(&s[i]));
+ if (Shadow::Addr0AndSizeAreEqual(cur, old) &&
+ old.TidWithIgnore() == cur.TidWithIgnore() &&
+ old.epoch() > sync_epoch &&
+ old.IsAtomic() == cur.IsAtomic() &&
+ old.IsRead() <= cur.IsRead())
+ return true;
+ }
+ return false;
+}
+
+#if defined(__SSE3__)
+#define SHUF(v0, v1, i0, i1, i2, i3) _mm_castps_si128(_mm_shuffle_ps( \
+ _mm_castsi128_ps(v0), _mm_castsi128_ps(v1), \
+ (i0)*1 + (i1)*4 + (i2)*16 + (i3)*64))
+ALWAYS_INLINE
+bool ContainsSameAccessFast(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
+ // This is an optimized version of ContainsSameAccessSlow.
+ // load current access into access[0:63]
+ const m128 access = _mm_cvtsi64_si128(a);
+ // duplicate high part of access in addr0:
+ // addr0[0:31] = access[32:63]
+ // addr0[32:63] = access[32:63]
+ // addr0[64:95] = access[32:63]
+ // addr0[96:127] = access[32:63]
+ const m128 addr0 = SHUF(access, access, 1, 1, 1, 1);
+ // load 4 shadow slots
+ const m128 shadow0 = _mm_load_si128((__m128i*)s);
+ const m128 shadow1 = _mm_load_si128((__m128i*)s + 1);
+ // load high parts of 4 shadow slots into addr_vect:
+ // addr_vect[0:31] = shadow0[32:63]
+ // addr_vect[32:63] = shadow0[96:127]
+ // addr_vect[64:95] = shadow1[32:63]
+ // addr_vect[96:127] = shadow1[96:127]
+ m128 addr_vect = SHUF(shadow0, shadow1, 1, 3, 1, 3);
+ if (!is_write) {
+ // set IsRead bit in addr_vect
+ const m128 rw_mask1 = _mm_cvtsi64_si128(1<<15);
+ const m128 rw_mask = SHUF(rw_mask1, rw_mask1, 0, 0, 0, 0);
+ addr_vect = _mm_or_si128(addr_vect, rw_mask);
+ }
+ // addr0 == addr_vect?
+ const m128 addr_res = _mm_cmpeq_epi32(addr0, addr_vect);
+ // epoch1[0:63] = sync_epoch
+ const m128 epoch1 = _mm_cvtsi64_si128(sync_epoch);
+ // epoch[0:31] = sync_epoch[0:31]
+ // epoch[32:63] = sync_epoch[0:31]
+ // epoch[64:95] = sync_epoch[0:31]
+ // epoch[96:127] = sync_epoch[0:31]
+ const m128 epoch = SHUF(epoch1, epoch1, 0, 0, 0, 0);
+ // load low parts of shadow cell epochs into epoch_vect:
+ // epoch_vect[0:31] = shadow0[0:31]
+ // epoch_vect[32:63] = shadow0[64:95]
+ // epoch_vect[64:95] = shadow1[0:31]
+ // epoch_vect[96:127] = shadow1[64:95]
+ const m128 epoch_vect = SHUF(shadow0, shadow1, 0, 2, 0, 2);
+ // epoch_vect >= sync_epoch?
+ const m128 epoch_res = _mm_cmpgt_epi32(epoch_vect, epoch);
+ // addr_res & epoch_res
+ const m128 res = _mm_and_si128(addr_res, epoch_res);
+ // mask[0] = res[7]
+ // mask[1] = res[15]
+ // ...
+ // mask[15] = res[127]
+ const int mask = _mm_movemask_epi8(res);
+ return mask != 0;
+}
+#endif
+
+ALWAYS_INLINE
+bool ContainsSameAccess(u64 *s, u64 a, u64 sync_epoch, bool is_write) {
+#if defined(__SSE3__)
+ bool res = ContainsSameAccessFast(s, a, sync_epoch, is_write);
+ // NOTE: this check can fail if the shadow is concurrently mutated
+ // by other threads. But it still can be useful if you modify
+ // ContainsSameAccessFast and want to ensure that it's not completely broken.
+ // DCHECK_EQ(res, ContainsSameAccessSlow(s, a, sync_epoch, is_write));
+ return res;
+#else
+ return ContainsSameAccessSlow(s, a, sync_epoch, is_write);
+#endif
+}
+
+ALWAYS_INLINE USED
+void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
+ int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic) {
+ u64 *shadow_mem = (u64*)MemToShadow(addr);
+ DPrintf2("#%d: MemoryAccess: @%p %p size=%d"
+ " is_write=%d shadow_mem=%p {%zx, %zx, %zx, %zx}\n",
+ (int)thr->fast_state.tid(), (void*)pc, (void*)addr,
+ (int)(1 << kAccessSizeLog), kAccessIsWrite, shadow_mem,
+ (uptr)shadow_mem[0], (uptr)shadow_mem[1],
+ (uptr)shadow_mem[2], (uptr)shadow_mem[3]);
+#if SANITIZER_DEBUG
+ if (!IsAppMem(addr)) {
+ Printf("Access to non app mem %zx\n", addr);
+ DCHECK(IsAppMem(addr));
+ }
+ if (!IsShadowMem((uptr)shadow_mem)) {
+ Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
+ DCHECK(IsShadowMem((uptr)shadow_mem));
+ }
+#endif
+
+ if (!SANITIZER_GO && !kAccessIsWrite && *shadow_mem == kShadowRodata) {
+ // Access to .rodata section, no races here.
+ // Measurements show that it can be 10-20% of all memory accesses.
+ StatInc(thr, StatMop);
+ StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
+ StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
+ StatInc(thr, StatMopRodata);
+ return;
+ }
+
+ FastState fast_state = thr->fast_state;
+ if (UNLIKELY(fast_state.GetIgnoreBit())) {
+ StatInc(thr, StatMop);
+ StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
+ StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
+ StatInc(thr, StatMopIgnored);
+ return;
+ }
+
+ Shadow cur(fast_state);
+ cur.SetAddr0AndSizeLog(addr & 7, kAccessSizeLog);
+ cur.SetWrite(kAccessIsWrite);
+ cur.SetAtomic(kIsAtomic);
+
+ if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(),
+ thr->fast_synch_epoch, kAccessIsWrite))) {
+ StatInc(thr, StatMop);
+ StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
+ StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
+ StatInc(thr, StatMopSame);
+ return;
+ }
+
+ if (kCollectHistory) {
+ fast_state.IncrementEpoch();
+ thr->fast_state = fast_state;
+ TraceAddEvent(thr, fast_state, EventTypeMop, pc);
+ cur.IncrementEpoch();
+ }
+
+ MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
+ shadow_mem, cur);
+}
+
+// Called by MemoryAccessRange in tsan_rtl_thread.cc
+ALWAYS_INLINE USED
+void MemoryAccessImpl(ThreadState *thr, uptr addr,
+ int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
+ u64 *shadow_mem, Shadow cur) {
+ if (LIKELY(ContainsSameAccess(shadow_mem, cur.raw(),
+ thr->fast_synch_epoch, kAccessIsWrite))) {
+ StatInc(thr, StatMop);
+ StatInc(thr, kAccessIsWrite ? StatMopWrite : StatMopRead);
+ StatInc(thr, (StatType)(StatMop1 + kAccessSizeLog));
+ StatInc(thr, StatMopSame);
+ return;
+ }
+
+ MemoryAccessImpl1(thr, addr, kAccessSizeLog, kAccessIsWrite, kIsAtomic,
+ shadow_mem, cur);
+}
+
+static void MemoryRangeSet(ThreadState *thr, uptr pc, uptr addr, uptr size,
+ u64 val) {
+ (void)thr;
+ (void)pc;
+ if (size == 0)
+ return;
+ // FIXME: fix me.
+ uptr offset = addr % kShadowCell;
+ if (offset) {
+ offset = kShadowCell - offset;
+ if (size <= offset)
+ return;
+ addr += offset;
+ size -= offset;
+ }
+ DCHECK_EQ(addr % 8, 0);
+ // If a user passes some insane arguments (memset(0)),
+ // let it just crash as usual.
+ if (!IsAppMem(addr) || !IsAppMem(addr + size - 1))
+ return;
+ // Don't want to touch lots of shadow memory.
+ // If a program maps 10MB stack, there is no need to reset the whole range.
+ size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1);
+ // UnmapOrDie/MmapFixedNoReserve do not work on Windows.
+ if (SANITIZER_WINDOWS || size < common_flags()->clear_shadow_mmap_threshold) {
+ u64 *p = (u64*)MemToShadow(addr);
+ CHECK(IsShadowMem((uptr)p));
+ CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1)));
+ // FIXME: may overwrite a part outside the region
+ for (uptr i = 0; i < size / kShadowCell * kShadowCnt;) {
+ p[i++] = val;
+ for (uptr j = 1; j < kShadowCnt; j++)
+ p[i++] = 0;
+ }
+ } else {
+ // The region is big, reset only beginning and end.
+ const uptr kPageSize = GetPageSizeCached();
+ u64 *begin = (u64*)MemToShadow(addr);
+ u64 *end = begin + size / kShadowCell * kShadowCnt;
+ u64 *p = begin;
+ // Set at least first kPageSize/2 to page boundary.
+ while ((p < begin + kPageSize / kShadowSize / 2) || ((uptr)p % kPageSize)) {
+ *p++ = val;
+ for (uptr j = 1; j < kShadowCnt; j++)
+ *p++ = 0;
+ }
+ // Reset middle part.
+ u64 *p1 = p;
+ p = RoundDown(end, kPageSize);
+ UnmapOrDie((void*)p1, (uptr)p - (uptr)p1);
+ if (!MmapFixedNoReserve((uptr)p1, (uptr)p - (uptr)p1))
+ Die();
+ // Set the ending.
+ while (p < end) {
+ *p++ = val;
+ for (uptr j = 1; j < kShadowCnt; j++)
+ *p++ = 0;
+ }
+ }
+}
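+
+// With the usual parameters (kShadowCell == 8 app bytes, kShadowCnt == 4
+// shadow slots per cell), resetting a 64-byte range writes 64 / 8 * 4 == 32
+// shadow words: val into the first slot of every cell and zeroes into the
+// remaining three, which is exactly what the loops above do.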
+
+void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size) {
+ MemoryRangeSet(thr, pc, addr, size, 0);
+}
+
+void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size) {
+ // Processing more than 1k (4k of shadow) is expensive,
+ // can cause excessive memory consumption (the user does not necessarily
+ // touch the whole range) and is most likely unnecessary.
+ if (size > 1024)
+ size = 1024;
+ CHECK_EQ(thr->is_freeing, false);
+ thr->is_freeing = true;
+ MemoryAccessRange(thr, pc, addr, size, true);
+ thr->is_freeing = false;
+ if (kCollectHistory) {
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
+ }
+ Shadow s(thr->fast_state);
+ s.ClearIgnoreBit();
+ s.MarkAsFreed();
+ s.SetWrite(true);
+ s.SetAddr0AndSizeLog(0, 3);
+ MemoryRangeSet(thr, pc, addr, size, s.raw());
+}
+
+void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size) {
+ if (kCollectHistory) {
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, pc);
+ }
+ Shadow s(thr->fast_state);
+ s.ClearIgnoreBit();
+ s.SetWrite(true);
+ s.SetAddr0AndSizeLog(0, 3);
+ MemoryRangeSet(thr, pc, addr, size, s.raw());
+}
+
+ALWAYS_INLINE USED
+void FuncEntry(ThreadState *thr, uptr pc) {
+ StatInc(thr, StatFuncEnter);
+ DPrintf2("#%d: FuncEntry %p\n", (int)thr->fast_state.tid(), (void*)pc);
+ if (kCollectHistory) {
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state, EventTypeFuncEnter, pc);
+ }
+
+ // Shadow stack maintenance can be replaced with
+ // stack unwinding during trace switch (which presumably must be faster).
+ DCHECK_GE(thr->shadow_stack_pos, thr->shadow_stack);
+#if !SANITIZER_GO
+ DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
+#else
+ if (thr->shadow_stack_pos == thr->shadow_stack_end)
+ GrowShadowStack(thr);
+#endif
+ thr->shadow_stack_pos[0] = pc;
+ thr->shadow_stack_pos++;
+}
+
+ALWAYS_INLINE USED
+void FuncExit(ThreadState *thr) {
+ StatInc(thr, StatFuncExit);
+ DPrintf2("#%d: FuncExit\n", (int)thr->fast_state.tid());
+ if (kCollectHistory) {
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state, EventTypeFuncExit, 0);
+ }
+
+ DCHECK_GT(thr->shadow_stack_pos, thr->shadow_stack);
+#if !SANITIZER_GO
+ DCHECK_LT(thr->shadow_stack_pos, thr->shadow_stack_end);
+#endif
+ thr->shadow_stack_pos--;
+}
+
+void ThreadIgnoreBegin(ThreadState *thr, uptr pc, bool save_stack) {
+ DPrintf("#%d: ThreadIgnoreBegin\n", thr->tid);
+ thr->ignore_reads_and_writes++;
+ CHECK_GT(thr->ignore_reads_and_writes, 0);
+ thr->fast_state.SetIgnoreBit();
+#if !SANITIZER_GO
+ if (save_stack && !ctx->after_multithreaded_fork)
+ thr->mop_ignore_set.Add(CurrentStackId(thr, pc));
+#endif
+}
+
+void ThreadIgnoreEnd(ThreadState *thr, uptr pc) {
+ DPrintf("#%d: ThreadIgnoreEnd\n", thr->tid);
+ CHECK_GT(thr->ignore_reads_and_writes, 0);
+ thr->ignore_reads_and_writes--;
+ if (thr->ignore_reads_and_writes == 0) {
+ thr->fast_state.ClearIgnoreBit();
+#if !SANITIZER_GO
+ thr->mop_ignore_set.Reset();
+#endif
+ }
+}
+
+#if !SANITIZER_GO
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+uptr __tsan_testonly_shadow_stack_current_size() {
+ ThreadState *thr = cur_thread();
+ return thr->shadow_stack_pos - thr->shadow_stack;
+}
+#endif
+
+void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc, bool save_stack) {
+ DPrintf("#%d: ThreadIgnoreSyncBegin\n", thr->tid);
+ thr->ignore_sync++;
+ CHECK_GT(thr->ignore_sync, 0);
+#if !SANITIZER_GO
+ if (save_stack && !ctx->after_multithreaded_fork)
+ thr->sync_ignore_set.Add(CurrentStackId(thr, pc));
+#endif
+}
+
+void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc) {
+ DPrintf("#%d: ThreadIgnoreSyncEnd\n", thr->tid);
+ CHECK_GT(thr->ignore_sync, 0);
+ thr->ignore_sync--;
+#if !SANITIZER_GO
+ if (thr->ignore_sync == 0)
+ thr->sync_ignore_set.Reset();
+#endif
+}
+
+bool MD5Hash::operator==(const MD5Hash &other) const {
+ return hash[0] == other.hash[0] && hash[1] == other.hash[1];
+}
+
+#if SANITIZER_DEBUG
+void build_consistency_debug() {}
+#else
+void build_consistency_release() {}
+#endif
+
+#if TSAN_COLLECT_STATS
+void build_consistency_stats() {}
+#else
+void build_consistency_nostats() {}
+#endif
+
+} // namespace __tsan
+
+#if !SANITIZER_GO
+// Must be included in this file to make sure everything is inlined.
+#include "tsan_interface_inl.h"
+#endif
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_clock.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_clock.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_clock.cc (revision 351984)
@@ -0,0 +1,597 @@
+//===-- tsan_clock.cc -----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_clock.h"
+#include "tsan_rtl.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+
+// SyncClock and ThreadClock implement vector clocks for sync variables
+// (mutexes, atomic variables, file descriptors, etc) and threads, respectively.
+// ThreadClock contains fixed-size vector clock for maximum number of threads.
+// SyncClock contains growable vector clock for currently necessary number of
+// threads.
+// Together they implement a very simple model of operations, namely:
+//
+// void ThreadClock::acquire(const SyncClock *src) {
+// for (int i = 0; i < kMaxThreads; i++)
+// clock[i] = max(clock[i], src->clock[i]);
+// }
+//
+// void ThreadClock::release(SyncClock *dst) const {
+// for (int i = 0; i < kMaxThreads; i++)
+// dst->clock[i] = max(dst->clock[i], clock[i]);
+// }
+//
+// void ThreadClock::ReleaseStore(SyncClock *dst) const {
+// for (int i = 0; i < kMaxThreads; i++)
+// dst->clock[i] = clock[i];
+// }
+//
+// void ThreadClock::acq_rel(SyncClock *dst) {
+// acquire(dst);
+// release(dst);
+// }
+//
+// Conformance to this model is extensively verified in tsan_clock_test.cc.
+// However, the implementation is significantly more complex. The complexity
+// allows implementing important classes of use cases in O(1) instead of O(N).
+//
+// The use cases are:
+// 1. Singleton/once atomic that has a single release-store operation followed
+// by zillions of acquire-loads (the acquire-load is O(1)).
+// 2. Thread-local mutex (both lock and unlock can be O(1)).
+// 3. Leaf mutex (unlock is O(1)).
+// 4. A mutex shared by 2 threads (both lock and unlock can be O(1)).
+// 5. An atomic with a single writer (writes can be O(1)).
+// The implementation dynamically adapts to the workload. So if an atomic is in
+// a read-only phase, these reads will be O(1); if it later switches to a
+// read/write phase, the implementation will correctly handle that by switching
+// to O(N).
+//
+// Thread-safety note: all const operations on SyncClocks are conducted under
+// a shared lock; all non-const operations on SyncClocks are conducted under
+// an exclusive lock; ThreadClocks are private to their respective threads and
+// so do not need any protection.
+//
+// Description of SyncClock state:
+// clk_ - variable size vector clock, low kClkBits hold timestamp,
+// the remaining bits hold "acquired" flag (the actual value is thread's
+// reused counter);
+// if acquired == thr->reused_, then the respective thread has already
+// acquired this clock (except possibly for dirty elements).
+// dirty_ - holds up to two indices in the vector clock that other threads
+// need to acquire regardless of "acquired" flag value;
+// release_store_tid_ - denotes that the clock state is a result of
+// release-store operation by the thread with release_store_tid_ index.
+// release_store_reused_ - reuse count of release_store_tid_.
+
+// We don't have ThreadState in these methods, so this is an ugly hack that
+// works only in C++.
+#if !SANITIZER_GO
+# define CPP_STAT_INC(typ) StatInc(cur_thread(), typ)
+#else
+# define CPP_STAT_INC(typ) (void)0
+#endif
+
+namespace __tsan {
+
+static atomic_uint32_t *ref_ptr(ClockBlock *cb) {
+ return reinterpret_cast<atomic_uint32_t *>(&cb->table[ClockBlock::kRefIdx]);
+}
+
+// Drop reference to the first level block idx.
+static void UnrefClockBlock(ClockCache *c, u32 idx, uptr blocks) {
+ ClockBlock *cb = ctx->clock_alloc.Map(idx);
+ atomic_uint32_t *ref = ref_ptr(cb);
+ u32 v = atomic_load(ref, memory_order_acquire);
+ for (;;) {
+ CHECK_GT(v, 0);
+ if (v == 1)
+ break;
+ if (atomic_compare_exchange_strong(ref, &v, v - 1, memory_order_acq_rel))
+ return;
+ }
+ // The first level block owns the second level blocks, so free them as well.
+ for (uptr i = 0; i < blocks; i++)
+ ctx->clock_alloc.Free(c, cb->table[ClockBlock::kBlockIdx - i]);
+ ctx->clock_alloc.Free(c, idx);
+}
+
+ThreadClock::ThreadClock(unsigned tid, unsigned reused)
+ : tid_(tid)
+ , reused_(reused + 1) // 0 has special meaning
+ , cached_idx_()
+ , cached_size_()
+ , cached_blocks_() {
+ CHECK_LT(tid, kMaxTidInClock);
+ CHECK_EQ(reused_, ((u64)reused_ << kClkBits) >> kClkBits);
+ nclk_ = tid_ + 1;
+ last_acquire_ = 0;
+ internal_memset(clk_, 0, sizeof(clk_));
+}
+
+void ThreadClock::ResetCached(ClockCache *c) {
+ if (cached_idx_) {
+ UnrefClockBlock(c, cached_idx_, cached_blocks_);
+ cached_idx_ = 0;
+ cached_size_ = 0;
+ cached_blocks_ = 0;
+ }
+}
+
+void ThreadClock::acquire(ClockCache *c, SyncClock *src) {
+ DCHECK_LE(nclk_, kMaxTid);
+ DCHECK_LE(src->size_, kMaxTid);
+ CPP_STAT_INC(StatClockAcquire);
+
+ // Check if it's empty -> no need to do anything.
+ const uptr nclk = src->size_;
+ if (nclk == 0) {
+ CPP_STAT_INC(StatClockAcquireEmpty);
+ return;
+ }
+
+ bool acquired = false;
+ for (unsigned i = 0; i < kDirtyTids; i++) {
+ SyncClock::Dirty dirty = src->dirty_[i];
+ unsigned tid = dirty.tid;
+ if (tid != kInvalidTid) {
+ if (clk_[tid] < dirty.epoch) {
+ clk_[tid] = dirty.epoch;
+ acquired = true;
+ }
+ }
+ }
+
+ // Check if we've already acquired src after the last release operation on src
+ if (tid_ >= nclk || src->elem(tid_).reused != reused_) {
+ // O(N) acquire.
+ CPP_STAT_INC(StatClockAcquireFull);
+ nclk_ = max(nclk_, nclk);
+ u64 *dst_pos = &clk_[0];
+ for (ClockElem &src_elem : *src) {
+ u64 epoch = src_elem.epoch;
+ if (*dst_pos < epoch) {
+ *dst_pos = epoch;
+ acquired = true;
+ }
+ dst_pos++;
+ }
+
+ // Remember that this thread has acquired this clock.
+ if (nclk > tid_)
+ src->elem(tid_).reused = reused_;
+ }
+
+ if (acquired) {
+ CPP_STAT_INC(StatClockAcquiredSomething);
+ last_acquire_ = clk_[tid_];
+ ResetCached(c);
+ }
+}
+
+void ThreadClock::release(ClockCache *c, SyncClock *dst) {
+ DCHECK_LE(nclk_, kMaxTid);
+ DCHECK_LE(dst->size_, kMaxTid);
+
+ if (dst->size_ == 0) {
+ // ReleaseStore will correctly set release_store_tid_,
+ // which can be important for future operations.
+ ReleaseStore(c, dst);
+ return;
+ }
+
+ CPP_STAT_INC(StatClockRelease);
+ // Check if we need to resize dst.
+ if (dst->size_ < nclk_)
+ dst->Resize(c, nclk_);
+
+ // Check if we had not acquired anything from other threads
+ // since the last release on dst. If so, we need to update
+ // only dst->elem(tid_).
+ if (dst->elem(tid_).epoch > last_acquire_) {
+ UpdateCurrentThread(c, dst);
+ if (dst->release_store_tid_ != tid_ ||
+ dst->release_store_reused_ != reused_)
+ dst->release_store_tid_ = kInvalidTid;
+ return;
+ }
+
+ // O(N) release.
+ CPP_STAT_INC(StatClockReleaseFull);
+ dst->Unshare(c);
+ // First, remember whether we've acquired dst.
+ bool acquired = IsAlreadyAcquired(dst);
+ if (acquired)
+ CPP_STAT_INC(StatClockReleaseAcquired);
+ // Update dst->clk_.
+ dst->FlushDirty();
+ uptr i = 0;
+ for (ClockElem &ce : *dst) {
+ ce.epoch = max(ce.epoch, clk_[i]);
+ ce.reused = 0;
+ i++;
+ }
+ // Clear 'acquired' flag in the remaining elements.
+ if (nclk_ < dst->size_)
+ CPP_STAT_INC(StatClockReleaseClearTail);
+ for (uptr i = nclk_; i < dst->size_; i++)
+ dst->elem(i).reused = 0;
+ dst->release_store_tid_ = kInvalidTid;
+ dst->release_store_reused_ = 0;
+ // If we've acquired dst, remember this fact,
+ // so that we don't need to acquire it on next acquire.
+ if (acquired)
+ dst->elem(tid_).reused = reused_;
+}
+
+void ThreadClock::ReleaseStore(ClockCache *c, SyncClock *dst) {
+ DCHECK_LE(nclk_, kMaxTid);
+ DCHECK_LE(dst->size_, kMaxTid);
+ CPP_STAT_INC(StatClockStore);
+
+ if (dst->size_ == 0 && cached_idx_ != 0) {
+ // Reuse the cached clock.
+ // Note: we could reuse/cache the cached clock in more cases:
+ // we could update the existing clock and cache it, or replace it with the
+ // currently cached clock and release the old one. And for a shared
+ // existing clock, we could replace it with the currently cached;
+ // or unshare, update and cache. But, for simplicity, we currently reuse
+ // cached clock only when the target clock is empty.
+ dst->tab_ = ctx->clock_alloc.Map(cached_idx_);
+ dst->tab_idx_ = cached_idx_;
+ dst->size_ = cached_size_;
+ dst->blocks_ = cached_blocks_;
+ CHECK_EQ(dst->dirty_[0].tid, kInvalidTid);
+ // The cached clock is shared (immutable),
+ // so this is where we store the current clock.
+ dst->dirty_[0].tid = tid_;
+ dst->dirty_[0].epoch = clk_[tid_];
+ dst->release_store_tid_ = tid_;
+ dst->release_store_reused_ = reused_;
+ // Remember that we don't need to acquire it in the future.
+ dst->elem(tid_).reused = reused_;
+ // Grab a reference.
+ atomic_fetch_add(ref_ptr(dst->tab_), 1, memory_order_relaxed);
+ return;
+ }
+
+ // Check if we need to resize dst.
+ if (dst->size_ < nclk_)
+ dst->Resize(c, nclk_);
+
+ if (dst->release_store_tid_ == tid_ &&
+ dst->release_store_reused_ == reused_ &&
+ dst->elem(tid_).epoch > last_acquire_) {
+ CPP_STAT_INC(StatClockStoreFast);
+ UpdateCurrentThread(c, dst);
+ return;
+ }
+
+ // O(N) release-store.
+ CPP_STAT_INC(StatClockStoreFull);
+ dst->Unshare(c);
+ // Note: dst can be larger than this ThreadClock.
+ // This is fine since clk_ beyond size is all zeros.
+ uptr i = 0;
+ for (ClockElem &ce : *dst) {
+ ce.epoch = clk_[i];
+ ce.reused = 0;
+ i++;
+ }
+ for (uptr i = 0; i < kDirtyTids; i++)
+ dst->dirty_[i].tid = kInvalidTid;
+ dst->release_store_tid_ = tid_;
+ dst->release_store_reused_ = reused_;
+ // Remember that we don't need to acquire it in the future.
+ dst->elem(tid_).reused = reused_;
+
+ // If the resulting clock is cachable, cache it for future release operations.
+ // The clock is always cachable if we released to an empty sync object.
+ if (cached_idx_ == 0 && dst->Cachable()) {
+ // Grab a reference to the ClockBlock.
+ atomic_uint32_t *ref = ref_ptr(dst->tab_);
+ if (atomic_load(ref, memory_order_acquire) == 1)
+ atomic_store_relaxed(ref, 2);
+ else
+ atomic_fetch_add(ref_ptr(dst->tab_), 1, memory_order_relaxed);
+ cached_idx_ = dst->tab_idx_;
+ cached_size_ = dst->size_;
+ cached_blocks_ = dst->blocks_;
+ }
+}
+
+void ThreadClock::acq_rel(ClockCache *c, SyncClock *dst) {
+ CPP_STAT_INC(StatClockAcquireRelease);
+ acquire(c, dst);
+ ReleaseStore(c, dst);
+}
+
+// Updates only single element related to the current thread in dst->clk_.
+void ThreadClock::UpdateCurrentThread(ClockCache *c, SyncClock *dst) const {
+ // Update the thread's time, but preserve the 'acquired' flag.
+ for (unsigned i = 0; i < kDirtyTids; i++) {
+ SyncClock::Dirty *dirty = &dst->dirty_[i];
+ const unsigned tid = dirty->tid;
+ if (tid == tid_ || tid == kInvalidTid) {
+ CPP_STAT_INC(StatClockReleaseFast);
+ dirty->tid = tid_;
+ dirty->epoch = clk_[tid_];
+ return;
+ }
+ }
+ // Reset all 'acquired' flags, O(N).
+ // We are going to touch dst elements, so we need to unshare it.
+ dst->Unshare(c);
+ CPP_STAT_INC(StatClockReleaseSlow);
+ dst->elem(tid_).epoch = clk_[tid_];
+ for (uptr i = 0; i < dst->size_; i++)
+ dst->elem(i).reused = 0;
+ dst->FlushDirty();
+}
+
+// Checks whether the current thread has already acquired src.
+bool ThreadClock::IsAlreadyAcquired(const SyncClock *src) const {
+ if (src->elem(tid_).reused != reused_)
+ return false;
+ for (unsigned i = 0; i < kDirtyTids; i++) {
+ SyncClock::Dirty dirty = src->dirty_[i];
+ if (dirty.tid != kInvalidTid) {
+ if (clk_[dirty.tid] < dirty.epoch)
+ return false;
+ }
+ }
+ return true;
+}
+
+// Sets a single element in the vector clock.
+// This function is called only from weird places like AcquireGlobal.
+void ThreadClock::set(ClockCache *c, unsigned tid, u64 v) {
+ DCHECK_LT(tid, kMaxTid);
+ DCHECK_GE(v, clk_[tid]);
+ clk_[tid] = v;
+ if (nclk_ <= tid)
+ nclk_ = tid + 1;
+ last_acquire_ = clk_[tid_];
+ ResetCached(c);
+}
+
+void ThreadClock::DebugDump(int(*printf)(const char *s, ...)) {
+ printf("clock=[");
+ for (uptr i = 0; i < nclk_; i++)
+ printf("%s%llu", i == 0 ? "" : ",", clk_[i]);
+ printf("] tid=%u/%u last_acq=%llu", tid_, reused_, last_acquire_);
+}
+
+SyncClock::SyncClock() {
+ ResetImpl();
+}
+
+SyncClock::~SyncClock() {
+ // Reset must be called before dtor.
+ CHECK_EQ(size_, 0);
+ CHECK_EQ(blocks_, 0);
+ CHECK_EQ(tab_, 0);
+ CHECK_EQ(tab_idx_, 0);
+}
+
+void SyncClock::Reset(ClockCache *c) {
+ if (size_)
+ UnrefClockBlock(c, tab_idx_, blocks_);
+ ResetImpl();
+}
+
+void SyncClock::ResetImpl() {
+ tab_ = 0;
+ tab_idx_ = 0;
+ size_ = 0;
+ blocks_ = 0;
+ release_store_tid_ = kInvalidTid;
+ release_store_reused_ = 0;
+ for (uptr i = 0; i < kDirtyTids; i++)
+ dirty_[i].tid = kInvalidTid;
+}
+
+void SyncClock::Resize(ClockCache *c, uptr nclk) {
+ CPP_STAT_INC(StatClockReleaseResize);
+ Unshare(c);
+ if (nclk <= capacity()) {
+ // Memory is already allocated, just increase the size.
+ size_ = nclk;
+ return;
+ }
+ if (size_ == 0) {
+ // Grow from 0 to one-level table.
+ CHECK_EQ(size_, 0);
+ CHECK_EQ(blocks_, 0);
+ CHECK_EQ(tab_, 0);
+ CHECK_EQ(tab_idx_, 0);
+ tab_idx_ = ctx->clock_alloc.Alloc(c);
+ tab_ = ctx->clock_alloc.Map(tab_idx_);
+ internal_memset(tab_, 0, sizeof(*tab_));
+ atomic_store_relaxed(ref_ptr(tab_), 1);
+ size_ = 1;
+ } else if (size_ > blocks_ * ClockBlock::kClockCount) {
+ u32 idx = ctx->clock_alloc.Alloc(c);
+ ClockBlock *new_cb = ctx->clock_alloc.Map(idx);
+ uptr top = size_ - blocks_ * ClockBlock::kClockCount;
+ CHECK_LT(top, ClockBlock::kClockCount);
+ const uptr move = top * sizeof(tab_->clock[0]);
+ internal_memcpy(&new_cb->clock[0], tab_->clock, move);
+ internal_memset(&new_cb->clock[top], 0, sizeof(*new_cb) - move);
+ internal_memset(tab_->clock, 0, move);
+ append_block(idx);
+ }
+ // At this point we have the first level table allocated and all clock
+ // elements evacuated from it to a second level block.
+ // Add second level tables as necessary.
+ while (nclk > capacity()) {
+ u32 idx = ctx->clock_alloc.Alloc(c);
+ ClockBlock *cb = ctx->clock_alloc.Map(idx);
+ internal_memset(cb, 0, sizeof(*cb));
+ append_block(idx);
+ }
+ size_ = nclk;
+}
+
+// Flushes all dirty elements into the main clock array.
+void SyncClock::FlushDirty() {
+ for (unsigned i = 0; i < kDirtyTids; i++) {
+ Dirty *dirty = &dirty_[i];
+ if (dirty->tid != kInvalidTid) {
+ CHECK_LT(dirty->tid, size_);
+ elem(dirty->tid).epoch = dirty->epoch;
+ dirty->tid = kInvalidTid;
+ }
+ }
+}
+
+bool SyncClock::IsShared() const {
+ if (size_ == 0)
+ return false;
+ atomic_uint32_t *ref = ref_ptr(tab_);
+ u32 v = atomic_load(ref, memory_order_acquire);
+ CHECK_GT(v, 0);
+ return v > 1;
+}
+
+// Unshares the current clock if it's shared.
+// Shared clocks are immutable, so they need to be unshared before any updates.
+// Note: this does not apply to dirty entries as they are not shared.
+void SyncClock::Unshare(ClockCache *c) {
+ if (!IsShared())
+ return;
+ // First, copy current state into old.
+ SyncClock old;
+ old.tab_ = tab_;
+ old.tab_idx_ = tab_idx_;
+ old.size_ = size_;
+ old.blocks_ = blocks_;
+ old.release_store_tid_ = release_store_tid_;
+ old.release_store_reused_ = release_store_reused_;
+ for (unsigned i = 0; i < kDirtyTids; i++)
+ old.dirty_[i] = dirty_[i];
+ // Then, clear current object.
+ ResetImpl();
+ // Allocate brand new clock in the current object.
+ Resize(c, old.size_);
+ // Now copy state back into this object.
+ Iter old_iter(&old);
+ for (ClockElem &ce : *this) {
+ ce = *old_iter;
+ ++old_iter;
+ }
+ release_store_tid_ = old.release_store_tid_;
+ release_store_reused_ = old.release_store_reused_;
+ for (unsigned i = 0; i < kDirtyTids; i++)
+ dirty_[i] = old.dirty_[i];
+ // Drop reference to old and delete if necessary.
+ old.Reset(c);
+}
+
+// Can we cache this clock for future release operations?
+ALWAYS_INLINE bool SyncClock::Cachable() const {
+ if (size_ == 0)
+ return false;
+ for (unsigned i = 0; i < kDirtyTids; i++) {
+ if (dirty_[i].tid != kInvalidTid)
+ return false;
+ }
+ return atomic_load_relaxed(ref_ptr(tab_)) == 1;
+}
+
+// elem linearizes the two-level structure into a linear array.
+// Note: this is used only for one-time accesses; vector operations use
+// the iterator, as it is much faster.
+ALWAYS_INLINE ClockElem &SyncClock::elem(unsigned tid) const {
+ DCHECK_LT(tid, size_);
+ const uptr block = tid / ClockBlock::kClockCount;
+ DCHECK_LE(block, blocks_);
+ tid %= ClockBlock::kClockCount;
+ if (block == blocks_)
+ return tab_->clock[tid];
+ u32 idx = get_block(block);
+ ClockBlock *cb = ctx->clock_alloc.Map(idx);
+ return cb->clock[tid];
+}
+
+ALWAYS_INLINE uptr SyncClock::capacity() const {
+ if (size_ == 0)
+ return 0;
+ uptr ratio = sizeof(ClockBlock::clock[0]) / sizeof(ClockBlock::table[0]);
+ // How many clock elements we can fit into the first level block.
+ // +1 for ref counter.
+ uptr top = ClockBlock::kClockCount - RoundUpTo(blocks_ + 1, ratio) / ratio;
+ return blocks_ * ClockBlock::kClockCount + top;
+}
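+
+// Worked example, assuming 512-byte blocks, 64-bit clock elements and 32-bit
+// table entries (so ratio == 2 and kClockCount == 64): with blocks_ == 0 one
+// element slot is reserved (it holds the ref counter), giving capacity 63;
+// with blocks_ == 1, ref and idx0 still fit in a single 8-byte slot, so
+// capacity is 64 + 63 == 127.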
+
+ALWAYS_INLINE u32 SyncClock::get_block(uptr bi) const {
+ DCHECK(size_);
+ DCHECK_LT(bi, blocks_);
+ return tab_->table[ClockBlock::kBlockIdx - bi];
+}
+
+ALWAYS_INLINE void SyncClock::append_block(u32 idx) {
+ uptr bi = blocks_++;
+ CHECK_EQ(get_block(bi), 0);
+ tab_->table[ClockBlock::kBlockIdx - bi] = idx;
+}
+
+// Used only by tests.
+u64 SyncClock::get(unsigned tid) const {
+ for (unsigned i = 0; i < kDirtyTids; i++) {
+ Dirty dirty = dirty_[i];
+ if (dirty.tid == tid)
+ return dirty.epoch;
+ }
+ return elem(tid).epoch;
+}
+
+// Used only by Iter test.
+u64 SyncClock::get_clean(unsigned tid) const {
+ return elem(tid).epoch;
+}
+
+void SyncClock::DebugDump(int(*printf)(const char *s, ...)) {
+ printf("clock=[");
+ for (uptr i = 0; i < size_; i++)
+ printf("%s%llu", i == 0 ? "" : ",", elem(i).epoch);
+ printf("] reused=[");
+ for (uptr i = 0; i < size_; i++)
+ printf("%s%llu", i == 0 ? "" : ",", elem(i).reused);
+ printf("] release_store_tid=%d/%d dirty_tids=%d[%llu]/%d[%llu]",
+ release_store_tid_, release_store_reused_,
+ dirty_[0].tid, dirty_[0].epoch,
+ dirty_[1].tid, dirty_[1].epoch);
+}
+
+void SyncClock::Iter::Next() {
+ // Finished with the current block, move on to the next one.
+ block_++;
+ if (block_ < parent_->blocks_) {
+ // Iterate over the next second level block.
+ u32 idx = parent_->get_block(block_);
+ ClockBlock *cb = ctx->clock_alloc.Map(idx);
+ pos_ = &cb->clock[0];
+ end_ = pos_ + min(parent_->size_ - block_ * ClockBlock::kClockCount,
+ ClockBlock::kClockCount);
+ return;
+ }
+ if (block_ == parent_->blocks_ &&
+ parent_->size_ > parent_->blocks_ * ClockBlock::kClockCount) {
+ // Iterate over elements in the first level block.
+ pos_ = &parent_->tab_->clock[0];
+ end_ = pos_ + min(parent_->size_ - block_ * ClockBlock::kClockCount,
+ ClockBlock::kClockCount);
+ return;
+ }
+ parent_ = nullptr; // denotes end
+}
+} // namespace __tsan
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_clock.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_clock.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_clock.h (revision 351984)
@@ -0,0 +1,225 @@
+//===-- tsan_clock.h --------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_CLOCK_H
+#define TSAN_CLOCK_H
+
+#include "tsan_defs.h"
+#include "tsan_dense_alloc.h"
+
+namespace __tsan {
+
+typedef DenseSlabAlloc<ClockBlock, 1<<16, 1<<10> ClockAlloc;
+typedef DenseSlabAllocCache ClockCache;
+
+// The clock that lives in sync variables (mutexes, atomics, etc).
+class SyncClock {
+ public:
+ SyncClock();
+ ~SyncClock();
+
+ uptr size() const;
+
+ // These are used only in tests.
+ u64 get(unsigned tid) const;
+ u64 get_clean(unsigned tid) const;
+
+ void Resize(ClockCache *c, uptr nclk);
+ void Reset(ClockCache *c);
+
+ void DebugDump(int(*printf)(const char *s, ...));
+
+ // Clock element iterator.
+ // Note: it iterates only over the table without regard to dirty entries.
+ class Iter {
+ public:
+ explicit Iter(SyncClock* parent);
+ Iter& operator++();
+ bool operator!=(const Iter& other);
+ ClockElem &operator*();
+
+ private:
+ SyncClock *parent_;
+ // [pos_, end_) is the current continuous range of clock elements.
+ ClockElem *pos_;
+ ClockElem *end_;
+    int block_;  // Index of the current second-level block.
+
+ NOINLINE void Next();
+ };
+
+ Iter begin();
+ Iter end();
+
+ private:
+ friend class ThreadClock;
+ friend class Iter;
+ static const uptr kDirtyTids = 2;
+
+ struct Dirty {
+ u64 epoch : kClkBits;
+    u64 tid : 64 - kClkBits;  // kInvalidTid if not active
+ };
+
+ unsigned release_store_tid_;
+ unsigned release_store_reused_;
+ Dirty dirty_[kDirtyTids];
+  // If size_ is 0, tab_ is nullptr.
+  // If size_ <= 64 (kClockCount), tab_ contains a pointer to an array with
+  // 64 ClockElem's (ClockBlock::clock).
+  // Otherwise, tab_ points to an array of up to 127 u32 elements,
+  // each pointing to a second-level 512-byte block with 64 ClockElem's.
+  // Unused space in the first-level ClockBlock is used to store additional
+  // clock elements.
+  // The last u32 element in the first-level ClockBlock is always used as
+  // the reference counter.
+ //
+ // See the following scheme for details.
+ // All memory blocks are 512 bytes (allocated from ClockAlloc).
+ // Clock (clk) elements are 64 bits.
+ // Idx and ref are 32 bits.
+ //
+ // tab_
+ // |
+ // \/
+ // +----------------------------------------------------+
+ // | clk128 | clk129 | ...unused... | idx1 | idx0 | ref |
+ // +----------------------------------------------------+
+ // | |
+ // | \/
+ // | +----------------+
+ // | | clk0 ... clk63 |
+ // | +----------------+
+ // \/
+ // +------------------+
+ // | clk64 ... clk127 |
+ // +------------------+
+ //
+ // Note: dirty entries, if active, always override what's stored in the clock.
+ ClockBlock *tab_;
+ u32 tab_idx_;
+ u16 size_;
+ u16 blocks_; // Number of second level blocks.
+
+ void Unshare(ClockCache *c);
+ bool IsShared() const;
+ bool Cachable() const;
+ void ResetImpl();
+ void FlushDirty();
+ uptr capacity() const;
+ u32 get_block(uptr bi) const;
+ void append_block(u32 idx);
+ ClockElem &elem(unsigned tid) const;
+};
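+
+// Worked lookup example (illustrative, following elem() and the scheme
+// above): for a clock with blocks_ == 2, tid 70 maps to second-level block
+// 70 / 64 == 1 at offset 70 % 64 == 6 (clk70 in the picture), while tid 130
+// yields block 130 / 64 == 2 == blocks_, so it is served from the unused
+// space of the first-level block: tab_->clock[130 % 64] == tab_->clock[2].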
+
+// The clock that lives in threads.
+class ThreadClock {
+ public:
+ typedef DenseSlabAllocCache Cache;
+
+ explicit ThreadClock(unsigned tid, unsigned reused = 0);
+
+ u64 get(unsigned tid) const;
+ void set(ClockCache *c, unsigned tid, u64 v);
+ void set(u64 v);
+ void tick();
+ uptr size() const;
+
+ void acquire(ClockCache *c, SyncClock *src);
+ void release(ClockCache *c, SyncClock *dst);
+ void acq_rel(ClockCache *c, SyncClock *dst);
+ void ReleaseStore(ClockCache *c, SyncClock *dst);
+ void ResetCached(ClockCache *c);
+
+ void DebugReset();
+ void DebugDump(int(*printf)(const char *s, ...));
+
+ private:
+ static const uptr kDirtyTids = SyncClock::kDirtyTids;
+  // Index of the thread associated with the clock ("current thread").
+ const unsigned tid_;
+ const unsigned reused_; // tid_ reuse count.
+ // Current thread time when it acquired something from other threads.
+ u64 last_acquire_;
+
+  // Cached SyncClock (without dirty entries and release_store_tid_).
+  // We reuse it for subsequent store-release operations without intervening
+  // acquire operations. Since it is shared (and thus constant), the clock
+  // value for the current thread is stored in the dirty entries of the
+  // SyncClock. We hold a reference to the table while it is cached here.
+ u32 cached_idx_;
+ u16 cached_size_;
+ u16 cached_blocks_;
+
+ // Number of active elements in the clk_ table (the rest is zeros).
+ uptr nclk_;
+ u64 clk_[kMaxTidInClock]; // Fixed size vector clock.
+
+ bool IsAlreadyAcquired(const SyncClock *src) const;
+ void UpdateCurrentThread(ClockCache *c, SyncClock *dst) const;
+};
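+
+// Illustrative happens-before sketch (hypothetical tids; the ClockCache
+// setup is elided): thread 1 releases its vector clock into a SyncClock
+// guarding a mutex, thread 2 acquires from it and thereby learns
+// thread 1's epoch.
+//
+//   ThreadClock t1(/*tid=*/1), t2(/*tid=*/2);
+//   SyncClock sc;
+//   ClockCache *c = /* thread-local cache, obtained elsewhere */;
+//   t1.tick();           // t1 performs a unit of work
+//   t1.release(c, &sc);  // unlock: publish t1's clock into sc
+//   t2.acquire(c, &sc);  // lock: afterwards t2.get(1) >= t1.get(1), so
+//                        // t1's prior writes happen-before t2's accesses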
+
+ALWAYS_INLINE u64 ThreadClock::get(unsigned tid) const {
+ DCHECK_LT(tid, kMaxTidInClock);
+ return clk_[tid];
+}
+
+ALWAYS_INLINE void ThreadClock::set(u64 v) {
+ DCHECK_GE(v, clk_[tid_]);
+ clk_[tid_] = v;
+}
+
+ALWAYS_INLINE void ThreadClock::tick() {
+ clk_[tid_]++;
+}
+
+ALWAYS_INLINE uptr ThreadClock::size() const {
+ return nclk_;
+}
+
+ALWAYS_INLINE SyncClock::Iter SyncClock::begin() {
+ return Iter(this);
+}
+
+ALWAYS_INLINE SyncClock::Iter SyncClock::end() {
+ return Iter(nullptr);
+}
+
+ALWAYS_INLINE uptr SyncClock::size() const {
+ return size_;
+}
+
+ALWAYS_INLINE SyncClock::Iter::Iter(SyncClock* parent)
+ : parent_(parent)
+ , pos_(nullptr)
+ , end_(nullptr)
+ , block_(-1) {
+ if (parent)
+ Next();
+}
+
+ALWAYS_INLINE SyncClock::Iter& SyncClock::Iter::operator++() {
+ pos_++;
+ if (UNLIKELY(pos_ >= end_))
+ Next();
+ return *this;
+}
+
+ALWAYS_INLINE bool SyncClock::Iter::operator!=(const SyncClock::Iter& other) {
+ return parent_ != other.parent_;
+}
+
+ALWAYS_INLINE ClockElem &SyncClock::Iter::operator*() {
+ return *pos_;
+}
+} // namespace __tsan
+
+#endif // TSAN_CLOCK_H
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_debugging.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_debugging.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_debugging.cc (revision 351984)
@@ -0,0 +1,262 @@
+//===-- tsan_debugging.cc -------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// TSan debugging API implementation.
+//===----------------------------------------------------------------------===//
+#include "tsan_interface.h"
+#include "tsan_report.h"
+#include "tsan_rtl.h"
+
+#include "sanitizer_common/sanitizer_stackdepot.h"
+
+using namespace __tsan;
+
+static const char *ReportTypeDescription(ReportType typ) {
+ switch (typ) {
+ case ReportTypeRace: return "data-race";
+ case ReportTypeVptrRace: return "data-race-vptr";
+ case ReportTypeUseAfterFree: return "heap-use-after-free";
+ case ReportTypeVptrUseAfterFree: return "heap-use-after-free-vptr";
+ case ReportTypeExternalRace: return "external-race";
+ case ReportTypeThreadLeak: return "thread-leak";
+ case ReportTypeMutexDestroyLocked: return "locked-mutex-destroy";
+ case ReportTypeMutexDoubleLock: return "mutex-double-lock";
+ case ReportTypeMutexInvalidAccess: return "mutex-invalid-access";
+ case ReportTypeMutexBadUnlock: return "mutex-bad-unlock";
+ case ReportTypeMutexBadReadLock: return "mutex-bad-read-lock";
+ case ReportTypeMutexBadReadUnlock: return "mutex-bad-read-unlock";
+ case ReportTypeSignalUnsafe: return "signal-unsafe-call";
+ case ReportTypeErrnoInSignal: return "errno-in-signal-handler";
+ case ReportTypeDeadlock: return "lock-order-inversion";
+ // No default case so compiler warns us if we miss one
+ }
+ UNREACHABLE("missing case");
+}
+
+static const char *ReportLocationTypeDescription(ReportLocationType typ) {
+ switch (typ) {
+ case ReportLocationGlobal: return "global";
+ case ReportLocationHeap: return "heap";
+ case ReportLocationStack: return "stack";
+ case ReportLocationTLS: return "tls";
+ case ReportLocationFD: return "fd";
+ // No default case so compiler warns us if we miss one
+ }
+ UNREACHABLE("missing case");
+}
+
+static void CopyTrace(SymbolizedStack *first_frame, void **trace,
+ uptr trace_size) {
+ uptr i = 0;
+ for (SymbolizedStack *frame = first_frame; frame != nullptr;
+ frame = frame->next) {
+ trace[i++] = (void *)frame->info.address;
+ if (i >= trace_size) break;
+ }
+}
+
+// Meant to be called by the debugger.
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__tsan_get_current_report() {
+ return const_cast<ReportDesc*>(cur_thread()->current_report);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_data(void *report, const char **description, int *count,
+ int *stack_count, int *mop_count, int *loc_count,
+ int *mutex_count, int *thread_count,
+ int *unique_tid_count, void **sleep_trace,
+ uptr trace_size) {
+ const ReportDesc *rep = (ReportDesc *)report;
+ *description = ReportTypeDescription(rep->typ);
+ *count = rep->count;
+ *stack_count = rep->stacks.Size();
+ *mop_count = rep->mops.Size();
+ *loc_count = rep->locs.Size();
+ *mutex_count = rep->mutexes.Size();
+ *thread_count = rep->threads.Size();
+ *unique_tid_count = rep->unique_tids.Size();
+ if (rep->sleep) CopyTrace(rep->sleep->frames, sleep_trace, trace_size);
+ return 1;
+}
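+
+// Illustrative debugger-side call sequence (hypothetical variable names):
+//
+//   void *rep = __tsan_get_current_report();
+//   const char *desc;
+//   int count, stacks, mops, locs, mutexes, threads, utids;
+//   void *sleep_trace[16] = {};
+//   __tsan_get_report_data(rep, &desc, &count, &stacks, &mops, &locs,
+//                          &mutexes, &threads, &utids, sleep_trace, 16);
+//   // desc now names the report type, e.g. "data-race".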
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_tag(void *report, uptr *tag) {
+ const ReportDesc *rep = (ReportDesc *)report;
+ *tag = rep->tag;
+ return 1;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_stack(void *report, uptr idx, void **trace,
+ uptr trace_size) {
+ const ReportDesc *rep = (ReportDesc *)report;
+ CHECK_LT(idx, rep->stacks.Size());
+ ReportStack *stack = rep->stacks[idx];
+ if (stack) CopyTrace(stack->frames, trace, trace_size);
+ return stack ? 1 : 0;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_mop(void *report, uptr idx, int *tid, void **addr,
+ int *size, int *write, int *atomic, void **trace,
+ uptr trace_size) {
+ const ReportDesc *rep = (ReportDesc *)report;
+ CHECK_LT(idx, rep->mops.Size());
+ ReportMop *mop = rep->mops[idx];
+ *tid = mop->tid;
+ *addr = (void *)mop->addr;
+ *size = mop->size;
+ *write = mop->write ? 1 : 0;
+ *atomic = mop->atomic ? 1 : 0;
+ if (mop->stack) CopyTrace(mop->stack->frames, trace, trace_size);
+ return 1;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_loc(void *report, uptr idx, const char **type,
+ void **addr, uptr *start, uptr *size, int *tid,
+ int *fd, int *suppressable, void **trace,
+ uptr trace_size) {
+ const ReportDesc *rep = (ReportDesc *)report;
+ CHECK_LT(idx, rep->locs.Size());
+ ReportLocation *loc = rep->locs[idx];
+ *type = ReportLocationTypeDescription(loc->type);
+ *addr = (void *)loc->global.start;
+ *start = loc->heap_chunk_start;
+ *size = loc->heap_chunk_size;
+ *tid = loc->tid;
+ *fd = loc->fd;
+ *suppressable = loc->suppressable;
+ if (loc->stack) CopyTrace(loc->stack->frames, trace, trace_size);
+ return 1;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_loc_object_type(void *report, uptr idx,
+ const char **object_type) {
+ const ReportDesc *rep = (ReportDesc *)report;
+ CHECK_LT(idx, rep->locs.Size());
+ ReportLocation *loc = rep->locs[idx];
+ *object_type = GetObjectTypeFromTag(loc->external_tag);
+ return 1;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_mutex(void *report, uptr idx, uptr *mutex_id, void **addr,
+ int *destroyed, void **trace, uptr trace_size) {
+ const ReportDesc *rep = (ReportDesc *)report;
+ CHECK_LT(idx, rep->mutexes.Size());
+ ReportMutex *mutex = rep->mutexes[idx];
+ *mutex_id = mutex->id;
+ *addr = (void *)mutex->addr;
+ *destroyed = mutex->destroyed;
+ if (mutex->stack) CopyTrace(mutex->stack->frames, trace, trace_size);
+ return 1;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_thread(void *report, uptr idx, int *tid, tid_t *os_id,
+ int *running, const char **name, int *parent_tid,
+ void **trace, uptr trace_size) {
+ const ReportDesc *rep = (ReportDesc *)report;
+ CHECK_LT(idx, rep->threads.Size());
+ ReportThread *thread = rep->threads[idx];
+ *tid = thread->id;
+ *os_id = thread->os_id;
+ *running = thread->running;
+ *name = thread->name;
+ *parent_tid = thread->parent_tid;
+ if (thread->stack) CopyTrace(thread->stack->frames, trace, trace_size);
+ return 1;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_unique_tid(void *report, uptr idx, int *tid) {
+ const ReportDesc *rep = (ReportDesc *)report;
+ CHECK_LT(idx, rep->unique_tids.Size());
+ *tid = rep->unique_tids[idx];
+ return 1;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+const char *__tsan_locate_address(uptr addr, char *name, uptr name_size,
+ uptr *region_address_ptr,
+ uptr *region_size_ptr) {
+ uptr region_address = 0;
+ uptr region_size = 0;
+ const char *region_kind = nullptr;
+ if (name && name_size > 0) name[0] = 0;
+
+ if (IsMetaMem(addr)) {
+ region_kind = "meta shadow";
+ } else if (IsShadowMem(addr)) {
+ region_kind = "shadow";
+ } else {
+ bool is_stack = false;
+ MBlock *b = 0;
+ Allocator *a = allocator();
+ if (a->PointerIsMine((void *)addr)) {
+ void *block_begin = a->GetBlockBegin((void *)addr);
+ if (block_begin) b = ctx->metamap.GetBlock((uptr)block_begin);
+ }
+
+ if (b != 0) {
+ region_address = (uptr)allocator()->GetBlockBegin((void *)addr);
+ region_size = b->siz;
+ region_kind = "heap";
+ } else {
+ // TODO(kuba.brecka): We should not lock. This is supposed to be called
+ // from within the debugger when other threads are stopped.
+ ctx->thread_registry->Lock();
+ ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack);
+ ctx->thread_registry->Unlock();
+ if (tctx) {
+ region_kind = is_stack ? "stack" : "tls";
+ } else {
+ region_kind = "global";
+ DataInfo info;
+ if (Symbolizer::GetOrInit()->SymbolizeData(addr, &info)) {
+ internal_strncpy(name, info.name, name_size);
+ region_address = info.start;
+ region_size = info.size;
+ }
+ }
+ }
+ }
+
+ CHECK(region_kind);
+ if (region_address_ptr) *region_address_ptr = region_address;
+ if (region_size_ptr) *region_size_ptr = region_size;
+ return region_kind;
+}
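+
+// Illustrative debugger call (hypothetical buffer and variable names):
+//
+//   char name[128];
+//   uptr start, size;
+//   const char *kind = __tsan_locate_address(addr, name, sizeof(name),
+//                                            &start, &size);
+//   // kind is one of "heap", "stack", "tls", "global", "shadow" or
+//   // "meta shadow"; for heap and globals [start, start + size) is filled.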
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_alloc_stack(uptr addr, uptr *trace, uptr size, int *thread_id,
+ tid_t *os_id) {
+ MBlock *b = 0;
+ Allocator *a = allocator();
+ if (a->PointerIsMine((void *)addr)) {
+ void *block_begin = a->GetBlockBegin((void *)addr);
+ if (block_begin) b = ctx->metamap.GetBlock((uptr)block_begin);
+ }
+ if (b == 0) return 0;
+
+ *thread_id = b->tid;
+ // No locking. This is supposed to be called from within the debugger when
+ // other threads are stopped.
+ ThreadContextBase *tctx = ctx->thread_registry->GetThreadLocked(b->tid);
+ *os_id = tctx->os_id;
+
+ StackTrace stack = StackDepotGet(b->stk);
+ size = Min(size, (uptr)stack.size);
+ for (uptr i = 0; i < size; i++) trace[i] = stack.trace[stack.size - i - 1];
+ return size;
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_debugging.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_defs.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_defs.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_defs.h (revision 351984)
@@ -0,0 +1,195 @@
+//===-- tsan_defs.h ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef TSAN_DEFS_H
+#define TSAN_DEFS_H
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "tsan_stat.h"
+#include "ubsan/ubsan_platform.h"
+
+// Setup defaults for compile definitions.
+#ifndef TSAN_NO_HISTORY
+# define TSAN_NO_HISTORY 0
+#endif
+
+#ifndef TSAN_COLLECT_STATS
+# define TSAN_COLLECT_STATS 0
+#endif
+
+#ifndef TSAN_CONTAINS_UBSAN
+# if CAN_SANITIZE_UB && !SANITIZER_GO
+# define TSAN_CONTAINS_UBSAN 1
+# else
+# define TSAN_CONTAINS_UBSAN 0
+# endif
+#endif
+
+namespace __tsan {
+
+const int kClkBits = 42;
+const unsigned kMaxTidReuse = (1 << (64 - kClkBits)) - 1;
+
+struct ClockElem {
+ u64 epoch : kClkBits;
+ u64 reused : 64 - kClkBits; // tid reuse count
+};
+
+struct ClockBlock {
+ static const uptr kSize = 512;
+ static const uptr kTableSize = kSize / sizeof(u32);
+ static const uptr kClockCount = kSize / sizeof(ClockElem);
+ static const uptr kRefIdx = kTableSize - 1;
+ static const uptr kBlockIdx = kTableSize - 2;
+
+ union {
+ u32 table[kTableSize];
+ ClockElem clock[kClockCount];
+ };
+
+ ClockBlock() {
+ }
+};
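+
+// Layout arithmetic implied by the constants above (illustrative check):
+// sizeof(ClockElem) == 8, so kClockCount == 512 / 8 == 64, while
+// kTableSize == 512 / 4 == 128, placing the reference counter at
+// kRefIdx == 127 and the first block index at kBlockIdx == 126.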
+
+const int kTidBits = 13;
+// Reduce kMaxTid by kClockCount because one slot in the ClockBlock table is
+// occupied by the reference counter, so the total number of elements we can
+// store in a SyncClock is kClockCount * (kTableSize - 1).
+const unsigned kMaxTid = (1 << kTidBits) - ClockBlock::kClockCount;
+#if !SANITIZER_GO
+const unsigned kMaxTidInClock = kMaxTid * 2; // This includes msb 'freed' bit.
+#else
+const unsigned kMaxTidInClock = kMaxTid; // Go does not track freed memory.
+#endif
+const uptr kShadowStackSize = 64 * 1024;
+
+// Count of shadow values in a shadow cell.
+const uptr kShadowCnt = 4;
+
+// That many user bytes are mapped onto a single shadow cell.
+const uptr kShadowCell = 8;
+
+// Size of a single shadow value (u64).
+const uptr kShadowSize = 8;
+
+// Shadow memory is kShadowMultiplier times larger than user memory.
+const uptr kShadowMultiplier = kShadowSize * kShadowCnt / kShadowCell;
+
+// That many user bytes are mapped onto a single meta shadow cell.
+// Must be less than or equal to the minimal memory allocator alignment.
+const uptr kMetaShadowCell = 8;
+
+// Size of a single meta shadow value (u32).
+const uptr kMetaShadowSize = 4;
+
+#if TSAN_NO_HISTORY
+const bool kCollectHistory = false;
+#else
+const bool kCollectHistory = true;
+#endif
+
+const u16 kInvalidTid = kMaxTid + 1;
+
+// The following "build consistency" machinery ensures that all source files
+// are built in the same configuration. Inconsistent builds lead to
+// hard-to-debug crashes.
+#if SANITIZER_DEBUG
+void build_consistency_debug();
+#else
+void build_consistency_release();
+#endif
+
+#if TSAN_COLLECT_STATS
+void build_consistency_stats();
+#else
+void build_consistency_nostats();
+#endif
+
+static inline void USED build_consistency() {
+#if SANITIZER_DEBUG
+ build_consistency_debug();
+#else
+ build_consistency_release();
+#endif
+#if TSAN_COLLECT_STATS
+ build_consistency_stats();
+#else
+ build_consistency_nostats();
+#endif
+}
+
+template<typename T>
+T min(T a, T b) {
+ return a < b ? a : b;
+}
+
+template<typename T>
+T max(T a, T b) {
+ return a > b ? a : b;
+}
+
+template<typename T>
+T RoundUp(T p, u64 align) {
+ DCHECK_EQ(align & (align - 1), 0);
+ return (T)(((u64)p + align - 1) & ~(align - 1));
+}
+
+template<typename T>
+T RoundDown(T p, u64 align) {
+ DCHECK_EQ(align & (align - 1), 0);
+ return (T)((u64)p & ~(align - 1));
+}
+
+// Zeroes the high part; returns the 'bits' least-significant bits.
+template<typename T>
+T GetLsb(T v, int bits) {
+ return (T)((u64)v & ((1ull << bits) - 1));
+}
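+
+// Illustrative values for the helpers above (a power-of-two alignment is a
+// precondition, checked by the DCHECKs): RoundUp(13, 8) == 16,
+// RoundDown(13, 8) == 8, and GetLsb(0x1234, 8) == 0x34.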
+
+struct MD5Hash {
+ u64 hash[2];
+ bool operator==(const MD5Hash &other) const;
+};
+
+MD5Hash md5_hash(const void *data, uptr size);
+
+struct Processor;
+struct ThreadState;
+class ThreadContext;
+struct Context;
+struct ReportStack;
+class ReportDesc;
+class RegionAlloc;
+
+// Descriptor of user's memory block.
+struct MBlock {
+ u64 siz : 48;
+ u64 tag : 16;
+ u32 stk;
+ u16 tid;
+};
+
+COMPILER_CHECK(sizeof(MBlock) == 16);
+
+enum ExternalTag : uptr {
+ kExternalTagNone = 0,
+ kExternalTagSwiftModifyingAccess = 1,
+ kExternalTagFirstUserAvailable = 2,
+ kExternalTagMax = 1024,
+  // Don't set kExternalTagMax over 65,536, since MBlock only stores tags
+  // as 16-bit values (see MBlock above).
+};
+
+} // namespace __tsan
+
+#endif // TSAN_DEFS_H
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_dense_alloc.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_dense_alloc.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_dense_alloc.h (revision 351984)
@@ -0,0 +1,141 @@
+//===-- tsan_dense_alloc.h --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// A DenseSlabAlloc is a freelist-based allocator of fixed-size objects.
+// DenseSlabAllocCache is a thread-local cache for DenseSlabAlloc.
+// The only difference from traditional slab allocators is that DenseSlabAlloc
+// allocates/frees indices of objects and provides functionality to map
+// an index onto the real pointer. The index is a u32, that is, half the size
+// of a uptr (hence the Dense prefix).
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_DENSE_ALLOC_H
+#define TSAN_DENSE_ALLOC_H
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "tsan_defs.h"
+#include "tsan_mutex.h"
+
+namespace __tsan {
+
+class DenseSlabAllocCache {
+ static const uptr kSize = 128;
+ typedef u32 IndexT;
+ uptr pos;
+ IndexT cache[kSize];
+ template<typename T, uptr kL1Size, uptr kL2Size> friend class DenseSlabAlloc;
+};
+
+template<typename T, uptr kL1Size, uptr kL2Size>
+class DenseSlabAlloc {
+ public:
+ typedef DenseSlabAllocCache Cache;
+ typedef typename Cache::IndexT IndexT;
+
+ explicit DenseSlabAlloc(const char *name) {
+ // Check that kL1Size and kL2Size are sane.
+ CHECK_EQ(kL1Size & (kL1Size - 1), 0);
+ CHECK_EQ(kL2Size & (kL2Size - 1), 0);
+ CHECK_GE(1ull << (sizeof(IndexT) * 8), kL1Size * kL2Size);
+ // Check that it makes sense to use the dense alloc.
+ CHECK_GE(sizeof(T), sizeof(IndexT));
+ internal_memset(map_, 0, sizeof(map_));
+ freelist_ = 0;
+ fillpos_ = 0;
+ name_ = name;
+ }
+
+ ~DenseSlabAlloc() {
+ for (uptr i = 0; i < kL1Size; i++) {
+ if (map_[i] != 0)
+ UnmapOrDie(map_[i], kL2Size * sizeof(T));
+ }
+ }
+
+ IndexT Alloc(Cache *c) {
+ if (c->pos == 0)
+ Refill(c);
+ return c->cache[--c->pos];
+ }
+
+ void Free(Cache *c, IndexT idx) {
+ DCHECK_NE(idx, 0);
+ if (c->pos == Cache::kSize)
+ Drain(c);
+ c->cache[c->pos++] = idx;
+ }
+
+ T *Map(IndexT idx) {
+ DCHECK_NE(idx, 0);
+ DCHECK_LE(idx, kL1Size * kL2Size);
+ return &map_[idx / kL2Size][idx % kL2Size];
+ }
+
+ void FlushCache(Cache *c) {
+ SpinMutexLock lock(&mtx_);
+ while (c->pos) {
+ IndexT idx = c->cache[--c->pos];
+ *(IndexT*)Map(idx) = freelist_;
+ freelist_ = idx;
+ }
+ }
+
+ void InitCache(Cache *c) {
+ c->pos = 0;
+ internal_memset(c->cache, 0, sizeof(c->cache));
+ }
+
+ private:
+ T *map_[kL1Size];
+ SpinMutex mtx_;
+ IndexT freelist_;
+ uptr fillpos_;
+ const char *name_;
+
+ void Refill(Cache *c) {
+ SpinMutexLock lock(&mtx_);
+ if (freelist_ == 0) {
+ if (fillpos_ == kL1Size) {
+ Printf("ThreadSanitizer: %s overflow (%zu*%zu). Dying.\n",
+ name_, kL1Size, kL2Size);
+ Die();
+ }
+ VPrintf(2, "ThreadSanitizer: growing %s: %zu out of %zu*%zu\n",
+ name_, fillpos_, kL1Size, kL2Size);
+ T *batch = (T*)MmapOrDie(kL2Size * sizeof(T), name_);
+ // Reserve 0 as invalid index.
+ IndexT start = fillpos_ == 0 ? 1 : 0;
+ for (IndexT i = start; i < kL2Size; i++) {
+ new(batch + i) T;
+ *(IndexT*)(batch + i) = i + 1 + fillpos_ * kL2Size;
+ }
+ *(IndexT*)(batch + kL2Size - 1) = 0;
+ freelist_ = fillpos_ * kL2Size + start;
+ map_[fillpos_++] = batch;
+ }
+ for (uptr i = 0; i < Cache::kSize / 2 && freelist_ != 0; i++) {
+ IndexT idx = freelist_;
+ c->cache[c->pos++] = idx;
+ freelist_ = *(IndexT*)Map(idx);
+ }
+ }
+
+ void Drain(Cache *c) {
+ SpinMutexLock lock(&mtx_);
+ for (uptr i = 0; i < Cache::kSize / 2; i++) {
+ IndexT idx = c->cache[--c->pos];
+ *(IndexT*)Map(idx) = freelist_;
+ freelist_ = idx;
+ }
+ }
+};
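+
+// Illustrative usage sketch (mirroring the ClockAlloc typedef in
+// tsan_clock.h; the "clock" name below is just a label):
+//
+//   DenseSlabAlloc<ClockBlock, 1<<16, 1<<10> alloc("clock");
+//   DenseSlabAllocCache cache;
+//   alloc.InitCache(&cache);
+//   u32 idx = alloc.Alloc(&cache);    // a u32 index instead of a pointer
+//   ClockBlock *cb = alloc.Map(idx);  // map the index onto real memory
+//   alloc.Free(&cache, idx);          // return the index via the cache
+//   alloc.FlushCache(&cache);         // drain the cache to the freelist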
+
+} // namespace __tsan
+
+#endif // TSAN_DENSE_ALLOC_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_dense_alloc.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_dispatch_defs.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_dispatch_defs.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_dispatch_defs.h (revision 351984)
@@ -0,0 +1,66 @@
+//===-- tsan_dispatch_defs.h ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_DISPATCH_DEFS_H
+#define TSAN_DISPATCH_DEFS_H
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+
+typedef struct dispatch_object_s {} *dispatch_object_t;
+
+#define DISPATCH_DECL(name) \
+ typedef struct name##_s : public dispatch_object_s {} *name##_t
+
+DISPATCH_DECL(dispatch_queue);
+DISPATCH_DECL(dispatch_source);
+DISPATCH_DECL(dispatch_group);
+DISPATCH_DECL(dispatch_data);
+DISPATCH_DECL(dispatch_semaphore);
+DISPATCH_DECL(dispatch_io);
+
+typedef void (*dispatch_function_t)(void *arg);
+typedef void (^dispatch_block_t)(void);
+typedef void (^dispatch_io_handler_t)(bool done, dispatch_data_t data,
+ int error);
+
+typedef long dispatch_once_t; // NOLINT
+typedef __sanitizer::u64 dispatch_time_t;
+typedef int dispatch_fd_t; // NOLINT
+typedef unsigned long dispatch_io_type_t; // NOLINT
+typedef unsigned long dispatch_io_close_flags_t; // NOLINT
+
+extern "C" {
+void *dispatch_get_context(dispatch_object_t object);
+void dispatch_retain(dispatch_object_t object);
+void dispatch_release(dispatch_object_t object);
+
+extern const dispatch_block_t _dispatch_data_destructor_free;
+extern const dispatch_block_t _dispatch_data_destructor_munmap;
+} // extern "C"
+
+#define DISPATCH_DATA_DESTRUCTOR_DEFAULT nullptr
+#define DISPATCH_DATA_DESTRUCTOR_FREE _dispatch_data_destructor_free
+#define DISPATCH_DATA_DESTRUCTOR_MUNMAP _dispatch_data_destructor_munmap
+
+#if __has_attribute(noescape)
+ #define DISPATCH_NOESCAPE __attribute__((__noescape__))
+#else
+ #define DISPATCH_NOESCAPE
+#endif
+
+// Data types used in dispatch APIs
+typedef unsigned long size_t; // NOLINT
+typedef unsigned long uintptr_t; // NOLINT
+typedef __sanitizer::s64 off_t;
+typedef __sanitizer::u16 mode_t;
+typedef long long_t; // NOLINT
+
+#endif // TSAN_DISPATCH_DEFS_H
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_external.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_external.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_external.cc (revision 351984)
@@ -0,0 +1,124 @@
+//===-- tsan_external.cc --------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_rtl.h"
+#include "tsan_interceptors.h"
+
+namespace __tsan {
+
+#define CALLERPC ((uptr)__builtin_return_address(0))
+
+struct TagData {
+ const char *object_type;
+ const char *header;
+};
+
+static TagData registered_tags[kExternalTagMax] = {
+ {},
+ {"Swift variable", "Swift access race"},
+};
+static atomic_uint32_t used_tags{kExternalTagFirstUserAvailable}; // NOLINT.
+static TagData *GetTagData(uptr tag) {
+  // Invalid/corrupted tag? Better return nullptr and let the caller deal
+  // with it.
+ if (tag >= atomic_load(&used_tags, memory_order_relaxed)) return nullptr;
+ return &registered_tags[tag];
+}
+
+const char *GetObjectTypeFromTag(uptr tag) {
+ TagData *tag_data = GetTagData(tag);
+ return tag_data ? tag_data->object_type : nullptr;
+}
+
+const char *GetReportHeaderFromTag(uptr tag) {
+ TagData *tag_data = GetTagData(tag);
+ return tag_data ? tag_data->header : nullptr;
+}
+
+void InsertShadowStackFrameForTag(ThreadState *thr, uptr tag) {
+ FuncEntry(thr, (uptr)&registered_tags[tag]);
+}
+
+uptr TagFromShadowStackFrame(uptr pc) {
+ uptr tag_count = atomic_load(&used_tags, memory_order_relaxed);
+ void *pc_ptr = (void *)pc;
+ if (pc_ptr < GetTagData(0) || pc_ptr > GetTagData(tag_count - 1))
+ return 0;
+ return (TagData *)pc_ptr - GetTagData(0);
+}
+
+#if !SANITIZER_GO
+
+typedef void(*AccessFunc)(ThreadState *, uptr, uptr, int);
+void ExternalAccess(void *addr, void *caller_pc, void *tag, AccessFunc access) {
+ CHECK_LT(tag, atomic_load(&used_tags, memory_order_relaxed));
+ ThreadState *thr = cur_thread();
+ if (caller_pc) FuncEntry(thr, (uptr)caller_pc);
+ InsertShadowStackFrameForTag(thr, (uptr)tag);
+ bool in_ignored_lib;
+ if (!caller_pc || !libignore()->IsIgnored((uptr)caller_pc, &in_ignored_lib)) {
+ access(thr, CALLERPC, (uptr)addr, kSizeLog1);
+ }
+ FuncExit(thr);
+ if (caller_pc) FuncExit(thr);
+}
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__tsan_external_register_tag(const char *object_type) {
+ uptr new_tag = atomic_fetch_add(&used_tags, 1, memory_order_relaxed);
+ CHECK_LT(new_tag, kExternalTagMax);
+ GetTagData(new_tag)->object_type = internal_strdup(object_type);
+ char header[127] = {0};
+ internal_snprintf(header, sizeof(header), "race on %s", object_type);
+ GetTagData(new_tag)->header = internal_strdup(header);
+ return (void *)new_tag;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_external_register_header(void *tag, const char *header) {
+ CHECK_GE((uptr)tag, kExternalTagFirstUserAvailable);
+ CHECK_LT((uptr)tag, kExternalTagMax);
+ atomic_uintptr_t *header_ptr =
+ (atomic_uintptr_t *)&GetTagData((uptr)tag)->header;
+ header = internal_strdup(header);
+ char *old_header =
+ (char *)atomic_exchange(header_ptr, (uptr)header, memory_order_seq_cst);
+ if (old_header) internal_free(old_header);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_external_assign_tag(void *addr, void *tag) {
+ CHECK_LT(tag, atomic_load(&used_tags, memory_order_relaxed));
+ Allocator *a = allocator();
+ MBlock *b = nullptr;
+ if (a->PointerIsMine((void *)addr)) {
+ void *block_begin = a->GetBlockBegin((void *)addr);
+ if (block_begin) b = ctx->metamap.GetBlock((uptr)block_begin);
+ }
+ if (b) {
+ b->tag = (uptr)tag;
+ }
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_external_read(void *addr, void *caller_pc, void *tag) {
+ ExternalAccess(addr, caller_pc, tag, MemoryRead);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_external_write(void *addr, void *caller_pc, void *tag) {
+ ExternalAccess(addr, caller_pc, tag, MemoryWrite);
+}
+} // extern "C"
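+
+// Illustrative client-side usage of the interface above (the object pointer
+// and type name are hypothetical):
+//
+//   void *tag = __tsan_external_register_tag("MyCollection");
+//   void *obj = ...;  // heap object owned by the tsan allocator
+//   __tsan_external_assign_tag(obj, tag);
+//   __tsan_external_read(obj, __builtin_return_address(0), tag);
+//   __tsan_external_write(obj, __builtin_return_address(0), tag);
+//   // A race between these accesses is then reported as
+//   // "race on MyCollection" rather than a plain data race.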
+
+#endif // !SANITIZER_GO
+
+} // namespace __tsan
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_external.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_fd.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_fd.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_fd.cc (revision 351984)
@@ -0,0 +1,315 @@
+//===-- tsan_fd.cc --------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "tsan_fd.h"
+#include "tsan_rtl.h"
+#include <sanitizer_common/sanitizer_atomic.h>
+
+namespace __tsan {
+
+const int kTableSizeL1 = 1024;
+const int kTableSizeL2 = 1024;
+const int kTableSize = kTableSizeL1 * kTableSizeL2;
+
+struct FdSync {
+ atomic_uint64_t rc;
+};
+
+struct FdDesc {
+ FdSync *sync;
+ int creation_tid;
+ u32 creation_stack;
+};
+
+struct FdContext {
+ atomic_uintptr_t tab[kTableSizeL1];
+ // Addresses used for synchronization.
+ FdSync globsync;
+ FdSync filesync;
+ FdSync socksync;
+ u64 connectsync;
+};
+
+static FdContext fdctx;
+
+static bool bogusfd(int fd) {
+ // Apparently a bogus fd value.
+ return fd < 0 || fd >= kTableSize;
+}
+
+static FdSync *allocsync(ThreadState *thr, uptr pc) {
+ FdSync *s = (FdSync*)user_alloc_internal(thr, pc, sizeof(FdSync),
+ kDefaultAlignment, false);
+ atomic_store(&s->rc, 1, memory_order_relaxed);
+ return s;
+}
+
+static FdSync *ref(FdSync *s) {
+ if (s && atomic_load(&s->rc, memory_order_relaxed) != (u64)-1)
+ atomic_fetch_add(&s->rc, 1, memory_order_relaxed);
+ return s;
+}
+
+static void unref(ThreadState *thr, uptr pc, FdSync *s) {
+ if (s && atomic_load(&s->rc, memory_order_relaxed) != (u64)-1) {
+ if (atomic_fetch_sub(&s->rc, 1, memory_order_acq_rel) == 1) {
+ CHECK_NE(s, &fdctx.globsync);
+ CHECK_NE(s, &fdctx.filesync);
+ CHECK_NE(s, &fdctx.socksync);
+ user_free(thr, pc, s, false);
+ }
+ }
+}
+
+static FdDesc *fddesc(ThreadState *thr, uptr pc, int fd) {
+ CHECK_GE(fd, 0);
+ CHECK_LT(fd, kTableSize);
+ atomic_uintptr_t *pl1 = &fdctx.tab[fd / kTableSizeL2];
+ uptr l1 = atomic_load(pl1, memory_order_consume);
+ if (l1 == 0) {
+ uptr size = kTableSizeL2 * sizeof(FdDesc);
+ // We need this to reside in user memory to properly catch races on it.
+ void *p = user_alloc_internal(thr, pc, size, kDefaultAlignment, false);
+ internal_memset(p, 0, size);
+ MemoryResetRange(thr, (uptr)&fddesc, (uptr)p, size);
+ if (atomic_compare_exchange_strong(pl1, &l1, (uptr)p, memory_order_acq_rel))
+ l1 = (uptr)p;
+ else
+ user_free(thr, pc, p, false);
+ }
+ return &((FdDesc*)l1)[fd % kTableSizeL2]; // NOLINT
+}
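+
+// Worked index example for fddesc() (illustrative): with kTableSizeL2 == 1024,
+// fd 1500 resolves to first-level slot 1500 / 1024 == 1 and second-level
+// slot 1500 % 1024 == 476; the second-level table is lazily allocated in
+// user memory on first use so that races on descriptors are caught.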
+
+// pd must be already ref'ed.
+static void init(ThreadState *thr, uptr pc, int fd, FdSync *s,
+ bool write = true) {
+ FdDesc *d = fddesc(thr, pc, fd);
+ // As a matter of fact, we don't intercept all close calls.
+ // See e.g. libc __res_iclose().
+ if (d->sync) {
+ unref(thr, pc, d->sync);
+ d->sync = 0;
+ }
+ if (flags()->io_sync == 0) {
+ unref(thr, pc, s);
+ } else if (flags()->io_sync == 1) {
+ d->sync = s;
+ } else if (flags()->io_sync == 2) {
+ unref(thr, pc, s);
+ d->sync = &fdctx.globsync;
+ }
+ d->creation_tid = thr->tid;
+ d->creation_stack = CurrentStackId(thr, pc);
+ if (write) {
+ // To catch races between fd usage and open.
+ MemoryRangeImitateWrite(thr, pc, (uptr)d, 8);
+ } else {
+ // See the dup-related comment in FdClose.
+ MemoryRead(thr, pc, (uptr)d, kSizeLog8);
+ }
+}
+
+void FdInit() {
+ atomic_store(&fdctx.globsync.rc, (u64)-1, memory_order_relaxed);
+ atomic_store(&fdctx.filesync.rc, (u64)-1, memory_order_relaxed);
+ atomic_store(&fdctx.socksync.rc, (u64)-1, memory_order_relaxed);
+}
+
+void FdOnFork(ThreadState *thr, uptr pc) {
+  // On fork() we need to reset all fd's, because the child is going to
+  // close all of them, and that would cause races between previous
+  // reads/writes and the close.
+ for (int l1 = 0; l1 < kTableSizeL1; l1++) {
+ FdDesc *tab = (FdDesc*)atomic_load(&fdctx.tab[l1], memory_order_relaxed);
+ if (tab == 0)
+ break;
+ for (int l2 = 0; l2 < kTableSizeL2; l2++) {
+ FdDesc *d = &tab[l2];
+ MemoryResetRange(thr, pc, (uptr)d, 8);
+ }
+ }
+}
+
+bool FdLocation(uptr addr, int *fd, int *tid, u32 *stack) {
+ for (int l1 = 0; l1 < kTableSizeL1; l1++) {
+ FdDesc *tab = (FdDesc*)atomic_load(&fdctx.tab[l1], memory_order_relaxed);
+ if (tab == 0)
+ break;
+ if (addr >= (uptr)tab && addr < (uptr)(tab + kTableSizeL2)) {
+ int l2 = (addr - (uptr)tab) / sizeof(FdDesc);
+ FdDesc *d = &tab[l2];
+      *fd = l1 * kTableSizeL2 + l2;  // Each L1 slot covers kTableSizeL2 fds.
+ *tid = d->creation_tid;
+ *stack = d->creation_stack;
+ return true;
+ }
+ }
+ return false;
+}
+
+void FdAcquire(ThreadState *thr, uptr pc, int fd) {
+ if (bogusfd(fd))
+ return;
+ FdDesc *d = fddesc(thr, pc, fd);
+ FdSync *s = d->sync;
+ DPrintf("#%d: FdAcquire(%d) -> %p\n", thr->tid, fd, s);
+ MemoryRead(thr, pc, (uptr)d, kSizeLog8);
+ if (s)
+ Acquire(thr, pc, (uptr)s);
+}
+
+void FdRelease(ThreadState *thr, uptr pc, int fd) {
+ if (bogusfd(fd))
+ return;
+ FdDesc *d = fddesc(thr, pc, fd);
+ FdSync *s = d->sync;
+ DPrintf("#%d: FdRelease(%d) -> %p\n", thr->tid, fd, s);
+ MemoryRead(thr, pc, (uptr)d, kSizeLog8);
+ if (s)
+ Release(thr, pc, (uptr)s);
+}
+
+void FdAccess(ThreadState *thr, uptr pc, int fd) {
+ DPrintf("#%d: FdAccess(%d)\n", thr->tid, fd);
+ if (bogusfd(fd))
+ return;
+ FdDesc *d = fddesc(thr, pc, fd);
+ MemoryRead(thr, pc, (uptr)d, kSizeLog8);
+}
+
+void FdClose(ThreadState *thr, uptr pc, int fd, bool write) {
+ DPrintf("#%d: FdClose(%d)\n", thr->tid, fd);
+ if (bogusfd(fd))
+ return;
+ FdDesc *d = fddesc(thr, pc, fd);
+ if (write) {
+ // To catch races between fd usage and close.
+ MemoryWrite(thr, pc, (uptr)d, kSizeLog8);
+ } else {
+    // This path is used only by dup2/dup3 calls.
+    // We do a read instead of a write because there are a number of
+    // legitimate cases where a write would lead to false positives:
+    // 1. Some software dups a closed pipe in place of a socket before closing
+    //    the socket (actually to prevent races).
+    // 2. Some daemons dup /dev/null in place of stdin/stdout.
+    // On the other hand, we have not seen cases where a write here catches
+    // real bugs.
+ MemoryRead(thr, pc, (uptr)d, kSizeLog8);
+ }
+  // We need to clear it, because if we do not intercept some call out there
+  // that creates an fd, we will hit false positives.
+ MemoryResetRange(thr, pc, (uptr)d, 8);
+ unref(thr, pc, d->sync);
+ d->sync = 0;
+ d->creation_tid = 0;
+ d->creation_stack = 0;
+}
+
+void FdFileCreate(ThreadState *thr, uptr pc, int fd) {
+ DPrintf("#%d: FdFileCreate(%d)\n", thr->tid, fd);
+ if (bogusfd(fd))
+ return;
+ init(thr, pc, fd, &fdctx.filesync);
+}
+
+void FdDup(ThreadState *thr, uptr pc, int oldfd, int newfd, bool write) {
+ DPrintf("#%d: FdDup(%d, %d)\n", thr->tid, oldfd, newfd);
+ if (bogusfd(oldfd) || bogusfd(newfd))
+ return;
+  // Ignore the case when the user dups a not-yet-connected socket.
+ FdDesc *od = fddesc(thr, pc, oldfd);
+ MemoryRead(thr, pc, (uptr)od, kSizeLog8);
+ FdClose(thr, pc, newfd, write);
+ init(thr, pc, newfd, ref(od->sync), write);
+}
+
+void FdPipeCreate(ThreadState *thr, uptr pc, int rfd, int wfd) {
+ DPrintf("#%d: FdCreatePipe(%d, %d)\n", thr->tid, rfd, wfd);
+ FdSync *s = allocsync(thr, pc);
+ init(thr, pc, rfd, ref(s));
+ init(thr, pc, wfd, ref(s));
+ unref(thr, pc, s);
+}
+
+void FdEventCreate(ThreadState *thr, uptr pc, int fd) {
+ DPrintf("#%d: FdEventCreate(%d)\n", thr->tid, fd);
+ if (bogusfd(fd))
+ return;
+ init(thr, pc, fd, allocsync(thr, pc));
+}
+
+void FdSignalCreate(ThreadState *thr, uptr pc, int fd) {
+ DPrintf("#%d: FdSignalCreate(%d)\n", thr->tid, fd);
+ if (bogusfd(fd))
+ return;
+ init(thr, pc, fd, 0);
+}
+
+void FdInotifyCreate(ThreadState *thr, uptr pc, int fd) {
+ DPrintf("#%d: FdInotifyCreate(%d)\n", thr->tid, fd);
+ if (bogusfd(fd))
+ return;
+ init(thr, pc, fd, 0);
+}
+
+void FdPollCreate(ThreadState *thr, uptr pc, int fd) {
+ DPrintf("#%d: FdPollCreate(%d)\n", thr->tid, fd);
+ if (bogusfd(fd))
+ return;
+ init(thr, pc, fd, allocsync(thr, pc));
+}
+
+void FdSocketCreate(ThreadState *thr, uptr pc, int fd) {
+ DPrintf("#%d: FdSocketCreate(%d)\n", thr->tid, fd);
+ if (bogusfd(fd))
+ return;
+ // It can be a UDP socket.
+ init(thr, pc, fd, &fdctx.socksync);
+}
+
+void FdSocketAccept(ThreadState *thr, uptr pc, int fd, int newfd) {
+ DPrintf("#%d: FdSocketAccept(%d, %d)\n", thr->tid, fd, newfd);
+ if (bogusfd(fd))
+ return;
+ // Synchronize connect->accept.
+ Acquire(thr, pc, (uptr)&fdctx.connectsync);
+ init(thr, pc, newfd, &fdctx.socksync);
+}
+
+void FdSocketConnecting(ThreadState *thr, uptr pc, int fd) {
+ DPrintf("#%d: FdSocketConnecting(%d)\n", thr->tid, fd);
+ if (bogusfd(fd))
+ return;
+ // Synchronize connect->accept.
+ Release(thr, pc, (uptr)&fdctx.connectsync);
+}
+
+void FdSocketConnect(ThreadState *thr, uptr pc, int fd) {
+ DPrintf("#%d: FdSocketConnect(%d)\n", thr->tid, fd);
+ if (bogusfd(fd))
+ return;
+ init(thr, pc, fd, &fdctx.socksync);
+}
+
+uptr File2addr(const char *path) {
+ (void)path;
+ static u64 addr;
+ return (uptr)&addr;
+}
+
+uptr Dir2addr(const char *path) {
+ (void)path;
+ static u64 addr;
+ return (uptr)&addr;
+}
+
+} // namespace __tsan
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_fd.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_fd.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_fd.h (revision 351984)
@@ -0,0 +1,64 @@
+//===-- tsan_fd.h -----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// This file handles synchronization via IO.
+// People use IO for synchronization along the lines of:
+//
+// int X;
+// int client_socket; // initialized elsewhere
+// int server_socket; // initialized elsewhere
+//
+// Thread 1:
+// X = 42;
+// send(client_socket, ...);
+//
+// Thread 2:
+// if (recv(server_socket, ...) > 0)
+// assert(X == 42);
+//
+// This file determines the scope of the file descriptor (pipe, socket,
+// all local files, etc) and executes acquire and release operations on
+// the scope as necessary. Some scopes are very fine-grained (e.g. pipe
+// operations synchronize only with operations on the same pipe), while
+// others are coarse-grained (e.g. all operations on local files synchronize
+// with each other).
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_FD_H
+#define TSAN_FD_H
+
+#include "tsan_rtl.h"
+
+namespace __tsan {
+
+void FdInit();
+void FdAcquire(ThreadState *thr, uptr pc, int fd);
+void FdRelease(ThreadState *thr, uptr pc, int fd);
+void FdAccess(ThreadState *thr, uptr pc, int fd);
+void FdClose(ThreadState *thr, uptr pc, int fd, bool write = true);
+void FdFileCreate(ThreadState *thr, uptr pc, int fd);
+void FdDup(ThreadState *thr, uptr pc, int oldfd, int newfd, bool write);
+void FdPipeCreate(ThreadState *thr, uptr pc, int rfd, int wfd);
+void FdEventCreate(ThreadState *thr, uptr pc, int fd);
+void FdSignalCreate(ThreadState *thr, uptr pc, int fd);
+void FdInotifyCreate(ThreadState *thr, uptr pc, int fd);
+void FdPollCreate(ThreadState *thr, uptr pc, int fd);
+void FdSocketCreate(ThreadState *thr, uptr pc, int fd);
+void FdSocketAccept(ThreadState *thr, uptr pc, int fd, int newfd);
+void FdSocketConnecting(ThreadState *thr, uptr pc, int fd);
+void FdSocketConnect(ThreadState *thr, uptr pc, int fd);
+bool FdLocation(uptr addr, int *fd, int *tid, u32 *stack);
+void FdOnFork(ThreadState *thr, uptr pc);
+
+uptr File2addr(const char *path);
+uptr Dir2addr(const char *path);
+
+} // namespace __tsan
+
+#endif // TSAN_FD_H
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_flags.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_flags.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_flags.cc (revision 351984)
@@ -0,0 +1,125 @@
+//===-- tsan_flags.cc -----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_flag_parser.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "tsan_flags.h"
+#include "tsan_rtl.h"
+#include "tsan_mman.h"
+#include "ubsan/ubsan_flags.h"
+
+namespace __tsan {
+
+// Can be overridden in the frontend.
+#ifdef TSAN_EXTERNAL_HOOKS
+extern "C" const char* __tsan_default_options();
+#else
+SANITIZER_WEAK_DEFAULT_IMPL
+const char *__tsan_default_options() {
+ return "";
+}
+#endif
+
+void Flags::SetDefaults() {
+#define TSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
+#include "tsan_flags.inc"
+#undef TSAN_FLAG
+ // DDFlags
+ second_deadlock_stack = false;
+}
+
+void RegisterTsanFlags(FlagParser *parser, Flags *f) {
+#define TSAN_FLAG(Type, Name, DefaultValue, Description) \
+ RegisterFlag(parser, #Name, Description, &f->Name);
+#include "tsan_flags.inc"
+#undef TSAN_FLAG
+ // DDFlags
+ RegisterFlag(parser, "second_deadlock_stack",
+ "Report where each mutex is locked in deadlock reports",
+ &f->second_deadlock_stack);
+}
+
+void InitializeFlags(Flags *f, const char *env, const char *env_option_name) {
+ SetCommonFlagsDefaults();
+ {
+ // Override some common flags defaults.
+ CommonFlags cf;
+ cf.CopyFrom(*common_flags());
+ cf.allow_addr2line = true;
+ if (SANITIZER_GO) {
+ // Does not work as expected for Go: runtime handles SIGABRT and crashes.
+ cf.abort_on_error = false;
+ // Go does not have mutexes.
+ cf.detect_deadlocks = false;
+ }
+ cf.print_suppressions = false;
+ cf.stack_trace_format = " #%n %f %S %M";
+ cf.exitcode = 66;
+ cf.intercept_tls_get_addr = true;
+ OverrideCommonFlags(cf);
+ }
+
+ f->SetDefaults();
+
+ FlagParser parser;
+ RegisterTsanFlags(&parser, f);
+ RegisterCommonFlags(&parser);
+
+#if TSAN_CONTAINS_UBSAN
+ __ubsan::Flags *uf = __ubsan::flags();
+ uf->SetDefaults();
+
+ FlagParser ubsan_parser;
+ __ubsan::RegisterUbsanFlags(&ubsan_parser, uf);
+ RegisterCommonFlags(&ubsan_parser);
+#endif
+
+ // Let a frontend override.
+ parser.ParseString(__tsan_default_options());
+#if TSAN_CONTAINS_UBSAN
+ const char *ubsan_default_options = __ubsan::MaybeCallUbsanDefaultOptions();
+ ubsan_parser.ParseString(ubsan_default_options);
+#endif
+ // Override from command line.
+ parser.ParseString(env, env_option_name);
+#if TSAN_CONTAINS_UBSAN
+ ubsan_parser.ParseStringFromEnv("UBSAN_OPTIONS");
+#endif
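+
+  // Resulting precedence (illustrative summary): compiled-in defaults are
+  // overridden by __tsan_default_options(), which in turn is overridden by
+  // the environment (typically TSAN_OPTIONS).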
+
+ // Sanity check.
+ if (!f->report_bugs) {
+ f->report_thread_leaks = false;
+ f->report_destroy_locked = false;
+ f->report_signal_unsafe = false;
+ }
+
+ InitializeCommonFlags();
+
+ if (Verbosity()) ReportUnrecognizedFlags();
+
+ if (common_flags()->help) parser.PrintFlagDescriptions();
+
+ if (f->history_size < 0 || f->history_size > 7) {
+ Printf("ThreadSanitizer: incorrect value for history_size"
+ " (must be [0..7])\n");
+ Die();
+ }
+
+ if (f->io_sync < 0 || f->io_sync > 2) {
+ Printf("ThreadSanitizer: incorrect value for io_sync"
+ " (must be [0..2])\n");
+ Die();
+ }
+}
+
+} // namespace __tsan
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_flags.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_flags.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_flags.h (revision 351984)
@@ -0,0 +1,34 @@
+//===-- tsan_flags.h --------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+// NOTE: This file may be included into user code.
+//===----------------------------------------------------------------------===//
+
+#ifndef TSAN_FLAGS_H
+#define TSAN_FLAGS_H
+
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
+
+namespace __tsan {
+
+struct Flags : DDFlags {
+#define TSAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
+#include "tsan_flags.inc"
+#undef TSAN_FLAG
+
+ void SetDefaults();
+ void ParseFromString(const char *str);
+};
+
+void InitializeFlags(Flags *flags, const char *env,
+ const char *env_option_name = nullptr);
+} // namespace __tsan
+
+#endif // TSAN_FLAGS_H
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_flags.inc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_flags.inc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_flags.inc (revision 351984)
@@ -0,0 +1,83 @@
+//===-- tsan_flags.inc ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// TSan runtime flags.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_FLAG
+# error "Define TSAN_FLAG prior to including this file!"
+#endif
+
+// TSAN_FLAG(Type, Name, DefaultValue, Description)
+// See COMMON_FLAG in sanitizer_flags.inc for more details.
+
+TSAN_FLAG(bool, enable_annotations, true,
+ "Enable dynamic annotations, otherwise they are no-ops.")
+// Suppress a race report if we've already output another race report
+// with the same stack.
+TSAN_FLAG(bool, suppress_equal_stacks, true,
+ "Suppress a race report if we've already output another race report "
+ "with the same stack.")
+TSAN_FLAG(bool, suppress_equal_addresses, true,
+ "Suppress a race report if we've already output another race report "
+ "on the same address.")
+
+TSAN_FLAG(bool, report_bugs, true,
+ "Turns off bug reporting entirely (useful for benchmarking).")
+TSAN_FLAG(bool, report_thread_leaks, true, "Report thread leaks at exit?")
+TSAN_FLAG(bool, report_destroy_locked, true,
+ "Report destruction of a locked mutex?")
+TSAN_FLAG(bool, report_mutex_bugs, true,
+ "Report incorrect usages of mutexes and mutex annotations?")
+TSAN_FLAG(bool, report_signal_unsafe, true,
+ "Report violations of async signal-safety "
+ "(e.g. malloc() call from a signal handler).")
+TSAN_FLAG(bool, report_atomic_races, true,
+ "Report races between atomic and plain memory accesses.")
+TSAN_FLAG(
+ bool, force_seq_cst_atomics, false,
+ "If set, all atomics are effectively sequentially consistent (seq_cst), "
+ "regardless of what user actually specified.")
+TSAN_FLAG(bool, print_benign, false, "Print matched \"benign\" races at exit.")
+TSAN_FLAG(bool, halt_on_error, false, "Exit after first reported error.")
+TSAN_FLAG(int, atexit_sleep_ms, 1000,
+ "Sleep in main thread before exiting for that many ms "
+ "(useful to catch \"at exit\" races).")
+TSAN_FLAG(const char *, profile_memory, "",
+ "If set, periodically write memory profile to that file.")
+TSAN_FLAG(int, flush_memory_ms, 0, "Flush shadow memory every X ms.")
+TSAN_FLAG(int, flush_symbolizer_ms, 5000, "Flush symbolizer caches every X ms.")
+TSAN_FLAG(
+ int, memory_limit_mb, 0,
+ "Resident memory limit in MB to aim at."
+ "If the process consumes more memory, then TSan will flush shadow memory.")
+TSAN_FLAG(bool, stop_on_start, false,
+ "Stops on start until __tsan_resume() is called (for debugging).")
+TSAN_FLAG(bool, running_on_valgrind, false,
+ "Controls whether RunningOnValgrind() returns true or false.")
+// There are a lot of goroutines in Go, so we use smaller history.
+TSAN_FLAG(
+ int, history_size, SANITIZER_GO ? 1 : 3,
+ "Per-thread history size, controls how many previous memory accesses "
+ "are remembered per thread. Possible values are [0..7]. "
+ "history_size=0 amounts to 32K memory accesses. Each next value doubles "
+ "the amount of memory accesses, up to history_size=7 that amounts to "
+ "4M memory accesses. The default value is 2 (128K memory accesses).")
+TSAN_FLAG(int, io_sync, 1,
+ "Controls level of synchronization implied by IO operations. "
+ "0 - no synchronization "
+ "1 - reasonable level of synchronization (write->read)"
+ "2 - global synchronization of all IO operations.")
+TSAN_FLAG(bool, die_after_fork, true,
+ "Die after multi-threaded fork if the child creates new threads.")
+TSAN_FLAG(const char *, suppressions, "", "Suppressions file name.")
+TSAN_FLAG(bool, ignore_noninstrumented_modules, SANITIZER_MAC ? true : false,
+ "Interceptors should only detect races when called from instrumented "
+ "modules.")
+TSAN_FLAG(bool, shared_ptr_interceptor, true,
+ "Track atomic reference counting in libc++ shared_ptr and weak_ptr.")
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_flags.inc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_ignoreset.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_ignoreset.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_ignoreset.cc (revision 351984)
@@ -0,0 +1,46 @@
+//===-- tsan_ignoreset.cc -------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_ignoreset.h"
+
+namespace __tsan {
+
+const uptr IgnoreSet::kMaxSize;
+
+IgnoreSet::IgnoreSet()
+ : size_() {
+}
+
+void IgnoreSet::Add(u32 stack_id) {
+ if (size_ == kMaxSize)
+ return;
+ for (uptr i = 0; i < size_; i++) {
+ if (stacks_[i] == stack_id)
+ return;
+ }
+ stacks_[size_++] = stack_id;
+}
+
+void IgnoreSet::Reset() {
+ size_ = 0;
+}
+
+uptr IgnoreSet::Size() const {
+ return size_;
+}
+
+u32 IgnoreSet::At(uptr i) const {
+ CHECK_LT(i, size_);
+ CHECK_LE(size_, kMaxSize);
+ return stacks_[i];
+}
+
+} // namespace __tsan
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_ignoreset.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_ignoreset.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_ignoreset.h (revision 351984)
@@ -0,0 +1,37 @@
+//===-- tsan_ignoreset.h ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// IgnoreSet holds a set of stack traces where ignores were enabled.
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_IGNORESET_H
+#define TSAN_IGNORESET_H
+
+#include "tsan_defs.h"
+
+namespace __tsan {
+
+class IgnoreSet {
+ public:
+ static const uptr kMaxSize = 16;
+
+ IgnoreSet();
+ void Add(u32 stack_id);
+ void Reset();
+ uptr Size() const;
+ u32 At(uptr i) const;
+
+ private:
+ uptr size_;
+ u32 stacks_[kMaxSize];
+};
+
+} // namespace __tsan
+
+#endif // TSAN_IGNORESET_H
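A quick usage sketch of the class above (the stack ids are made-up values): Add() deduplicates, and once kMaxSize entries are stored further additions are silently dropped, so callers never need to check for overflow.

    __tsan::IgnoreSet ignores;
    ignores.Add(0x1001);                   // stored
    ignores.Add(0x1001);                   // duplicate, ignored
    ignores.Add(0x1002);                   // stored; Size() == 2
    for (__tsan::uptr i = 0; i < ignores.Size(); i++) {
      __tsan::u32 id = ignores.At(i);      // 0x1001, then 0x1002
      (void)id;
    }
    ignores.Reset();                       // Size() back to 0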
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_interceptors.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_interceptors.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_interceptors.cc (revision 351984)
@@ -0,0 +1,2855 @@
+//===-- tsan_interceptors.cc ----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// FIXME: move as many interceptors as possible into
+// sanitizer_common/sanitizer_common_interceptors.inc
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_atomic.h"
+#include "sanitizer_common/sanitizer_errno.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_linux.h"
+#include "sanitizer_common/sanitizer_platform_limits_netbsd.h"
+#include "sanitizer_common/sanitizer_platform_limits_posix.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_posix.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "sanitizer_common/sanitizer_tls_get_addr.h"
+#include "interception/interception.h"
+#include "tsan_interceptors.h"
+#include "tsan_interface.h"
+#include "tsan_platform.h"
+#include "tsan_suppressions.h"
+#include "tsan_rtl.h"
+#include "tsan_mman.h"
+#include "tsan_fd.h"
+
+
+using namespace __tsan; // NOLINT
+
+#if SANITIZER_FREEBSD || SANITIZER_MAC
+#define stdout __stdoutp
+#define stderr __stderrp
+#endif
+
+#if SANITIZER_NETBSD
+#define dirfd(dirp) (*(int *)(dirp))
+#define fileno_unlocked(fp) \
+ (((__sanitizer_FILE*)fp)->_file == -1 ? -1 : \
+ (int)(unsigned short)(((__sanitizer_FILE*)fp)->_file)) // NOLINT
+
+#define stdout ((__sanitizer_FILE*)&__sF[1])
+#define stderr ((__sanitizer_FILE*)&__sF[2])
+
+#define nanosleep __nanosleep50
+#define vfork __vfork14
+#endif
+
+#if SANITIZER_ANDROID
+#define mallopt(a, b)
+#endif
+
+#ifdef __mips__
+const int kSigCount = 129;
+#else
+const int kSigCount = 65;
+#endif
+
+#ifdef __mips__
+struct ucontext_t {
+ u64 opaque[768 / sizeof(u64) + 1];
+};
+#else
+struct ucontext_t {
+ // The size is determined by looking at the sizeof of the real ucontext_t on Linux.
+ u64 opaque[936 / sizeof(u64) + 1];
+};
+#endif
+
+#if defined(__x86_64__) || defined(__mips__) || SANITIZER_PPC64V1
+#define PTHREAD_ABI_BASE "GLIBC_2.3.2"
+#elif defined(__aarch64__) || SANITIZER_PPC64V2
+#define PTHREAD_ABI_BASE "GLIBC_2.17"
+#endif
+
+extern "C" int pthread_attr_init(void *attr);
+extern "C" int pthread_attr_destroy(void *attr);
+DECLARE_REAL(int, pthread_attr_getdetachstate, void *, void *)
+extern "C" int pthread_attr_setstacksize(void *attr, uptr stacksize);
+extern "C" int pthread_key_create(unsigned *key, void (*destructor)(void* v));
+extern "C" int pthread_setspecific(unsigned key, const void *v);
+DECLARE_REAL(int, pthread_mutexattr_gettype, void *, void *)
+DECLARE_REAL(int, fflush, __sanitizer_FILE *fp)
+DECLARE_REAL_AND_INTERCEPTOR(void *, malloc, uptr size)
+DECLARE_REAL_AND_INTERCEPTOR(void, free, void *ptr)
+extern "C" void *pthread_self();
+extern "C" void _exit(int status);
+#if !SANITIZER_NETBSD
+extern "C" int fileno_unlocked(void *stream);
+extern "C" int dirfd(void *dirp);
+#endif
+#if !SANITIZER_FREEBSD && !SANITIZER_ANDROID && !SANITIZER_NETBSD
+extern "C" int mallopt(int param, int value);
+#endif
+#if SANITIZER_NETBSD
+extern __sanitizer_FILE __sF[];
+#else
+extern __sanitizer_FILE *stdout, *stderr;
+#endif
+#if !SANITIZER_FREEBSD && !SANITIZER_MAC && !SANITIZER_NETBSD
+const int PTHREAD_MUTEX_RECURSIVE = 1;
+const int PTHREAD_MUTEX_RECURSIVE_NP = 1;
+#else
+const int PTHREAD_MUTEX_RECURSIVE = 2;
+const int PTHREAD_MUTEX_RECURSIVE_NP = 2;
+#endif
+#if !SANITIZER_FREEBSD && !SANITIZER_MAC && !SANITIZER_NETBSD
+const int EPOLL_CTL_ADD = 1;
+#endif
+const int SIGILL = 4;
+const int SIGABRT = 6;
+const int SIGFPE = 8;
+const int SIGSEGV = 11;
+const int SIGPIPE = 13;
+const int SIGTERM = 15;
+#if defined(__mips__) || SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_NETBSD
+const int SIGBUS = 10;
+const int SIGSYS = 12;
+#else
+const int SIGBUS = 7;
+const int SIGSYS = 31;
+#endif
+void *const MAP_FAILED = (void*)-1;
+#if SANITIZER_NETBSD
+const int PTHREAD_BARRIER_SERIAL_THREAD = 1234567;
+#elif !SANITIZER_MAC
+const int PTHREAD_BARRIER_SERIAL_THREAD = -1;
+#endif
+const int MAP_FIXED = 0x10;
+typedef long long_t; // NOLINT
+
+// From /usr/include/unistd.h
+# define F_ULOCK 0 /* Unlock a previously locked region. */
+# define F_LOCK 1 /* Lock a region for exclusive use. */
+# define F_TLOCK 2 /* Test and lock a region for exclusive use. */
+# define F_TEST 3 /* Test a region for other processes locks. */
+
+#if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_NETBSD
+const int SA_SIGINFO = 0x40;
+const int SIG_SETMASK = 3;
+#elif defined(__mips__)
+const int SA_SIGINFO = 8;
+const int SIG_SETMASK = 3;
+#else
+const int SA_SIGINFO = 4;
+const int SIG_SETMASK = 2;
+#endif
+
+#define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED \
+ (cur_thread_init(), !cur_thread()->is_inited)
+
+namespace __tsan {
+struct SignalDesc {
+ bool armed;
+ bool sigaction;
+ __sanitizer_siginfo siginfo;
+ ucontext_t ctx;
+};
+
+struct ThreadSignalContext {
+ int int_signal_send;
+ atomic_uintptr_t in_blocking_func;
+ atomic_uintptr_t have_pending_signals;
+ SignalDesc pending_signals[kSigCount];
+ // emptyset and oldset are too big for the stack.
+ __sanitizer_sigset_t emptyset;
+ __sanitizer_sigset_t oldset;
+};
+
+// The sole reason tsan wraps atexit callbacks is to establish synchronization
+// between callback setup and callback execution.
+struct AtExitCtx {
+ void (*f)();
+ void *arg;
+};
+
+// InterceptorContext holds all global data required for interceptors.
+// It's explicitly constructed in InitializeInterceptors with placement new
+// and is never destroyed. This allows usage of members with non-trivial
+// constructors and destructors.
+struct InterceptorContext {
+ // The object is 64-byte aligned, because we want hot data to be located
+ // in a single cache line if possible (it's accessed in every interceptor).
+ ALIGNED(64) LibIgnore libignore;
+ __sanitizer_sigaction sigactions[kSigCount];
+#if !SANITIZER_MAC && !SANITIZER_NETBSD
+ unsigned finalize_key;
+#endif
+
+ BlockingMutex atexit_mu;
+ Vector<struct AtExitCtx *> AtExitStack;
+
+ InterceptorContext()
+ : libignore(LINKER_INITIALIZED), AtExitStack() {
+ }
+};
+
+static ALIGNED(64) char interceptor_placeholder[sizeof(InterceptorContext)];
+InterceptorContext *interceptor_ctx() {
+ return reinterpret_cast<InterceptorContext*>(&interceptor_placeholder[0]);
+}
+
+LibIgnore *libignore() {
+ return &interceptor_ctx()->libignore;
+}
+
+void InitializeLibIgnore() {
+ const SuppressionContext &supp = *Suppressions();
+ const uptr n = supp.SuppressionCount();
+ for (uptr i = 0; i < n; i++) {
+ const Suppression *s = supp.SuppressionAt(i);
+ if (0 == internal_strcmp(s->type, kSuppressionLib))
+ libignore()->AddIgnoredLibrary(s->templ);
+ }
+ if (flags()->ignore_noninstrumented_modules)
+ libignore()->IgnoreNoninstrumentedModules(true);
+ libignore()->OnLibraryLoaded(0);
+}
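For context, the suppression type matched against kSuppressionLib here corresponds to called_from_lib entries in a TSan suppressions file; a small illustrative example of what ends up in AddIgnoredLibrary (the library name is made up):

    # tsan.supp -- passed via TSAN_OPTIONS=suppressions=tsan.supp
    # Each called_from_lib line becomes an ignored library in libignore.
    called_from_lib:libsqlite3.so
    # Other suppression types (e.g. race:) are handled by the report path.
    race:BenignCounterUpdate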
+
+// The following two hooks can be used for cooperative scheduling when
+// locking.
+#ifdef TSAN_EXTERNAL_HOOKS
+void OnPotentiallyBlockingRegionBegin();
+void OnPotentiallyBlockingRegionEnd();
+#else
+SANITIZER_WEAK_CXX_DEFAULT_IMPL void OnPotentiallyBlockingRegionBegin() {}
+SANITIZER_WEAK_CXX_DEFAULT_IMPL void OnPotentiallyBlockingRegionEnd() {}
+#endif
+
+} // namespace __tsan
+
+static ThreadSignalContext *SigCtx(ThreadState *thr) {
+ ThreadSignalContext *ctx = (ThreadSignalContext*)thr->signal_ctx;
+ if (ctx == 0 && !thr->is_dead) {
+ ctx = (ThreadSignalContext*)MmapOrDie(sizeof(*ctx), "ThreadSignalContext");
+ MemoryResetRange(thr, (uptr)&SigCtx, (uptr)ctx, sizeof(*ctx));
+ thr->signal_ctx = ctx;
+ }
+ return ctx;
+}
+
+ScopedInterceptor::ScopedInterceptor(ThreadState *thr, const char *fname,
+ uptr pc)
+ : thr_(thr), pc_(pc), in_ignored_lib_(false), ignoring_(false) {
+ Initialize(thr);
+ if (!thr_->is_inited) return;
+ if (!thr_->ignore_interceptors) FuncEntry(thr, pc);
+ DPrintf("#%d: intercept %s()\n", thr_->tid, fname);
+ ignoring_ =
+ !thr_->in_ignored_lib && libignore()->IsIgnored(pc, &in_ignored_lib_);
+ EnableIgnores();
+}
+
+ScopedInterceptor::~ScopedInterceptor() {
+ if (!thr_->is_inited) return;
+ DisableIgnores();
+ if (!thr_->ignore_interceptors) {
+ ProcessPendingSignals(thr_);
+ FuncExit(thr_);
+ CheckNoLocks(thr_);
+ }
+}
+
+void ScopedInterceptor::EnableIgnores() {
+ if (ignoring_) {
+ ThreadIgnoreBegin(thr_, pc_, /*save_stack=*/false);
+ if (flags()->ignore_noninstrumented_modules) thr_->suppress_reports++;
+ if (in_ignored_lib_) {
+ DCHECK(!thr_->in_ignored_lib);
+ thr_->in_ignored_lib = true;
+ }
+ }
+}
+
+void ScopedInterceptor::DisableIgnores() {
+ if (ignoring_) {
+ ThreadIgnoreEnd(thr_, pc_);
+ if (flags()->ignore_noninstrumented_modules) thr_->suppress_reports--;
+ if (in_ignored_lib_) {
+ DCHECK(thr_->in_ignored_lib);
+ thr_->in_ignored_lib = false;
+ }
+ }
+}
+
+#define TSAN_INTERCEPT(func) INTERCEPT_FUNCTION(func)
+#if SANITIZER_FREEBSD
+# define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION(func)
+# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func)
+# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func)
+#elif SANITIZER_NETBSD
+# define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION(func)
+# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func) \
+ INTERCEPT_FUNCTION(__libc_##func)
+# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func) \
+ INTERCEPT_FUNCTION(__libc_thr_##func)
+#else
+# define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION_VER(func, ver)
+# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func)
+# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func)
+#endif
+
+#define READ_STRING_OF_LEN(thr, pc, s, len, n) \
+ MemoryAccessRange((thr), (pc), (uptr)(s), \
+ common_flags()->strict_string_checks ? (len) + 1 : (n), false)
+
+#define READ_STRING(thr, pc, s, n) \
+ READ_STRING_OF_LEN((thr), (pc), (s), internal_strlen(s), (n))
+
+#define BLOCK_REAL(name) (BlockingCall(thr), REAL(name))
+
+struct BlockingCall {
+ explicit BlockingCall(ThreadState *thr)
+ : thr(thr)
+ , ctx(SigCtx(thr)) {
+ for (;;) {
+ atomic_store(&ctx->in_blocking_func, 1, memory_order_relaxed);
+ if (atomic_load(&ctx->have_pending_signals, memory_order_relaxed) == 0)
+ break;
+ atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
+ ProcessPendingSignals(thr);
+ }
+ // When we are in a "blocking call", we process signals asynchronously
+ // (right when they arrive). In this context we do not expect to be
+ // executing any user/runtime code. The known interceptor sequence when
+ // this is not true is: pthread_join -> munmap(stack). It's fine
+ // to ignore munmap in this case -- we handle stack shadow separately.
+ thr->ignore_interceptors++;
+ }
+
+ ~BlockingCall() {
+ thr->ignore_interceptors--;
+ atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
+ }
+
+ ThreadState *thr;
+ ThreadSignalContext *ctx;
+};
+
+TSAN_INTERCEPTOR(unsigned, sleep, unsigned sec) {
+ SCOPED_TSAN_INTERCEPTOR(sleep, sec);
+ unsigned res = BLOCK_REAL(sleep)(sec);
+ AfterSleep(thr, pc);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, usleep, long_t usec) {
+ SCOPED_TSAN_INTERCEPTOR(usleep, usec);
+ int res = BLOCK_REAL(usleep)(usec);
+ AfterSleep(thr, pc);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, nanosleep, void *req, void *rem) {
+ SCOPED_TSAN_INTERCEPTOR(nanosleep, req, rem);
+ int res = BLOCK_REAL(nanosleep)(req, rem);
+ AfterSleep(thr, pc);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pause, int fake) {
+ SCOPED_TSAN_INTERCEPTOR(pause, fake);
+ return BLOCK_REAL(pause)(fake);
+}
+
+static void at_exit_wrapper() {
+ AtExitCtx *ctx;
+ {
+ // Ensure thread-safety.
+ BlockingMutexLock l(&interceptor_ctx()->atexit_mu);
+
+ // Pop AtExitCtx from the top of the stack of callback functions
+ uptr element = interceptor_ctx()->AtExitStack.Size() - 1;
+ ctx = interceptor_ctx()->AtExitStack[element];
+ interceptor_ctx()->AtExitStack.PopBack();
+ }
+
+ Acquire(cur_thread(), (uptr)0, (uptr)ctx);
+ ((void(*)())ctx->f)();
+ InternalFree(ctx);
+}
+
+static void cxa_at_exit_wrapper(void *arg) {
+ Acquire(cur_thread(), 0, (uptr)arg);
+ AtExitCtx *ctx = (AtExitCtx*)arg;
+ ((void(*)(void *arg))ctx->f)(ctx->arg);
+ InternalFree(ctx);
+}
+
+static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
+ void *arg, void *dso);
+
+#if !SANITIZER_ANDROID
+TSAN_INTERCEPTOR(int, atexit, void (*f)()) {
+ if (in_symbolizer())
+ return 0;
+ // We want to set up the atexit callback even if we are in an ignored lib
+ // or after fork.
+ SCOPED_INTERCEPTOR_RAW(atexit, f);
+ return setup_at_exit_wrapper(thr, pc, (void(*)())f, 0, 0);
+}
+#endif
+
+TSAN_INTERCEPTOR(int, __cxa_atexit, void (*f)(void *a), void *arg, void *dso) {
+ if (in_symbolizer())
+ return 0;
+ SCOPED_TSAN_INTERCEPTOR(__cxa_atexit, f, arg, dso);
+ return setup_at_exit_wrapper(thr, pc, (void(*)())f, arg, dso);
+}
+
+static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
+ void *arg, void *dso) {
+ AtExitCtx *ctx = (AtExitCtx*)InternalAlloc(sizeof(AtExitCtx));
+ ctx->f = f;
+ ctx->arg = arg;
+ Release(thr, pc, (uptr)ctx);
+ // Memory allocation in __cxa_atexit will race with free during exit,
+ // because we do not see synchronization around the atexit callback list.
+ ThreadIgnoreBegin(thr, pc);
+ int res;
+ if (!dso) {
+ // NetBSD does not preserve the 2nd argument if dso is equal to 0
+ // Store ctx in a local stack-like structure
+
+ // Ensure thread-safety.
+ BlockingMutexLock l(&interceptor_ctx()->atexit_mu);
+
+ res = REAL(__cxa_atexit)((void (*)(void *a))at_exit_wrapper, 0, 0);
+ // Push AtExitCtx on the top of the stack of callback functions
+ if (!res) {
+ interceptor_ctx()->AtExitStack.PushBack(ctx);
+ }
+ } else {
+ res = REAL(__cxa_atexit)(cxa_at_exit_wrapper, ctx, dso);
+ }
+ ThreadIgnoreEnd(thr, pc);
+ return res;
+}
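The Release() above pairs with the Acquire() in at_exit_wrapper/cxa_at_exit_wrapper, so data written before registration is visible in the callback without a false race. A hedged sketch of the user pattern this covers (names are illustrative):

    #include <cstdio>
    #include <cstdlib>

    static int g_state;  // written before atexit(), read in the callback

    static void cleanup() {
      // The wrapper's Acquire is ordered after the interceptor's Release
      // at registration time, so this read is not reported as a race.
      std::printf("state=%d\n", g_state);
    }

    void init() {
      g_state = 42;
      std::atexit(cleanup);
    }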
+
+#if !SANITIZER_MAC && !SANITIZER_NETBSD
+static void on_exit_wrapper(int status, void *arg) {
+ ThreadState *thr = cur_thread();
+ uptr pc = 0;
+ Acquire(thr, pc, (uptr)arg);
+ AtExitCtx *ctx = (AtExitCtx*)arg;
+ ((void(*)(int status, void *arg))ctx->f)(status, ctx->arg);
+ InternalFree(ctx);
+}
+
+TSAN_INTERCEPTOR(int, on_exit, void(*f)(int, void*), void *arg) {
+ if (in_symbolizer())
+ return 0;
+ SCOPED_TSAN_INTERCEPTOR(on_exit, f, arg);
+ AtExitCtx *ctx = (AtExitCtx*)InternalAlloc(sizeof(AtExitCtx));
+ ctx->f = (void(*)())f;
+ ctx->arg = arg;
+ Release(thr, pc, (uptr)ctx);
+ // Memory allocation in __cxa_atexit will race with free during exit,
+ // because we do not see synchronization around the atexit callback list.
+ ThreadIgnoreBegin(thr, pc);
+ int res = REAL(on_exit)(on_exit_wrapper, ctx);
+ ThreadIgnoreEnd(thr, pc);
+ return res;
+}
+#define TSAN_MAYBE_INTERCEPT_ON_EXIT TSAN_INTERCEPT(on_exit)
+#else
+#define TSAN_MAYBE_INTERCEPT_ON_EXIT
+#endif
+
+// Cleanup old bufs.
+static void JmpBufGarbageCollect(ThreadState *thr, uptr sp) {
+ for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) {
+ JmpBuf *buf = &thr->jmp_bufs[i];
+ if (buf->sp <= sp) {
+ uptr sz = thr->jmp_bufs.Size();
+ internal_memcpy(buf, &thr->jmp_bufs[sz - 1], sizeof(*buf));
+ thr->jmp_bufs.PopBack();
+ i--;
+ }
+ }
+}
+
+static void SetJmp(ThreadState *thr, uptr sp) {
+ if (!thr->is_inited) // called from libc guts during bootstrap
+ return;
+ // Cleanup old bufs.
+ JmpBufGarbageCollect(thr, sp);
+ // Remember the buf.
+ JmpBuf *buf = thr->jmp_bufs.PushBack();
+ buf->sp = sp;
+ buf->shadow_stack_pos = thr->shadow_stack_pos;
+ ThreadSignalContext *sctx = SigCtx(thr);
+ buf->int_signal_send = sctx ? sctx->int_signal_send : 0;
+ buf->in_blocking_func = sctx ?
+ atomic_load(&sctx->in_blocking_func, memory_order_relaxed) :
+ false;
+ buf->in_signal_handler = atomic_load(&thr->in_signal_handler,
+ memory_order_relaxed);
+}
+
+static void LongJmp(ThreadState *thr, uptr *env) {
+ uptr sp = ExtractLongJmpSp(env);
+ // Find the saved buf with matching sp.
+ for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) {
+ JmpBuf *buf = &thr->jmp_bufs[i];
+ if (buf->sp == sp) {
+ CHECK_GE(thr->shadow_stack_pos, buf->shadow_stack_pos);
+ // Unwind the stack.
+ while (thr->shadow_stack_pos > buf->shadow_stack_pos)
+ FuncExit(thr);
+ ThreadSignalContext *sctx = SigCtx(thr);
+ if (sctx) {
+ sctx->int_signal_send = buf->int_signal_send;
+ atomic_store(&sctx->in_blocking_func, buf->in_blocking_func,
+ memory_order_relaxed);
+ }
+ atomic_store(&thr->in_signal_handler, buf->in_signal_handler,
+ memory_order_relaxed);
+ JmpBufGarbageCollect(thr, buf->sp - 1); // do not collect buf->sp
+ return;
+ }
+ }
+ Printf("ThreadSanitizer: can't find longjmp buf\n");
+ CHECK(0);
+}
+
+// FIXME: put everything below into a common extern "C" block?
+extern "C" void __tsan_setjmp(uptr sp) {
+ cur_thread_init();
+ SetJmp(cur_thread(), sp);
+}
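__tsan_setjmp records the stack pointer and shadow-stack depth at the setjmp site; LongJmp later pops shadow-stack frames until the recorded depth is restored. A minimal sketch of the user code this keeps consistent:

    #include <csetjmp>

    static std::jmp_buf env;

    static void inner() {
      std::longjmp(env, 1);  // LongJmp unwinds inner()'s shadow frame
    }

    int run() {
      if (setjmp(env) == 0)  // __tsan_setjmp saves sp + shadow_stack_pos
        inner();
      return 0;              // shadow stack matches the setjmp point again
    }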
+
+#if SANITIZER_MAC
+TSAN_INTERCEPTOR(int, setjmp, void *env);
+TSAN_INTERCEPTOR(int, _setjmp, void *env);
+TSAN_INTERCEPTOR(int, sigsetjmp, void *env);
+#else // SANITIZER_MAC
+
+#if SANITIZER_NETBSD
+#define setjmp_symname __setjmp14
+#define sigsetjmp_symname __sigsetjmp14
+#else
+#define setjmp_symname setjmp
+#define sigsetjmp_symname sigsetjmp
+#endif
+
+#define TSAN_INTERCEPTOR_SETJMP_(x) __interceptor_ ## x
+#define TSAN_INTERCEPTOR_SETJMP__(x) TSAN_INTERCEPTOR_SETJMP_(x)
+#define TSAN_INTERCEPTOR_SETJMP TSAN_INTERCEPTOR_SETJMP__(setjmp_symname)
+#define TSAN_INTERCEPTOR_SIGSETJMP TSAN_INTERCEPTOR_SETJMP__(sigsetjmp_symname)
+
+#define TSAN_STRING_SETJMP SANITIZER_STRINGIFY(setjmp_symname)
+#define TSAN_STRING_SIGSETJMP SANITIZER_STRINGIFY(sigsetjmp_symname)
+
+// Not called. Merely to satisfy TSAN_INTERCEPT().
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+int TSAN_INTERCEPTOR_SETJMP(void *env);
+extern "C" int TSAN_INTERCEPTOR_SETJMP(void *env) {
+ CHECK(0);
+ return 0;
+}
+
+// FIXME: any reason to have a separate declaration?
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+int __interceptor__setjmp(void *env);
+extern "C" int __interceptor__setjmp(void *env) {
+ CHECK(0);
+ return 0;
+}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+int TSAN_INTERCEPTOR_SIGSETJMP(void *env);
+extern "C" int TSAN_INTERCEPTOR_SIGSETJMP(void *env) {
+ CHECK(0);
+ return 0;
+}
+
+#if !SANITIZER_NETBSD
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+int __interceptor___sigsetjmp(void *env);
+extern "C" int __interceptor___sigsetjmp(void *env) {
+ CHECK(0);
+ return 0;
+}
+#endif
+
+extern "C" int setjmp_symname(void *env);
+extern "C" int _setjmp(void *env);
+extern "C" int sigsetjmp_symname(void *env);
+#if !SANITIZER_NETBSD
+extern "C" int __sigsetjmp(void *env);
+#endif
+DEFINE_REAL(int, setjmp_symname, void *env)
+DEFINE_REAL(int, _setjmp, void *env)
+DEFINE_REAL(int, sigsetjmp_symname, void *env)
+#if !SANITIZER_NETBSD
+DEFINE_REAL(int, __sigsetjmp, void *env)
+#endif
+#endif // SANITIZER_MAC
+
+#if SANITIZER_NETBSD
+#define longjmp_symname __longjmp14
+#define siglongjmp_symname __siglongjmp14
+#else
+#define longjmp_symname longjmp
+#define siglongjmp_symname siglongjmp
+#endif
+
+TSAN_INTERCEPTOR(void, longjmp_symname, uptr *env, int val) {
+ // Note: if we call REAL(longjmp) in the context of ScopedInterceptor,
+ // bad things will happen. We will jump over the ScopedInterceptor dtor and
+ // can leave thr->in_ignored_lib set.
+ {
+ SCOPED_INTERCEPTOR_RAW(longjmp_symname, env, val);
+ }
+ LongJmp(cur_thread(), env);
+ REAL(longjmp_symname)(env, val);
+}
+
+TSAN_INTERCEPTOR(void, siglongjmp_symname, uptr *env, int val) {
+ {
+ SCOPED_INTERCEPTOR_RAW(siglongjmp_symname, env, val);
+ }
+ LongJmp(cur_thread(), env);
+ REAL(siglongjmp_symname)(env, val);
+}
+
+#if SANITIZER_NETBSD
+TSAN_INTERCEPTOR(void, _longjmp, uptr *env, int val) {
+ {
+ SCOPED_INTERCEPTOR_RAW(_longjmp, env, val);
+ }
+ LongJmp(cur_thread(), env);
+ REAL(_longjmp)(env, val);
+}
+#endif
+
+#if !SANITIZER_MAC
+TSAN_INTERCEPTOR(void*, malloc, uptr size) {
+ if (in_symbolizer())
+ return InternalAlloc(size);
+ void *p = 0;
+ {
+ SCOPED_INTERCEPTOR_RAW(malloc, size);
+ p = user_alloc(thr, pc, size);
+ }
+ invoke_malloc_hook(p, size);
+ return p;
+}
+
+TSAN_INTERCEPTOR(void*, __libc_memalign, uptr align, uptr sz) {
+ SCOPED_TSAN_INTERCEPTOR(__libc_memalign, align, sz);
+ return user_memalign(thr, pc, align, sz);
+}
+
+TSAN_INTERCEPTOR(void*, calloc, uptr size, uptr n) {
+ if (in_symbolizer())
+ return InternalCalloc(size, n);
+ void *p = 0;
+ {
+ SCOPED_INTERCEPTOR_RAW(calloc, size, n);
+ p = user_calloc(thr, pc, size, n);
+ }
+ invoke_malloc_hook(p, n * size);
+ return p;
+}
+
+TSAN_INTERCEPTOR(void*, realloc, void *p, uptr size) {
+ if (in_symbolizer())
+ return InternalRealloc(p, size);
+ if (p)
+ invoke_free_hook(p);
+ {
+ SCOPED_INTERCEPTOR_RAW(realloc, p, size);
+ p = user_realloc(thr, pc, p, size);
+ }
+ invoke_malloc_hook(p, size);
+ return p;
+}
+
+TSAN_INTERCEPTOR(void*, reallocarray, void *p, uptr size, uptr n) {
+ if (in_symbolizer())
+ return InternalReallocArray(p, size, n);
+ if (p)
+ invoke_free_hook(p);
+ {
+ SCOPED_INTERCEPTOR_RAW(reallocarray, p, size, n);
+ p = user_reallocarray(thr, pc, p, size, n);
+ }
+ invoke_malloc_hook(p, size);
+ return p;
+}
+
+TSAN_INTERCEPTOR(void, free, void *p) {
+ if (p == 0)
+ return;
+ if (in_symbolizer())
+ return InternalFree(p);
+ invoke_free_hook(p);
+ SCOPED_INTERCEPTOR_RAW(free, p);
+ user_free(thr, pc, p);
+}
+
+TSAN_INTERCEPTOR(void, cfree, void *p) {
+ if (p == 0)
+ return;
+ if (in_symbolizer())
+ return InternalFree(p);
+ invoke_free_hook(p);
+ SCOPED_INTERCEPTOR_RAW(cfree, p);
+ user_free(thr, pc, p);
+}
+
+TSAN_INTERCEPTOR(uptr, malloc_usable_size, void *p) {
+ SCOPED_INTERCEPTOR_RAW(malloc_usable_size, p);
+ return user_alloc_usable_size(p);
+}
+#endif
+
+TSAN_INTERCEPTOR(char*, strcpy, char *dst, const char *src) { // NOLINT
+ SCOPED_TSAN_INTERCEPTOR(strcpy, dst, src); // NOLINT
+ uptr srclen = internal_strlen(src);
+ MemoryAccessRange(thr, pc, (uptr)dst, srclen + 1, true);
+ MemoryAccessRange(thr, pc, (uptr)src, srclen + 1, false);
+ return REAL(strcpy)(dst, src); // NOLINT
+}
+
+TSAN_INTERCEPTOR(char*, strncpy, char *dst, char *src, uptr n) {
+ SCOPED_TSAN_INTERCEPTOR(strncpy, dst, src, n);
+ uptr srclen = internal_strnlen(src, n);
+ MemoryAccessRange(thr, pc, (uptr)dst, n, true);
+ MemoryAccessRange(thr, pc, (uptr)src, min(srclen + 1, n), false);
+ return REAL(strncpy)(dst, src, n);
+}
+
+TSAN_INTERCEPTOR(char*, strdup, const char *str) {
+ SCOPED_TSAN_INTERCEPTOR(strdup, str);
+ // strdup will call malloc, so no instrumentation is required here.
+ return REAL(strdup)(str);
+}
+
+static bool fix_mmap_addr(void **addr, long_t sz, int flags) {
+ if (*addr) {
+ if (!IsAppMem((uptr)*addr) || !IsAppMem((uptr)*addr + sz - 1)) {
+ if (flags & MAP_FIXED) {
+ errno = errno_EINVAL;
+ return false;
+ } else {
+ *addr = 0;
+ }
+ }
+ }
+ return true;
+}
+
+template <class Mmap>
+static void *mmap_interceptor(ThreadState *thr, uptr pc, Mmap real_mmap,
+ void *addr, SIZE_T sz, int prot, int flags,
+ int fd, OFF64_T off) {
+ if (!fix_mmap_addr(&addr, sz, flags)) return MAP_FAILED;
+ void *res = real_mmap(addr, sz, prot, flags, fd, off);
+ if (res != MAP_FAILED) {
+ if (fd > 0) FdAccess(thr, pc, fd);
+ if (thr->ignore_reads_and_writes == 0)
+ MemoryRangeImitateWrite(thr, pc, (uptr)res, sz);
+ else
+ MemoryResetRange(thr, pc, (uptr)res, sz);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, munmap, void *addr, long_t sz) {
+ SCOPED_TSAN_INTERCEPTOR(munmap, addr, sz);
+ if (sz != 0) {
+ // If sz == 0, munmap will return EINVAL and won't unmap any memory.
+ DontNeedShadowFor((uptr)addr, sz);
+ ScopedGlobalProcessor sgp;
+ ctx->metamap.ResetRange(thr->proc(), (uptr)addr, (uptr)sz);
+ }
+ int res = REAL(munmap)(addr, sz);
+ return res;
+}
+
+#if SANITIZER_LINUX
+TSAN_INTERCEPTOR(void*, memalign, uptr align, uptr sz) {
+ SCOPED_INTERCEPTOR_RAW(memalign, align, sz);
+ return user_memalign(thr, pc, align, sz);
+}
+#define TSAN_MAYBE_INTERCEPT_MEMALIGN TSAN_INTERCEPT(memalign)
+#else
+#define TSAN_MAYBE_INTERCEPT_MEMALIGN
+#endif
+
+#if !SANITIZER_MAC
+TSAN_INTERCEPTOR(void*, aligned_alloc, uptr align, uptr sz) {
+ if (in_symbolizer())
+ return InternalAlloc(sz, nullptr, align);
+ SCOPED_INTERCEPTOR_RAW(aligned_alloc, align, sz);
+ return user_aligned_alloc(thr, pc, align, sz);
+}
+
+TSAN_INTERCEPTOR(void*, valloc, uptr sz) {
+ if (in_symbolizer())
+ return InternalAlloc(sz, nullptr, GetPageSizeCached());
+ SCOPED_INTERCEPTOR_RAW(valloc, sz);
+ return user_valloc(thr, pc, sz);
+}
+#endif
+
+#if SANITIZER_LINUX
+TSAN_INTERCEPTOR(void*, pvalloc, uptr sz) {
+ if (in_symbolizer()) {
+ uptr PageSize = GetPageSizeCached();
+ sz = sz ? RoundUpTo(sz, PageSize) : PageSize;
+ return InternalAlloc(sz, nullptr, PageSize);
+ }
+ SCOPED_INTERCEPTOR_RAW(pvalloc, sz);
+ return user_pvalloc(thr, pc, sz);
+}
+#define TSAN_MAYBE_INTERCEPT_PVALLOC TSAN_INTERCEPT(pvalloc)
+#else
+#define TSAN_MAYBE_INTERCEPT_PVALLOC
+#endif
+
+#if !SANITIZER_MAC
+TSAN_INTERCEPTOR(int, posix_memalign, void **memptr, uptr align, uptr sz) {
+ if (in_symbolizer()) {
+ void *p = InternalAlloc(sz, nullptr, align);
+ if (!p)
+ return errno_ENOMEM;
+ *memptr = p;
+ return 0;
+ }
+ SCOPED_INTERCEPTOR_RAW(posix_memalign, memptr, align, sz);
+ return user_posix_memalign(thr, pc, memptr, align, sz);
+}
+#endif
+
+// __cxa_guard_acquire and friends need to be intercepted in a special way -
+// regular interceptors will break statically-linked libstdc++. Linux
+// interceptors are specially defined as weak functions (so that they don't
+// cause link errors when the user defines them as well), and they silently
+// auto-disable themselves when such a symbol is already present in the
+// binary. If we link libstdc++ statically, it will bring its own
+// __cxa_guard_acquire, which will silently replace our interceptor. That's
+// why on Linux we simply export these interceptors with INTERFACE_ATTRIBUTE.
+// On OS X, we don't support static linking, so we just use a regular
+// interceptor.
+#if SANITIZER_MAC
+#define STDCXX_INTERCEPTOR TSAN_INTERCEPTOR
+#else
+#define STDCXX_INTERCEPTOR(rettype, name, ...) \
+ extern "C" rettype INTERFACE_ATTRIBUTE name(__VA_ARGS__)
+#endif
+
+// Used in thread-safe function static initialization.
+STDCXX_INTERCEPTOR(int, __cxa_guard_acquire, atomic_uint32_t *g) {
+ SCOPED_INTERCEPTOR_RAW(__cxa_guard_acquire, g);
+ OnPotentiallyBlockingRegionBegin();
+ auto on_exit = at_scope_exit(&OnPotentiallyBlockingRegionEnd);
+ for (;;) {
+ u32 cmp = atomic_load(g, memory_order_acquire);
+ if (cmp == 0) {
+ if (atomic_compare_exchange_strong(g, &cmp, 1<<16, memory_order_relaxed))
+ return 1;
+ } else if (cmp == 1) {
+ Acquire(thr, pc, (uptr)g);
+ return 0;
+ } else {
+ internal_sched_yield();
+ }
+ }
+}
+
+STDCXX_INTERCEPTOR(void, __cxa_guard_release, atomic_uint32_t *g) {
+ SCOPED_INTERCEPTOR_RAW(__cxa_guard_release, g);
+ Release(thr, pc, (uptr)g);
+ atomic_store(g, 1, memory_order_release);
+}
+
+STDCXX_INTERCEPTOR(void, __cxa_guard_abort, atomic_uint32_t *g) {
+ SCOPED_INTERCEPTOR_RAW(__cxa_guard_abort, g);
+ atomic_store(g, 0, memory_order_relaxed);
+}
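These three guard interceptors are what give C++ function-local statics a happens-before edge under TSan: the initializing thread's Release in __cxa_guard_release pairs with the Acquire taken by threads that observe the guard already set. A small sketch of user code that exercises them (Widget is illustrative):

    #include <cstdio>

    struct Widget { Widget() { std::puts("constructed once"); } };

    Widget &instance() {
      // The compiler wraps this initialization in __cxa_guard_acquire/
      // __cxa_guard_release (or __cxa_guard_abort if the ctor throws), so
      // concurrent first calls are properly ordered under TSan.
      static Widget w;
      return w;
    }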
+
+namespace __tsan {
+void DestroyThreadState() {
+ ThreadState *thr = cur_thread();
+ Processor *proc = thr->proc();
+ ThreadFinish(thr);
+ ProcUnwire(proc, thr);
+ ProcDestroy(proc);
+ ThreadSignalContext *sctx = thr->signal_ctx;
+ if (sctx) {
+ thr->signal_ctx = 0;
+ UnmapOrDie(sctx, sizeof(*sctx));
+ }
+ DTLS_Destroy();
+ cur_thread_finalize();
+}
+} // namespace __tsan
+
+#if !SANITIZER_MAC && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
+static void thread_finalize(void *v) {
+ uptr iter = (uptr)v;
+ if (iter > 1) {
+ if (pthread_setspecific(interceptor_ctx()->finalize_key,
+ (void*)(iter - 1))) {
+ Printf("ThreadSanitizer: failed to set thread key\n");
+ Die();
+ }
+ return;
+ }
+ DestroyThreadState();
+}
+#endif
+
+
+struct ThreadParam {
+ void* (*callback)(void *arg);
+ void *param;
+ atomic_uintptr_t tid;
+};
+
+extern "C" void *__tsan_thread_start_func(void *arg) {
+ ThreadParam *p = (ThreadParam*)arg;
+ void* (*callback)(void *arg) = p->callback;
+ void *param = p->param;
+ int tid = 0;
+ {
+ cur_thread_init();
+ ThreadState *thr = cur_thread();
+ // Thread-local state is not initialized yet.
+ ScopedIgnoreInterceptors ignore;
+#if !SANITIZER_MAC && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
+ ThreadIgnoreBegin(thr, 0);
+ if (pthread_setspecific(interceptor_ctx()->finalize_key,
+ (void *)GetPthreadDestructorIterations())) {
+ Printf("ThreadSanitizer: failed to set thread key\n");
+ Die();
+ }
+ ThreadIgnoreEnd(thr, 0);
+#endif
+ while ((tid = atomic_load(&p->tid, memory_order_acquire)) == 0)
+ internal_sched_yield();
+ Processor *proc = ProcCreate();
+ ProcWire(proc, thr);
+ ThreadStart(thr, tid, GetTid(), ThreadType::Regular);
+ atomic_store(&p->tid, 0, memory_order_release);
+ }
+ void *res = callback(param);
+ // Prevent the callback from being tail called;
+ // that mixes up stack traces.
+ volatile int foo = 42;
+ foo++;
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_create,
+ void *th, void *attr, void *(*callback)(void*), void * param) {
+ SCOPED_INTERCEPTOR_RAW(pthread_create, th, attr, callback, param);
+
+ MaybeSpawnBackgroundThread();
+
+ if (ctx->after_multithreaded_fork) {
+ if (flags()->die_after_fork) {
+ Report("ThreadSanitizer: starting new threads after multi-threaded "
+ "fork is not supported. Dying (set die_after_fork=0 to override)\n");
+ Die();
+ } else {
+ VPrintf(1, "ThreadSanitizer: starting new threads after multi-threaded "
+ "fork is not supported (pid %d). Continuing because of "
+ "die_after_fork=0, but you are on your own\n", internal_getpid());
+ }
+ }
+ __sanitizer_pthread_attr_t myattr;
+ if (attr == 0) {
+ pthread_attr_init(&myattr);
+ attr = &myattr;
+ }
+ int detached = 0;
+ REAL(pthread_attr_getdetachstate)(attr, &detached);
+ AdjustStackSize(attr);
+
+ ThreadParam p;
+ p.callback = callback;
+ p.param = param;
+ atomic_store(&p.tid, 0, memory_order_relaxed);
+ int res = -1;
+ {
+ // Otherwise we see false positives in pthread stack manipulation.
+ ScopedIgnoreInterceptors ignore;
+ ThreadIgnoreBegin(thr, pc);
+ res = REAL(pthread_create)(th, attr, __tsan_thread_start_func, &p);
+ ThreadIgnoreEnd(thr, pc);
+ }
+ if (res == 0) {
+ int tid = ThreadCreate(thr, pc, *(uptr*)th, IsStateDetached(detached));
+ CHECK_NE(tid, 0);
+ // Synchronization on p.tid serves two purposes:
+ // 1. ThreadCreate must finish before the new thread starts.
+ // Otherwise the new thread can call pthread_detach, but the pthread_t
+ // identifier is not yet registered in ThreadRegistry by ThreadCreate.
+ // 2. ThreadStart must finish before this thread continues.
+ // Otherwise, this thread can call pthread_detach and reset thr->sync
+ // before the new thread got a chance to acquire from it in ThreadStart.
+ atomic_store(&p.tid, tid, memory_order_release);
+ while (atomic_load(&p.tid, memory_order_acquire) != 0)
+ internal_sched_yield();
+ }
+ if (attr == &myattr)
+ pthread_attr_destroy(&myattr);
+ return res;
+}
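Point 1 of the handshake above covers the common create-then-detach pattern; a minimal sketch of the user code it protects (worker is illustrative):

    #include <pthread.h>

    static void *worker(void *) { return nullptr; }

    void spawn_detached() {
      pthread_t th;
      if (pthread_create(&th, nullptr, worker, nullptr) == 0) {
        // Safe only because pthread_create does not return until
        // ThreadCreate has registered th in the ThreadRegistry.
        pthread_detach(th);
      }
    }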
+
+TSAN_INTERCEPTOR(int, pthread_join, void *th, void **ret) {
+ SCOPED_INTERCEPTOR_RAW(pthread_join, th, ret);
+ int tid = ThreadTid(thr, pc, (uptr)th);
+ ThreadIgnoreBegin(thr, pc);
+ int res = BLOCK_REAL(pthread_join)(th, ret);
+ ThreadIgnoreEnd(thr, pc);
+ if (res == 0) {
+ ThreadJoin(thr, pc, tid);
+ }
+ return res;
+}
+
+DEFINE_REAL_PTHREAD_FUNCTIONS
+
+TSAN_INTERCEPTOR(int, pthread_detach, void *th) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_detach, th);
+ int tid = ThreadTid(thr, pc, (uptr)th);
+ int res = REAL(pthread_detach)(th);
+ if (res == 0) {
+ ThreadDetach(thr, pc, tid);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(void, pthread_exit, void *retval) {
+ {
+ SCOPED_INTERCEPTOR_RAW(pthread_exit, retval);
+#if !SANITIZER_MAC && !SANITIZER_ANDROID
+ CHECK_EQ(thr, &cur_thread_placeholder);
+#endif
+ }
+ REAL(pthread_exit)(retval);
+}
+
+#if SANITIZER_LINUX
+TSAN_INTERCEPTOR(int, pthread_tryjoin_np, void *th, void **ret) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_tryjoin_np, th, ret);
+ int tid = ThreadTid(thr, pc, (uptr)th);
+ ThreadIgnoreBegin(thr, pc);
+ int res = REAL(pthread_tryjoin_np)(th, ret);
+ ThreadIgnoreEnd(thr, pc);
+ if (res == 0)
+ ThreadJoin(thr, pc, tid);
+ else
+ ThreadNotJoined(thr, pc, tid, (uptr)th);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_timedjoin_np, void *th, void **ret,
+ const struct timespec *abstime) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_timedjoin_np, th, ret, abstime);
+ int tid = ThreadTid(thr, pc, (uptr)th);
+ ThreadIgnoreBegin(thr, pc);
+ int res = BLOCK_REAL(pthread_timedjoin_np)(th, ret, abstime);
+ ThreadIgnoreEnd(thr, pc);
+ if (res == 0)
+ ThreadJoin(thr, pc, tid);
+ else
+ ThreadNotJoined(thr, pc, tid, (uptr)th);
+ return res;
+}
+#endif
+
+// Problem:
+// NPTL implementation of pthread_cond has 2 versions (2.2.5 and 2.3.2).
+// pthread_cond_t has a different size in the different versions.
+// If we call the new REAL functions for an old pthread_cond_t, they will
+// corrupt memory after the pthread_cond_t (the old cond is smaller).
+// If we call the old REAL functions for a new pthread_cond_t, we will lose
+// some functionality (e.g. old functions do not support waiting against
+// CLOCK_REALTIME).
+// Proper handling would require having 2 versions of the interceptors as
+// well. But this is messy; in particular, it requires linker scripts when
+// the sanitizer runtime is linked into a shared library.
+// Instead we assume we don't have dynamic libraries built against the old
+// pthread (2.2.5 dates back to 2002), and provide a legacy_pthread_cond flag
+// that allows working with old libraries (but this mode does not support
+// some features, e.g. pthread_condattr_getpshared).
+static void *init_cond(void *c, bool force = false) {
+ // sizeof(pthread_cond_t) >= sizeof(uptr) in both versions.
+ // So we allocate additional memory on the side large enough to hold
+ // any pthread_cond_t object. We always call the new REAL functions, but
+ // pass the aux object to them.
+ // Note: the code assumes that PTHREAD_COND_INITIALIZER initializes the
+ // first word of pthread_cond_t to zero.
+ // It's all relevant only for Linux.
+ if (!common_flags()->legacy_pthread_cond)
+ return c;
+ atomic_uintptr_t *p = (atomic_uintptr_t*)c;
+ uptr cond = atomic_load(p, memory_order_acquire);
+ if (!force && cond != 0)
+ return (void*)cond;
+ void *newcond = WRAP(malloc)(pthread_cond_t_sz);
+ internal_memset(newcond, 0, pthread_cond_t_sz);
+ if (atomic_compare_exchange_strong(p, &cond, (uptr)newcond,
+ memory_order_acq_rel))
+ return newcond;
+ WRAP(free)(newcond);
+ return (void*)cond;
+}
+
+struct CondMutexUnlockCtx {
+ ScopedInterceptor *si;
+ ThreadState *thr;
+ uptr pc;
+ void *m;
+};
+
+static void cond_mutex_unlock(CondMutexUnlockCtx *arg) {
+ // pthread_cond_wait interceptor has enabled async signal delivery
+ // (see BlockingCall below). Disable async signals since we are running
+ // tsan code. Also, the ScopedInterceptor and BlockingCall destructors
+ // won't run because the thread is cancelled, so we have to execute them
+ // manually (the thread can still run some user code due to
+ // pthread_cleanup_push).
+ ThreadSignalContext *ctx = SigCtx(arg->thr);
+ CHECK_EQ(atomic_load(&ctx->in_blocking_func, memory_order_relaxed), 1);
+ atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
+ MutexPostLock(arg->thr, arg->pc, (uptr)arg->m, MutexFlagDoPreLockOnPostLock);
+ // Undo BlockingCall ctor effects.
+ arg->thr->ignore_interceptors--;
+ arg->si->~ScopedInterceptor();
+}
+
+INTERCEPTOR(int, pthread_cond_init, void *c, void *a) {
+ void *cond = init_cond(c, true);
+ SCOPED_TSAN_INTERCEPTOR(pthread_cond_init, cond, a);
+ MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), true);
+ return REAL(pthread_cond_init)(cond, a);
+}
+
+static int cond_wait(ThreadState *thr, uptr pc, ScopedInterceptor *si,
+ int (*fn)(void *c, void *m, void *abstime), void *c,
+ void *m, void *t) {
+ MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
+ MutexUnlock(thr, pc, (uptr)m);
+ CondMutexUnlockCtx arg = {si, thr, pc, m};
+ int res = 0;
+ // This ensures that we handle the mutex lock even in case of pthread_cancel.
+ // See test/tsan/cond_cancel.cc.
+ {
+ // Enable signal delivery while the thread is blocked.
+ BlockingCall bc(thr);
+ res = call_pthread_cancel_with_cleanup(
+ fn, c, m, t, (void (*)(void *arg))cond_mutex_unlock, &arg);
+ }
+ if (res == errno_EOWNERDEAD) MutexRepair(thr, pc, (uptr)m);
+ MutexPostLock(thr, pc, (uptr)m, MutexFlagDoPreLockOnPostLock);
+ return res;
+}
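The cleanup callback matters because pthread_cancel can fire while the thread is parked inside fn; the canonical user pattern, in the spirit of the referenced test/tsan/cond_cancel.cc (names illustrative):

    #include <pthread.h>

    static pthread_mutex_t mu = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
    static bool done;

    static void unlock_mu(void *m) {
      pthread_mutex_unlock((pthread_mutex_t *)m);
    }

    static void *waiter(void *) {
      pthread_mutex_lock(&mu);
      pthread_cleanup_push(unlock_mu, &mu);
      while (!done)
        pthread_cond_wait(&cv, &mu);  // cancellation point; if cancelled
                                      // here, cond_mutex_unlock models the
                                      // implicit mutex reacquisition
      pthread_cleanup_pop(1);
      return nullptr;
    }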
+
+INTERCEPTOR(int, pthread_cond_wait, void *c, void *m) {
+ void *cond = init_cond(c);
+ SCOPED_TSAN_INTERCEPTOR(pthread_cond_wait, cond, m);
+ return cond_wait(thr, pc, &si, (int (*)(void *c, void *m, void *abstime))REAL(
+ pthread_cond_wait),
+ cond, m, 0);
+}
+
+INTERCEPTOR(int, pthread_cond_timedwait, void *c, void *m, void *abstime) {
+ void *cond = init_cond(c);
+ SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait, cond, m, abstime);
+ return cond_wait(thr, pc, &si, REAL(pthread_cond_timedwait), cond, m,
+ abstime);
+}
+
+#if SANITIZER_MAC
+INTERCEPTOR(int, pthread_cond_timedwait_relative_np, void *c, void *m,
+ void *reltime) {
+ void *cond = init_cond(c);
+ SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait_relative_np, cond, m, reltime);
+ return cond_wait(thr, pc, &si, REAL(pthread_cond_timedwait_relative_np), cond,
+ m, reltime);
+}
+#endif
+
+INTERCEPTOR(int, pthread_cond_signal, void *c) {
+ void *cond = init_cond(c);
+ SCOPED_TSAN_INTERCEPTOR(pthread_cond_signal, cond);
+ MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
+ return REAL(pthread_cond_signal)(cond);
+}
+
+INTERCEPTOR(int, pthread_cond_broadcast, void *c) {
+ void *cond = init_cond(c);
+ SCOPED_TSAN_INTERCEPTOR(pthread_cond_broadcast, cond);
+ MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
+ return REAL(pthread_cond_broadcast)(cond);
+}
+
+INTERCEPTOR(int, pthread_cond_destroy, void *c) {
+ void *cond = init_cond(c);
+ SCOPED_TSAN_INTERCEPTOR(pthread_cond_destroy, cond);
+ MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), true);
+ int res = REAL(pthread_cond_destroy)(cond);
+ if (common_flags()->legacy_pthread_cond) {
+ // Free our aux cond and zero the pointer to not leave dangling pointers.
+ WRAP(free)(cond);
+ atomic_store((atomic_uintptr_t*)c, 0, memory_order_relaxed);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_mutex_init, void *m, void *a) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_mutex_init, m, a);
+ int res = REAL(pthread_mutex_init)(m, a);
+ if (res == 0) {
+ u32 flagz = 0;
+ if (a) {
+ int type = 0;
+ if (REAL(pthread_mutexattr_gettype)(a, &type) == 0)
+ if (type == PTHREAD_MUTEX_RECURSIVE ||
+ type == PTHREAD_MUTEX_RECURSIVE_NP)
+ flagz |= MutexFlagWriteReentrant;
+ }
+ MutexCreate(thr, pc, (uptr)m, flagz);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_mutex_destroy, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_mutex_destroy, m);
+ int res = REAL(pthread_mutex_destroy)(m);
+ if (res == 0 || res == errno_EBUSY) {
+ MutexDestroy(thr, pc, (uptr)m);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_mutex_trylock, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_mutex_trylock, m);
+ int res = REAL(pthread_mutex_trylock)(m);
+ if (res == errno_EOWNERDEAD)
+ MutexRepair(thr, pc, (uptr)m);
+ if (res == 0 || res == errno_EOWNERDEAD)
+ MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
+ return res;
+}
+
+#if !SANITIZER_MAC
+TSAN_INTERCEPTOR(int, pthread_mutex_timedlock, void *m, void *abstime) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_mutex_timedlock, m, abstime);
+ int res = REAL(pthread_mutex_timedlock)(m, abstime);
+ if (res == 0) {
+ MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
+ }
+ return res;
+}
+#endif
+
+#if !SANITIZER_MAC
+TSAN_INTERCEPTOR(int, pthread_spin_init, void *m, int pshared) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_spin_init, m, pshared);
+ int res = REAL(pthread_spin_init)(m, pshared);
+ if (res == 0) {
+ MutexCreate(thr, pc, (uptr)m);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_spin_destroy, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_spin_destroy, m);
+ int res = REAL(pthread_spin_destroy)(m);
+ if (res == 0) {
+ MutexDestroy(thr, pc, (uptr)m);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_spin_lock, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_spin_lock, m);
+ MutexPreLock(thr, pc, (uptr)m);
+ int res = REAL(pthread_spin_lock)(m);
+ if (res == 0) {
+ MutexPostLock(thr, pc, (uptr)m);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_spin_trylock, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_spin_trylock, m);
+ int res = REAL(pthread_spin_trylock)(m);
+ if (res == 0) {
+ MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_spin_unlock, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_spin_unlock, m);
+ MutexUnlock(thr, pc, (uptr)m);
+ int res = REAL(pthread_spin_unlock)(m);
+ return res;
+}
+#endif
+
+TSAN_INTERCEPTOR(int, pthread_rwlock_init, void *m, void *a) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_init, m, a);
+ int res = REAL(pthread_rwlock_init)(m, a);
+ if (res == 0) {
+ MutexCreate(thr, pc, (uptr)m);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_rwlock_destroy, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_destroy, m);
+ int res = REAL(pthread_rwlock_destroy)(m);
+ if (res == 0) {
+ MutexDestroy(thr, pc, (uptr)m);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_rwlock_rdlock, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_rdlock, m);
+ MutexPreReadLock(thr, pc, (uptr)m);
+ int res = REAL(pthread_rwlock_rdlock)(m);
+ if (res == 0) {
+ MutexPostReadLock(thr, pc, (uptr)m);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_rwlock_tryrdlock, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_tryrdlock, m);
+ int res = REAL(pthread_rwlock_tryrdlock)(m);
+ if (res == 0) {
+ MutexPostReadLock(thr, pc, (uptr)m, MutexFlagTryLock);
+ }
+ return res;
+}
+
+#if !SANITIZER_MAC
+TSAN_INTERCEPTOR(int, pthread_rwlock_timedrdlock, void *m, void *abstime) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedrdlock, m, abstime);
+ int res = REAL(pthread_rwlock_timedrdlock)(m, abstime);
+ if (res == 0) {
+ MutexPostReadLock(thr, pc, (uptr)m);
+ }
+ return res;
+}
+#endif
+
+TSAN_INTERCEPTOR(int, pthread_rwlock_wrlock, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_wrlock, m);
+ MutexPreLock(thr, pc, (uptr)m);
+ int res = REAL(pthread_rwlock_wrlock)(m);
+ if (res == 0) {
+ MutexPostLock(thr, pc, (uptr)m);
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_rwlock_trywrlock, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_trywrlock, m);
+ int res = REAL(pthread_rwlock_trywrlock)(m);
+ if (res == 0) {
+ MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
+ }
+ return res;
+}
+
+#if !SANITIZER_MAC
+TSAN_INTERCEPTOR(int, pthread_rwlock_timedwrlock, void *m, void *abstime) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedwrlock, m, abstime);
+ int res = REAL(pthread_rwlock_timedwrlock)(m, abstime);
+ if (res == 0) {
+ MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
+ }
+ return res;
+}
+#endif
+
+TSAN_INTERCEPTOR(int, pthread_rwlock_unlock, void *m) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_unlock, m);
+ MutexReadOrWriteUnlock(thr, pc, (uptr)m);
+ int res = REAL(pthread_rwlock_unlock)(m);
+ return res;
+}
+
+#if !SANITIZER_MAC
+TSAN_INTERCEPTOR(int, pthread_barrier_init, void *b, void *a, unsigned count) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_barrier_init, b, a, count);
+ MemoryWrite(thr, pc, (uptr)b, kSizeLog1);
+ int res = REAL(pthread_barrier_init)(b, a, count);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_barrier_destroy, void *b) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_barrier_destroy, b);
+ MemoryWrite(thr, pc, (uptr)b, kSizeLog1);
+ int res = REAL(pthread_barrier_destroy)(b);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_barrier_wait, void *b) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_barrier_wait, b);
+ Release(thr, pc, (uptr)b);
+ MemoryRead(thr, pc, (uptr)b, kSizeLog1);
+ int res = REAL(pthread_barrier_wait)(b);
+ MemoryRead(thr, pc, (uptr)b, kSizeLog1);
+ if (res == 0 || res == PTHREAD_BARRIER_SERIAL_THREAD) {
+ Acquire(thr, pc, (uptr)b);
+ }
+ return res;
+}
+#endif
+
+TSAN_INTERCEPTOR(int, pthread_once, void *o, void (*f)()) {
+ SCOPED_INTERCEPTOR_RAW(pthread_once, o, f);
+ if (o == 0 || f == 0)
+ return errno_EINVAL;
+ atomic_uint32_t *a;
+
+ if (SANITIZER_MAC)
+ a = static_cast<atomic_uint32_t*>((void *)((char *)o + sizeof(long_t)));
+ else if (SANITIZER_NETBSD)
+ a = static_cast<atomic_uint32_t*>
+ ((void *)((char *)o + __sanitizer::pthread_mutex_t_sz));
+ else
+ a = static_cast<atomic_uint32_t*>(o);
+
+ u32 v = atomic_load(a, memory_order_acquire);
+ if (v == 0 && atomic_compare_exchange_strong(a, &v, 1,
+ memory_order_relaxed)) {
+ (*f)();
+ if (!thr->in_ignored_lib)
+ Release(thr, pc, (uptr)o);
+ atomic_store(a, 2, memory_order_release);
+ } else {
+ while (v != 2) {
+ internal_sched_yield();
+ v = atomic_load(a, memory_order_acquire);
+ }
+ if (!thr->in_ignored_lib)
+ Acquire(thr, pc, (uptr)o);
+ }
+ return 0;
+}
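The Release() after running f and the Acquire() taken by every later caller give pthread_once its publication semantics under TSan; a hedged user-level sketch (names illustrative):

    #include <pthread.h>

    static pthread_once_t once = PTHREAD_ONCE_INIT;
    static int *table;

    static void init_table() { table = new int[256](); }

    int lookup(int i) {
      // The initializing thread does Release((uptr)o) after init_table();
      // all other callers spin until the state is 2 and then Acquire, so
      // this read of table is ordered after the initialization.
      pthread_once(&once, init_table);
      return table[i];
    }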
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+TSAN_INTERCEPTOR(int, __fxstat, int version, int fd, void *buf) {
+ SCOPED_TSAN_INTERCEPTOR(__fxstat, version, fd, buf);
+ if (fd > 0)
+ FdAccess(thr, pc, fd);
+ return REAL(__fxstat)(version, fd, buf);
+}
+#define TSAN_MAYBE_INTERCEPT___FXSTAT TSAN_INTERCEPT(__fxstat)
+#else
+#define TSAN_MAYBE_INTERCEPT___FXSTAT
+#endif
+
+TSAN_INTERCEPTOR(int, fstat, int fd, void *buf) {
+#if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_ANDROID || SANITIZER_NETBSD
+ SCOPED_TSAN_INTERCEPTOR(fstat, fd, buf);
+ if (fd > 0)
+ FdAccess(thr, pc, fd);
+ return REAL(fstat)(fd, buf);
+#else
+ SCOPED_TSAN_INTERCEPTOR(__fxstat, 0, fd, buf);
+ if (fd > 0)
+ FdAccess(thr, pc, fd);
+ return REAL(__fxstat)(0, fd, buf);
+#endif
+}
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+TSAN_INTERCEPTOR(int, __fxstat64, int version, int fd, void *buf) {
+ SCOPED_TSAN_INTERCEPTOR(__fxstat64, version, fd, buf);
+ if (fd > 0)
+ FdAccess(thr, pc, fd);
+ return REAL(__fxstat64)(version, fd, buf);
+}
+#define TSAN_MAYBE_INTERCEPT___FXSTAT64 TSAN_INTERCEPT(__fxstat64)
+#else
+#define TSAN_MAYBE_INTERCEPT___FXSTAT64
+#endif
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+TSAN_INTERCEPTOR(int, fstat64, int fd, void *buf) {
+ SCOPED_TSAN_INTERCEPTOR(__fxstat64, 0, fd, buf);
+ if (fd > 0)
+ FdAccess(thr, pc, fd);
+ return REAL(__fxstat64)(0, fd, buf);
+}
+#define TSAN_MAYBE_INTERCEPT_FSTAT64 TSAN_INTERCEPT(fstat64)
+#else
+#define TSAN_MAYBE_INTERCEPT_FSTAT64
+#endif
+
+TSAN_INTERCEPTOR(int, open, const char *name, int flags, int mode) {
+ SCOPED_TSAN_INTERCEPTOR(open, name, flags, mode);
+ READ_STRING(thr, pc, name, 0);
+ int fd = REAL(open)(name, flags, mode);
+ if (fd >= 0)
+ FdFileCreate(thr, pc, fd);
+ return fd;
+}
+
+#if SANITIZER_LINUX
+TSAN_INTERCEPTOR(int, open64, const char *name, int flags, int mode) {
+ SCOPED_TSAN_INTERCEPTOR(open64, name, flags, mode);
+ READ_STRING(thr, pc, name, 0);
+ int fd = REAL(open64)(name, flags, mode);
+ if (fd >= 0)
+ FdFileCreate(thr, pc, fd);
+ return fd;
+}
+#define TSAN_MAYBE_INTERCEPT_OPEN64 TSAN_INTERCEPT(open64)
+#else
+#define TSAN_MAYBE_INTERCEPT_OPEN64
+#endif
+
+TSAN_INTERCEPTOR(int, creat, const char *name, int mode) {
+ SCOPED_TSAN_INTERCEPTOR(creat, name, mode);
+ READ_STRING(thr, pc, name, 0);
+ int fd = REAL(creat)(name, mode);
+ if (fd >= 0)
+ FdFileCreate(thr, pc, fd);
+ return fd;
+}
+
+#if SANITIZER_LINUX
+TSAN_INTERCEPTOR(int, creat64, const char *name, int mode) {
+ SCOPED_TSAN_INTERCEPTOR(creat64, name, mode);
+ READ_STRING(thr, pc, name, 0);
+ int fd = REAL(creat64)(name, mode);
+ if (fd >= 0)
+ FdFileCreate(thr, pc, fd);
+ return fd;
+}
+#define TSAN_MAYBE_INTERCEPT_CREAT64 TSAN_INTERCEPT(creat64)
+#else
+#define TSAN_MAYBE_INTERCEPT_CREAT64
+#endif
+
+TSAN_INTERCEPTOR(int, dup, int oldfd) {
+ SCOPED_TSAN_INTERCEPTOR(dup, oldfd);
+ int newfd = REAL(dup)(oldfd);
+ if (oldfd >= 0 && newfd >= 0 && newfd != oldfd)
+ FdDup(thr, pc, oldfd, newfd, true);
+ return newfd;
+}
+
+TSAN_INTERCEPTOR(int, dup2, int oldfd, int newfd) {
+ SCOPED_TSAN_INTERCEPTOR(dup2, oldfd, newfd);
+ int newfd2 = REAL(dup2)(oldfd, newfd);
+ if (oldfd >= 0 && newfd2 >= 0 && newfd2 != oldfd)
+ FdDup(thr, pc, oldfd, newfd2, false);
+ return newfd2;
+}
+
+#if !SANITIZER_MAC
+TSAN_INTERCEPTOR(int, dup3, int oldfd, int newfd, int flags) {
+ SCOPED_TSAN_INTERCEPTOR(dup3, oldfd, newfd, flags);
+ int newfd2 = REAL(dup3)(oldfd, newfd, flags);
+ if (oldfd >= 0 && newfd2 >= 0 && newfd2 != oldfd)
+ FdDup(thr, pc, oldfd, newfd2, false);
+ return newfd2;
+}
+#endif
+
+#if SANITIZER_LINUX
+TSAN_INTERCEPTOR(int, eventfd, unsigned initval, int flags) {
+ SCOPED_TSAN_INTERCEPTOR(eventfd, initval, flags);
+ int fd = REAL(eventfd)(initval, flags);
+ if (fd >= 0)
+ FdEventCreate(thr, pc, fd);
+ return fd;
+}
+#define TSAN_MAYBE_INTERCEPT_EVENTFD TSAN_INTERCEPT(eventfd)
+#else
+#define TSAN_MAYBE_INTERCEPT_EVENTFD
+#endif
+
+#if SANITIZER_LINUX
+TSAN_INTERCEPTOR(int, signalfd, int fd, void *mask, int flags) {
+ SCOPED_TSAN_INTERCEPTOR(signalfd, fd, mask, flags);
+ if (fd >= 0)
+ FdClose(thr, pc, fd);
+ fd = REAL(signalfd)(fd, mask, flags);
+ if (fd >= 0)
+ FdSignalCreate(thr, pc, fd);
+ return fd;
+}
+#define TSAN_MAYBE_INTERCEPT_SIGNALFD TSAN_INTERCEPT(signalfd)
+#else
+#define TSAN_MAYBE_INTERCEPT_SIGNALFD
+#endif
+
+#if SANITIZER_LINUX
+TSAN_INTERCEPTOR(int, inotify_init, int fake) {
+ SCOPED_TSAN_INTERCEPTOR(inotify_init, fake);
+ int fd = REAL(inotify_init)(fake);
+ if (fd >= 0)
+ FdInotifyCreate(thr, pc, fd);
+ return fd;
+}
+#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT TSAN_INTERCEPT(inotify_init)
+#else
+#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT
+#endif
+
+#if SANITIZER_LINUX
+TSAN_INTERCEPTOR(int, inotify_init1, int flags) {
+ SCOPED_TSAN_INTERCEPTOR(inotify_init1, flags);
+ int fd = REAL(inotify_init1)(flags);
+ if (fd >= 0)
+ FdInotifyCreate(thr, pc, fd);
+ return fd;
+}
+#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1 TSAN_INTERCEPT(inotify_init1)
+#else
+#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1
+#endif
+
+TSAN_INTERCEPTOR(int, socket, int domain, int type, int protocol) {
+ SCOPED_TSAN_INTERCEPTOR(socket, domain, type, protocol);
+ int fd = REAL(socket)(domain, type, protocol);
+ if (fd >= 0)
+ FdSocketCreate(thr, pc, fd);
+ return fd;
+}
+
+TSAN_INTERCEPTOR(int, socketpair, int domain, int type, int protocol, int *fd) {
+ SCOPED_TSAN_INTERCEPTOR(socketpair, domain, type, protocol, fd);
+ int res = REAL(socketpair)(domain, type, protocol, fd);
+ if (res == 0 && fd[0] >= 0 && fd[1] >= 0)
+ FdPipeCreate(thr, pc, fd[0], fd[1]);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, connect, int fd, void *addr, unsigned addrlen) {
+ SCOPED_TSAN_INTERCEPTOR(connect, fd, addr, addrlen);
+ FdSocketConnecting(thr, pc, fd);
+ int res = REAL(connect)(fd, addr, addrlen);
+ if (res == 0 && fd >= 0)
+ FdSocketConnect(thr, pc, fd);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, bind, int fd, void *addr, unsigned addrlen) {
+ SCOPED_TSAN_INTERCEPTOR(bind, fd, addr, addrlen);
+ int res = REAL(bind)(fd, addr, addrlen);
+ if (fd > 0 && res == 0)
+ FdAccess(thr, pc, fd);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, listen, int fd, int backlog) {
+ SCOPED_TSAN_INTERCEPTOR(listen, fd, backlog);
+ int res = REAL(listen)(fd, backlog);
+ if (fd > 0 && res == 0)
+ FdAccess(thr, pc, fd);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, close, int fd) {
+ SCOPED_TSAN_INTERCEPTOR(close, fd);
+ if (fd >= 0)
+ FdClose(thr, pc, fd);
+ return REAL(close)(fd);
+}
+
+#if SANITIZER_LINUX
+TSAN_INTERCEPTOR(int, __close, int fd) {
+ SCOPED_TSAN_INTERCEPTOR(__close, fd);
+ if (fd >= 0)
+ FdClose(thr, pc, fd);
+ return REAL(__close)(fd);
+}
+#define TSAN_MAYBE_INTERCEPT___CLOSE TSAN_INTERCEPT(__close)
+#else
+#define TSAN_MAYBE_INTERCEPT___CLOSE
+#endif
+
+// glibc guts
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+TSAN_INTERCEPTOR(void, __res_iclose, void *state, bool free_addr) {
+ SCOPED_TSAN_INTERCEPTOR(__res_iclose, state, free_addr);
+ int fds[64];
+ int cnt = ExtractResolvFDs(state, fds, ARRAY_SIZE(fds));
+ for (int i = 0; i < cnt; i++) {
+ if (fds[i] > 0)
+ FdClose(thr, pc, fds[i]);
+ }
+ REAL(__res_iclose)(state, free_addr);
+}
+#define TSAN_MAYBE_INTERCEPT___RES_ICLOSE TSAN_INTERCEPT(__res_iclose)
+#else
+#define TSAN_MAYBE_INTERCEPT___RES_ICLOSE
+#endif
+
+TSAN_INTERCEPTOR(int, pipe, int *pipefd) {
+ SCOPED_TSAN_INTERCEPTOR(pipe, pipefd);
+ int res = REAL(pipe)(pipefd);
+ if (res == 0 && pipefd[0] >= 0 && pipefd[1] >= 0)
+ FdPipeCreate(thr, pc, pipefd[0], pipefd[1]);
+ return res;
+}
+
+#if !SANITIZER_MAC
+TSAN_INTERCEPTOR(int, pipe2, int *pipefd, int flags) {
+ SCOPED_TSAN_INTERCEPTOR(pipe2, pipefd, flags);
+ int res = REAL(pipe2)(pipefd, flags);
+ if (res == 0 && pipefd[0] >= 0 && pipefd[1] >= 0)
+ FdPipeCreate(thr, pc, pipefd[0], pipefd[1]);
+ return res;
+}
+#endif
+
+TSAN_INTERCEPTOR(int, unlink, char *path) {
+ SCOPED_TSAN_INTERCEPTOR(unlink, path);
+ Release(thr, pc, File2addr(path));
+ int res = REAL(unlink)(path);
+ return res;
+}
+
+TSAN_INTERCEPTOR(void*, tmpfile, int fake) {
+ SCOPED_TSAN_INTERCEPTOR(tmpfile, fake);
+ void *res = REAL(tmpfile)(fake);
+ if (res) {
+ int fd = fileno_unlocked(res);
+ if (fd >= 0)
+ FdFileCreate(thr, pc, fd);
+ }
+ return res;
+}
+
+#if SANITIZER_LINUX
+TSAN_INTERCEPTOR(void*, tmpfile64, int fake) {
+ SCOPED_TSAN_INTERCEPTOR(tmpfile64, fake);
+ void *res = REAL(tmpfile64)(fake);
+ if (res) {
+ int fd = fileno_unlocked(res);
+ if (fd >= 0)
+ FdFileCreate(thr, pc, fd);
+ }
+ return res;
+}
+#define TSAN_MAYBE_INTERCEPT_TMPFILE64 TSAN_INTERCEPT(tmpfile64)
+#else
+#define TSAN_MAYBE_INTERCEPT_TMPFILE64
+#endif
+
+static void FlushStreams() {
+ // Flushing all the streams here may freeze the process if a child thread is
+ // performing file stream operations at the same time.
+ REAL(fflush)(stdout);
+ REAL(fflush)(stderr);
+}
+
+TSAN_INTERCEPTOR(void, abort, int fake) {
+ SCOPED_TSAN_INTERCEPTOR(abort, fake);
+ FlushStreams();
+ REAL(abort)(fake);
+}
+
+TSAN_INTERCEPTOR(int, rmdir, char *path) {
+ SCOPED_TSAN_INTERCEPTOR(rmdir, path);
+ Release(thr, pc, Dir2addr(path));
+ int res = REAL(rmdir)(path);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, closedir, void *dirp) {
+ SCOPED_TSAN_INTERCEPTOR(closedir, dirp);
+ if (dirp) {
+ int fd = dirfd(dirp);
+ FdClose(thr, pc, fd);
+ }
+ return REAL(closedir)(dirp);
+}
+
+#if SANITIZER_LINUX
+TSAN_INTERCEPTOR(int, epoll_create, int size) {
+ SCOPED_TSAN_INTERCEPTOR(epoll_create, size);
+ int fd = REAL(epoll_create)(size);
+ if (fd >= 0)
+ FdPollCreate(thr, pc, fd);
+ return fd;
+}
+
+TSAN_INTERCEPTOR(int, epoll_create1, int flags) {
+ SCOPED_TSAN_INTERCEPTOR(epoll_create1, flags);
+ int fd = REAL(epoll_create1)(flags);
+ if (fd >= 0)
+ FdPollCreate(thr, pc, fd);
+ return fd;
+}
+
+TSAN_INTERCEPTOR(int, epoll_ctl, int epfd, int op, int fd, void *ev) {
+ SCOPED_TSAN_INTERCEPTOR(epoll_ctl, epfd, op, fd, ev);
+ if (epfd >= 0)
+ FdAccess(thr, pc, epfd);
+ if (epfd >= 0 && fd >= 0)
+ FdAccess(thr, pc, fd);
+ if (op == EPOLL_CTL_ADD && epfd >= 0)
+ FdRelease(thr, pc, epfd);
+ int res = REAL(epoll_ctl)(epfd, op, fd, ev);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, epoll_wait, int epfd, void *ev, int cnt, int timeout) {
+ SCOPED_TSAN_INTERCEPTOR(epoll_wait, epfd, ev, cnt, timeout);
+ if (epfd >= 0)
+ FdAccess(thr, pc, epfd);
+ int res = BLOCK_REAL(epoll_wait)(epfd, ev, cnt, timeout);
+ if (res > 0 && epfd >= 0)
+ FdAcquire(thr, pc, epfd);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, epoll_pwait, int epfd, void *ev, int cnt, int timeout,
+ void *sigmask) {
+ SCOPED_TSAN_INTERCEPTOR(epoll_pwait, epfd, ev, cnt, timeout, sigmask);
+ if (epfd >= 0)
+ FdAccess(thr, pc, epfd);
+ int res = BLOCK_REAL(epoll_pwait)(epfd, ev, cnt, timeout, sigmask);
+ if (res > 0 && epfd >= 0)
+ FdAcquire(thr, pc, epfd);
+ return res;
+}
+
+#define TSAN_MAYBE_INTERCEPT_EPOLL \
+ TSAN_INTERCEPT(epoll_create); \
+ TSAN_INTERCEPT(epoll_create1); \
+ TSAN_INTERCEPT(epoll_ctl); \
+ TSAN_INTERCEPT(epoll_wait); \
+ TSAN_INTERCEPT(epoll_pwait)
+#else
+#define TSAN_MAYBE_INTERCEPT_EPOLL
+#endif
+
+// The following functions are intercepted merely to process pending signals.
+// If the program blocks signal X, we must deliver the signal before the
+// function returns. Similarly, if the program unblocks a signal (or returns
+// from sigsuspend), it's better to deliver the signal straight away.
+TSAN_INTERCEPTOR(int, sigsuspend, const __sanitizer_sigset_t *mask) {
+ SCOPED_TSAN_INTERCEPTOR(sigsuspend, mask);
+ return REAL(sigsuspend)(mask);
+}
+
+TSAN_INTERCEPTOR(int, sigblock, int mask) {
+ SCOPED_TSAN_INTERCEPTOR(sigblock, mask);
+ return REAL(sigblock)(mask);
+}
+
+TSAN_INTERCEPTOR(int, sigsetmask, int mask) {
+ SCOPED_TSAN_INTERCEPTOR(sigsetmask, mask);
+ return REAL(sigsetmask)(mask);
+}
+
+TSAN_INTERCEPTOR(int, pthread_sigmask, int how, const __sanitizer_sigset_t *set,
+ __sanitizer_sigset_t *oldset) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_sigmask, how, set, oldset);
+ return REAL(pthread_sigmask)(how, set, oldset);
+}
+
+namespace __tsan {
+
+static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire,
+ bool sigact, int sig,
+ __sanitizer_siginfo *info, void *uctx) {
+ __sanitizer_sigaction *sigactions = interceptor_ctx()->sigactions;
+ if (acquire)
+ Acquire(thr, 0, (uptr)&sigactions[sig]);
+ // Signals are generally asynchronous, so if we receive a signal when
+ // ignores are enabled we should disable ignores. This is critical for sync
+ // and interceptors, because otherwise we can miss synchronization and
+ // report false races.
+ int ignore_reads_and_writes = thr->ignore_reads_and_writes;
+ int ignore_interceptors = thr->ignore_interceptors;
+ int ignore_sync = thr->ignore_sync;
+ if (!ctx->after_multithreaded_fork) {
+ thr->ignore_reads_and_writes = 0;
+ thr->fast_state.ClearIgnoreBit();
+ thr->ignore_interceptors = 0;
+ thr->ignore_sync = 0;
+ }
+ // Detect whether the handler spoils errno: save it, set a sentinel value,
+ // and check the sentinel after the call.
+ const int saved_errno = errno;
+ errno = 99;
+ // This code races with sigaction. Be careful not to read sa_sigaction twice.
+ // Also remember the pc for reporting before the call,
+ // because the handler can reset it.
+ volatile uptr pc =
+ sigact ? (uptr)sigactions[sig].sigaction : (uptr)sigactions[sig].handler;
+ if (pc != sig_dfl && pc != sig_ign) {
+ if (sigact)
+ ((__sanitizer_sigactionhandler_ptr)pc)(sig, info, uctx);
+ else
+ ((__sanitizer_sighandler_ptr)pc)(sig);
+ }
+ if (!ctx->after_multithreaded_fork) {
+ thr->ignore_reads_and_writes = ignore_reads_and_writes;
+ if (ignore_reads_and_writes)
+ thr->fast_state.SetIgnoreBit();
+ thr->ignore_interceptors = ignore_interceptors;
+ thr->ignore_sync = ignore_sync;
+ }
+ // We do not detect errno spoiling for SIGTERM, because some SIGTERM
+ // handlers do spoil errno but reraise SIGTERM, and tsan would report a
+ // false positive in such a case.
+ // It's difficult to properly detect this situation (reraise), because in
+ // the async signal processing case (when the handler is called directly
+ // from rtl_generic_sighandler) we have not yet received the reraised
+ // signal, and it looks too fragile to intercept all ways to reraise a signal.
+ if (flags()->report_bugs && !sync && sig != SIGTERM && errno != 99) {
+ VarSizeStackTrace stack;
+ // StackTrace::GetNextInstructionPc(pc) is used because a return address is
+ // expected; OutputReport() will undo this.
+ ObtainCurrentStack(thr, StackTrace::GetNextInstructionPc(pc), &stack);
+ ThreadRegistryLock l(ctx->thread_registry);
+ ScopedReport rep(ReportTypeErrnoInSignal);
+ if (!IsFiredSuppression(ctx, ReportTypeErrnoInSignal, stack)) {
+ rep.AddStack(stack, true);
+ OutputReport(thr, rep);
+ }
+ }
+ errno = saved_errno;
+}
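+
+// A sketch of a handler the sentinel above catches (hypothetical user code,
+// not part of the runtime):
+//   void handler(int sig) {
+//     unlink("/nonexistent-path");  // fails and overwrites errno
+//   }                               // returns without restoring errno
+// If such a handler fires between a failing call and the program's errno
+// check, the program acts on the handler's errno value; setting errno to 99
+// before the call makes the spoiling observable afterwards.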
+
+void ProcessPendingSignals(ThreadState *thr) {
+ ThreadSignalContext *sctx = SigCtx(thr);
+ if (sctx == 0 ||
+ atomic_load(&sctx->have_pending_signals, memory_order_relaxed) == 0)
+ return;
+ atomic_store(&sctx->have_pending_signals, 0, memory_order_relaxed);
+ atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
+ internal_sigfillset(&sctx->emptyset);
+ int res = REAL(pthread_sigmask)(SIG_SETMASK, &sctx->emptyset, &sctx->oldset);
+ CHECK_EQ(res, 0);
+ for (int sig = 0; sig < kSigCount; sig++) {
+ SignalDesc *signal = &sctx->pending_signals[sig];
+ if (signal->armed) {
+ signal->armed = false;
+ CallUserSignalHandler(thr, false, true, signal->sigaction, sig,
+ &signal->siginfo, &signal->ctx);
+ }
+ }
+ res = REAL(pthread_sigmask)(SIG_SETMASK, &sctx->oldset, 0);
+ CHECK_EQ(res, 0);
+ atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed);
+}
+
+} // namespace __tsan
+
+static bool is_sync_signal(ThreadSignalContext *sctx, int sig) {
+ return sig == SIGSEGV || sig == SIGBUS || sig == SIGILL ||
+ sig == SIGABRT || sig == SIGFPE || sig == SIGPIPE || sig == SIGSYS ||
+ // If we are sending a signal to ourselves, we must process it now.
+ (sctx && sig == sctx->int_signal_send);
+}
+
+void ALWAYS_INLINE rtl_generic_sighandler(bool sigact, int sig,
+ __sanitizer_siginfo *info,
+ void *ctx) {
+ cur_thread_init();
+ ThreadState *thr = cur_thread();
+ ThreadSignalContext *sctx = SigCtx(thr);
+ if (sig < 0 || sig >= kSigCount) {
+ VPrintf(1, "ThreadSanitizer: ignoring signal %d\n", sig);
+ return;
+ }
+ // Don't mess with synchronous signals.
+ const bool sync = is_sync_signal(sctx, sig);
+ if (sync ||
+ // If we are in blocking function, we can safely process it now
+ // (but check if we are in a recursive interceptor,
+ // i.e. pthread_join()->munmap()).
+ (sctx && atomic_load(&sctx->in_blocking_func, memory_order_relaxed))) {
+ atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
+ if (sctx && atomic_load(&sctx->in_blocking_func, memory_order_relaxed)) {
+ atomic_store(&sctx->in_blocking_func, 0, memory_order_relaxed);
+ CallUserSignalHandler(thr, sync, true, sigact, sig, info, ctx);
+ atomic_store(&sctx->in_blocking_func, 1, memory_order_relaxed);
+ } else {
+ // Be very conservative about when we do an acquire in this case.
+ // It's unsafe to do acquire in async handlers, because ThreadState
+ // can be in inconsistent state.
+ // SIGSYS looks relatively safe -- it's synchronous and can actually
+ // need some global state.
+ bool acq = (sig == SIGSYS);
+ CallUserSignalHandler(thr, sync, acq, sigact, sig, info, ctx);
+ }
+ atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed);
+ return;
+ }
+
+ if (sctx == 0)
+ return;
+ SignalDesc *signal = &sctx->pending_signals[sig];
+ if (signal->armed == false) {
+ signal->armed = true;
+ signal->sigaction = sigact;
+ if (info)
+ internal_memcpy(&signal->siginfo, info, sizeof(*info));
+ if (ctx)
+ internal_memcpy(&signal->ctx, ctx, sizeof(signal->ctx));
+ atomic_store(&sctx->have_pending_signals, 1, memory_order_relaxed);
+ }
+}
+
+static void rtl_sighandler(int sig) {
+ rtl_generic_sighandler(false, sig, 0, 0);
+}
+
+static void rtl_sigaction(int sig, __sanitizer_siginfo *info, void *ctx) {
+ rtl_generic_sighandler(true, sig, info, ctx);
+}
+
+TSAN_INTERCEPTOR(int, raise, int sig) {
+ SCOPED_TSAN_INTERCEPTOR(raise, sig);
+ ThreadSignalContext *sctx = SigCtx(thr);
+ CHECK_NE(sctx, 0);
+ int prev = sctx->int_signal_send;
+ sctx->int_signal_send = sig;
+ int res = REAL(raise)(sig);
+ CHECK_EQ(sctx->int_signal_send, sig);
+ sctx->int_signal_send = prev;
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, kill, int pid, int sig) {
+ SCOPED_TSAN_INTERCEPTOR(kill, pid, sig);
+ ThreadSignalContext *sctx = SigCtx(thr);
+ CHECK_NE(sctx, 0);
+ int prev = sctx->int_signal_send;
+ if (pid == (int)internal_getpid()) {
+ sctx->int_signal_send = sig;
+ }
+ int res = REAL(kill)(pid, sig);
+ if (pid == (int)internal_getpid()) {
+ CHECK_EQ(sctx->int_signal_send, sig);
+ sctx->int_signal_send = prev;
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, pthread_kill, void *tid, int sig) {
+ SCOPED_TSAN_INTERCEPTOR(pthread_kill, tid, sig);
+ ThreadSignalContext *sctx = SigCtx(thr);
+ CHECK_NE(sctx, 0);
+ int prev = sctx->int_signal_send;
+ if (tid == pthread_self()) {
+ sctx->int_signal_send = sig;
+ }
+ int res = REAL(pthread_kill)(tid, sig);
+ if (tid == pthread_self()) {
+ CHECK_EQ(sctx->int_signal_send, sig);
+ sctx->int_signal_send = prev;
+ }
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, gettimeofday, void *tv, void *tz) {
+ SCOPED_TSAN_INTERCEPTOR(gettimeofday, tv, tz);
+ // It's intercepted merely to process pending signals.
+ return REAL(gettimeofday)(tv, tz);
+}
+
+TSAN_INTERCEPTOR(int, getaddrinfo, void *node, void *service,
+ void *hints, void *rv) {
+ SCOPED_TSAN_INTERCEPTOR(getaddrinfo, node, service, hints, rv);
+ // We miss atomic synchronization in getaddrinfo,
+ // and can report a false race between malloc and free
+ // inside of getaddrinfo. So ignore memory accesses.
+ ThreadIgnoreBegin(thr, pc);
+ int res = REAL(getaddrinfo)(node, service, hints, rv);
+ ThreadIgnoreEnd(thr, pc);
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, fork, int fake) {
+ if (in_symbolizer())
+ return REAL(fork)(fake);
+ SCOPED_INTERCEPTOR_RAW(fork, fake);
+ ForkBefore(thr, pc);
+ int pid;
+ {
+ // On OS X, REAL(fork) can call intercepted functions (OSSpinLockLock), and
+ // we'll assert in CheckNoLocks() unless we ignore interceptors.
+ ScopedIgnoreInterceptors ignore;
+ pid = REAL(fork)(fake);
+ }
+ if (pid == 0) {
+ // child
+ ForkChildAfter(thr, pc);
+ FdOnFork(thr, pc);
+ } else if (pid > 0) {
+ // parent
+ ForkParentAfter(thr, pc);
+ } else {
+ // error
+ ForkParentAfter(thr, pc);
+ }
+ return pid;
+}
+
+TSAN_INTERCEPTOR(int, vfork, int fake) {
+ // Some programs (e.g. openjdk) call close for all file descriptors
+ // in the child process. Under tsan this leads to false positives: the
+ // address space is shared, so the parent process also thinks that
+ // the descriptors are closed (while they actually are not), and the
+ // missed synchronization on them produces false race reports.
+ // Strictly speaking, this is undefined behavior, because the vfork child
+ // is not allowed to call any functions other than exec/exit. But this is
+ // what openjdk does, so we want to handle it.
+ // We could disable interceptors in the child process. But it's not possible
+ // to simply intercept and wrap vfork, because vfork child is not allowed
+ // to return from the function that calls vfork, and that's exactly what
+ // we would do. So this would require some assembly trickery as well.
+ // Instead we simply turn vfork into fork.
+ return WRAP(fork)(fake);
+}
+
+#if !SANITIZER_MAC && !SANITIZER_ANDROID
+typedef int (*dl_iterate_phdr_cb_t)(__sanitizer_dl_phdr_info *info, SIZE_T size,
+ void *data);
+struct dl_iterate_phdr_data {
+ ThreadState *thr;
+ uptr pc;
+ dl_iterate_phdr_cb_t cb;
+ void *data;
+};
+
+static bool IsAppNotRodata(uptr addr) {
+ return IsAppMem(addr) && *(u64*)MemToShadow(addr) != kShadowRodata;
+}
+
+static int dl_iterate_phdr_cb(__sanitizer_dl_phdr_info *info, SIZE_T size,
+ void *data) {
+ dl_iterate_phdr_data *cbdata = (dl_iterate_phdr_data *)data;
+ // dlopen/dlclose allocate/free dynamic-linker-internal memory, which is
+ // later accessible in the dl_iterate_phdr callback. But we don't see
+ // synchronization inside the dynamic linker, so we "unpoison" it here to
+ // avoid producing false reports. Ignoring malloc/free in dlopen/dlclose
+ // is not enough, because some libc functions call __libc_dlopen.
+ if (info && IsAppNotRodata((uptr)info->dlpi_name))
+ MemoryResetRange(cbdata->thr, cbdata->pc, (uptr)info->dlpi_name,
+ internal_strlen(info->dlpi_name));
+ int res = cbdata->cb(info, size, cbdata->data);
+ // Perform the check one more time in case info->dlpi_name was overwritten
+ // by the user callback.
+ if (info && IsAppNotRodata((uptr)info->dlpi_name))
+ MemoryResetRange(cbdata->thr, cbdata->pc, (uptr)info->dlpi_name,
+ internal_strlen(info->dlpi_name));
+ return res;
+}
+
+TSAN_INTERCEPTOR(int, dl_iterate_phdr, dl_iterate_phdr_cb_t cb, void *data) {
+ SCOPED_TSAN_INTERCEPTOR(dl_iterate_phdr, cb, data);
+ dl_iterate_phdr_data cbdata;
+ cbdata.thr = thr;
+ cbdata.pc = pc;
+ cbdata.cb = cb;
+ cbdata.data = data;
+ int res = REAL(dl_iterate_phdr)(dl_iterate_phdr_cb, &cbdata);
+ return res;
+}
+#endif
+
+static int OnExit(ThreadState *thr) {
+ int status = Finalize(thr);
+ FlushStreams();
+ return status;
+}
+
+struct TsanInterceptorContext {
+ ThreadState *thr;
+ const uptr caller_pc;
+ const uptr pc;
+};
+
+#if !SANITIZER_MAC
+static void HandleRecvmsg(ThreadState *thr, uptr pc,
+ __sanitizer_msghdr *msg) {
+ int fds[64];
+ int cnt = ExtractRecvmsgFDs(msg, fds, ARRAY_SIZE(fds));
+ for (int i = 0; i < cnt; i++)
+ FdEventCreate(thr, pc, fds[i]);
+}
+#endif
+
+#include "sanitizer_common/sanitizer_platform_interceptors.h"
+// Causes interceptor recursion (getaddrinfo() and fopen())
+#undef SANITIZER_INTERCEPT_GETADDRINFO
+// We define our own.
+#if SANITIZER_INTERCEPT_TLS_GET_ADDR
+#define NEED_TLS_GET_ADDR
+#endif
+#undef SANITIZER_INTERCEPT_TLS_GET_ADDR
+#undef SANITIZER_INTERCEPT_PTHREAD_SIGMASK
+
+#define COMMON_INTERCEPT_FUNCTION(name) INTERCEPT_FUNCTION(name)
+#define COMMON_INTERCEPT_FUNCTION_VER(name, ver) \
+ INTERCEPT_FUNCTION_VER(name, ver)
+
+#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size) \
+ MemoryAccessRange(((TsanInterceptorContext *)ctx)->thr, \
+ ((TsanInterceptorContext *)ctx)->pc, (uptr)ptr, size, \
+ true)
+
+#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size) \
+ MemoryAccessRange(((TsanInterceptorContext *) ctx)->thr, \
+ ((TsanInterceptorContext *) ctx)->pc, (uptr) ptr, size, \
+ false)
+
+#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...) \
+ SCOPED_TSAN_INTERCEPTOR(func, __VA_ARGS__); \
+ TsanInterceptorContext _ctx = {thr, caller_pc, pc}; \
+ ctx = (void *)&_ctx; \
+ (void) ctx;
+
+#define COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, func, ...) \
+ SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__); \
+ TsanInterceptorContext _ctx = {thr, caller_pc, pc}; \
+ ctx = (void *)&_ctx; \
+ (void) ctx;
+
+#define COMMON_INTERCEPTOR_FILE_OPEN(ctx, file, path) \
+ if (path) \
+ Acquire(thr, pc, File2addr(path)); \
+ if (file) { \
+ int fd = fileno_unlocked(file); \
+ if (fd >= 0) FdFileCreate(thr, pc, fd); \
+ }
+
+#define COMMON_INTERCEPTOR_FILE_CLOSE(ctx, file) \
+ if (file) { \
+ int fd = fileno_unlocked(file); \
+ if (fd >= 0) FdClose(thr, pc, fd); \
+ }
+
+#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) \
+ libignore()->OnLibraryLoaded(filename)
+
+#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() \
+ libignore()->OnLibraryUnloaded()
+
+#define COMMON_INTERCEPTOR_ACQUIRE(ctx, u) \
+ Acquire(((TsanInterceptorContext *) ctx)->thr, pc, u)
+
+#define COMMON_INTERCEPTOR_RELEASE(ctx, u) \
+ Release(((TsanInterceptorContext *) ctx)->thr, pc, u)
+
+#define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \
+ Acquire(((TsanInterceptorContext *) ctx)->thr, pc, Dir2addr(path))
+
+#define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \
+ FdAcquire(((TsanInterceptorContext *) ctx)->thr, pc, fd)
+
+#define COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd) \
+ FdRelease(((TsanInterceptorContext *) ctx)->thr, pc, fd)
+
+#define COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd) \
+ FdAccess(((TsanInterceptorContext *) ctx)->thr, pc, fd)
+
+#define COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, newfd) \
+ FdSocketAccept(((TsanInterceptorContext *) ctx)->thr, pc, fd, newfd)
+
+#define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) \
+ ThreadSetName(((TsanInterceptorContext *) ctx)->thr, name)
+
+#define COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name) \
+ __tsan::ctx->thread_registry->SetThreadNameByUserId(thread, name)
+
+#define COMMON_INTERCEPTOR_BLOCK_REAL(name) BLOCK_REAL(name)
+
+#define COMMON_INTERCEPTOR_ON_EXIT(ctx) \
+ OnExit(((TsanInterceptorContext *) ctx)->thr)
+
+#define COMMON_INTERCEPTOR_MUTEX_PRE_LOCK(ctx, m) \
+ MutexPreLock(((TsanInterceptorContext *)ctx)->thr, \
+ ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
+
+#define COMMON_INTERCEPTOR_MUTEX_POST_LOCK(ctx, m) \
+ MutexPostLock(((TsanInterceptorContext *)ctx)->thr, \
+ ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
+
+#define COMMON_INTERCEPTOR_MUTEX_UNLOCK(ctx, m) \
+ MutexUnlock(((TsanInterceptorContext *)ctx)->thr, \
+ ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
+
+#define COMMON_INTERCEPTOR_MUTEX_REPAIR(ctx, m) \
+ MutexRepair(((TsanInterceptorContext *)ctx)->thr, \
+ ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
+
+#define COMMON_INTERCEPTOR_MUTEX_INVALID(ctx, m) \
+ MutexInvalidAccess(((TsanInterceptorContext *)ctx)->thr, \
+ ((TsanInterceptorContext *)ctx)->pc, (uptr)m)
+
+#define COMMON_INTERCEPTOR_MMAP_IMPL(ctx, mmap, addr, sz, prot, flags, fd, \
+ off) \
+ do { \
+ return mmap_interceptor(thr, pc, REAL(mmap), addr, sz, prot, flags, fd, \
+ off); \
+ } while (false)
+
+#if !SANITIZER_MAC
+#define COMMON_INTERCEPTOR_HANDLE_RECVMSG(ctx, msg) \
+ HandleRecvmsg(((TsanInterceptorContext *)ctx)->thr, \
+ ((TsanInterceptorContext *)ctx)->pc, msg)
+#endif
+
+#define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end) \
+ if (TsanThread *t = GetCurrentThread()) { \
+ *begin = t->tls_begin(); \
+ *end = t->tls_end(); \
+ } else { \
+ *begin = *end = 0; \
+ }
+
+#define COMMON_INTERCEPTOR_USER_CALLBACK_START() \
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START()
+
+#define COMMON_INTERCEPTOR_USER_CALLBACK_END() \
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END()
+
+#include "sanitizer_common/sanitizer_common_interceptors.inc"
+
+static int sigaction_impl(int sig, const __sanitizer_sigaction *act,
+ __sanitizer_sigaction *old);
+static __sanitizer_sighandler_ptr signal_impl(int sig,
+ __sanitizer_sighandler_ptr h);
+
+#define SIGNAL_INTERCEPTOR_SIGACTION_IMPL(signo, act, oldact) \
+ { return sigaction_impl(signo, act, oldact); }
+
+#define SIGNAL_INTERCEPTOR_SIGNAL_IMPL(func, signo, handler) \
+ { return (uptr)signal_impl(signo, (__sanitizer_sighandler_ptr)handler); }
+
+#include "sanitizer_common/sanitizer_signal_interceptors.inc"
+
+int sigaction_impl(int sig, const __sanitizer_sigaction *act,
+ __sanitizer_sigaction *old) {
+ // Note: if we call REAL(sigaction) directly for any reason without proxying
+ // the signal handler through rtl_sigaction, very bad things will happen.
+ // The handler will run synchronously and corrupt tsan per-thread state.
+ SCOPED_INTERCEPTOR_RAW(sigaction, sig, act, old);
+ __sanitizer_sigaction *sigactions = interceptor_ctx()->sigactions;
+ __sanitizer_sigaction old_stored;
+ if (old) internal_memcpy(&old_stored, &sigactions[sig], sizeof(old_stored));
+ __sanitizer_sigaction newact;
+ if (act) {
+ // Copy act into sigactions[sig].
+ // Can't use a struct copy, because the compiler can emit a call to memcpy.
+ // Can't use internal_memcpy, because it copies byte-by-byte while the
+ // signal handler may read the handler concurrently, so it could read
+ // some bytes from the old value and some bytes from the new value.
+ // Use volatile to prevent insertion of memcpy.
+ sigactions[sig].handler =
+ *(volatile __sanitizer_sighandler_ptr const *)&act->handler;
+ sigactions[sig].sa_flags = *(volatile int const *)&act->sa_flags;
+ internal_memcpy(&sigactions[sig].sa_mask, &act->sa_mask,
+ sizeof(sigactions[sig].sa_mask));
+#if !SANITIZER_FREEBSD && !SANITIZER_MAC && !SANITIZER_NETBSD
+ sigactions[sig].sa_restorer = act->sa_restorer;
+#endif
+ internal_memcpy(&newact, act, sizeof(newact));
+ internal_sigfillset(&newact.sa_mask);
+ if ((uptr)act->handler != sig_ign && (uptr)act->handler != sig_dfl) {
+ if (newact.sa_flags & SA_SIGINFO)
+ newact.sigaction = rtl_sigaction;
+ else
+ newact.handler = rtl_sighandler;
+ }
+ ReleaseStore(thr, pc, (uptr)&sigactions[sig]);
+ act = &newact;
+ }
+ int res = REAL(sigaction)(sig, act, old);
+ if (res == 0 && old) {
+ uptr cb = (uptr)old->sigaction;
+ if (cb == (uptr)rtl_sigaction || cb == (uptr)rtl_sighandler) {
+ internal_memcpy(old, &old_stored, sizeof(*old));
+ }
+ }
+ return res;
+}
+
+static __sanitizer_sighandler_ptr signal_impl(int sig,
+ __sanitizer_sighandler_ptr h) {
+ __sanitizer_sigaction act;
+ act.handler = h;
+ internal_memset(&act.sa_mask, -1, sizeof(act.sa_mask));
+ act.sa_flags = 0;
+ __sanitizer_sigaction old;
+ int res = sigaction_symname(sig, &act, &old);
+ if (res) return (__sanitizer_sighandler_ptr)sig_err;
+ return old.handler;
+}
+
+#define TSAN_SYSCALL() \
+ ThreadState *thr = cur_thread(); \
+ if (thr->ignore_interceptors) \
+ return; \
+ ScopedSyscall scoped_syscall(thr) \
+/**/
+
+struct ScopedSyscall {
+ ThreadState *thr;
+
+ explicit ScopedSyscall(ThreadState *thr)
+ : thr(thr) {
+ Initialize(thr);
+ }
+
+ ~ScopedSyscall() {
+ ProcessPendingSignals(thr);
+ }
+};
+
+#if !SANITIZER_FREEBSD && !SANITIZER_MAC
+static void syscall_access_range(uptr pc, uptr p, uptr s, bool write) {
+ TSAN_SYSCALL();
+ MemoryAccessRange(thr, pc, p, s, write);
+}
+
+static void syscall_acquire(uptr pc, uptr addr) {
+ TSAN_SYSCALL();
+ Acquire(thr, pc, addr);
+ DPrintf("syscall_acquire(%p)\n", addr);
+}
+
+static void syscall_release(uptr pc, uptr addr) {
+ TSAN_SYSCALL();
+ DPrintf("syscall_release(%p)\n", addr);
+ Release(thr, pc, addr);
+}
+
+static void syscall_fd_close(uptr pc, int fd) {
+ TSAN_SYSCALL();
+ FdClose(thr, pc, fd);
+}
+
+static USED void syscall_fd_acquire(uptr pc, int fd) {
+ TSAN_SYSCALL();
+ FdAcquire(thr, pc, fd);
+ DPrintf("syscall_fd_acquire(%p)\n", fd);
+}
+
+static USED void syscall_fd_release(uptr pc, int fd) {
+ TSAN_SYSCALL();
+ DPrintf("syscall_fd_release(%p)\n", fd);
+ FdRelease(thr, pc, fd);
+}
+
+static void syscall_pre_fork(uptr pc) {
+ TSAN_SYSCALL();
+ ForkBefore(thr, pc);
+}
+
+static void syscall_post_fork(uptr pc, int pid) {
+ TSAN_SYSCALL();
+ if (pid == 0) {
+ // child
+ ForkChildAfter(thr, pc);
+ FdOnFork(thr, pc);
+ } else if (pid > 0) {
+ // parent
+ ForkParentAfter(thr, pc);
+ } else {
+ // error
+ ForkParentAfter(thr, pc);
+ }
+}
+#endif
+
+#define COMMON_SYSCALL_PRE_READ_RANGE(p, s) \
+ syscall_access_range(GET_CALLER_PC(), (uptr)(p), (uptr)(s), false)
+
+#define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) \
+ syscall_access_range(GET_CALLER_PC(), (uptr)(p), (uptr)(s), true)
+
+#define COMMON_SYSCALL_POST_READ_RANGE(p, s) \
+ do { \
+ (void)(p); \
+ (void)(s); \
+ } while (false)
+
+#define COMMON_SYSCALL_POST_WRITE_RANGE(p, s) \
+ do { \
+ (void)(p); \
+ (void)(s); \
+ } while (false)
+
+#define COMMON_SYSCALL_ACQUIRE(addr) \
+ syscall_acquire(GET_CALLER_PC(), (uptr)(addr))
+
+#define COMMON_SYSCALL_RELEASE(addr) \
+ syscall_release(GET_CALLER_PC(), (uptr)(addr))
+
+#define COMMON_SYSCALL_FD_CLOSE(fd) syscall_fd_close(GET_CALLER_PC(), fd)
+
+#define COMMON_SYSCALL_FD_ACQUIRE(fd) syscall_fd_acquire(GET_CALLER_PC(), fd)
+
+#define COMMON_SYSCALL_FD_RELEASE(fd) syscall_fd_release(GET_CALLER_PC(), fd)
+
+#define COMMON_SYSCALL_PRE_FORK() \
+ syscall_pre_fork(GET_CALLER_PC())
+
+#define COMMON_SYSCALL_POST_FORK(res) \
+ syscall_post_fork(GET_CALLER_PC(), res)
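+
+// Example expansion (a sketch with a hypothetical buffer/size pair): a hook
+// in the included .inc files that declares a written range,
+//   COMMON_SYSCALL_PRE_WRITE_RANGE(buf, sz);
+// expands to
+//   syscall_access_range(GET_CALLER_PC(), (uptr)(buf), (uptr)(sz), true);
+// where TSAN_SYSCALL() bails out early if interceptors are ignored and
+// ~ScopedSyscall processes pending signals on return.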
+
+#include "sanitizer_common/sanitizer_common_syscalls.inc"
+#include "sanitizer_common/sanitizer_syscalls_netbsd.inc"
+
+#ifdef NEED_TLS_GET_ADDR
+// Define our own interceptor instead of sanitizer_common's for three reasons:
+// 1. It must not process pending signals.
+// Signal handlers may contain MOVDQA instructions (see below).
+// 2. It must be as simple as possible, so as not to contain MOVDQA itself.
+// 3. The sanitizer_common version uses COMMON_INTERCEPTOR_INITIALIZE_RANGE,
+// which is empty for tsan (meant only for msan).
+// Note: __tls_get_addr can be called with a mis-aligned stack due to:
+// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58066
+// So the interceptor must work with a mis-aligned stack; in particular, it
+// must not execute MOVDQA with stack addresses.
+TSAN_INTERCEPTOR(void *, __tls_get_addr, void *arg) {
+ void *res = REAL(__tls_get_addr)(arg);
+ ThreadState *thr = cur_thread();
+ if (!thr)
+ return res;
+ DTLS::DTV *dtv = DTLS_on_tls_get_addr(arg, res, thr->tls_addr,
+ thr->tls_addr + thr->tls_size);
+ if (!dtv)
+ return res;
+ // New DTLS block has been allocated.
+ MemoryResetRange(thr, 0, dtv->beg, dtv->size);
+ return res;
+}
+#endif
+
+#if SANITIZER_NETBSD
+TSAN_INTERCEPTOR(void, _lwp_exit) {
+ SCOPED_TSAN_INTERCEPTOR(_lwp_exit);
+ DestroyThreadState();
+ REAL(_lwp_exit)();
+}
+#define TSAN_MAYBE_INTERCEPT__LWP_EXIT TSAN_INTERCEPT(_lwp_exit)
+#else
+#define TSAN_MAYBE_INTERCEPT__LWP_EXIT
+#endif
+
+#if SANITIZER_FREEBSD
+TSAN_INTERCEPTOR(void, thr_exit, tid_t *state) {
+ SCOPED_TSAN_INTERCEPTOR(thr_exit, state);
+ DestroyThreadState();
+ REAL(thr_exit)(state);
+}
+#define TSAN_MAYBE_INTERCEPT_THR_EXIT TSAN_INTERCEPT(thr_exit)
+#else
+#define TSAN_MAYBE_INTERCEPT_THR_EXIT
+#endif
+
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_init, void *c, void *a)
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_signal, void *c)
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_broadcast, void *c)
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_wait, void *c, void *m)
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_destroy, void *c)
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_init, void *m, void *a)
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_destroy, void *m)
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_trylock, void *m)
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_init, void *m, void *a)
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_destroy, void *m)
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_rdlock, void *m)
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_tryrdlock, void *m)
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_wrlock, void *m)
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_trywrlock, void *m)
+TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_unlock, void *m)
+TSAN_INTERCEPTOR_NETBSD_ALIAS_THR(int, once, void *o, void (*f)())
+TSAN_INTERCEPTOR_NETBSD_ALIAS_THR2(int, sigsetmask, sigmask, int a, void *b,
+ void *c)
+
+namespace __tsan {
+
+static void finalize(void *arg) {
+ ThreadState *thr = cur_thread();
+ int status = Finalize(thr);
+ // Make sure the output is not lost.
+ FlushStreams();
+ if (status)
+ Die();
+}
+
+#if !SANITIZER_MAC && !SANITIZER_ANDROID
+static void unreachable() {
+ Report("FATAL: ThreadSanitizer: unreachable called\n");
+ Die();
+}
+#endif
+
+// Define default implementation since interception of libdispatch is optional.
+SANITIZER_WEAK_ATTRIBUTE void InitializeLibdispatchInterceptors() {}
+
+void InitializeInterceptors() {
+#if !SANITIZER_MAC
+ // We need to set these up early, because functions like dlsym() can
+ // call them.
+ REAL(memset) = internal_memset;
+ REAL(memcpy) = internal_memcpy;
+#endif
+
+ // Instruct libc malloc to consume less memory.
+#if SANITIZER_LINUX
+ mallopt(1, 0); // M_MXFAST
+ mallopt(-3, 32*1024); // M_MMAP_THRESHOLD
+#endif
+
+ new(interceptor_ctx()) InterceptorContext();
+
+ InitializeCommonInterceptors();
+ InitializeSignalInterceptors();
+ InitializeLibdispatchInterceptors();
+
+#if !SANITIZER_MAC
+ // We cannot use TSAN_INTERCEPT to get the setjmp address, because it
+ // takes &setjmp, and setjmp is not present in some versions of libc.
+ using __interception::InterceptFunction;
+ InterceptFunction(TSAN_STRING_SETJMP, (uptr*)&REAL(setjmp_symname), 0, 0);
+ InterceptFunction("_setjmp", (uptr*)&REAL(_setjmp), 0, 0);
+ InterceptFunction(TSAN_STRING_SIGSETJMP, (uptr*)&REAL(sigsetjmp_symname), 0,
+ 0);
+#if !SANITIZER_NETBSD
+ InterceptFunction("__sigsetjmp", (uptr*)&REAL(__sigsetjmp), 0, 0);
+#endif
+#endif
+
+ TSAN_INTERCEPT(longjmp_symname);
+ TSAN_INTERCEPT(siglongjmp_symname);
+#if SANITIZER_NETBSD
+ TSAN_INTERCEPT(_longjmp);
+#endif
+
+ TSAN_INTERCEPT(malloc);
+ TSAN_INTERCEPT(__libc_memalign);
+ TSAN_INTERCEPT(calloc);
+ TSAN_INTERCEPT(realloc);
+ TSAN_INTERCEPT(reallocarray);
+ TSAN_INTERCEPT(free);
+ TSAN_INTERCEPT(cfree);
+ TSAN_INTERCEPT(munmap);
+ TSAN_MAYBE_INTERCEPT_MEMALIGN;
+ TSAN_INTERCEPT(valloc);
+ TSAN_MAYBE_INTERCEPT_PVALLOC;
+ TSAN_INTERCEPT(posix_memalign);
+
+ TSAN_INTERCEPT(strcpy); // NOLINT
+ TSAN_INTERCEPT(strncpy);
+ TSAN_INTERCEPT(strdup);
+
+ TSAN_INTERCEPT(pthread_create);
+ TSAN_INTERCEPT(pthread_join);
+ TSAN_INTERCEPT(pthread_detach);
+ TSAN_INTERCEPT(pthread_exit);
+ #if SANITIZER_LINUX
+ TSAN_INTERCEPT(pthread_tryjoin_np);
+ TSAN_INTERCEPT(pthread_timedjoin_np);
+ #endif
+
+ TSAN_INTERCEPT_VER(pthread_cond_init, PTHREAD_ABI_BASE);
+ TSAN_INTERCEPT_VER(pthread_cond_signal, PTHREAD_ABI_BASE);
+ TSAN_INTERCEPT_VER(pthread_cond_broadcast, PTHREAD_ABI_BASE);
+ TSAN_INTERCEPT_VER(pthread_cond_wait, PTHREAD_ABI_BASE);
+ TSAN_INTERCEPT_VER(pthread_cond_timedwait, PTHREAD_ABI_BASE);
+ TSAN_INTERCEPT_VER(pthread_cond_destroy, PTHREAD_ABI_BASE);
+
+ TSAN_INTERCEPT(pthread_mutex_init);
+ TSAN_INTERCEPT(pthread_mutex_destroy);
+ TSAN_INTERCEPT(pthread_mutex_trylock);
+ TSAN_INTERCEPT(pthread_mutex_timedlock);
+
+ TSAN_INTERCEPT(pthread_spin_init);
+ TSAN_INTERCEPT(pthread_spin_destroy);
+ TSAN_INTERCEPT(pthread_spin_lock);
+ TSAN_INTERCEPT(pthread_spin_trylock);
+ TSAN_INTERCEPT(pthread_spin_unlock);
+
+ TSAN_INTERCEPT(pthread_rwlock_init);
+ TSAN_INTERCEPT(pthread_rwlock_destroy);
+ TSAN_INTERCEPT(pthread_rwlock_rdlock);
+ TSAN_INTERCEPT(pthread_rwlock_tryrdlock);
+ TSAN_INTERCEPT(pthread_rwlock_timedrdlock);
+ TSAN_INTERCEPT(pthread_rwlock_wrlock);
+ TSAN_INTERCEPT(pthread_rwlock_trywrlock);
+ TSAN_INTERCEPT(pthread_rwlock_timedwrlock);
+ TSAN_INTERCEPT(pthread_rwlock_unlock);
+
+ TSAN_INTERCEPT(pthread_barrier_init);
+ TSAN_INTERCEPT(pthread_barrier_destroy);
+ TSAN_INTERCEPT(pthread_barrier_wait);
+
+ TSAN_INTERCEPT(pthread_once);
+
+ TSAN_INTERCEPT(fstat);
+ TSAN_MAYBE_INTERCEPT___FXSTAT;
+ TSAN_MAYBE_INTERCEPT_FSTAT64;
+ TSAN_MAYBE_INTERCEPT___FXSTAT64;
+ TSAN_INTERCEPT(open);
+ TSAN_MAYBE_INTERCEPT_OPEN64;
+ TSAN_INTERCEPT(creat);
+ TSAN_MAYBE_INTERCEPT_CREAT64;
+ TSAN_INTERCEPT(dup);
+ TSAN_INTERCEPT(dup2);
+ TSAN_INTERCEPT(dup3);
+ TSAN_MAYBE_INTERCEPT_EVENTFD;
+ TSAN_MAYBE_INTERCEPT_SIGNALFD;
+ TSAN_MAYBE_INTERCEPT_INOTIFY_INIT;
+ TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1;
+ TSAN_INTERCEPT(socket);
+ TSAN_INTERCEPT(socketpair);
+ TSAN_INTERCEPT(connect);
+ TSAN_INTERCEPT(bind);
+ TSAN_INTERCEPT(listen);
+ TSAN_MAYBE_INTERCEPT_EPOLL;
+ TSAN_INTERCEPT(close);
+ TSAN_MAYBE_INTERCEPT___CLOSE;
+ TSAN_MAYBE_INTERCEPT___RES_ICLOSE;
+ TSAN_INTERCEPT(pipe);
+ TSAN_INTERCEPT(pipe2);
+
+ TSAN_INTERCEPT(unlink);
+ TSAN_INTERCEPT(tmpfile);
+ TSAN_MAYBE_INTERCEPT_TMPFILE64;
+ TSAN_INTERCEPT(abort);
+ TSAN_INTERCEPT(rmdir);
+ TSAN_INTERCEPT(closedir);
+
+ TSAN_INTERCEPT(sigsuspend);
+ TSAN_INTERCEPT(sigblock);
+ TSAN_INTERCEPT(sigsetmask);
+ TSAN_INTERCEPT(pthread_sigmask);
+ TSAN_INTERCEPT(raise);
+ TSAN_INTERCEPT(kill);
+ TSAN_INTERCEPT(pthread_kill);
+ TSAN_INTERCEPT(sleep);
+ TSAN_INTERCEPT(usleep);
+ TSAN_INTERCEPT(nanosleep);
+ TSAN_INTERCEPT(pause);
+ TSAN_INTERCEPT(gettimeofday);
+ TSAN_INTERCEPT(getaddrinfo);
+
+ TSAN_INTERCEPT(fork);
+ TSAN_INTERCEPT(vfork);
+#if !SANITIZER_ANDROID
+ TSAN_INTERCEPT(dl_iterate_phdr);
+#endif
+ TSAN_MAYBE_INTERCEPT_ON_EXIT;
+ TSAN_INTERCEPT(__cxa_atexit);
+ TSAN_INTERCEPT(_exit);
+
+#ifdef NEED_TLS_GET_ADDR
+ TSAN_INTERCEPT(__tls_get_addr);
+#endif
+
+ TSAN_MAYBE_INTERCEPT__LWP_EXIT;
+ TSAN_MAYBE_INTERCEPT_THR_EXIT;
+
+#if !SANITIZER_MAC && !SANITIZER_ANDROID
+ // Need to set it up, because interceptors check that the function is
+ // resolved, but atexit is emitted directly into the module, so it can't be.
+ REAL(atexit) = (int(*)(void(*)()))unreachable;
+#endif
+
+ if (REAL(__cxa_atexit)(&finalize, 0, 0)) {
+ Printf("ThreadSanitizer: failed to setup atexit callback\n");
+ Die();
+ }
+
+#if !SANITIZER_MAC && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
+ if (pthread_key_create(&interceptor_ctx()->finalize_key, &thread_finalize)) {
+ Printf("ThreadSanitizer: failed to create thread key\n");
+ Die();
+ }
+#endif
+
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_init);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_signal);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_broadcast);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_wait);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_destroy);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_init);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_destroy);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_trylock);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_init);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_destroy);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_rdlock);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_tryrdlock);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_wrlock);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_trywrlock);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_unlock);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(once);
+ TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(sigsetmask);
+
+ FdInit();
+}
+
+} // namespace __tsan
+
+// Invisible barrier for tests.
+// There were several unsuccessful iterations of this functionality:
+// 1. Initially it was implemented in user code using
+// REAL(pthread_barrier_wait). But pthread_barrier_wait is not supported on
+// MacOS, and futexes are Linux-specific, so neither works everywhere.
+// 2. Then we switched to atomics+usleep(10). But usleep produced parasitic
+// "as-if synchronized via sleep" messages in reports, which failed some
+// output tests.
+// 3. Then we switched to atomics+sched_yield. But this produced tons of
+// tsan-visible events, which led to "failed to restore stack trace" failures.
+// Note that the no_sanitize_thread attribute does not turn off atomic
+// interception, so attaching it to a function defined in user code does not
+// help. That's why we now have what we have.
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_testonly_barrier_init(u64 *barrier, u32 count) {
+ if (count >= (1 << 8)) {
+ Printf("barrier_init: count is too large (%d)\n", count);
+ Die();
+ }
+ // The 8 LSBs hold the thread count; the remaining bits count the threads
+ // that have entered.
+ *barrier = count;
+}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_testonly_barrier_wait(u64 *barrier) {
+ unsigned old = __atomic_fetch_add(barrier, 1 << 8, __ATOMIC_RELAXED);
+ unsigned old_epoch = (old >> 8) / (old & 0xff);
+ for (;;) {
+ unsigned cur = __atomic_load_n(barrier, __ATOMIC_RELAXED);
+ unsigned cur_epoch = (cur >> 8) / (cur & 0xff);
+ if (cur_epoch != old_epoch)
+ return;
+ internal_sched_yield();
+ }
+}
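+
+// Usage sketch (test code): two threads can be forced to rendezvous with
+//   u64 barrier;
+//   __tsan_testonly_barrier_init(&barrier, 2);   // *barrier == 2
+//   // in each thread:
+//   __tsan_testonly_barrier_wait(&barrier);
+// Each waiter adds 1 << 8; the epoch (threads entered / count) flips from 0
+// to 1 only once both threads have arrived, releasing both spin loops.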
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_interceptors.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_interceptors.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_interceptors.h (revision 351984)
@@ -0,0 +1,76 @@
+#ifndef TSAN_INTERCEPTORS_H
+#define TSAN_INTERCEPTORS_H
+
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "tsan_rtl.h"
+
+namespace __tsan {
+
+class ScopedInterceptor {
+ public:
+ ScopedInterceptor(ThreadState *thr, const char *fname, uptr pc);
+ ~ScopedInterceptor();
+ void DisableIgnores();
+ void EnableIgnores();
+ private:
+ ThreadState *const thr_;
+ const uptr pc_;
+ bool in_ignored_lib_;
+ bool ignoring_;
+};
+
+LibIgnore *libignore();
+
+#if !SANITIZER_GO
+INLINE bool in_symbolizer() {
+ cur_thread_init();
+ return UNLIKELY(cur_thread()->in_symbolizer);
+}
+#endif
+
+} // namespace __tsan
+
+#define SCOPED_INTERCEPTOR_RAW(func, ...) \
+ cur_thread_init(); \
+ ThreadState *thr = cur_thread(); \
+ const uptr caller_pc = GET_CALLER_PC(); \
+ ScopedInterceptor si(thr, #func, caller_pc); \
+ const uptr pc = StackTrace::GetCurrentPc(); \
+ (void)pc; \
+/**/
+
+#define SCOPED_TSAN_INTERCEPTOR(func, ...) \
+ SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__); \
+ if (REAL(func) == 0) { \
+ Report("FATAL: ThreadSanitizer: failed to intercept %s\n", #func); \
+ Die(); \
+ } \
+ if (!thr->is_inited || thr->ignore_interceptors || thr->in_ignored_lib) \
+ return REAL(func)(__VA_ARGS__); \
+/**/
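+
+// Typical use (a sketch with a hypothetical function foo):
+//   TSAN_INTERCEPTOR(int, foo, int x) {
+//     // Defines thr and pc, and returns REAL(foo)(x) directly when the
+//     // thread is not initialized or interceptors are ignored.
+//     SCOPED_TSAN_INTERCEPTOR(foo, x);
+//     MemoryAccessRange(thr, pc, ...);  // report events using thr/pc
+//     return REAL(foo)(x);
+//   }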
+
+#define SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START() \
+ si.DisableIgnores();
+
+#define SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END() \
+ si.EnableIgnores();
+
+#define TSAN_INTERCEPTOR(ret, func, ...) INTERCEPTOR(ret, func, __VA_ARGS__)
+
+#if SANITIZER_NETBSD
+# define TSAN_INTERCEPTOR_NETBSD_ALIAS(ret, func, ...) \
+ TSAN_INTERCEPTOR(ret, __libc_##func, __VA_ARGS__) \
+ ALIAS(WRAPPER_NAME(pthread_##func));
+# define TSAN_INTERCEPTOR_NETBSD_ALIAS_THR(ret, func, ...) \
+ TSAN_INTERCEPTOR(ret, __libc_thr_##func, __VA_ARGS__) \
+ ALIAS(WRAPPER_NAME(pthread_##func));
+# define TSAN_INTERCEPTOR_NETBSD_ALIAS_THR2(ret, func, func2, ...) \
+ TSAN_INTERCEPTOR(ret, __libc_thr_##func, __VA_ARGS__) \
+ ALIAS(WRAPPER_NAME(pthread_##func2));
+#else
+# define TSAN_INTERCEPTOR_NETBSD_ALIAS(ret, func, ...)
+# define TSAN_INTERCEPTOR_NETBSD_ALIAS_THR(ret, func, ...)
+# define TSAN_INTERCEPTOR_NETBSD_ALIAS_THR2(ret, func, func2, ...)
+#endif
+
+#endif // TSAN_INTERCEPTORS_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_interceptors.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_interceptors_mac.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_interceptors_mac.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_interceptors_mac.cc (revision 351984)
@@ -0,0 +1,479 @@
+//===-- tsan_interceptors_mac.cc ------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Mac-specific interceptors.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#if SANITIZER_MAC
+
+#include "interception/interception.h"
+#include "tsan_interceptors.h"
+#include "tsan_interface.h"
+#include "tsan_interface_ann.h"
+#include "sanitizer_common/sanitizer_addrhashmap.h"
+
+#include <errno.h>
+#include <libkern/OSAtomic.h>
+#include <objc/objc-sync.h>
+#include <sys/ucontext.h>
+
+#if defined(__has_include) && __has_include(<xpc/xpc.h>)
+#include <xpc/xpc.h>
+#endif // #if defined(__has_include) && __has_include(<xpc/xpc.h>)
+
+typedef long long_t; // NOLINT
+
+extern "C" {
+int getcontext(ucontext_t *ucp) __attribute__((returns_twice));
+int setcontext(const ucontext_t *ucp);
+}
+
+namespace __tsan {
+
+// The non-barrier versions of OSAtomic* functions are semantically
+// mo_relaxed, but the two variants (e.g. OSAtomicAdd32 and
+// OSAtomicAdd32Barrier) are actually aliases of each other: they are the
+// same function, so we cannot have different interceptors for them. Thus, we
+// have to stay conservative and treat the non-barrier versions as mo_acq_rel.
+static const morder kMacOrderBarrier = mo_acq_rel;
+static const morder kMacOrderNonBarrier = mo_acq_rel;
+
+#define OSATOMIC_INTERCEPTOR(return_t, t, tsan_t, f, tsan_atomic_f, mo) \
+ TSAN_INTERCEPTOR(return_t, f, t x, volatile t *ptr) { \
+ SCOPED_TSAN_INTERCEPTOR(f, x, ptr); \
+ return tsan_atomic_f((volatile tsan_t *)ptr, x, mo); \
+ }
+
+#define OSATOMIC_INTERCEPTOR_PLUS_X(return_t, t, tsan_t, f, tsan_atomic_f, mo) \
+ TSAN_INTERCEPTOR(return_t, f, t x, volatile t *ptr) { \
+ SCOPED_TSAN_INTERCEPTOR(f, x, ptr); \
+ return tsan_atomic_f((volatile tsan_t *)ptr, x, mo) + x; \
+ }
+
+#define OSATOMIC_INTERCEPTOR_PLUS_1(return_t, t, tsan_t, f, tsan_atomic_f, mo) \
+ TSAN_INTERCEPTOR(return_t, f, volatile t *ptr) { \
+ SCOPED_TSAN_INTERCEPTOR(f, ptr); \
+ return tsan_atomic_f((volatile tsan_t *)ptr, 1, mo) + 1; \
+ }
+
+#define OSATOMIC_INTERCEPTOR_MINUS_1(return_t, t, tsan_t, f, tsan_atomic_f, \
+ mo) \
+ TSAN_INTERCEPTOR(return_t, f, volatile t *ptr) { \
+ SCOPED_TSAN_INTERCEPTOR(f, ptr); \
+ return tsan_atomic_f((volatile tsan_t *)ptr, 1, mo) - 1; \
+ }
+
+#define OSATOMIC_INTERCEPTORS_ARITHMETIC(f, tsan_atomic_f, m) \
+ m(int32_t, int32_t, a32, f##32, __tsan_atomic32_##tsan_atomic_f, \
+ kMacOrderNonBarrier) \
+ m(int32_t, int32_t, a32, f##32##Barrier, __tsan_atomic32_##tsan_atomic_f, \
+ kMacOrderBarrier) \
+ m(int64_t, int64_t, a64, f##64, __tsan_atomic64_##tsan_atomic_f, \
+ kMacOrderNonBarrier) \
+ m(int64_t, int64_t, a64, f##64##Barrier, __tsan_atomic64_##tsan_atomic_f, \
+ kMacOrderBarrier)
+
+#define OSATOMIC_INTERCEPTORS_BITWISE(f, tsan_atomic_f, m, m_orig) \
+ m(int32_t, uint32_t, a32, f##32, __tsan_atomic32_##tsan_atomic_f, \
+ kMacOrderNonBarrier) \
+ m(int32_t, uint32_t, a32, f##32##Barrier, __tsan_atomic32_##tsan_atomic_f, \
+ kMacOrderBarrier) \
+ m_orig(int32_t, uint32_t, a32, f##32##Orig, __tsan_atomic32_##tsan_atomic_f, \
+ kMacOrderNonBarrier) \
+ m_orig(int32_t, uint32_t, a32, f##32##OrigBarrier, \
+ __tsan_atomic32_##tsan_atomic_f, kMacOrderBarrier)
+
+OSATOMIC_INTERCEPTORS_ARITHMETIC(OSAtomicAdd, fetch_add,
+ OSATOMIC_INTERCEPTOR_PLUS_X)
+OSATOMIC_INTERCEPTORS_ARITHMETIC(OSAtomicIncrement, fetch_add,
+ OSATOMIC_INTERCEPTOR_PLUS_1)
+OSATOMIC_INTERCEPTORS_ARITHMETIC(OSAtomicDecrement, fetch_sub,
+ OSATOMIC_INTERCEPTOR_MINUS_1)
+OSATOMIC_INTERCEPTORS_BITWISE(OSAtomicOr, fetch_or, OSATOMIC_INTERCEPTOR_PLUS_X,
+ OSATOMIC_INTERCEPTOR)
+OSATOMIC_INTERCEPTORS_BITWISE(OSAtomicAnd, fetch_and,
+ OSATOMIC_INTERCEPTOR_PLUS_X, OSATOMIC_INTERCEPTOR)
+OSATOMIC_INTERCEPTORS_BITWISE(OSAtomicXor, fetch_xor,
+ OSATOMIC_INTERCEPTOR_PLUS_X, OSATOMIC_INTERCEPTOR)
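+
+// Example expansion (sketch): the OSAtomicAdd32 interceptor generated above
+// behaves like
+//   int32_t OSAtomicAdd32(int32_t x, volatile int32_t *ptr) {
+//     return __tsan_atomic32_fetch_add((volatile a32 *)ptr, x,
+//                                      kMacOrderNonBarrier) + x;
+//   }
+// fetch_add returns the old value, so "+ x" yields the new value that
+// OSAtomicAdd32 is documented to return.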
+
+#define OSATOMIC_INTERCEPTORS_CAS(f, tsan_atomic_f, tsan_t, t) \
+ TSAN_INTERCEPTOR(bool, f, t old_value, t new_value, t volatile *ptr) { \
+ SCOPED_TSAN_INTERCEPTOR(f, old_value, new_value, ptr); \
+ return tsan_atomic_f##_compare_exchange_strong( \
+ (volatile tsan_t *)ptr, (tsan_t *)&old_value, (tsan_t)new_value, \
+ kMacOrderNonBarrier, kMacOrderNonBarrier); \
+ } \
+ \
+ TSAN_INTERCEPTOR(bool, f##Barrier, t old_value, t new_value, \
+ t volatile *ptr) { \
+ SCOPED_TSAN_INTERCEPTOR(f##Barrier, old_value, new_value, ptr); \
+ return tsan_atomic_f##_compare_exchange_strong( \
+ (volatile tsan_t *)ptr, (tsan_t *)&old_value, (tsan_t)new_value, \
+ kMacOrderBarrier, kMacOrderNonBarrier); \
+ }
+
+OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwapInt, __tsan_atomic32, a32, int)
+OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwapLong, __tsan_atomic64, a64,
+ long_t)
+OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwapPtr, __tsan_atomic64, a64,
+ void *)
+OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwap32, __tsan_atomic32, a32,
+ int32_t)
+OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwap64, __tsan_atomic64, a64,
+ int64_t)
+
+#define OSATOMIC_INTERCEPTOR_BITOP(f, op, clear, mo) \
+ TSAN_INTERCEPTOR(bool, f, uint32_t n, volatile void *ptr) { \
+ SCOPED_TSAN_INTERCEPTOR(f, n, ptr); \
+ volatile char *byte_ptr = ((volatile char *)ptr) + (n >> 3); \
+ char bit = 0x80u >> (n & 7); \
+ char mask = clear ? ~bit : bit; \
+ char orig_byte = op((volatile a8 *)byte_ptr, mask, mo); \
+ return orig_byte & bit; \
+ }
+
+#define OSATOMIC_INTERCEPTORS_BITOP(f, op, clear) \
+ OSATOMIC_INTERCEPTOR_BITOP(f, op, clear, kMacOrderNonBarrier) \
+ OSATOMIC_INTERCEPTOR_BITOP(f##Barrier, op, clear, kMacOrderBarrier)
+
+OSATOMIC_INTERCEPTORS_BITOP(OSAtomicTestAndSet, __tsan_atomic8_fetch_or, false)
+OSATOMIC_INTERCEPTORS_BITOP(OSAtomicTestAndClear, __tsan_atomic8_fetch_and,
+ true)
+
+TSAN_INTERCEPTOR(void, OSAtomicEnqueue, OSQueueHead *list, void *item,
+ size_t offset) {
+ SCOPED_TSAN_INTERCEPTOR(OSAtomicEnqueue, list, item, offset);
+ __tsan_release(item);
+ REAL(OSAtomicEnqueue)(list, item, offset);
+}
+
+TSAN_INTERCEPTOR(void *, OSAtomicDequeue, OSQueueHead *list, size_t offset) {
+ SCOPED_TSAN_INTERCEPTOR(OSAtomicDequeue, list, offset);
+ void *item = REAL(OSAtomicDequeue)(list, offset);
+ if (item) __tsan_acquire(item);
+ return item;
+}
+
+// OSAtomicFifoEnqueue and OSAtomicFifoDequeue are only on OS X.
+#if !SANITIZER_IOS
+
+TSAN_INTERCEPTOR(void, OSAtomicFifoEnqueue, OSFifoQueueHead *list, void *item,
+ size_t offset) {
+ SCOPED_TSAN_INTERCEPTOR(OSAtomicFifoEnqueue, list, item, offset);
+ __tsan_release(item);
+ REAL(OSAtomicFifoEnqueue)(list, item, offset);
+}
+
+TSAN_INTERCEPTOR(void *, OSAtomicFifoDequeue, OSFifoQueueHead *list,
+ size_t offset) {
+ SCOPED_TSAN_INTERCEPTOR(OSAtomicFifoDequeue, list, offset);
+ void *item = REAL(OSAtomicFifoDequeue)(list, offset);
+ if (item) __tsan_acquire(item);
+ return item;
+}
+
+#endif
+
+TSAN_INTERCEPTOR(void, OSSpinLockLock, volatile OSSpinLock *lock) {
+ CHECK(!cur_thread()->is_dead);
+ if (!cur_thread()->is_inited) {
+ return REAL(OSSpinLockLock)(lock);
+ }
+ SCOPED_TSAN_INTERCEPTOR(OSSpinLockLock, lock);
+ REAL(OSSpinLockLock)(lock);
+ Acquire(thr, pc, (uptr)lock);
+}
+
+TSAN_INTERCEPTOR(bool, OSSpinLockTry, volatile OSSpinLock *lock) {
+ CHECK(!cur_thread()->is_dead);
+ if (!cur_thread()->is_inited) {
+ return REAL(OSSpinLockTry)(lock);
+ }
+ SCOPED_TSAN_INTERCEPTOR(OSSpinLockTry, lock);
+ bool result = REAL(OSSpinLockTry)(lock);
+ if (result)
+ Acquire(thr, pc, (uptr)lock);
+ return result;
+}
+
+TSAN_INTERCEPTOR(void, OSSpinLockUnlock, volatile OSSpinLock *lock) {
+ CHECK(!cur_thread()->is_dead);
+ if (!cur_thread()->is_inited) {
+ return REAL(OSSpinLockUnlock)(lock);
+ }
+ SCOPED_TSAN_INTERCEPTOR(OSSpinLockUnlock, lock);
+ Release(thr, pc, (uptr)lock);
+ REAL(OSSpinLockUnlock)(lock);
+}
+
+TSAN_INTERCEPTOR(void, os_lock_lock, void *lock) {
+ CHECK(!cur_thread()->is_dead);
+ if (!cur_thread()->is_inited) {
+ return REAL(os_lock_lock)(lock);
+ }
+ SCOPED_TSAN_INTERCEPTOR(os_lock_lock, lock);
+ REAL(os_lock_lock)(lock);
+ Acquire(thr, pc, (uptr)lock);
+}
+
+TSAN_INTERCEPTOR(bool, os_lock_trylock, void *lock) {
+ CHECK(!cur_thread()->is_dead);
+ if (!cur_thread()->is_inited) {
+ return REAL(os_lock_trylock)(lock);
+ }
+ SCOPED_TSAN_INTERCEPTOR(os_lock_trylock, lock);
+ bool result = REAL(os_lock_trylock)(lock);
+ if (result)
+ Acquire(thr, pc, (uptr)lock);
+ return result;
+}
+
+TSAN_INTERCEPTOR(void, os_lock_unlock, void *lock) {
+ CHECK(!cur_thread()->is_dead);
+ if (!cur_thread()->is_inited) {
+ return REAL(os_lock_unlock)(lock);
+ }
+ SCOPED_TSAN_INTERCEPTOR(os_lock_unlock, lock);
+ Release(thr, pc, (uptr)lock);
+ REAL(os_lock_unlock)(lock);
+}
+
+#if defined(__has_include) && __has_include(<xpc/xpc.h>)
+
+TSAN_INTERCEPTOR(void, xpc_connection_set_event_handler,
+ xpc_connection_t connection, xpc_handler_t handler) {
+ SCOPED_TSAN_INTERCEPTOR(xpc_connection_set_event_handler, connection,
+ handler);
+ Release(thr, pc, (uptr)connection);
+ xpc_handler_t new_handler = ^(xpc_object_t object) {
+ {
+ SCOPED_INTERCEPTOR_RAW(xpc_connection_set_event_handler);
+ Acquire(thr, pc, (uptr)connection);
+ }
+ handler(object);
+ };
+ REAL(xpc_connection_set_event_handler)(connection, new_handler);
+}
+
+TSAN_INTERCEPTOR(void, xpc_connection_send_barrier, xpc_connection_t connection,
+ dispatch_block_t barrier) {
+ SCOPED_TSAN_INTERCEPTOR(xpc_connection_send_barrier, connection, barrier);
+ Release(thr, pc, (uptr)connection);
+ dispatch_block_t new_barrier = ^() {
+ {
+ SCOPED_INTERCEPTOR_RAW(xpc_connection_send_barrier);
+ Acquire(thr, pc, (uptr)connection);
+ }
+ barrier();
+ };
+ REAL(xpc_connection_send_barrier)(connection, new_barrier);
+}
+
+TSAN_INTERCEPTOR(void, xpc_connection_send_message_with_reply,
+ xpc_connection_t connection, xpc_object_t message,
+ dispatch_queue_t replyq, xpc_handler_t handler) {
+ SCOPED_TSAN_INTERCEPTOR(xpc_connection_send_message_with_reply, connection,
+ message, replyq, handler);
+ Release(thr, pc, (uptr)connection);
+ xpc_handler_t new_handler = ^(xpc_object_t object) {
+ {
+ SCOPED_INTERCEPTOR_RAW(xpc_connection_send_message_with_reply);
+ Acquire(thr, pc, (uptr)connection);
+ }
+ handler(object);
+ };
+ REAL(xpc_connection_send_message_with_reply)
+ (connection, message, replyq, new_handler);
+}
+
+TSAN_INTERCEPTOR(void, xpc_connection_cancel, xpc_connection_t connection) {
+ SCOPED_TSAN_INTERCEPTOR(xpc_connection_cancel, connection);
+ Release(thr, pc, (uptr)connection);
+ REAL(xpc_connection_cancel)(connection);
+}
+
+#endif // #if defined(__has_include) && __has_include(<xpc/xpc.h>)
+
+// Determines whether the Obj-C object pointer is a tagged pointer. Tagged
+// pointers encode the object data directly in their pointer bits and do not
+// have an associated memory allocation. The Obj-C runtime uses tagged pointers
+// to transparently optimize small objects.
+static bool IsTaggedObjCPointer(id obj) {
+ const uptr kPossibleTaggedBits = 0x8000000000000001ull;
+ return ((uptr)obj & kPossibleTaggedBits) != 0;
+}
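+
+// For example, on x86_64 the Obj-C runtime has used the least significant
+// pointer bit to mark tagged pointers and on arm64 the most significant bit;
+// kPossibleTaggedBits tests both, so a set bit means the pointer may be
+// tagged and must not be used as a memory address.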
+
+// Returns an address which can be used to inform TSan about synchronization
+// points (MutexLock/Unlock). The TSan infrastructure expects this to be a valid
+// address in the process space. We do a small allocation here to obtain a
+// stable address (the array backing the hash map can change). The memory is
+// never freed (leaked), and allocation and locking are slow, but this code only
+// runs for @synchronized with tagged pointers, which is very rare.
+static uptr GetOrCreateSyncAddress(uptr addr, ThreadState *thr, uptr pc) {
+ typedef AddrHashMap<uptr, 5> Map;
+ static Map Addresses;
+ Map::Handle h(&Addresses, addr);
+ if (h.created()) {
+ ThreadIgnoreBegin(thr, pc);
+ *h = (uptr) user_alloc(thr, pc, /*size=*/1);
+ ThreadIgnoreEnd(thr, pc);
+ }
+ return *h;
+}
+
+// Returns an address on which we can synchronize given an Obj-C object pointer.
+// For normal object pointers, this is just the address of the object in memory.
+// Tagged pointers are not backed by an actual memory allocation, so we need to
+// synthesize a valid address.
+static uptr SyncAddressForObjCObject(id obj, ThreadState *thr, uptr pc) {
+ if (IsTaggedObjCPointer(obj))
+ return GetOrCreateSyncAddress((uptr)obj, thr, pc);
+ return (uptr)obj;
+}
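+
+// For example (sketch), user code such as
+//   @synchronized (@1) { ... }
+// can receive a tagged NSNumber pointer; objc_sync_enter/exit below then
+// synchronize on the stable side allocation instead of the raw tagged value.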
+
+TSAN_INTERCEPTOR(int, objc_sync_enter, id obj) {
+ SCOPED_TSAN_INTERCEPTOR(objc_sync_enter, obj);
+ if (!obj) return REAL(objc_sync_enter)(obj);
+ uptr addr = SyncAddressForObjCObject(obj, thr, pc);
+ MutexPreLock(thr, pc, addr, MutexFlagWriteReentrant);
+ int result = REAL(objc_sync_enter)(obj);
+ CHECK_EQ(result, OBJC_SYNC_SUCCESS);
+ MutexPostLock(thr, pc, addr, MutexFlagWriteReentrant);
+ return result;
+}
+
+TSAN_INTERCEPTOR(int, objc_sync_exit, id obj) {
+ SCOPED_TSAN_INTERCEPTOR(objc_sync_exit, obj);
+ if (!obj) return REAL(objc_sync_exit)(obj);
+ uptr addr = SyncAddressForObjCObject(obj, thr, pc);
+ MutexUnlock(thr, pc, addr);
+ int result = REAL(objc_sync_exit)(obj);
+ if (result != OBJC_SYNC_SUCCESS) MutexInvalidAccess(thr, pc, addr);
+ return result;
+}
+
+TSAN_INTERCEPTOR(int, swapcontext, ucontext_t *oucp, const ucontext_t *ucp) {
+ {
+ SCOPED_INTERCEPTOR_RAW(swapcontext, oucp, ucp);
+ }
+ // Because of swapcontext() semantics we have no option but to copy its
+ // implementation here.
+ if (!oucp || !ucp) {
+ errno = EINVAL;
+ return -1;
+ }
+ ThreadState *thr = cur_thread();
+ const int UCF_SWAPPED = 0x80000000;
+ oucp->uc_onstack &= ~UCF_SWAPPED;
+ thr->ignore_interceptors++;
+ int ret = getcontext(oucp);
+ if (!(oucp->uc_onstack & UCF_SWAPPED)) {
+ thr->ignore_interceptors--;
+ if (!ret) {
+ oucp->uc_onstack |= UCF_SWAPPED;
+ ret = setcontext(ucp);
+ }
+ }
+ return ret;
+}
+
+// On macOS, libc++ is always linked dynamically, so intercepting works the
+// usual way.
+#define STDCXX_INTERCEPTOR TSAN_INTERCEPTOR
+
+namespace {
+struct fake_shared_weak_count {
+ volatile a64 shared_owners;
+ volatile a64 shared_weak_owners;
+ virtual void _unused_0x0() = 0;
+ virtual void _unused_0x8() = 0;
+ virtual void on_zero_shared() = 0;
+ virtual void _unused_0x18() = 0;
+ virtual void on_zero_shared_weak() = 0;
+};
+} // namespace
+
+// The following code adds libc++ interceptors for:
+// void __shared_weak_count::__release_shared() _NOEXCEPT;
+// bool __shared_count::__release_shared() _NOEXCEPT;
+// Shared and weak pointers in C++ maintain reference counts via atomics in
+// libc++.dylib, which are TSan-invisible, and this leads to false positives in
+// destructor code. These interceptors re-implement the whole functions so that
+// the mo_acq_rel semantics of the atomic decrement are visible.
+//
+// Unfortunately, the interceptors cannot simply Acquire/Release some sync
+// object and call the original function, because it would have a race between
+// the sync and the destruction of the object. Calling both under a lock will
+// not work because the destructor can invoke this interceptor again (and even
+// in a different thread, so recursive locks don't help).
+
+STDCXX_INTERCEPTOR(void, _ZNSt3__119__shared_weak_count16__release_sharedEv,
+ fake_shared_weak_count *o) {
+ if (!flags()->shared_ptr_interceptor)
+ return REAL(_ZNSt3__119__shared_weak_count16__release_sharedEv)(o);
+
+ SCOPED_TSAN_INTERCEPTOR(_ZNSt3__119__shared_weak_count16__release_sharedEv,
+ o);
+ if (__tsan_atomic64_fetch_add(&o->shared_owners, -1, mo_release) == 0) {
+ Acquire(thr, pc, (uptr)&o->shared_owners);
+ o->on_zero_shared();
+ if (__tsan_atomic64_fetch_add(&o->shared_weak_owners, -1, mo_release) ==
+ 0) {
+ Acquire(thr, pc, (uptr)&o->shared_weak_owners);
+ o->on_zero_shared_weak();
+ }
+ }
+}
+
+STDCXX_INTERCEPTOR(bool, _ZNSt3__114__shared_count16__release_sharedEv,
+ fake_shared_weak_count *o) {
+ if (!flags()->shared_ptr_interceptor)
+ return REAL(_ZNSt3__114__shared_count16__release_sharedEv)(o);
+
+ SCOPED_TSAN_INTERCEPTOR(_ZNSt3__114__shared_count16__release_sharedEv, o);
+ if (__tsan_atomic64_fetch_add(&o->shared_owners, -1, mo_release) == 0) {
+ Acquire(thr, pc, (uptr)&o->shared_owners);
+ o->on_zero_shared();
+ return true;
+ }
+ return false;
+}
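+
+// The shape mirrors the classic atomic refcount release (a sketch in
+// standard C++; libc++ stores the owner count biased by one, so an old
+// value of 0 means "last owner"):
+//   if (refs.fetch_sub(1, std::memory_order_release) == 0) {
+//     std::atomic_thread_fence(std::memory_order_acquire);  // Acquire() here
+//     destroy();                                            // on_zero_shared()
+//   }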
+
+namespace {
+struct call_once_callback_args {
+ void (*orig_func)(void *arg);
+ void *orig_arg;
+ void *flag;
+};
+
+void call_once_callback_wrapper(void *arg) {
+ call_once_callback_args *new_args = (call_once_callback_args *)arg;
+ new_args->orig_func(new_args->orig_arg);
+ __tsan_release(new_args->flag);
+}
+} // namespace
+
+// This adds a libc++ interceptor for:
+// void __call_once(volatile unsigned long&, void*, void(*)(void*));
+// C++11 call_once is implemented via an internal function __call_once which is
+// inside libc++.dylib, and the atomic release store inside it is thus
+// TSan-invisible. To avoid false positives, this interceptor wraps the callback
+// function and performs an explicit Release after the user code has run.
+STDCXX_INTERCEPTOR(void, _ZNSt3__111__call_onceERVmPvPFvS2_E, void *flag,
+ void *arg, void (*func)(void *arg)) {
+ call_once_callback_args new_args = {func, arg, flag};
+ REAL(_ZNSt3__111__call_onceERVmPvPFvS2_E)(flag, &new_args,
+ call_once_callback_wrapper);
+}
+
+} // namespace __tsan
+
+#endif // SANITIZER_MAC
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_interceptors_mac.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_interface.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_interface.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_interface.cc (revision 351984)
@@ -0,0 +1,160 @@
+//===-- tsan_interface.cc -------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "tsan_interface.h"
+#include "tsan_interface_ann.h"
+#include "tsan_rtl.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+
+#define CALLERPC ((uptr)__builtin_return_address(0))
+
+using namespace __tsan; // NOLINT
+
+typedef u16 uint16_t;
+typedef u32 uint32_t;
+typedef u64 uint64_t;
+
+void __tsan_init() {
+ cur_thread_init();
+ Initialize(cur_thread());
+}
+
+void __tsan_flush_memory() {
+ FlushShadowMemory();
+}
+
+void __tsan_read16(void *addr) {
+ MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog8);
+ MemoryRead(cur_thread(), CALLERPC, (uptr)addr + 8, kSizeLog8);
+}
+
+void __tsan_write16(void *addr) {
+ MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog8);
+ MemoryWrite(cur_thread(), CALLERPC, (uptr)addr + 8, kSizeLog8);
+}
+
+void __tsan_read16_pc(void *addr, void *pc) {
+ MemoryRead(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog8);
+ MemoryRead(cur_thread(), (uptr)pc, (uptr)addr + 8, kSizeLog8);
+}
+
+void __tsan_write16_pc(void *addr, void *pc) {
+ MemoryWrite(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog8);
+ MemoryWrite(cur_thread(), (uptr)pc, (uptr)addr + 8, kSizeLog8);
+}
+
+// __tsan_unaligned_read/write calls are emitted by the compiler.
+
+void __tsan_unaligned_read2(const void *addr) {
+ UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, false, false);
+}
+
+void __tsan_unaligned_read4(const void *addr) {
+ UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 4, false, false);
+}
+
+void __tsan_unaligned_read8(const void *addr) {
+ UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 8, false, false);
+}
+
+void __tsan_unaligned_read16(const void *addr) {
+ UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 16, false, false);
+}
+
+void __tsan_unaligned_write2(void *addr) {
+ UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 2, true, false);
+}
+
+void __tsan_unaligned_write4(void *addr) {
+ UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 4, true, false);
+}
+
+void __tsan_unaligned_write8(void *addr) {
+ UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 8, true, false);
+}
+
+void __tsan_unaligned_write16(void *addr) {
+ UnalignedMemoryAccess(cur_thread(), CALLERPC, (uptr)addr, 16, true, false);
+}
+
+// __sanitizer_unaligned_load/store are for user instrumentation.
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+u16 __sanitizer_unaligned_load16(const uu16 *addr) {
+ __tsan_unaligned_read2(addr);
+ return *addr;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+u32 __sanitizer_unaligned_load32(const uu32 *addr) {
+ __tsan_unaligned_read4(addr);
+ return *addr;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+u64 __sanitizer_unaligned_load64(const uu64 *addr) {
+ __tsan_unaligned_read8(addr);
+ return *addr;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_unaligned_store16(uu16 *addr, u16 v) {
+ __tsan_unaligned_write2(addr);
+ *addr = v;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_unaligned_store32(uu32 *addr, u32 v) {
+ __tsan_unaligned_write4(addr);
+ *addr = v;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_unaligned_store64(uu64 *addr, u64 v) {
+ __tsan_unaligned_write8(addr);
+ *addr = v;
+}
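+
+// A minimal sketch of manual instrumentation with the helpers above
+// (uu16/uu32/uu64 are the unaligned-access types from
+// sanitizer_internal_defs.h); a hand-written byte-stream parser can report
+// its unaligned reads to TSan explicitly:
+//
+//   u32 read_le32(const char *buf) {  // illustrative helper name
+//     return __sanitizer_unaligned_load32((const uu32 *)buf);
+//   }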
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__tsan_get_current_fiber() {
+ return cur_thread();
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__tsan_create_fiber(unsigned flags) {
+ return FiberCreate(cur_thread(), CALLERPC, flags);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_destroy_fiber(void *fiber) {
+ FiberDestroy(cur_thread(), CALLERPC, static_cast<ThreadState *>(fiber));
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_switch_to_fiber(void *fiber, unsigned flags) {
+ FiberSwitch(cur_thread(), CALLERPC, static_cast<ThreadState *>(fiber), flags);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_set_fiber_name(void *fiber, const char *name) {
+ ThreadSetName(static_cast<ThreadState *>(fiber), name);
+}
+} // extern "C"
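+
+// A minimal sketch of how a user-space scheduler is expected to drive the
+// fiber API above; swap_to_worker() stands in for the application's own
+// context-switching primitive, and __tsan_switch_to_fiber is called just
+// before the actual switch:
+//
+//   void *fiber = __tsan_create_fiber(0);
+//   __tsan_set_fiber_name(fiber, "worker");
+//   __tsan_switch_to_fiber(fiber, 0);
+//   swap_to_worker();              // application-supplied context switch
+//   ...
+//   __tsan_destroy_fiber(fiber);   // once the fiber has finished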
+
+void __tsan_acquire(void *addr) {
+ Acquire(cur_thread(), CALLERPC, (uptr)addr);
+}
+
+void __tsan_release(void *addr) {
+ Release(cur_thread(), CALLERPC, (uptr)addr);
+}
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_interface.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_interface.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_interface.h (revision 351984)
@@ -0,0 +1,417 @@
+//===-- tsan_interface.h ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// The functions declared in this header will be inserted by the instrumentation
+// module.
+// This header can be included by the instrumented program or by TSan tests.
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_INTERFACE_H
+#define TSAN_INTERFACE_H
+
+#include <sanitizer_common/sanitizer_internal_defs.h>
+using __sanitizer::uptr;
+using __sanitizer::tid_t;
+
+// This header should NOT include any other headers.
+// All functions in this header are extern "C" and start with __tsan_.
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if !SANITIZER_GO
+
+// This function should be called at the very beginning of the process,
+// before any instrumented code is executed and before any call to malloc.
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_init();
+
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_flush_memory();
+
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read1(void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read2(void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read4(void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read8(void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read16(void *addr);
+
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write1(void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write2(void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write4(void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write8(void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write16(void *addr);
+
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_read2(const void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_read4(const void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_read8(const void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_read16(const void *addr);
+
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_write2(void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_write4(void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_write8(void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_unaligned_write16(void *addr);
+
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read1_pc(void *addr, void *pc);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read2_pc(void *addr, void *pc);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read4_pc(void *addr, void *pc);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read8_pc(void *addr, void *pc);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_read16_pc(void *addr, void *pc);
+
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write1_pc(void *addr, void *pc);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write2_pc(void *addr, void *pc);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write4_pc(void *addr, void *pc);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write8_pc(void *addr, void *pc);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_write16_pc(void *addr, void *pc);
+
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_vptr_read(void **vptr_p);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_vptr_update(void **vptr_p, void *new_val);
+
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_func_entry(void *call_pc);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_func_exit();
+
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_ignore_thread_begin();
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_ignore_thread_end();
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__tsan_external_register_tag(const char *object_type);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_external_register_header(void *tag, const char *header);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_external_assign_tag(void *addr, void *tag);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_external_read(void *addr, void *caller_pc, void *tag);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_external_write(void *addr, void *caller_pc, void *tag);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_read_range(void *addr, unsigned long size); // NOLINT
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_write_range(void *addr, unsigned long size); // NOLINT
+
+// A user may provide a function that will be called right when TSan detects
+// an error. The argument 'report' is an opaque pointer that can be used to
+// gather additional information using other TSan report API functions.
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_on_report(void *report);
+
+// If TSan is currently reporting a detected issue on the current thread,
+// returns an opaque pointer to the current report. Otherwise returns NULL.
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__tsan_get_current_report();
+
+// Returns a report's description (issue type), number of duplicate issues
+// found, counts of array data (stack traces, memory operations, locations,
+// mutexes, threads, unique thread IDs) and a stack trace of a sleep() call (if
+// one was involved in the issue).
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_data(void *report, const char **description, int *count,
+ int *stack_count, int *mop_count, int *loc_count,
+ int *mutex_count, int *thread_count,
+ int *unique_tid_count, void **sleep_trace,
+ uptr trace_size);
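+
+// A minimal sketch of consuming this API from a user-provided
+// __tsan_on_report hook (buffer sizes are illustrative):
+//
+//   extern "C" void __tsan_on_report(void *report) {
+//     const char *desc;
+//     int count, stacks, mops, locs, mutexes, threads, uniq_tids;
+//     void *sleep_trace[16];
+//     __tsan_get_report_data(report, &desc, &count, &stacks, &mops, &locs,
+//                            &mutexes, &threads, &uniq_tids, sleep_trace, 16);
+//     fprintf(stderr, "tsan report: %s\n", desc);
+//   }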
+
+/// Retrieves the "tag" from a report (for external-race report types).
+/// External races can be associated with a tag which gives them more meaning.
+/// For example, tag value '1' means "Swift access race". Tag value '0'
+/// indicates a plain external race.
+///
+/// \param report opaque pointer to the current report (obtained as argument in
+/// __tsan_on_report, or from __tsan_get_current_report)
+/// \param [out] tag points to storage that will be filled with the tag value
+///
+/// \returns non-zero value on success, zero on failure
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_tag(void *report, uptr *tag);
+
+// Returns information about stack traces included in the report.
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_stack(void *report, uptr idx, void **trace,
+ uptr trace_size);
+
+// Returns information about memory operations included in the report.
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_mop(void *report, uptr idx, int *tid, void **addr,
+ int *size, int *write, int *atomic, void **trace,
+ uptr trace_size);
+
+// Returns information about locations included in the report.
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_loc(void *report, uptr idx, const char **type,
+ void **addr, uptr *start, uptr *size, int *tid,
+ int *fd, int *suppressable, void **trace,
+ uptr trace_size);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_loc_object_type(void *report, uptr idx,
+ const char **object_type);
+
+// Returns information about mutexes included in the report.
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_mutex(void *report, uptr idx, uptr *mutex_id, void **addr,
+ int *destroyed, void **trace, uptr trace_size);
+
+// Returns information about threads included in the report.
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_thread(void *report, uptr idx, int *tid, tid_t *os_id,
+ int *running, const char **name, int *parent_tid,
+ void **trace, uptr trace_size);
+
+// Returns information about unique thread IDs included in the report.
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_report_unique_tid(void *report, uptr idx, int *tid);
+
+// Returns the type of the pointer (heap, stack, global, ...) and if possible
+// also the starting address (e.g. of a heap allocation) and size.
+SANITIZER_INTERFACE_ATTRIBUTE
+const char *__tsan_locate_address(uptr addr, char *name, uptr name_size,
+ uptr *region_address, uptr *region_size);
+
+// Returns the allocation stack for a heap pointer.
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_get_alloc_stack(uptr addr, uptr *trace, uptr size, int *thread_id,
+ tid_t *os_id);
+
+#endif // SANITIZER_GO
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+namespace __tsan {
+
+// These should match declarations from public tsan_interface_atomic.h header.
+typedef unsigned char a8;
+typedef unsigned short a16; // NOLINT
+typedef unsigned int a32;
+typedef unsigned long long a64; // NOLINT
+#if !SANITIZER_GO && (defined(__SIZEOF_INT128__) \
+ || (__clang_major__ * 100 + __clang_minor__ >= 302)) && !defined(__mips64)
+__extension__ typedef __int128 a128;
+# define __TSAN_HAS_INT128 1
+#else
+# define __TSAN_HAS_INT128 0
+#endif
+
+// Part of ABI, do not change.
+// https://github.com/llvm/llvm-project/blob/master/libcxx/include/atomic
+typedef enum {
+ mo_relaxed,
+ mo_consume,
+ mo_acquire,
+ mo_release,
+ mo_acq_rel,
+ mo_seq_cst
+} morder;
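+
+// A minimal check of the ABI correspondence: the enumerators above mirror
+// the memory_order enumeration in libc++'s <atomic>, which is why their
+// order and values must not change.
+//
+//   static_assert((int)mo_relaxed == 0 && (int)mo_seq_cst == 5,
+//                 "morder must stay in sync with std::memory_order");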
+
+struct ThreadState;
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_load(const volatile a8 *a, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_load(const volatile a16 *a, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_load(const volatile a32 *a, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_load(const volatile a64 *a, morder mo);
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_load(const volatile a128 *a, morder mo);
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo);
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo);
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo);
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo);
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo);
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo);
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo);
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo);
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo);
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo);
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo);
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo);
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo);
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo);
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo);
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo);
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
+ morder mo, morder fmo);
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
+ morder mo, morder fmo);
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
+ morder mo, morder fmo);
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
+ morder mo, morder fmo);
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
+ morder mo, morder fmo);
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v, morder mo,
+ morder fmo);
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
+ morder mo, morder fmo);
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
+ morder mo, morder fmo);
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
+ morder mo, morder fmo);
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
+ morder mo, morder fmo);
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v, morder mo,
+ morder fmo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
+ morder mo, morder fmo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
+ morder mo, morder fmo);
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
+ morder mo, morder fmo);
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
+ morder mo, morder fmo);
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_atomic_thread_fence(morder mo);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_atomic_signal_fence(morder mo);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_compare_exchange(ThreadState *thr, uptr cpc, uptr pc,
+ u8 *a);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_compare_exchange(ThreadState *thr, uptr cpc, uptr pc,
+ u8 *a);
+} // extern "C"
+
+} // namespace __tsan
+
+#endif // TSAN_INTERFACE_H
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_interface_ann.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_interface_ann.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_interface_ann.cc (revision 351984)
@@ -0,0 +1,552 @@
+//===-- tsan_interface_ann.cc ---------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "sanitizer_common/sanitizer_vector.h"
+#include "tsan_interface_ann.h"
+#include "tsan_mutex.h"
+#include "tsan_report.h"
+#include "tsan_rtl.h"
+#include "tsan_mman.h"
+#include "tsan_flags.h"
+#include "tsan_platform.h"
+
+#define CALLERPC ((uptr)__builtin_return_address(0))
+
+using namespace __tsan; // NOLINT
+
+namespace __tsan {
+
+class ScopedAnnotation {
+ public:
+ ScopedAnnotation(ThreadState *thr, const char *aname, uptr pc)
+ : thr_(thr) {
+ FuncEntry(thr_, pc);
+ DPrintf("#%d: annotation %s()\n", thr_->tid, aname);
+ }
+
+ ~ScopedAnnotation() {
+ FuncExit(thr_);
+ CheckNoLocks(thr_);
+ }
+ private:
+ ThreadState *const thr_;
+};
+
+#define SCOPED_ANNOTATION_RET(typ, ret) \
+ if (!flags()->enable_annotations) \
+ return ret; \
+ ThreadState *thr = cur_thread(); \
+ const uptr caller_pc = (uptr)__builtin_return_address(0); \
+ StatInc(thr, StatAnnotation); \
+ StatInc(thr, Stat##typ); \
+ ScopedAnnotation sa(thr, __func__, caller_pc); \
+ const uptr pc = StackTrace::GetCurrentPc(); \
+ (void)pc; \
+/**/
+
+#define SCOPED_ANNOTATION(typ) SCOPED_ANNOTATION_RET(typ, )
+
+static const int kMaxDescLen = 128;
+
+struct ExpectRace {
+ ExpectRace *next;
+ ExpectRace *prev;
+ atomic_uintptr_t hitcount;
+ atomic_uintptr_t addcount;
+ uptr addr;
+ uptr size;
+ char *file;
+ int line;
+ char desc[kMaxDescLen];
+};
+
+struct DynamicAnnContext {
+ Mutex mtx;
+ ExpectRace expect;
+ ExpectRace benign;
+
+ DynamicAnnContext()
+ : mtx(MutexTypeAnnotations, StatMtxAnnotations) {
+ }
+};
+
+static DynamicAnnContext *dyn_ann_ctx;
+static char dyn_ann_ctx_placeholder[sizeof(DynamicAnnContext)] ALIGNED(64);
+
+static void AddExpectRace(ExpectRace *list,
+ char *f, int l, uptr addr, uptr size, char *desc) {
+ ExpectRace *race = list->next;
+ for (; race != list; race = race->next) {
+ if (race->addr == addr && race->size == size) {
+ atomic_store_relaxed(&race->addcount,
+ atomic_load_relaxed(&race->addcount) + 1);
+ return;
+ }
+ }
+ race = (ExpectRace*)internal_alloc(MBlockExpectRace, sizeof(ExpectRace));
+ race->addr = addr;
+ race->size = size;
+ race->file = f;
+ race->line = l;
+ race->desc[0] = 0;
+ atomic_store_relaxed(&race->hitcount, 0);
+ atomic_store_relaxed(&race->addcount, 1);
+ if (desc) {
+ int i = 0;
+ for (; i < kMaxDescLen - 1 && desc[i]; i++)
+ race->desc[i] = desc[i];
+ race->desc[i] = 0;
+ }
+ race->prev = list;
+ race->next = list->next;
+ race->next->prev = race;
+ list->next = race;
+}
+
+static ExpectRace *FindRace(ExpectRace *list, uptr addr, uptr size) {
+ for (ExpectRace *race = list->next; race != list; race = race->next) {
+ uptr maxbegin = max(race->addr, addr);
+ uptr minend = min(race->addr + race->size, addr + size);
+ if (maxbegin < minend)
+ return race;
+ }
+ return 0;
+}
+
+static bool CheckContains(ExpectRace *list, uptr addr, uptr size) {
+ ExpectRace *race = FindRace(list, addr, size);
+ if (race == 0)
+ return false;
+ DPrintf("Hit expected/benign race: %s addr=%zx:%d %s:%d\n",
+ race->desc, race->addr, (int)race->size, race->file, race->line);
+ atomic_fetch_add(&race->hitcount, 1, memory_order_relaxed);
+ return true;
+}
+
+static void InitList(ExpectRace *list) {
+ list->next = list;
+ list->prev = list;
+}
+
+void InitializeDynamicAnnotations() {
+ dyn_ann_ctx = new(dyn_ann_ctx_placeholder) DynamicAnnContext;
+ InitList(&dyn_ann_ctx->expect);
+ InitList(&dyn_ann_ctx->benign);
+}
+
+bool IsExpectedReport(uptr addr, uptr size) {
+ ReadLock lock(&dyn_ann_ctx->mtx);
+ if (CheckContains(&dyn_ann_ctx->expect, addr, size))
+ return true;
+ if (CheckContains(&dyn_ann_ctx->benign, addr, size))
+ return true;
+ return false;
+}
+
+static void CollectMatchedBenignRaces(Vector<ExpectRace> *matched,
+ int *unique_count, int *hit_count, atomic_uintptr_t ExpectRace::*counter) {
+ ExpectRace *list = &dyn_ann_ctx->benign;
+ for (ExpectRace *race = list->next; race != list; race = race->next) {
+ (*unique_count)++;
+ const uptr cnt = atomic_load_relaxed(&(race->*counter));
+ if (cnt == 0)
+ continue;
+ *hit_count += cnt;
+ uptr i = 0;
+ for (; i < matched->Size(); i++) {
+ ExpectRace *race0 = &(*matched)[i];
+ if (race->line == race0->line
+ && internal_strcmp(race->file, race0->file) == 0
+ && internal_strcmp(race->desc, race0->desc) == 0) {
+ atomic_fetch_add(&(race0->*counter), cnt, memory_order_relaxed);
+ break;
+ }
+ }
+ if (i == matched->Size())
+ matched->PushBack(*race);
+ }
+}
+
+void PrintMatchedBenignRaces() {
+ Lock lock(&dyn_ann_ctx->mtx);
+ int unique_count = 0;
+ int hit_count = 0;
+ int add_count = 0;
+ Vector<ExpectRace> hit_matched;
+ CollectMatchedBenignRaces(&hit_matched, &unique_count, &hit_count,
+ &ExpectRace::hitcount);
+ Vector<ExpectRace> add_matched;
+ CollectMatchedBenignRaces(&add_matched, &unique_count, &add_count,
+ &ExpectRace::addcount);
+ if (hit_matched.Size()) {
+ Printf("ThreadSanitizer: Matched %d \"benign\" races (pid=%d):\n",
+ hit_count, (int)internal_getpid());
+ for (uptr i = 0; i < hit_matched.Size(); i++) {
+ Printf("%d %s:%d %s\n",
+ atomic_load_relaxed(&hit_matched[i].hitcount),
+ hit_matched[i].file, hit_matched[i].line, hit_matched[i].desc);
+ }
+ }
+  if (add_matched.Size()) {
+ Printf("ThreadSanitizer: Annotated %d \"benign\" races, %d unique"
+ " (pid=%d):\n",
+ add_count, unique_count, (int)internal_getpid());
+ for (uptr i = 0; i < add_matched.Size(); i++) {
+ Printf("%d %s:%d %s\n",
+ atomic_load_relaxed(&add_matched[i].addcount),
+ add_matched[i].file, add_matched[i].line, add_matched[i].desc);
+ }
+ }
+}
+
+static void ReportMissedExpectedRace(ExpectRace *race) {
+ Printf("==================\n");
+ Printf("WARNING: ThreadSanitizer: missed expected data race\n");
+ Printf(" %s addr=%zx %s:%d\n",
+ race->desc, race->addr, race->file, race->line);
+ Printf("==================\n");
+}
+} // namespace __tsan
+
+using namespace __tsan; // NOLINT
+
+extern "C" {
+void INTERFACE_ATTRIBUTE AnnotateHappensBefore(char *f, int l, uptr addr) {
+ SCOPED_ANNOTATION(AnnotateHappensBefore);
+ Release(thr, pc, addr);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateHappensAfter(char *f, int l, uptr addr) {
+ SCOPED_ANNOTATION(AnnotateHappensAfter);
+ Acquire(thr, pc, addr);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateCondVarSignal(char *f, int l, uptr cv) {
+ SCOPED_ANNOTATION(AnnotateCondVarSignal);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateCondVarSignalAll(char *f, int l, uptr cv) {
+ SCOPED_ANNOTATION(AnnotateCondVarSignalAll);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateMutexIsNotPHB(char *f, int l, uptr mu) {
+ SCOPED_ANNOTATION(AnnotateMutexIsNotPHB);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateCondVarWait(char *f, int l, uptr cv,
+ uptr lock) {
+ SCOPED_ANNOTATION(AnnotateCondVarWait);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateRWLockCreate(char *f, int l, uptr m) {
+ SCOPED_ANNOTATION(AnnotateRWLockCreate);
+ MutexCreate(thr, pc, m, MutexFlagWriteReentrant);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateRWLockCreateStatic(char *f, int l, uptr m) {
+ SCOPED_ANNOTATION(AnnotateRWLockCreateStatic);
+ MutexCreate(thr, pc, m, MutexFlagWriteReentrant | MutexFlagLinkerInit);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateRWLockDestroy(char *f, int l, uptr m) {
+ SCOPED_ANNOTATION(AnnotateRWLockDestroy);
+ MutexDestroy(thr, pc, m);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateRWLockAcquired(char *f, int l, uptr m,
+ uptr is_w) {
+ SCOPED_ANNOTATION(AnnotateRWLockAcquired);
+ if (is_w)
+ MutexPostLock(thr, pc, m, MutexFlagDoPreLockOnPostLock);
+ else
+ MutexPostReadLock(thr, pc, m, MutexFlagDoPreLockOnPostLock);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateRWLockReleased(char *f, int l, uptr m,
+ uptr is_w) {
+ SCOPED_ANNOTATION(AnnotateRWLockReleased);
+ if (is_w)
+ MutexUnlock(thr, pc, m);
+ else
+ MutexReadUnlock(thr, pc, m);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateTraceMemory(char *f, int l, uptr mem) {
+ SCOPED_ANNOTATION(AnnotateTraceMemory);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateFlushState(char *f, int l) {
+ SCOPED_ANNOTATION(AnnotateFlushState);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateNewMemory(char *f, int l, uptr mem,
+ uptr size) {
+ SCOPED_ANNOTATION(AnnotateNewMemory);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateNoOp(char *f, int l, uptr mem) {
+ SCOPED_ANNOTATION(AnnotateNoOp);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateFlushExpectedRaces(char *f, int l) {
+ SCOPED_ANNOTATION(AnnotateFlushExpectedRaces);
+ Lock lock(&dyn_ann_ctx->mtx);
+ while (dyn_ann_ctx->expect.next != &dyn_ann_ctx->expect) {
+ ExpectRace *race = dyn_ann_ctx->expect.next;
+ if (atomic_load_relaxed(&race->hitcount) == 0) {
+ ctx->nmissed_expected++;
+ ReportMissedExpectedRace(race);
+ }
+ race->prev->next = race->next;
+ race->next->prev = race->prev;
+ internal_free(race);
+ }
+}
+
+void INTERFACE_ATTRIBUTE AnnotateEnableRaceDetection(
+ char *f, int l, int enable) {
+ SCOPED_ANNOTATION(AnnotateEnableRaceDetection);
+ // FIXME: Reconsider this functionality later. It may be irrelevant.
+}
+
+void INTERFACE_ATTRIBUTE AnnotateMutexIsUsedAsCondVar(
+ char *f, int l, uptr mu) {
+ SCOPED_ANNOTATION(AnnotateMutexIsUsedAsCondVar);
+}
+
+void INTERFACE_ATTRIBUTE AnnotatePCQGet(
+ char *f, int l, uptr pcq) {
+ SCOPED_ANNOTATION(AnnotatePCQGet);
+}
+
+void INTERFACE_ATTRIBUTE AnnotatePCQPut(
+ char *f, int l, uptr pcq) {
+ SCOPED_ANNOTATION(AnnotatePCQPut);
+}
+
+void INTERFACE_ATTRIBUTE AnnotatePCQDestroy(
+ char *f, int l, uptr pcq) {
+ SCOPED_ANNOTATION(AnnotatePCQDestroy);
+}
+
+void INTERFACE_ATTRIBUTE AnnotatePCQCreate(
+ char *f, int l, uptr pcq) {
+ SCOPED_ANNOTATION(AnnotatePCQCreate);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateExpectRace(
+ char *f, int l, uptr mem, char *desc) {
+ SCOPED_ANNOTATION(AnnotateExpectRace);
+ Lock lock(&dyn_ann_ctx->mtx);
+ AddExpectRace(&dyn_ann_ctx->expect,
+ f, l, mem, 1, desc);
+ DPrintf("Add expected race: %s addr=%zx %s:%d\n", desc, mem, f, l);
+}
+
+static void BenignRaceImpl(
+ char *f, int l, uptr mem, uptr size, char *desc) {
+ Lock lock(&dyn_ann_ctx->mtx);
+ AddExpectRace(&dyn_ann_ctx->benign,
+ f, l, mem, size, desc);
+ DPrintf("Add benign race: %s addr=%zx %s:%d\n", desc, mem, f, l);
+}
+
+// FIXME: Turn it off later. WTF is benign race?1?? Go talk to Hans Boehm.
+void INTERFACE_ATTRIBUTE AnnotateBenignRaceSized(
+ char *f, int l, uptr mem, uptr size, char *desc) {
+ SCOPED_ANNOTATION(AnnotateBenignRaceSized);
+ BenignRaceImpl(f, l, mem, size, desc);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateBenignRace(
+ char *f, int l, uptr mem, char *desc) {
+ SCOPED_ANNOTATION(AnnotateBenignRace);
+ BenignRaceImpl(f, l, mem, 1, desc);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateIgnoreReadsBegin(char *f, int l) {
+ SCOPED_ANNOTATION(AnnotateIgnoreReadsBegin);
+ ThreadIgnoreBegin(thr, pc);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateIgnoreReadsEnd(char *f, int l) {
+ SCOPED_ANNOTATION(AnnotateIgnoreReadsEnd);
+ ThreadIgnoreEnd(thr, pc);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateIgnoreWritesBegin(char *f, int l) {
+ SCOPED_ANNOTATION(AnnotateIgnoreWritesBegin);
+ ThreadIgnoreBegin(thr, pc);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateIgnoreWritesEnd(char *f, int l) {
+ SCOPED_ANNOTATION(AnnotateIgnoreWritesEnd);
+ ThreadIgnoreEnd(thr, pc);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateIgnoreSyncBegin(char *f, int l) {
+ SCOPED_ANNOTATION(AnnotateIgnoreSyncBegin);
+ ThreadIgnoreSyncBegin(thr, pc);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateIgnoreSyncEnd(char *f, int l) {
+ SCOPED_ANNOTATION(AnnotateIgnoreSyncEnd);
+ ThreadIgnoreSyncEnd(thr, pc);
+}
+
+void INTERFACE_ATTRIBUTE AnnotatePublishMemoryRange(
+ char *f, int l, uptr addr, uptr size) {
+ SCOPED_ANNOTATION(AnnotatePublishMemoryRange);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateUnpublishMemoryRange(
+ char *f, int l, uptr addr, uptr size) {
+ SCOPED_ANNOTATION(AnnotateUnpublishMemoryRange);
+}
+
+void INTERFACE_ATTRIBUTE AnnotateThreadName(
+ char *f, int l, char *name) {
+ SCOPED_ANNOTATION(AnnotateThreadName);
+ ThreadSetName(thr, name);
+}
+
+// We deliberately omit the implementation of WTFAnnotateHappensBefore() and
+// WTFAnnotateHappensAfter(). They are used by WebKit to annotate atomic
+// operations, which ThreadSanitizer already handles correctly.
+void INTERFACE_ATTRIBUTE WTFAnnotateHappensBefore(char *f, int l, uptr addr) {
+ SCOPED_ANNOTATION(AnnotateHappensBefore);
+}
+
+void INTERFACE_ATTRIBUTE WTFAnnotateHappensAfter(char *f, int l, uptr addr) {
+ SCOPED_ANNOTATION(AnnotateHappensAfter);
+}
+
+void INTERFACE_ATTRIBUTE WTFAnnotateBenignRaceSized(
+ char *f, int l, uptr mem, uptr sz, char *desc) {
+ SCOPED_ANNOTATION(AnnotateBenignRaceSized);
+ BenignRaceImpl(f, l, mem, sz, desc);
+}
+
+int INTERFACE_ATTRIBUTE RunningOnValgrind() {
+ return flags()->running_on_valgrind;
+}
+
+double __attribute__((weak)) INTERFACE_ATTRIBUTE ValgrindSlowdown(void) {
+ return 10.0;
+}
+
+const char INTERFACE_ATTRIBUTE* ThreadSanitizerQuery(const char *query) {
+ if (internal_strcmp(query, "pure_happens_before") == 0)
+ return "1";
+ else
+ return "0";
+}
+
+void INTERFACE_ATTRIBUTE
+AnnotateMemoryIsInitialized(char *f, int l, uptr mem, uptr sz) {}
+void INTERFACE_ATTRIBUTE
+AnnotateMemoryIsUninitialized(char *f, int l, uptr mem, uptr sz) {}
+
+// Note: the parameter is called flagz, because flags is already taken
+// by the global function that returns flags.
+INTERFACE_ATTRIBUTE
+void __tsan_mutex_create(void *m, unsigned flagz) {
+ SCOPED_ANNOTATION(__tsan_mutex_create);
+ MutexCreate(thr, pc, (uptr)m, flagz & MutexCreationFlagMask);
+}
+
+INTERFACE_ATTRIBUTE
+void __tsan_mutex_destroy(void *m, unsigned flagz) {
+ SCOPED_ANNOTATION(__tsan_mutex_destroy);
+ MutexDestroy(thr, pc, (uptr)m, flagz);
+}
+
+INTERFACE_ATTRIBUTE
+void __tsan_mutex_pre_lock(void *m, unsigned flagz) {
+ SCOPED_ANNOTATION(__tsan_mutex_pre_lock);
+ if (!(flagz & MutexFlagTryLock)) {
+ if (flagz & MutexFlagReadLock)
+ MutexPreReadLock(thr, pc, (uptr)m);
+ else
+ MutexPreLock(thr, pc, (uptr)m);
+ }
+ ThreadIgnoreBegin(thr, pc, /*save_stack=*/false);
+ ThreadIgnoreSyncBegin(thr, pc, /*save_stack=*/false);
+}
+
+INTERFACE_ATTRIBUTE
+void __tsan_mutex_post_lock(void *m, unsigned flagz, int rec) {
+ SCOPED_ANNOTATION(__tsan_mutex_post_lock);
+ ThreadIgnoreSyncEnd(thr, pc);
+ ThreadIgnoreEnd(thr, pc);
+ if (!(flagz & MutexFlagTryLockFailed)) {
+ if (flagz & MutexFlagReadLock)
+ MutexPostReadLock(thr, pc, (uptr)m, flagz);
+ else
+ MutexPostLock(thr, pc, (uptr)m, flagz, rec);
+ }
+}
+
+INTERFACE_ATTRIBUTE
+int __tsan_mutex_pre_unlock(void *m, unsigned flagz) {
+ SCOPED_ANNOTATION_RET(__tsan_mutex_pre_unlock, 0);
+ int ret = 0;
+ if (flagz & MutexFlagReadLock) {
+ CHECK(!(flagz & MutexFlagRecursiveUnlock));
+ MutexReadUnlock(thr, pc, (uptr)m);
+ } else {
+ ret = MutexUnlock(thr, pc, (uptr)m, flagz);
+ }
+ ThreadIgnoreBegin(thr, pc, /*save_stack=*/false);
+ ThreadIgnoreSyncBegin(thr, pc, /*save_stack=*/false);
+ return ret;
+}
+
+INTERFACE_ATTRIBUTE
+void __tsan_mutex_post_unlock(void *m, unsigned flagz) {
+ SCOPED_ANNOTATION(__tsan_mutex_post_unlock);
+ ThreadIgnoreSyncEnd(thr, pc);
+ ThreadIgnoreEnd(thr, pc);
+}
+
+INTERFACE_ATTRIBUTE
+void __tsan_mutex_pre_signal(void *addr, unsigned flagz) {
+ SCOPED_ANNOTATION(__tsan_mutex_pre_signal);
+ ThreadIgnoreBegin(thr, pc, /*save_stack=*/false);
+ ThreadIgnoreSyncBegin(thr, pc, /*save_stack=*/false);
+}
+
+INTERFACE_ATTRIBUTE
+void __tsan_mutex_post_signal(void *addr, unsigned flagz) {
+ SCOPED_ANNOTATION(__tsan_mutex_post_signal);
+ ThreadIgnoreSyncEnd(thr, pc);
+ ThreadIgnoreEnd(thr, pc);
+}
+
+INTERFACE_ATTRIBUTE
+void __tsan_mutex_pre_divert(void *addr, unsigned flagz) {
+ SCOPED_ANNOTATION(__tsan_mutex_pre_divert);
+ // Exit from ignore region started in __tsan_mutex_pre_lock/unlock/signal.
+ ThreadIgnoreSyncEnd(thr, pc);
+ ThreadIgnoreEnd(thr, pc);
+}
+
+INTERFACE_ATTRIBUTE
+void __tsan_mutex_post_divert(void *addr, unsigned flagz) {
+ SCOPED_ANNOTATION(__tsan_mutex_post_divert);
+ ThreadIgnoreBegin(thr, pc, /*save_stack=*/false);
+ ThreadIgnoreSyncBegin(thr, pc, /*save_stack=*/false);
+}
+} // extern "C"
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_interface_ann.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_interface_ann.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_interface_ann.h (revision 351984)
@@ -0,0 +1,32 @@
+//===-- tsan_interface_ann.h ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Interface for dynamic annotations.
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_INTERFACE_ANN_H
+#define TSAN_INTERFACE_ANN_H
+
+#include <sanitizer_common/sanitizer_internal_defs.h>
+
+// This header should NOT include any other headers.
+// All functions in this header are extern "C" and start with __tsan_.
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_acquire(void *addr);
+SANITIZER_INTERFACE_ATTRIBUTE void __tsan_release(void *addr);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // TSAN_INTERFACE_ANN_H
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_interface_atomic.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_interface_atomic.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_interface_atomic.cc (revision 351984)
@@ -0,0 +1,955 @@
+//===-- tsan_interface_atomic.cc ------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+// ThreadSanitizer atomic operations are based on the C++11/C11 standards.
+// For background, see the C++11 standard. A slightly older, publicly
+// available draft of the standard (not entirely up-to-date, but close enough
+// for casual browsing) is available here:
+// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
+// The following page contains more background information:
+// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/
+
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "sanitizer_common/sanitizer_mutex.h"
+#include "tsan_flags.h"
+#include "tsan_interface.h"
+#include "tsan_rtl.h"
+
+using namespace __tsan; // NOLINT
+
+#if !SANITIZER_GO && __TSAN_HAS_INT128
+// Protects emulation of 128-bit atomic operations.
+static StaticSpinMutex mutex128;
+#endif
+
+static bool IsLoadOrder(morder mo) {
+ return mo == mo_relaxed || mo == mo_consume
+ || mo == mo_acquire || mo == mo_seq_cst;
+}
+
+static bool IsStoreOrder(morder mo) {
+ return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
+}
+
+static bool IsReleaseOrder(morder mo) {
+ return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
+}
+
+static bool IsAcquireOrder(morder mo) {
+ return mo == mo_consume || mo == mo_acquire
+ || mo == mo_acq_rel || mo == mo_seq_cst;
+}
+
+static bool IsAcqRelOrder(morder mo) {
+ return mo == mo_acq_rel || mo == mo_seq_cst;
+}
+
+template<typename T> T func_xchg(volatile T *v, T op) {
+ T res = __sync_lock_test_and_set(v, op);
+  // __sync_lock_test_and_set does not imply a full barrier.
+ __sync_synchronize();
+ return res;
+}
+
+template<typename T> T func_add(volatile T *v, T op) {
+ return __sync_fetch_and_add(v, op);
+}
+
+template<typename T> T func_sub(volatile T *v, T op) {
+ return __sync_fetch_and_sub(v, op);
+}
+
+template<typename T> T func_and(volatile T *v, T op) {
+ return __sync_fetch_and_and(v, op);
+}
+
+template<typename T> T func_or(volatile T *v, T op) {
+ return __sync_fetch_and_or(v, op);
+}
+
+template<typename T> T func_xor(volatile T *v, T op) {
+ return __sync_fetch_and_xor(v, op);
+}
+
+template<typename T> T func_nand(volatile T *v, T op) {
+ // clang does not support __sync_fetch_and_nand.
+ T cmp = *v;
+ for (;;) {
+ T newv = ~(cmp & op);
+ T cur = __sync_val_compare_and_swap(v, cmp, newv);
+ if (cmp == cur)
+ return cmp;
+ cmp = cur;
+ }
+}
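+// A worked example of the nand semantics above: with *v == 5,
+// func_nand(v, 3) returns 5 and leaves *v == ~(5 & 3) == ~1
+// (all bits set except the lowest).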
+
+template<typename T> T func_cas(volatile T *v, T cmp, T xch) {
+ return __sync_val_compare_and_swap(v, cmp, xch);
+}
+
+// clang does not support 128-bit atomic ops.
+// These atomic ops are executed under a tsan-internal mutex;
+// here we assume that the atomic variables are not accessed
+// from non-instrumented code.
+#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) && !SANITIZER_GO \
+ && __TSAN_HAS_INT128
+a128 func_xchg(volatile a128 *v, a128 op) {
+ SpinMutexLock lock(&mutex128);
+ a128 cmp = *v;
+ *v = op;
+ return cmp;
+}
+
+a128 func_add(volatile a128 *v, a128 op) {
+ SpinMutexLock lock(&mutex128);
+ a128 cmp = *v;
+ *v = cmp + op;
+ return cmp;
+}
+
+a128 func_sub(volatile a128 *v, a128 op) {
+ SpinMutexLock lock(&mutex128);
+ a128 cmp = *v;
+ *v = cmp - op;
+ return cmp;
+}
+
+a128 func_and(volatile a128 *v, a128 op) {
+ SpinMutexLock lock(&mutex128);
+ a128 cmp = *v;
+ *v = cmp & op;
+ return cmp;
+}
+
+a128 func_or(volatile a128 *v, a128 op) {
+ SpinMutexLock lock(&mutex128);
+ a128 cmp = *v;
+ *v = cmp | op;
+ return cmp;
+}
+
+a128 func_xor(volatile a128 *v, a128 op) {
+ SpinMutexLock lock(&mutex128);
+ a128 cmp = *v;
+ *v = cmp ^ op;
+ return cmp;
+}
+
+a128 func_nand(volatile a128 *v, a128 op) {
+ SpinMutexLock lock(&mutex128);
+ a128 cmp = *v;
+ *v = ~(cmp & op);
+ return cmp;
+}
+
+a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) {
+ SpinMutexLock lock(&mutex128);
+ a128 cur = *v;
+ if (cur == cmp)
+ *v = xch;
+ return cur;
+}
+#endif
+
+template<typename T>
+static int SizeLog() {
+ if (sizeof(T) <= 1)
+ return kSizeLog1;
+ else if (sizeof(T) <= 2)
+ return kSizeLog2;
+ else if (sizeof(T) <= 4)
+ return kSizeLog4;
+ else
+ return kSizeLog8;
+  // For 16-byte atomics we also use 8-byte memory access;
+  // this leads to false negatives only in very obscure cases.
+}
+
+#if !SANITIZER_GO
+static atomic_uint8_t *to_atomic(const volatile a8 *a) {
+ return reinterpret_cast<atomic_uint8_t *>(const_cast<a8 *>(a));
+}
+
+static atomic_uint16_t *to_atomic(const volatile a16 *a) {
+ return reinterpret_cast<atomic_uint16_t *>(const_cast<a16 *>(a));
+}
+#endif
+
+static atomic_uint32_t *to_atomic(const volatile a32 *a) {
+ return reinterpret_cast<atomic_uint32_t *>(const_cast<a32 *>(a));
+}
+
+static atomic_uint64_t *to_atomic(const volatile a64 *a) {
+ return reinterpret_cast<atomic_uint64_t *>(const_cast<a64 *>(a));
+}
+
+static memory_order to_mo(morder mo) {
+ switch (mo) {
+ case mo_relaxed: return memory_order_relaxed;
+ case mo_consume: return memory_order_consume;
+ case mo_acquire: return memory_order_acquire;
+ case mo_release: return memory_order_release;
+ case mo_acq_rel: return memory_order_acq_rel;
+ case mo_seq_cst: return memory_order_seq_cst;
+ }
+ CHECK(0);
+ return memory_order_seq_cst;
+}
+
+template<typename T>
+static T NoTsanAtomicLoad(const volatile T *a, morder mo) {
+ return atomic_load(to_atomic(a), to_mo(mo));
+}
+
+#if __TSAN_HAS_INT128 && !SANITIZER_GO
+static a128 NoTsanAtomicLoad(const volatile a128 *a, morder mo) {
+ SpinMutexLock lock(&mutex128);
+ return *a;
+}
+#endif
+
+template<typename T>
+static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a, morder mo) {
+ CHECK(IsLoadOrder(mo));
+ // This fast-path is critical for performance.
+ // Assume the access is atomic.
+ if (!IsAcquireOrder(mo)) {
+ MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
+ return NoTsanAtomicLoad(a, mo);
+ }
+  // Don't create a sync object if it does not exist yet. For example, an atomic
+ // pointer is initialized to nullptr and then periodically acquire-loaded.
+ T v = NoTsanAtomicLoad(a, mo);
+ SyncVar *s = ctx->metamap.GetIfExistsAndLock((uptr)a, false);
+ if (s) {
+ AcquireImpl(thr, pc, &s->clock);
+ // Re-read under sync mutex because we need a consistent snapshot
+ // of the value and the clock we acquire.
+ v = NoTsanAtomicLoad(a, mo);
+ s->mtx.ReadUnlock();
+ }
+ MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
+ return v;
+}
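+
+// A minimal sketch of the lazy-publication pattern the GetIfExists path
+// above optimizes for: no sync object is allocated while the reader only
+// ever observes nullptr.
+//
+//   std::atomic<Node *> head{nullptr};      // Node is illustrative
+//   while (head.load(std::memory_order_acquire) == nullptr) { /* spin */ }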
+
+template<typename T>
+static void NoTsanAtomicStore(volatile T *a, T v, morder mo) {
+ atomic_store(to_atomic(a), v, to_mo(mo));
+}
+
+#if __TSAN_HAS_INT128 && !SANITIZER_GO
+static void NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) {
+ SpinMutexLock lock(&mutex128);
+ *a = v;
+}
+#endif
+
+template<typename T>
+static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
+ morder mo) {
+ CHECK(IsStoreOrder(mo));
+ MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
+ // This fast-path is critical for performance.
+ // Assume the access is atomic.
+  // Strictly speaking, even a relaxed store cuts off the release sequence,
+  // so we must reset the clock.
+ if (!IsReleaseOrder(mo)) {
+ NoTsanAtomicStore(a, v, mo);
+ return;
+ }
+ __sync_synchronize();
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
+ thr->fast_state.IncrementEpoch();
+ // Can't increment epoch w/o writing to the trace as well.
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
+ ReleaseStoreImpl(thr, pc, &s->clock);
+ NoTsanAtomicStore(a, v, mo);
+ s->mtx.Unlock();
+}
+
+template<typename T, T (*F)(volatile T *v, T op)>
+static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
+ MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
+ SyncVar *s = 0;
+ if (mo != mo_relaxed) {
+ s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
+ thr->fast_state.IncrementEpoch();
+ // Can't increment epoch w/o writing to the trace as well.
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
+ if (IsAcqRelOrder(mo))
+ AcquireReleaseImpl(thr, pc, &s->clock);
+ else if (IsReleaseOrder(mo))
+ ReleaseImpl(thr, pc, &s->clock);
+ else if (IsAcquireOrder(mo))
+ AcquireImpl(thr, pc, &s->clock);
+ }
+ v = F(a, v);
+ if (s)
+ s->mtx.Unlock();
+ return v;
+}
+
+template<typename T>
+static T NoTsanAtomicExchange(volatile T *a, T v, morder mo) {
+ return func_xchg(a, v);
+}
+
+template<typename T>
+static T NoTsanAtomicFetchAdd(volatile T *a, T v, morder mo) {
+ return func_add(a, v);
+}
+
+template<typename T>
+static T NoTsanAtomicFetchSub(volatile T *a, T v, morder mo) {
+ return func_sub(a, v);
+}
+
+template<typename T>
+static T NoTsanAtomicFetchAnd(volatile T *a, T v, morder mo) {
+ return func_and(a, v);
+}
+
+template<typename T>
+static T NoTsanAtomicFetchOr(volatile T *a, T v, morder mo) {
+ return func_or(a, v);
+}
+
+template<typename T>
+static T NoTsanAtomicFetchXor(volatile T *a, T v, morder mo) {
+ return func_xor(a, v);
+}
+
+template<typename T>
+static T NoTsanAtomicFetchNand(volatile T *a, T v, morder mo) {
+ return func_nand(a, v);
+}
+
+template<typename T>
+static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
+ morder mo) {
+ return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
+}
+
+template<typename T>
+static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v,
+ morder mo) {
+ return AtomicRMW<T, func_add>(thr, pc, a, v, mo);
+}
+
+template<typename T>
+static T AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v,
+ morder mo) {
+ return AtomicRMW<T, func_sub>(thr, pc, a, v, mo);
+}
+
+template<typename T>
+static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
+ morder mo) {
+ return AtomicRMW<T, func_and>(thr, pc, a, v, mo);
+}
+
+template<typename T>
+static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v,
+ morder mo) {
+ return AtomicRMW<T, func_or>(thr, pc, a, v, mo);
+}
+
+template<typename T>
+static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
+ morder mo) {
+ return AtomicRMW<T, func_xor>(thr, pc, a, v, mo);
+}
+
+template<typename T>
+static T AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v,
+ morder mo) {
+ return AtomicRMW<T, func_nand>(thr, pc, a, v, mo);
+}
+
+template<typename T>
+static bool NoTsanAtomicCAS(volatile T *a, T *c, T v, morder mo, morder fmo) {
+ return atomic_compare_exchange_strong(to_atomic(a), c, v, to_mo(mo));
+}
+
+#if __TSAN_HAS_INT128
+static bool NoTsanAtomicCAS(volatile a128 *a, a128 *c, a128 v,
+ morder mo, morder fmo) {
+ a128 old = *c;
+ a128 cur = func_cas(a, old, v);
+ if (cur == old)
+ return true;
+ *c = cur;
+ return false;
+}
+#endif
+
+template<typename T>
+static T NoTsanAtomicCAS(volatile T *a, T c, T v, morder mo, morder fmo) {
+ NoTsanAtomicCAS(a, &c, v, mo, fmo);
+ return c;
+}
+
+template<typename T>
+static bool AtomicCAS(ThreadState *thr, uptr pc,
+ volatile T *a, T *c, T v, morder mo, morder fmo) {
+  (void)fmo;  // Unused because LLVM does not pass it yet.
+ MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
+ SyncVar *s = 0;
+ bool write_lock = mo != mo_acquire && mo != mo_consume;
+ if (mo != mo_relaxed) {
+ s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, write_lock);
+ thr->fast_state.IncrementEpoch();
+ // Can't increment epoch w/o writing to the trace as well.
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
+ if (IsAcqRelOrder(mo))
+ AcquireReleaseImpl(thr, pc, &s->clock);
+ else if (IsReleaseOrder(mo))
+ ReleaseImpl(thr, pc, &s->clock);
+ else if (IsAcquireOrder(mo))
+ AcquireImpl(thr, pc, &s->clock);
+ }
+ T cc = *c;
+ T pr = func_cas(a, cc, v);
+ if (s) {
+ if (write_lock)
+ s->mtx.Unlock();
+ else
+ s->mtx.ReadUnlock();
+ }
+ if (pr == cc)
+ return true;
+ *c = pr;
+ return false;
+}
+
+template<typename T>
+static T AtomicCAS(ThreadState *thr, uptr pc,
+ volatile T *a, T c, T v, morder mo, morder fmo) {
+ AtomicCAS(thr, pc, a, &c, v, mo, fmo);
+ return c;
+}
+
+#if !SANITIZER_GO
+static void NoTsanAtomicFence(morder mo) {
+ __sync_synchronize();
+}
+
+static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
+ // FIXME(dvyukov): not implemented.
+ __sync_synchronize();
+}
+#endif
+
+// Interface functions follow.
+#if !SANITIZER_GO
+
+// C/C++
+
+static morder convert_morder(morder mo) {
+ if (flags()->force_seq_cst_atomics)
+ return (morder)mo_seq_cst;
+
+ // Filter out additional memory order flags:
+ // MEMMODEL_SYNC = 1 << 15
+ // __ATOMIC_HLE_ACQUIRE = 1 << 16
+ // __ATOMIC_HLE_RELEASE = 1 << 17
+ //
+ // HLE is an optimization, and we pretend that elision always fails.
+  // MEMMODEL_SYNC is used when lowering __sync_ atomics;
+  // since we use __sync_ atomics for the actual atomic operations,
+  // we can safely ignore it as well. It also subtly affects semantics,
+  // but we don't model the difference.
+ return (morder)(mo & 0x7fff);
+}
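+
+// For example, an __ATOMIC_HLE_ACQUIRE-flagged order arrives as
+// (mo_acquire | (1 << 16)); masking with 0x7fff recovers plain mo_acquire.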
+
+#define SCOPED_ATOMIC(func, ...) \
+ ThreadState *const thr = cur_thread(); \
+ if (UNLIKELY(thr->ignore_sync || thr->ignore_interceptors)) { \
+ ProcessPendingSignals(thr); \
+ return NoTsanAtomic##func(__VA_ARGS__); \
+ } \
+ const uptr callpc = (uptr)__builtin_return_address(0); \
+ uptr pc = StackTrace::GetCurrentPc(); \
+ mo = convert_morder(mo); \
+ AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
+ ScopedAtomic sa(thr, callpc, a, mo, __func__); \
+ return Atomic##func(thr, pc, __VA_ARGS__); \
+/**/
+
+class ScopedAtomic {
+ public:
+ ScopedAtomic(ThreadState *thr, uptr pc, const volatile void *a,
+ morder mo, const char *func)
+ : thr_(thr) {
+ FuncEntry(thr_, pc);
+ DPrintf("#%d: %s(%p, %d)\n", thr_->tid, func, a, mo);
+ }
+ ~ScopedAtomic() {
+ ProcessPendingSignals(thr_);
+ FuncExit(thr_);
+ }
+ private:
+ ThreadState *thr_;
+};
+
+static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) {
+ StatInc(thr, StatAtomic);
+ StatInc(thr, t);
+ StatInc(thr, size == 1 ? StatAtomic1
+ : size == 2 ? StatAtomic2
+ : size == 4 ? StatAtomic4
+ : size == 8 ? StatAtomic8
+ : StatAtomic16);
+ StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed
+ : mo == mo_consume ? StatAtomicConsume
+ : mo == mo_acquire ? StatAtomicAcquire
+ : mo == mo_release ? StatAtomicRelease
+ : mo == mo_acq_rel ? StatAtomicAcq_Rel
+ : StatAtomicSeq_Cst);
+}
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
+ SCOPED_ATOMIC(Load, a, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
+ SCOPED_ATOMIC(Load, a, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
+ SCOPED_ATOMIC(Load, a, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
+ SCOPED_ATOMIC(Load, a, mo);
+}
+
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
+ SCOPED_ATOMIC(Load, a, mo);
+}
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
+ SCOPED_ATOMIC(Store, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
+ SCOPED_ATOMIC(Store, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
+ SCOPED_ATOMIC(Store, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
+ SCOPED_ATOMIC(Store, a, v, mo);
+}
+
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
+ SCOPED_ATOMIC(Store, a, v, mo);
+}
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
+ SCOPED_ATOMIC(Exchange, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
+ SCOPED_ATOMIC(Exchange, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
+ SCOPED_ATOMIC(Exchange, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
+ SCOPED_ATOMIC(Exchange, a, v, mo);
+}
+
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
+ SCOPED_ATOMIC(Exchange, a, v, mo);
+}
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
+ SCOPED_ATOMIC(FetchAdd, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
+ SCOPED_ATOMIC(FetchAdd, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
+ SCOPED_ATOMIC(FetchAdd, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
+ SCOPED_ATOMIC(FetchAdd, a, v, mo);
+}
+
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
+ SCOPED_ATOMIC(FetchAdd, a, v, mo);
+}
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
+ SCOPED_ATOMIC(FetchSub, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
+ SCOPED_ATOMIC(FetchSub, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
+ SCOPED_ATOMIC(FetchSub, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
+ SCOPED_ATOMIC(FetchSub, a, v, mo);
+}
+
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
+ SCOPED_ATOMIC(FetchSub, a, v, mo);
+}
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
+ SCOPED_ATOMIC(FetchAnd, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
+ SCOPED_ATOMIC(FetchAnd, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
+ SCOPED_ATOMIC(FetchAnd, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
+ SCOPED_ATOMIC(FetchAnd, a, v, mo);
+}
+
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
+ SCOPED_ATOMIC(FetchAnd, a, v, mo);
+}
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
+ SCOPED_ATOMIC(FetchOr, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
+ SCOPED_ATOMIC(FetchOr, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
+ SCOPED_ATOMIC(FetchOr, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
+ SCOPED_ATOMIC(FetchOr, a, v, mo);
+}
+
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
+ SCOPED_ATOMIC(FetchOr, a, v, mo);
+}
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
+ SCOPED_ATOMIC(FetchXor, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
+ SCOPED_ATOMIC(FetchXor, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
+ SCOPED_ATOMIC(FetchXor, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
+ SCOPED_ATOMIC(FetchXor, a, v, mo);
+}
+
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
+ SCOPED_ATOMIC(FetchXor, a, v, mo);
+}
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
+ SCOPED_ATOMIC(FetchNand, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
+ SCOPED_ATOMIC(FetchNand, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
+ SCOPED_ATOMIC(FetchNand, a, v, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
+ SCOPED_ATOMIC(FetchNand, a, v, mo);
+}
+
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
+ SCOPED_ATOMIC(FetchNand, a, v, mo);
+}
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
+ morder mo, morder fmo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
+ morder mo, morder fmo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
+ morder mo, morder fmo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
+ morder mo, morder fmo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+}
+
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
+ morder mo, morder fmo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+}
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
+ morder mo, morder fmo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
+ morder mo, morder fmo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
+ morder mo, morder fmo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
+ morder mo, morder fmo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+}
+
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
+ morder mo, morder fmo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+}
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
+ morder mo, morder fmo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
+ morder mo, morder fmo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
+ morder mo, morder fmo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
+ morder mo, morder fmo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+}
+
+#if __TSAN_HAS_INT128
+SANITIZER_INTERFACE_ATTRIBUTE
+a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
+ morder mo, morder fmo) {
+ SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
+}
+#endif
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_atomic_thread_fence(morder mo) {
+ char* a = 0;
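+  // The dummy 'a' exists only because SCOPED_ATOMIC expands to code that
+  // references 'a' (sizeof() for stats and the debug log); a fence itself
+  // has no associated address.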
+ SCOPED_ATOMIC(Fence, mo);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_atomic_signal_fence(morder mo) {
+}
+} // extern "C"
+
+#else // #if !SANITIZER_GO
+
+// Go
+
+#define ATOMIC(func, ...) \
+ if (thr->ignore_sync) { \
+ NoTsanAtomic##func(__VA_ARGS__); \
+ } else { \
+ FuncEntry(thr, cpc); \
+ Atomic##func(thr, pc, __VA_ARGS__); \
+ FuncExit(thr); \
+ } \
+/**/
+
+#define ATOMIC_RET(func, ret, ...) \
+ if (thr->ignore_sync) { \
+ (ret) = NoTsanAtomic##func(__VA_ARGS__); \
+ } else { \
+ FuncEntry(thr, cpc); \
+ (ret) = Atomic##func(thr, pc, __VA_ARGS__); \
+ FuncExit(thr); \
+ } \
+/**/
+
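+// The Go runtime packs arguments and results into a single buffer 'a':
+// the target address is at a+0, followed by the operands and, last, the
+// result slot. E.g. for __tsan_go_atomic32_compare_exchange the layout is
+// a+0 addr, a+8 cmp, a+12 new value, a+16 success flag (as read below).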
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC_RET(Load, *(a32*)(a+8), *(a32**)a, mo_acquire);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC_RET(Load, *(a64*)(a+8), *(a64**)a, mo_acquire);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC(Store, *(a32**)a, *(a32*)(a+8), mo_release);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC(Store, *(a64**)a, *(a64*)(a+8), mo_release);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC_RET(FetchAdd, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC_RET(FetchAdd, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC_RET(Exchange, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ ATOMIC_RET(Exchange, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic32_compare_exchange(
+ ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ a32 cur = 0;
+ a32 cmp = *(a32*)(a+8);
+ ATOMIC_RET(CAS, cur, *(a32**)a, cmp, *(a32*)(a+12), mo_acq_rel, mo_acquire);
+ *(bool*)(a+16) = (cur == cmp);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __tsan_go_atomic64_compare_exchange(
+ ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
+ a64 cur = 0;
+ a64 cmp = *(a64*)(a+8);
+ ATOMIC_RET(CAS, cur, *(a64**)a, cmp, *(a64*)(a+16), mo_acq_rel, mo_acquire);
+ *(bool*)(a+24) = (cur == cmp);
+}
+} // extern "C"
+#endif // #if !SANITIZER_GO
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_interface_inl.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_interface_inl.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_interface_inl.h (revision 351984)
@@ -0,0 +1,124 @@
+//===-- tsan_interface_inl.h ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "tsan_interface.h"
+#include "tsan_rtl.h"
+
+#define CALLERPC ((uptr)__builtin_return_address(0))
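+// CALLERPC is the return address of the interface function itself, i.e. a
+// program counter inside the instrumented code that performed the access.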
+
+using namespace __tsan; // NOLINT
+
+void __tsan_read1(void *addr) {
+ MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog1);
+}
+
+void __tsan_read2(void *addr) {
+ MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog2);
+}
+
+void __tsan_read4(void *addr) {
+ MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog4);
+}
+
+void __tsan_read8(void *addr) {
+ MemoryRead(cur_thread(), CALLERPC, (uptr)addr, kSizeLog8);
+}
+
+void __tsan_write1(void *addr) {
+ MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog1);
+}
+
+void __tsan_write2(void *addr) {
+ MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog2);
+}
+
+void __tsan_write4(void *addr) {
+ MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog4);
+}
+
+void __tsan_write8(void *addr) {
+ MemoryWrite(cur_thread(), CALLERPC, (uptr)addr, kSizeLog8);
+}
+
+void __tsan_read1_pc(void *addr, void *pc) {
+ MemoryRead(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog1);
+}
+
+void __tsan_read2_pc(void *addr, void *pc) {
+ MemoryRead(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog2);
+}
+
+void __tsan_read4_pc(void *addr, void *pc) {
+ MemoryRead(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog4);
+}
+
+void __tsan_read8_pc(void *addr, void *pc) {
+ MemoryRead(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog8);
+}
+
+void __tsan_write1_pc(void *addr, void *pc) {
+ MemoryWrite(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog1);
+}
+
+void __tsan_write2_pc(void *addr, void *pc) {
+ MemoryWrite(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog2);
+}
+
+void __tsan_write4_pc(void *addr, void *pc) {
+ MemoryWrite(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog4);
+}
+
+void __tsan_write8_pc(void *addr, void *pc) {
+ MemoryWrite(cur_thread(), (uptr)pc, (uptr)addr, kSizeLog8);
+}
+
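+// Vtable-pointer accesses are flagged via thr->is_vptr_access so that races
+// on the vptr (e.g. during construction/destruction) can be distinguished in
+// reports; the CHECK guards the fixed 8-byte access size used below.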
+void __tsan_vptr_update(void **vptr_p, void *new_val) {
+ CHECK_EQ(sizeof(vptr_p), 8);
+ if (*vptr_p != new_val) {
+ ThreadState *thr = cur_thread();
+ thr->is_vptr_access = true;
+ MemoryWrite(thr, CALLERPC, (uptr)vptr_p, kSizeLog8);
+ thr->is_vptr_access = false;
+ }
+}
+
+void __tsan_vptr_read(void **vptr_p) {
+ CHECK_EQ(sizeof(vptr_p), 8);
+ ThreadState *thr = cur_thread();
+ thr->is_vptr_access = true;
+ MemoryRead(thr, CALLERPC, (uptr)vptr_p, kSizeLog8);
+ thr->is_vptr_access = false;
+}
+
+void __tsan_func_entry(void *pc) {
+ FuncEntry(cur_thread(), (uptr)pc);
+}
+
+void __tsan_func_exit() {
+ FuncExit(cur_thread());
+}
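+// For orientation (an editor's sketch of typical compiler output, not part
+// of this file's interface): for 'int g; void f() { g = 1; }' the TSan
+// instrumentation pass emits approximately
+//   __tsan_func_entry(__builtin_return_address(0));
+//   __tsan_write4(&g);
+//   g = 1;
+//   __tsan_func_exit();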
+
+void __tsan_ignore_thread_begin() {
+ ThreadIgnoreBegin(cur_thread(), CALLERPC);
+}
+
+void __tsan_ignore_thread_end() {
+ ThreadIgnoreEnd(cur_thread(), CALLERPC);
+}
+
+void __tsan_read_range(void *addr, uptr size) {
+ MemoryAccessRange(cur_thread(), CALLERPC, (uptr)addr, size, false);
+}
+
+void __tsan_write_range(void *addr, uptr size) {
+ MemoryAccessRange(cur_thread(), CALLERPC, (uptr)addr, size, true);
+}
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_interface_java.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_interface_java.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_interface_java.cc (revision 351984)
@@ -0,0 +1,267 @@
+//===-- tsan_interface_java.cc --------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "tsan_interface_java.h"
+#include "tsan_rtl.h"
+#include "tsan_mutex.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "sanitizer_common/sanitizer_procmaps.h"
+
+using namespace __tsan; // NOLINT
+
+const jptr kHeapAlignment = 8;
+
+namespace __tsan {
+
+struct JavaContext {
+ const uptr heap_begin;
+ const uptr heap_size;
+
+ JavaContext(jptr heap_begin, jptr heap_size)
+ : heap_begin(heap_begin)
+ , heap_size(heap_size) {
+ }
+};
+
+class ScopedJavaFunc {
+ public:
+ ScopedJavaFunc(ThreadState *thr, uptr pc)
+ : thr_(thr) {
+ Initialize(thr_);
+ FuncEntry(thr, pc);
+ }
+
+ ~ScopedJavaFunc() {
+ FuncExit(thr_);
+ // FIXME(dvyukov): process pending signals.
+ }
+
+ private:
+ ThreadState *thr_;
+};
+
+static u64 jctx_buf[sizeof(JavaContext) / sizeof(u64) + 1];
+static JavaContext *jctx;
+
+} // namespace __tsan
+
+#define SCOPED_JAVA_FUNC(func) \
+ ThreadState *thr = cur_thread(); \
+ const uptr caller_pc = GET_CALLER_PC(); \
+ const uptr pc = StackTrace::GetCurrentPc(); \
+ (void)pc; \
+ ScopedJavaFunc scoped(thr, caller_pc); \
+/**/
+
+void __tsan_java_init(jptr heap_begin, jptr heap_size) {
+ SCOPED_JAVA_FUNC(__tsan_java_init);
+ DPrintf("#%d: java_init(%p, %p)\n", thr->tid, heap_begin, heap_size);
+ CHECK_EQ(jctx, 0);
+ CHECK_GT(heap_begin, 0);
+ CHECK_GT(heap_size, 0);
+ CHECK_EQ(heap_begin % kHeapAlignment, 0);
+ CHECK_EQ(heap_size % kHeapAlignment, 0);
+ CHECK_LT(heap_begin, heap_begin + heap_size);
+ jctx = new(jctx_buf) JavaContext(heap_begin, heap_size);
+}
+
+int __tsan_java_fini() {
+ SCOPED_JAVA_FUNC(__tsan_java_fini);
+ DPrintf("#%d: java_fini()\n", thr->tid);
+ CHECK_NE(jctx, 0);
+ // FIXME(dvyukov): this does not call atexit() callbacks.
+ int status = Finalize(thr);
+ DPrintf("#%d: java_fini() = %d\n", thr->tid, status);
+ return status;
+}
+
+void __tsan_java_alloc(jptr ptr, jptr size) {
+ SCOPED_JAVA_FUNC(__tsan_java_alloc);
+ DPrintf("#%d: java_alloc(%p, %p)\n", thr->tid, ptr, size);
+ CHECK_NE(jctx, 0);
+ CHECK_NE(size, 0);
+ CHECK_EQ(ptr % kHeapAlignment, 0);
+ CHECK_EQ(size % kHeapAlignment, 0);
+ CHECK_GE(ptr, jctx->heap_begin);
+ CHECK_LE(ptr + size, jctx->heap_begin + jctx->heap_size);
+
+ OnUserAlloc(thr, pc, ptr, size, false);
+}
+
+void __tsan_java_free(jptr ptr, jptr size) {
+ SCOPED_JAVA_FUNC(__tsan_java_free);
+ DPrintf("#%d: java_free(%p, %p)\n", thr->tid, ptr, size);
+ CHECK_NE(jctx, 0);
+ CHECK_NE(size, 0);
+ CHECK_EQ(ptr % kHeapAlignment, 0);
+ CHECK_EQ(size % kHeapAlignment, 0);
+ CHECK_GE(ptr, jctx->heap_begin);
+ CHECK_LE(ptr + size, jctx->heap_begin + jctx->heap_size);
+
+ ctx->metamap.FreeRange(thr->proc(), ptr, size);
+}
+
+void __tsan_java_move(jptr src, jptr dst, jptr size) {
+ SCOPED_JAVA_FUNC(__tsan_java_move);
+ DPrintf("#%d: java_move(%p, %p, %p)\n", thr->tid, src, dst, size);
+ CHECK_NE(jctx, 0);
+ CHECK_NE(size, 0);
+ CHECK_EQ(src % kHeapAlignment, 0);
+ CHECK_EQ(dst % kHeapAlignment, 0);
+ CHECK_EQ(size % kHeapAlignment, 0);
+ CHECK_GE(src, jctx->heap_begin);
+ CHECK_LE(src + size, jctx->heap_begin + jctx->heap_size);
+ CHECK_GE(dst, jctx->heap_begin);
+ CHECK_LE(dst + size, jctx->heap_begin + jctx->heap_size);
+  CHECK_NE(dst, src);
+
+  // Assumed not to run concurrently with threads that perform memory
+  // accesses or mutex operations (i.e. during a stop-the-world phase).
+ ctx->metamap.MoveMemory(src, dst, size);
+
+ // Move shadow.
+ u64 *s = (u64*)MemToShadow(src);
+ u64 *d = (u64*)MemToShadow(dst);
+ u64 *send = (u64*)MemToShadow(src + size);
+ uptr inc = 1;
+ if (dst > src) {
+ s = (u64*)MemToShadow(src + size) - 1;
+ d = (u64*)MemToShadow(dst + size) - 1;
+ send = (u64*)MemToShadow(src) - 1;
+ inc = -1;
+ }
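+  // When dst > src the ranges may overlap at the front, so the shadow is
+  // copied backwards (high to low), mirroring memmove()'s overlap handling.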
+ for (; s != send; s += inc, d += inc) {
+ *d = *s;
+ *s = 0;
+ }
+}
+
+jptr __tsan_java_find(jptr *from_ptr, jptr to) {
+ SCOPED_JAVA_FUNC(__tsan_java_find);
+ DPrintf("#%d: java_find(&%p, %p)\n", *from_ptr, to);
+ CHECK_EQ((*from_ptr) % kHeapAlignment, 0);
+ CHECK_EQ(to % kHeapAlignment, 0);
+ CHECK_GE(*from_ptr, jctx->heap_begin);
+ CHECK_LE(to, jctx->heap_begin + jctx->heap_size);
+ for (uptr from = *from_ptr; from < to; from += kHeapAlignment) {
+ MBlock *b = ctx->metamap.GetBlock(from);
+ if (b) {
+ *from_ptr = from;
+ return b->siz;
+ }
+ }
+ return 0;
+}
+
+void __tsan_java_finalize() {
+ SCOPED_JAVA_FUNC(__tsan_java_finalize);
+ DPrintf("#%d: java_mutex_finalize()\n", thr->tid);
+ AcquireGlobal(thr, 0);
+}
+
+void __tsan_java_mutex_lock(jptr addr) {
+ SCOPED_JAVA_FUNC(__tsan_java_mutex_lock);
+ DPrintf("#%d: java_mutex_lock(%p)\n", thr->tid, addr);
+ CHECK_NE(jctx, 0);
+ CHECK_GE(addr, jctx->heap_begin);
+ CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+
+ MutexPostLock(thr, pc, addr, MutexFlagLinkerInit | MutexFlagWriteReentrant |
+ MutexFlagDoPreLockOnPostLock);
+}
+
+void __tsan_java_mutex_unlock(jptr addr) {
+ SCOPED_JAVA_FUNC(__tsan_java_mutex_unlock);
+ DPrintf("#%d: java_mutex_unlock(%p)\n", thr->tid, addr);
+ CHECK_NE(jctx, 0);
+ CHECK_GE(addr, jctx->heap_begin);
+ CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+
+ MutexUnlock(thr, pc, addr);
+}
+
+void __tsan_java_mutex_read_lock(jptr addr) {
+ SCOPED_JAVA_FUNC(__tsan_java_mutex_read_lock);
+ DPrintf("#%d: java_mutex_read_lock(%p)\n", thr->tid, addr);
+ CHECK_NE(jctx, 0);
+ CHECK_GE(addr, jctx->heap_begin);
+ CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+
+ MutexPostReadLock(thr, pc, addr, MutexFlagLinkerInit |
+ MutexFlagWriteReentrant | MutexFlagDoPreLockOnPostLock);
+}
+
+void __tsan_java_mutex_read_unlock(jptr addr) {
+ SCOPED_JAVA_FUNC(__tsan_java_mutex_read_unlock);
+ DPrintf("#%d: java_mutex_read_unlock(%p)\n", thr->tid, addr);
+ CHECK_NE(jctx, 0);
+ CHECK_GE(addr, jctx->heap_begin);
+ CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+
+ MutexReadUnlock(thr, pc, addr);
+}
+
+void __tsan_java_mutex_lock_rec(jptr addr, int rec) {
+ SCOPED_JAVA_FUNC(__tsan_java_mutex_lock_rec);
+ DPrintf("#%d: java_mutex_lock_rec(%p, %d)\n", thr->tid, addr, rec);
+ CHECK_NE(jctx, 0);
+ CHECK_GE(addr, jctx->heap_begin);
+ CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+ CHECK_GT(rec, 0);
+
+ MutexPostLock(thr, pc, addr, MutexFlagLinkerInit | MutexFlagWriteReentrant |
+ MutexFlagDoPreLockOnPostLock | MutexFlagRecursiveLock, rec);
+}
+
+int __tsan_java_mutex_unlock_rec(jptr addr) {
+ SCOPED_JAVA_FUNC(__tsan_java_mutex_unlock_rec);
+ DPrintf("#%d: java_mutex_unlock_rec(%p)\n", thr->tid, addr);
+ CHECK_NE(jctx, 0);
+ CHECK_GE(addr, jctx->heap_begin);
+ CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+
+ return MutexUnlock(thr, pc, addr, MutexFlagRecursiveUnlock);
+}
+
+void __tsan_java_acquire(jptr addr) {
+ SCOPED_JAVA_FUNC(__tsan_java_acquire);
+ DPrintf("#%d: java_acquire(%p)\n", thr->tid, addr);
+ CHECK_NE(jctx, 0);
+ CHECK_GE(addr, jctx->heap_begin);
+ CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+
+ Acquire(thr, caller_pc, addr);
+}
+
+void __tsan_java_release(jptr addr) {
+ SCOPED_JAVA_FUNC(__tsan_java_release);
+ DPrintf("#%d: java_release(%p)\n", thr->tid, addr);
+ CHECK_NE(jctx, 0);
+ CHECK_GE(addr, jctx->heap_begin);
+ CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+
+ Release(thr, caller_pc, addr);
+}
+
+void __tsan_java_release_store(jptr addr) {
+  SCOPED_JAVA_FUNC(__tsan_java_release_store);
+ DPrintf("#%d: java_release_store(%p)\n", thr->tid, addr);
+ CHECK_NE(jctx, 0);
+ CHECK_GE(addr, jctx->heap_begin);
+ CHECK_LT(addr, jctx->heap_begin + jctx->heap_size);
+
+ ReleaseStore(thr, caller_pc, addr);
+}
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_interface_java.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_interface_java.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_interface_java.h (revision 351984)
@@ -0,0 +1,99 @@
+//===-- tsan_interface_java.h -----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Interface for verification of Java or mixed Java/C++ programs.
+// The interface is intended to be used from within a JVM to notify TSan
+// about events such as Java locks and GC memory compaction.
+//
+// For plain memory accesses and function entry/exit, a JVM is intended to use
+// the C++ interfaces: __tsan_readN/writeN and __tsan_func_entry/exit.
+//
+// For volatile memory accesses and atomic operations, the JVM is intended to
+// use the standard atomics API: __tsan_atomicN_load/store/etc.
+//
+// For usage examples see lit_tests/java_*.cc
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_INTERFACE_JAVA_H
+#define TSAN_INTERFACE_JAVA_H
+
+#ifndef INTERFACE_ATTRIBUTE
+# define INTERFACE_ATTRIBUTE __attribute__((visibility("default")))
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef unsigned long jptr; // NOLINT
+
+// Must be called before any other callback from Java.
+void __tsan_java_init(jptr heap_begin, jptr heap_size) INTERFACE_ATTRIBUTE;
+// Must be called when the application exits.
+// Not necessarily the last callback (concurrently running threads are OK).
+// Returns exit status or 0 if tsan does not want to override it.
+int __tsan_java_fini() INTERFACE_ATTRIBUTE;
+
+// Callback for memory allocations.
+// May be omitted for allocations that are not subject to data races
+// and do not contain synchronization objects (e.g. String).
+void __tsan_java_alloc(jptr ptr, jptr size) INTERFACE_ATTRIBUTE;
+// Callback for memory free.
+// Preferably aggregated over several objects.
+void __tsan_java_free(jptr ptr, jptr size) INTERFACE_ATTRIBUTE;
+// Callback for memory move by GC.
+// Preferably aggregated over several objects.
+// The ranges can overlap.
+void __tsan_java_move(jptr src, jptr dst, jptr size) INTERFACE_ATTRIBUTE;
+// This function must be called on the finalizer thread
+// before executing a batch of finalizers.
+// It ensures the necessary synchronization between
+// Java object creation and finalization.
+void __tsan_java_finalize() INTERFACE_ATTRIBUTE;
+// Finds the first allocated memory block in the [*from_ptr, to) range, saves
+// its address in *from_ptr and returns its size. Returns 0 if there are no
+// allocated memory blocks in the range.
+jptr __tsan_java_find(jptr *from_ptr, jptr to) INTERFACE_ATTRIBUTE;
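+// Editor's sketch of a heap walk over [heap_begin, heap_begin + heap_size)
+// using this primitive (hypothetical helper names):
+//   jptr p = heap_begin;
+//   jptr sz;
+//   while ((sz = __tsan_java_find(&p, heap_begin + heap_size)) != 0) {
+//     process_object(p, sz);  // hypothetical callback
+//     p += sz;
+//   }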
+
+// Mutex lock.
+// Addr is any unique address associated with the mutex.
+// Can be called on recursive reentry.
+void __tsan_java_mutex_lock(jptr addr) INTERFACE_ATTRIBUTE;
+// Mutex unlock.
+void __tsan_java_mutex_unlock(jptr addr) INTERFACE_ATTRIBUTE;
+// Mutex read lock.
+void __tsan_java_mutex_read_lock(jptr addr) INTERFACE_ATTRIBUTE;
+// Mutex read unlock.
+void __tsan_java_mutex_read_unlock(jptr addr) INTERFACE_ATTRIBUTE;
+// Recursive mutex lock, intended for handling of Object.wait().
+// The 'rec' value must be obtained from the previous
+// __tsan_java_mutex_unlock_rec().
+void __tsan_java_mutex_lock_rec(jptr addr, int rec) INTERFACE_ATTRIBUTE;
+// Recursive mutex unlock, intended for handling of Object.wait().
+// The return value says how many times this thread called lock()
+// w/o a pairing unlock() (i.e. how many recursive levels it unlocked).
+// It must be passed back to __tsan_java_mutex_lock_rec() to restore
+// the same recursion level.
+int __tsan_java_mutex_unlock_rec(jptr addr) INTERFACE_ATTRIBUTE;
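+// Editor's sketch of the intended Object.wait() protocol (hypothetical JVM
+// pseudo-code):
+//   int rec = __tsan_java_mutex_unlock_rec(addr);  // fully release monitor
+//   park_current_thread();                         // hypothetical helper
+//   __tsan_java_mutex_lock_rec(addr, rec);         // restore recursion depth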
+
+// Raw acquire/release primitives.
+// Can be used to establish happens-before edges on volatile/final fields,
+// in atomic operations, etc. release_store is the same as release, but it
+// breaks the release sequence on addr (see C++ standard 1.10/7 for details).
+void __tsan_java_acquire(jptr addr) INTERFACE_ATTRIBUTE;
+void __tsan_java_release(jptr addr) INTERFACE_ATTRIBUTE;
+void __tsan_java_release_store(jptr addr) INTERFACE_ATTRIBUTE;
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#undef INTERFACE_ATTRIBUTE
+
+#endif // #ifndef TSAN_INTERFACE_JAVA_H
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_libdispatch.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_libdispatch.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_libdispatch.cc (revision 351984)
@@ -0,0 +1,782 @@
+//===-- tsan_libdispatch.cc -----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Support for intercepting libdispatch (GCD).
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "interception/interception.h"
+#include "tsan_interceptors.h"
+#include "tsan_rtl.h"
+
+#include "BlocksRuntime/Block.h"
+#include "tsan_dispatch_defs.h"
+
+namespace __tsan {
+typedef u16 uint16_t;
+
+typedef struct {
+ dispatch_queue_t queue;
+ void *orig_context;
+ dispatch_function_t orig_work;
+ bool free_context_in_callback;
+ bool submitted_synchronously;
+ bool is_barrier_block;
+ uptr non_queue_sync_object;
+} block_context_t;
+
+// The offsets of different fields of the dispatch_queue_t structure, exported
+// by libdispatch.dylib.
+extern "C" struct dispatch_queue_offsets_s {
+ const uint16_t dqo_version;
+ const uint16_t dqo_label;
+ const uint16_t dqo_label_size;
+ const uint16_t dqo_flags;
+ const uint16_t dqo_flags_size;
+ const uint16_t dqo_serialnum;
+ const uint16_t dqo_serialnum_size;
+ const uint16_t dqo_width;
+ const uint16_t dqo_width_size;
+ const uint16_t dqo_running;
+ const uint16_t dqo_running_size;
+ const uint16_t dqo_suspend_cnt;
+ const uint16_t dqo_suspend_cnt_size;
+ const uint16_t dqo_target_queue;
+ const uint16_t dqo_target_queue_size;
+ const uint16_t dqo_priority;
+ const uint16_t dqo_priority_size;
+} dispatch_queue_offsets;
+
+static bool IsQueueSerial(dispatch_queue_t q) {
+ CHECK_EQ(dispatch_queue_offsets.dqo_width_size, 2);
+ uptr width = *(uint16_t *)(((uptr)q) + dispatch_queue_offsets.dqo_width);
+ CHECK_NE(width, 0);
+ return width == 1;
+}
+
+static dispatch_queue_t GetTargetQueueFromQueue(dispatch_queue_t q) {
+ CHECK_EQ(dispatch_queue_offsets.dqo_target_queue_size, 8);
+ dispatch_queue_t tq = *(
+ dispatch_queue_t *)(((uptr)q) + dispatch_queue_offsets.dqo_target_queue);
+ return tq;
+}
+
+static dispatch_queue_t GetTargetQueueFromSource(dispatch_source_t source) {
+ dispatch_queue_t tq = GetTargetQueueFromQueue((dispatch_queue_t)source);
+ CHECK_NE(tq, 0);
+ return tq;
+}
+
+static block_context_t *AllocContext(ThreadState *thr, uptr pc,
+ dispatch_queue_t queue, void *orig_context,
+ dispatch_function_t orig_work) {
+ block_context_t *new_context =
+ (block_context_t *)user_alloc_internal(thr, pc, sizeof(block_context_t));
+ new_context->queue = queue;
+ new_context->orig_context = orig_context;
+ new_context->orig_work = orig_work;
+ new_context->free_context_in_callback = true;
+ new_context->submitted_synchronously = false;
+ new_context->is_barrier_block = false;
+ new_context->non_queue_sync_object = 0;
+ return new_context;
+}
+
+#define GET_QUEUE_SYNC_VARS(context, q) \
+ bool is_queue_serial = q && IsQueueSerial(q); \
+ uptr sync_ptr = (uptr)q ?: context->non_queue_sync_object; \
+ uptr serial_sync = (uptr)sync_ptr; \
+ uptr concurrent_sync = sync_ptr ? ((uptr)sync_ptr) + sizeof(uptr) : 0; \
+ bool serial_task = context->is_barrier_block || is_queue_serial
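+// Semantics of the two sync addresses (as used by the pre/post hooks below):
+// every task acquires serial_sync, so it observes all prior barrier/serial
+// tasks; barrier and serial-queue tasks additionally acquire concurrent_sync
+// (observing prior non-barrier tasks) and release serial_sync, while
+// non-barrier tasks on a concurrent queue release only concurrent_sync.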
+
+static void dispatch_sync_pre_execute(ThreadState *thr, uptr pc,
+ block_context_t *context) {
+ uptr submit_sync = (uptr)context;
+ Acquire(thr, pc, submit_sync);
+
+ dispatch_queue_t q = context->queue;
+ do {
+ GET_QUEUE_SYNC_VARS(context, q);
+ if (serial_sync) Acquire(thr, pc, serial_sync);
+ if (serial_task && concurrent_sync) Acquire(thr, pc, concurrent_sync);
+
+ if (q) q = GetTargetQueueFromQueue(q);
+ } while (q);
+}
+
+static void dispatch_sync_post_execute(ThreadState *thr, uptr pc,
+ block_context_t *context) {
+ uptr submit_sync = (uptr)context;
+ if (context->submitted_synchronously) Release(thr, pc, submit_sync);
+
+ dispatch_queue_t q = context->queue;
+ do {
+ GET_QUEUE_SYNC_VARS(context, q);
+ if (serial_task && serial_sync) Release(thr, pc, serial_sync);
+ if (!serial_task && concurrent_sync) Release(thr, pc, concurrent_sync);
+
+ if (q) q = GetTargetQueueFromQueue(q);
+ } while (q);
+}
+
+static void dispatch_callback_wrap(void *param) {
+ SCOPED_INTERCEPTOR_RAW(dispatch_callback_wrap);
+ block_context_t *context = (block_context_t *)param;
+
+ dispatch_sync_pre_execute(thr, pc, context);
+
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START();
+ context->orig_work(context->orig_context);
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END();
+
+ dispatch_sync_post_execute(thr, pc, context);
+
+ if (context->free_context_in_callback) user_free(thr, pc, context);
+}
+
+static void invoke_block(void *param) {
+ dispatch_block_t block = (dispatch_block_t)param;
+ block();
+}
+
+static void invoke_and_release_block(void *param) {
+ dispatch_block_t block = (dispatch_block_t)param;
+ block();
+ Block_release(block);
+}
+
+#define DISPATCH_INTERCEPT_ASYNC_B(name, barrier) \
+ TSAN_INTERCEPTOR(void, name, dispatch_queue_t q, dispatch_block_t block) { \
+ SCOPED_TSAN_INTERCEPTOR(name, q, block); \
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START(); \
+ dispatch_block_t heap_block = Block_copy(block); \
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END(); \
+ block_context_t *new_context = \
+ AllocContext(thr, pc, q, heap_block, &invoke_and_release_block); \
+ new_context->is_barrier_block = barrier; \
+ Release(thr, pc, (uptr)new_context); \
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START(); \
+ REAL(name##_f)(q, new_context, dispatch_callback_wrap); \
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END(); \
+ }
+
+#define DISPATCH_INTERCEPT_SYNC_B(name, barrier) \
+ TSAN_INTERCEPTOR(void, name, dispatch_queue_t q, \
+ DISPATCH_NOESCAPE dispatch_block_t block) { \
+ SCOPED_TSAN_INTERCEPTOR(name, q, block); \
+ block_context_t new_context = { \
+ q, block, &invoke_block, false, true, barrier, 0}; \
+ Release(thr, pc, (uptr)&new_context); \
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START(); \
+ REAL(name##_f)(q, &new_context, dispatch_callback_wrap); \
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END(); \
+ Acquire(thr, pc, (uptr)&new_context); \
+ }
+
+#define DISPATCH_INTERCEPT_ASYNC_F(name, barrier) \
+ TSAN_INTERCEPTOR(void, name, dispatch_queue_t q, void *context, \
+ dispatch_function_t work) { \
+ SCOPED_TSAN_INTERCEPTOR(name, q, context, work); \
+ block_context_t *new_context = \
+ AllocContext(thr, pc, q, context, work); \
+ new_context->is_barrier_block = barrier; \
+ Release(thr, pc, (uptr)new_context); \
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START(); \
+ REAL(name)(q, new_context, dispatch_callback_wrap); \
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END(); \
+ }
+
+#define DISPATCH_INTERCEPT_SYNC_F(name, barrier) \
+ TSAN_INTERCEPTOR(void, name, dispatch_queue_t q, void *context, \
+ dispatch_function_t work) { \
+ SCOPED_TSAN_INTERCEPTOR(name, q, context, work); \
+ block_context_t new_context = { \
+ q, context, work, false, true, barrier, 0}; \
+ Release(thr, pc, (uptr)&new_context); \
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START(); \
+ REAL(name)(q, &new_context, dispatch_callback_wrap); \
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END(); \
+ Acquire(thr, pc, (uptr)&new_context); \
+ }
+
+#define DISPATCH_INTERCEPT(name, barrier) \
+ DISPATCH_INTERCEPT_ASYNC_F(name##_async_f, barrier) \
+ DISPATCH_INTERCEPT_ASYNC_B(name##_async, barrier) \
+ DISPATCH_INTERCEPT_SYNC_F(name##_sync_f, barrier) \
+ DISPATCH_INTERCEPT_SYNC_B(name##_sync, barrier)
+
+// We wrap dispatch_async, dispatch_sync and friends: each submission
+// allocates a new context that is used to synchronize (we Release() on the
+// context before submitting, and the callback Acquire()s it before executing
+// the original callback).
+DISPATCH_INTERCEPT(dispatch, false)
+DISPATCH_INTERCEPT(dispatch_barrier, true)
+
+DECLARE_REAL(void, dispatch_after_f, dispatch_time_t when,
+ dispatch_queue_t queue, void *context, dispatch_function_t work)
+
+TSAN_INTERCEPTOR(void, dispatch_after, dispatch_time_t when,
+ dispatch_queue_t queue, dispatch_block_t block) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_after, when, queue, block);
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START();
+ dispatch_block_t heap_block = Block_copy(block);
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END();
+ block_context_t *new_context =
+ AllocContext(thr, pc, queue, heap_block, &invoke_and_release_block);
+ Release(thr, pc, (uptr)new_context);
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START();
+ REAL(dispatch_after_f)(when, queue, new_context, dispatch_callback_wrap);
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END();
+}
+
+TSAN_INTERCEPTOR(void, dispatch_after_f, dispatch_time_t when,
+ dispatch_queue_t queue, void *context,
+ dispatch_function_t work) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_after_f, when, queue, context, work);
+ WRAP(dispatch_after)(when, queue, ^(void) {
+ work(context);
+ });
+}
+
+// GCD's dispatch_once implementation has a fast path that contains a racy read
+// and is inlined into user code. Furthermore, this fast path doesn't establish
+// a proper happens-before relation between the initialization and the code
+// following the call to dispatch_once. We could deal with this in instrumented
+// code, but there's not much we can do about it in system libraries. Let's
+// disable the fast path (by never storing the value ~0 to predicate), so the
+// interceptor is always called, and let's add proper release and acquire
+// semantics. Since TSan does not see its own atomic stores, the race on
+// predicate won't be reported - the only accesses to it that TSan sees are the
+// loads on the fast path. Loads don't race. Finally, dispatch_once is both a
+// macro and a real function; we want to intercept the function, so we need to
+// undefine the macro.
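+// The predicate below is driven through a small state machine instead of
+// libdispatch's internal ~0 value: 0 = uninitialized, 1 = initialization in
+// progress, 2 = done. Waiters spin-yield until they observe 2.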
+#undef dispatch_once
+TSAN_INTERCEPTOR(void, dispatch_once, dispatch_once_t *predicate,
+ DISPATCH_NOESCAPE dispatch_block_t block) {
+ SCOPED_INTERCEPTOR_RAW(dispatch_once, predicate, block);
+ atomic_uint32_t *a = reinterpret_cast<atomic_uint32_t *>(predicate);
+ u32 v = atomic_load(a, memory_order_acquire);
+ if (v == 0 &&
+ atomic_compare_exchange_strong(a, &v, 1, memory_order_relaxed)) {
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START();
+ block();
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END();
+ Release(thr, pc, (uptr)a);
+ atomic_store(a, 2, memory_order_release);
+ } else {
+ while (v != 2) {
+ internal_sched_yield();
+ v = atomic_load(a, memory_order_acquire);
+ }
+ Acquire(thr, pc, (uptr)a);
+ }
+}
+
+#undef dispatch_once_f
+TSAN_INTERCEPTOR(void, dispatch_once_f, dispatch_once_t *predicate,
+ void *context, dispatch_function_t function) {
+ SCOPED_INTERCEPTOR_RAW(dispatch_once_f, predicate, context, function);
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START();
+ WRAP(dispatch_once)(predicate, ^(void) {
+ function(context);
+ });
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END();
+}
+
+TSAN_INTERCEPTOR(long_t, dispatch_semaphore_signal,
+ dispatch_semaphore_t dsema) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_semaphore_signal, dsema);
+ Release(thr, pc, (uptr)dsema);
+ return REAL(dispatch_semaphore_signal)(dsema);
+}
+
+TSAN_INTERCEPTOR(long_t, dispatch_semaphore_wait, dispatch_semaphore_t dsema,
+ dispatch_time_t timeout) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_semaphore_wait, dsema, timeout);
+ long_t result = REAL(dispatch_semaphore_wait)(dsema, timeout);
+ if (result == 0) Acquire(thr, pc, (uptr)dsema);
+ return result;
+}
+
+TSAN_INTERCEPTOR(long_t, dispatch_group_wait, dispatch_group_t group,
+ dispatch_time_t timeout) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_group_wait, group, timeout);
+ long_t result = REAL(dispatch_group_wait)(group, timeout);
+ if (result == 0) Acquire(thr, pc, (uptr)group);
+ return result;
+}
+
+// Used, but not intercepted.
+extern "C" void dispatch_group_enter(dispatch_group_t group);
+
+TSAN_INTERCEPTOR(void, dispatch_group_leave, dispatch_group_t group) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_group_leave, group);
+ // Acquired in the group notification callback in dispatch_group_notify[_f].
+ Release(thr, pc, (uptr)group);
+ REAL(dispatch_group_leave)(group);
+}
+
+TSAN_INTERCEPTOR(void, dispatch_group_async, dispatch_group_t group,
+ dispatch_queue_t queue, dispatch_block_t block) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_group_async, group, queue, block);
+ dispatch_retain(group);
+ dispatch_group_enter(group);
+ __block dispatch_block_t block_copy = (dispatch_block_t)Block_copy(block);
+ WRAP(dispatch_async)(queue, ^(void) {
+ block_copy();
+ Block_release(block_copy);
+ WRAP(dispatch_group_leave)(group);
+ dispatch_release(group);
+ });
+}
+
+TSAN_INTERCEPTOR(void, dispatch_group_async_f, dispatch_group_t group,
+ dispatch_queue_t queue, void *context,
+ dispatch_function_t work) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_group_async_f, group, queue, context, work);
+ dispatch_retain(group);
+ dispatch_group_enter(group);
+ WRAP(dispatch_async)(queue, ^(void) {
+ work(context);
+ WRAP(dispatch_group_leave)(group);
+ dispatch_release(group);
+ });
+}
+
+DECLARE_REAL(void, dispatch_group_notify_f, dispatch_group_t group,
+ dispatch_queue_t q, void *context, dispatch_function_t work)
+
+TSAN_INTERCEPTOR(void, dispatch_group_notify, dispatch_group_t group,
+ dispatch_queue_t q, dispatch_block_t block) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_group_notify, group, q, block);
+
+  // Retain the group so it is still alive when the callback runs (otherwise
+  // it could already be destroyed). It is released in the callback.
+ dispatch_retain(group);
+
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START();
+ dispatch_block_t heap_block = Block_copy(^(void) {
+ {
+ SCOPED_INTERCEPTOR_RAW(dispatch_read_callback);
+ // Released when leaving the group (dispatch_group_leave).
+ Acquire(thr, pc, (uptr)group);
+ }
+ dispatch_release(group);
+ block();
+ });
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END();
+ block_context_t *new_context =
+ AllocContext(thr, pc, q, heap_block, &invoke_and_release_block);
+ new_context->is_barrier_block = true;
+ Release(thr, pc, (uptr)new_context);
+ REAL(dispatch_group_notify_f)(group, q, new_context, dispatch_callback_wrap);
+}
+
+TSAN_INTERCEPTOR(void, dispatch_group_notify_f, dispatch_group_t group,
+ dispatch_queue_t q, void *context, dispatch_function_t work) {
+ WRAP(dispatch_group_notify)(group, q, ^(void) { work(context); });
+}
+
+TSAN_INTERCEPTOR(void, dispatch_source_set_event_handler,
+ dispatch_source_t source, dispatch_block_t handler) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_source_set_event_handler, source, handler);
+ if (handler == nullptr)
+ return REAL(dispatch_source_set_event_handler)(source, nullptr);
+ dispatch_queue_t q = GetTargetQueueFromSource(source);
+ __block block_context_t new_context = {
+ q, handler, &invoke_block, false, false, false, 0 };
+ dispatch_block_t new_handler = Block_copy(^(void) {
+ new_context.orig_context = handler; // To explicitly capture "handler".
+ dispatch_callback_wrap(&new_context);
+ });
+ uptr submit_sync = (uptr)&new_context;
+ Release(thr, pc, submit_sync);
+ REAL(dispatch_source_set_event_handler)(source, new_handler);
+ Block_release(new_handler);
+}
+
+TSAN_INTERCEPTOR(void, dispatch_source_set_event_handler_f,
+ dispatch_source_t source, dispatch_function_t handler) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_source_set_event_handler_f, source, handler);
+ if (handler == nullptr)
+ return REAL(dispatch_source_set_event_handler)(source, nullptr);
+ dispatch_block_t block = ^(void) {
+ handler(dispatch_get_context(source));
+ };
+ WRAP(dispatch_source_set_event_handler)(source, block);
+}
+
+TSAN_INTERCEPTOR(void, dispatch_source_set_cancel_handler,
+ dispatch_source_t source, dispatch_block_t handler) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_source_set_cancel_handler, source, handler);
+ if (handler == nullptr)
+ return REAL(dispatch_source_set_cancel_handler)(source, nullptr);
+ dispatch_queue_t q = GetTargetQueueFromSource(source);
+ __block block_context_t new_context = {
+ q, handler, &invoke_block, false, false, false, 0};
+ dispatch_block_t new_handler = Block_copy(^(void) {
+ new_context.orig_context = handler; // To explicitly capture "handler".
+ dispatch_callback_wrap(&new_context);
+ });
+ uptr submit_sync = (uptr)&new_context;
+ Release(thr, pc, submit_sync);
+ REAL(dispatch_source_set_cancel_handler)(source, new_handler);
+ Block_release(new_handler);
+}
+
+TSAN_INTERCEPTOR(void, dispatch_source_set_cancel_handler_f,
+ dispatch_source_t source, dispatch_function_t handler) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_source_set_cancel_handler_f, source,
+ handler);
+ if (handler == nullptr)
+ return REAL(dispatch_source_set_cancel_handler)(source, nullptr);
+ dispatch_block_t block = ^(void) {
+ handler(dispatch_get_context(source));
+ };
+ WRAP(dispatch_source_set_cancel_handler)(source, block);
+}
+
+TSAN_INTERCEPTOR(void, dispatch_source_set_registration_handler,
+ dispatch_source_t source, dispatch_block_t handler) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_source_set_registration_handler, source,
+ handler);
+ if (handler == nullptr)
+ return REAL(dispatch_source_set_registration_handler)(source, nullptr);
+ dispatch_queue_t q = GetTargetQueueFromSource(source);
+ __block block_context_t new_context = {
+ q, handler, &invoke_block, false, false, false, 0};
+ dispatch_block_t new_handler = Block_copy(^(void) {
+ new_context.orig_context = handler; // To explicitly capture "handler".
+ dispatch_callback_wrap(&new_context);
+ });
+ uptr submit_sync = (uptr)&new_context;
+ Release(thr, pc, submit_sync);
+ REAL(dispatch_source_set_registration_handler)(source, new_handler);
+ Block_release(new_handler);
+}
+
+TSAN_INTERCEPTOR(void, dispatch_source_set_registration_handler_f,
+ dispatch_source_t source, dispatch_function_t handler) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_source_set_registration_handler_f, source,
+ handler);
+ if (handler == nullptr)
+ return REAL(dispatch_source_set_registration_handler)(source, nullptr);
+ dispatch_block_t block = ^(void) {
+ handler(dispatch_get_context(source));
+ };
+ WRAP(dispatch_source_set_registration_handler)(source, block);
+}
+
+TSAN_INTERCEPTOR(void, dispatch_apply, size_t iterations,
+ dispatch_queue_t queue,
+ DISPATCH_NOESCAPE void (^block)(size_t)) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_apply, iterations, queue, block);
+
+ u8 sync1, sync2;
+ uptr parent_to_child_sync = (uptr)&sync1;
+ uptr child_to_parent_sync = (uptr)&sync2;
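+  // Two one-way synchronization edges: the parent releases sync1 before
+  // submitting so every iteration acquires the parent's prior writes, and
+  // each iteration releases sync2 so the parent's final acquire observes
+  // all iterations' writes.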
+
+ Release(thr, pc, parent_to_child_sync);
+ void (^new_block)(size_t) = ^(size_t iteration) {
+ SCOPED_INTERCEPTOR_RAW(dispatch_apply);
+ Acquire(thr, pc, parent_to_child_sync);
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START();
+ block(iteration);
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END();
+ Release(thr, pc, child_to_parent_sync);
+ };
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START();
+ REAL(dispatch_apply)(iterations, queue, new_block);
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END();
+ Acquire(thr, pc, child_to_parent_sync);
+}
+
+static void invoke_block_iteration(void *param, size_t iteration) {
+ auto block = (void (^)(size_t)) param;
+ block(iteration);
+}
+
+TSAN_INTERCEPTOR(void, dispatch_apply_f, size_t iterations,
+ dispatch_queue_t queue, void *context,
+ void (*work)(void *, size_t)) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_apply_f, iterations, queue, context, work);
+
+ // Unfortunately, we cannot delegate to dispatch_apply, since libdispatch
+ // implements dispatch_apply in terms of dispatch_apply_f.
+ u8 sync1, sync2;
+ uptr parent_to_child_sync = (uptr)&sync1;
+ uptr child_to_parent_sync = (uptr)&sync2;
+
+ Release(thr, pc, parent_to_child_sync);
+ void (^new_block)(size_t) = ^(size_t iteration) {
+ SCOPED_INTERCEPTOR_RAW(dispatch_apply_f);
+ Acquire(thr, pc, parent_to_child_sync);
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START();
+ work(context, iteration);
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END();
+ Release(thr, pc, child_to_parent_sync);
+ };
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START();
+ REAL(dispatch_apply_f)(iterations, queue, new_block, invoke_block_iteration);
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END();
+ Acquire(thr, pc, child_to_parent_sync);
+}
+
+DECLARE_REAL_AND_INTERCEPTOR(void, free, void *ptr)
+DECLARE_REAL_AND_INTERCEPTOR(int, munmap, void *addr, long_t sz)
+
+TSAN_INTERCEPTOR(dispatch_data_t, dispatch_data_create, const void *buffer,
+ size_t size, dispatch_queue_t q, dispatch_block_t destructor) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_data_create, buffer, size, q, destructor);
+ if ((q == nullptr) || (destructor == DISPATCH_DATA_DESTRUCTOR_DEFAULT))
+ return REAL(dispatch_data_create)(buffer, size, q, destructor);
+
+ if (destructor == DISPATCH_DATA_DESTRUCTOR_FREE)
+ destructor = ^(void) { WRAP(free)((void *)(uintptr_t)buffer); };
+ else if (destructor == DISPATCH_DATA_DESTRUCTOR_MUNMAP)
+ destructor = ^(void) { WRAP(munmap)((void *)(uintptr_t)buffer, size); };
+
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START();
+ dispatch_block_t heap_block = Block_copy(destructor);
+ SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END();
+ block_context_t *new_context =
+ AllocContext(thr, pc, q, heap_block, &invoke_and_release_block);
+ uptr submit_sync = (uptr)new_context;
+ Release(thr, pc, submit_sync);
+ return REAL(dispatch_data_create)(buffer, size, q, ^(void) {
+ dispatch_callback_wrap(new_context);
+ });
+}
+
+typedef void (^fd_handler_t)(dispatch_data_t data, int error);
+typedef void (^cleanup_handler_t)(int error);
+
+TSAN_INTERCEPTOR(void, dispatch_read, dispatch_fd_t fd, size_t length,
+ dispatch_queue_t q, fd_handler_t h) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_read, fd, length, q, h);
+ __block block_context_t new_context = {
+ q, nullptr, &invoke_block, false, false, false, 0};
+ fd_handler_t new_h = Block_copy(^(dispatch_data_t data, int error) {
+ new_context.orig_context = ^(void) {
+ h(data, error);
+ };
+ dispatch_callback_wrap(&new_context);
+ });
+ uptr submit_sync = (uptr)&new_context;
+ Release(thr, pc, submit_sync);
+ REAL(dispatch_read)(fd, length, q, new_h);
+ Block_release(new_h);
+}
+
+TSAN_INTERCEPTOR(void, dispatch_write, dispatch_fd_t fd, dispatch_data_t data,
+ dispatch_queue_t q, fd_handler_t h) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_write, fd, data, q, h);
+ __block block_context_t new_context = {
+ q, nullptr, &invoke_block, false, false, false, 0};
+ fd_handler_t new_h = Block_copy(^(dispatch_data_t data, int error) {
+ new_context.orig_context = ^(void) {
+ h(data, error);
+ };
+ dispatch_callback_wrap(&new_context);
+ });
+ uptr submit_sync = (uptr)&new_context;
+ Release(thr, pc, submit_sync);
+ REAL(dispatch_write)(fd, data, q, new_h);
+ Block_release(new_h);
+}
+
+TSAN_INTERCEPTOR(void, dispatch_io_read, dispatch_io_t channel, off_t offset,
+ size_t length, dispatch_queue_t q, dispatch_io_handler_t h) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_io_read, channel, offset, length, q, h);
+ __block block_context_t new_context = {
+ q, nullptr, &invoke_block, false, false, false, 0};
+ dispatch_io_handler_t new_h =
+ Block_copy(^(bool done, dispatch_data_t data, int error) {
+ new_context.orig_context = ^(void) {
+ h(done, data, error);
+ };
+ dispatch_callback_wrap(&new_context);
+ });
+ uptr submit_sync = (uptr)&new_context;
+ Release(thr, pc, submit_sync);
+ REAL(dispatch_io_read)(channel, offset, length, q, new_h);
+ Block_release(new_h);
+}
+
+TSAN_INTERCEPTOR(void, dispatch_io_write, dispatch_io_t channel, off_t offset,
+ dispatch_data_t data, dispatch_queue_t q,
+ dispatch_io_handler_t h) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_io_write, channel, offset, data, q, h);
+ __block block_context_t new_context = {
+ q, nullptr, &invoke_block, false, false, false, 0};
+ dispatch_io_handler_t new_h =
+ Block_copy(^(bool done, dispatch_data_t data, int error) {
+ new_context.orig_context = ^(void) {
+ h(done, data, error);
+ };
+ dispatch_callback_wrap(&new_context);
+ });
+ uptr submit_sync = (uptr)&new_context;
+ Release(thr, pc, submit_sync);
+ REAL(dispatch_io_write)(channel, offset, data, q, new_h);
+ Block_release(new_h);
+}
+
+TSAN_INTERCEPTOR(void, dispatch_io_barrier, dispatch_io_t channel,
+ dispatch_block_t barrier) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_io_barrier, channel, barrier);
+ __block block_context_t new_context = {
+ nullptr, nullptr, &invoke_block, false, false, false, 0};
+ new_context.non_queue_sync_object = (uptr)channel;
+ new_context.is_barrier_block = true;
+ dispatch_block_t new_block = Block_copy(^(void) {
+ new_context.orig_context = ^(void) {
+ barrier();
+ };
+ dispatch_callback_wrap(&new_context);
+ });
+ uptr submit_sync = (uptr)&new_context;
+ Release(thr, pc, submit_sync);
+ REAL(dispatch_io_barrier)(channel, new_block);
+ Block_release(new_block);
+}
+
+TSAN_INTERCEPTOR(dispatch_io_t, dispatch_io_create, dispatch_io_type_t type,
+ dispatch_fd_t fd, dispatch_queue_t q, cleanup_handler_t h) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_io_create, type, fd, q, h);
+ __block dispatch_io_t new_channel = nullptr;
+ __block block_context_t new_context = {
+ q, nullptr, &invoke_block, false, false, false, 0};
+ cleanup_handler_t new_h = Block_copy(^(int error) {
+ {
+ SCOPED_INTERCEPTOR_RAW(dispatch_io_create_callback);
+ Acquire(thr, pc, (uptr)new_channel); // Release() in dispatch_io_close.
+ }
+ new_context.orig_context = ^(void) {
+ h(error);
+ };
+ dispatch_callback_wrap(&new_context);
+ });
+ uptr submit_sync = (uptr)&new_context;
+ Release(thr, pc, submit_sync);
+ new_channel = REAL(dispatch_io_create)(type, fd, q, new_h);
+ Block_release(new_h);
+ return new_channel;
+}
+
+TSAN_INTERCEPTOR(dispatch_io_t, dispatch_io_create_with_path,
+ dispatch_io_type_t type, const char *path, int oflag,
+ mode_t mode, dispatch_queue_t q, cleanup_handler_t h) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_io_create_with_path, type, path, oflag, mode,
+ q, h);
+ __block dispatch_io_t new_channel = nullptr;
+ __block block_context_t new_context = {
+ q, nullptr, &invoke_block, false, false, false, 0};
+ cleanup_handler_t new_h = Block_copy(^(int error) {
+ {
+ SCOPED_INTERCEPTOR_RAW(dispatch_io_create_callback);
+ Acquire(thr, pc, (uptr)new_channel); // Release() in dispatch_io_close.
+ }
+ new_context.orig_context = ^(void) {
+ h(error);
+ };
+ dispatch_callback_wrap(&new_context);
+ });
+ uptr submit_sync = (uptr)&new_context;
+ Release(thr, pc, submit_sync);
+ new_channel =
+ REAL(dispatch_io_create_with_path)(type, path, oflag, mode, q, new_h);
+ Block_release(new_h);
+ return new_channel;
+}
+
+TSAN_INTERCEPTOR(dispatch_io_t, dispatch_io_create_with_io,
+ dispatch_io_type_t type, dispatch_io_t io, dispatch_queue_t q,
+ cleanup_handler_t h) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_io_create_with_io, type, io, q, h);
+ __block dispatch_io_t new_channel = nullptr;
+ __block block_context_t new_context = {
+ q, nullptr, &invoke_block, false, false, false, 0};
+ cleanup_handler_t new_h = Block_copy(^(int error) {
+ {
+ SCOPED_INTERCEPTOR_RAW(dispatch_io_create_callback);
+ Acquire(thr, pc, (uptr)new_channel); // Release() in dispatch_io_close.
+ }
+ new_context.orig_context = ^(void) {
+ h(error);
+ };
+ dispatch_callback_wrap(&new_context);
+ });
+ uptr submit_sync = (uptr)&new_context;
+ Release(thr, pc, submit_sync);
+ new_channel = REAL(dispatch_io_create_with_io)(type, io, q, new_h);
+ Block_release(new_h);
+ return new_channel;
+}
+
+TSAN_INTERCEPTOR(void, dispatch_io_close, dispatch_io_t channel,
+ dispatch_io_close_flags_t flags) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_io_close, channel, flags);
+ Release(thr, pc, (uptr)channel); // Acquire() in dispatch_io_create[_*].
+ return REAL(dispatch_io_close)(channel, flags);
+}
+
+// Resuming a suspended queue needs to synchronize with all subsequent
+// executions of blocks in that queue.
+TSAN_INTERCEPTOR(void, dispatch_resume, dispatch_object_t o) {
+ SCOPED_TSAN_INTERCEPTOR(dispatch_resume, o);
+ Release(thr, pc, (uptr)o); // Synchronizes with the Acquire() on serial_sync
+ // in dispatch_sync_pre_execute
+ return REAL(dispatch_resume)(o);
+}
+
+void InitializeLibdispatchInterceptors() {
+ INTERCEPT_FUNCTION(dispatch_async);
+ INTERCEPT_FUNCTION(dispatch_async_f);
+ INTERCEPT_FUNCTION(dispatch_sync);
+ INTERCEPT_FUNCTION(dispatch_sync_f);
+ INTERCEPT_FUNCTION(dispatch_barrier_async);
+ INTERCEPT_FUNCTION(dispatch_barrier_async_f);
+ INTERCEPT_FUNCTION(dispatch_barrier_sync);
+ INTERCEPT_FUNCTION(dispatch_barrier_sync_f);
+ INTERCEPT_FUNCTION(dispatch_after);
+ INTERCEPT_FUNCTION(dispatch_after_f);
+ INTERCEPT_FUNCTION(dispatch_once);
+ INTERCEPT_FUNCTION(dispatch_once_f);
+ INTERCEPT_FUNCTION(dispatch_semaphore_signal);
+ INTERCEPT_FUNCTION(dispatch_semaphore_wait);
+ INTERCEPT_FUNCTION(dispatch_group_wait);
+ INTERCEPT_FUNCTION(dispatch_group_leave);
+ INTERCEPT_FUNCTION(dispatch_group_async);
+ INTERCEPT_FUNCTION(dispatch_group_async_f);
+ INTERCEPT_FUNCTION(dispatch_group_notify);
+ INTERCEPT_FUNCTION(dispatch_group_notify_f);
+ INTERCEPT_FUNCTION(dispatch_source_set_event_handler);
+ INTERCEPT_FUNCTION(dispatch_source_set_event_handler_f);
+ INTERCEPT_FUNCTION(dispatch_source_set_cancel_handler);
+ INTERCEPT_FUNCTION(dispatch_source_set_cancel_handler_f);
+ INTERCEPT_FUNCTION(dispatch_source_set_registration_handler);
+ INTERCEPT_FUNCTION(dispatch_source_set_registration_handler_f);
+ INTERCEPT_FUNCTION(dispatch_apply);
+ INTERCEPT_FUNCTION(dispatch_apply_f);
+ INTERCEPT_FUNCTION(dispatch_data_create);
+ INTERCEPT_FUNCTION(dispatch_read);
+ INTERCEPT_FUNCTION(dispatch_write);
+ INTERCEPT_FUNCTION(dispatch_io_read);
+ INTERCEPT_FUNCTION(dispatch_io_write);
+ INTERCEPT_FUNCTION(dispatch_io_barrier);
+ INTERCEPT_FUNCTION(dispatch_io_create);
+ INTERCEPT_FUNCTION(dispatch_io_create_with_path);
+ INTERCEPT_FUNCTION(dispatch_io_create_with_io);
+ INTERCEPT_FUNCTION(dispatch_io_close);
+ INTERCEPT_FUNCTION(dispatch_resume);
+}
+
+} // namespace __tsan
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_malloc_mac.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_malloc_mac.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_malloc_mac.cc (revision 351984)
@@ -0,0 +1,71 @@
+//===-- tsan_malloc_mac.cc ------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Mac-specific malloc interception.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#if SANITIZER_MAC
+
+#include "sanitizer_common/sanitizer_errno.h"
+#include "tsan_interceptors.h"
+#include "tsan_stack_trace.h"
+
+using namespace __tsan;
+#define COMMON_MALLOC_ZONE_NAME "tsan"
+#define COMMON_MALLOC_ENTER()
+#define COMMON_MALLOC_SANITIZER_INITIALIZED (cur_thread()->is_inited)
+#define COMMON_MALLOC_FORCE_LOCK()
+#define COMMON_MALLOC_FORCE_UNLOCK()
+#define COMMON_MALLOC_MEMALIGN(alignment, size) \
+ void *p = \
+ user_memalign(cur_thread(), StackTrace::GetCurrentPc(), alignment, size)
+#define COMMON_MALLOC_MALLOC(size) \
+ if (in_symbolizer()) return InternalAlloc(size); \
+ SCOPED_INTERCEPTOR_RAW(malloc, size); \
+ void *p = user_alloc(thr, pc, size)
+#define COMMON_MALLOC_REALLOC(ptr, size) \
+ if (in_symbolizer()) return InternalRealloc(ptr, size); \
+ SCOPED_INTERCEPTOR_RAW(realloc, ptr, size); \
+ void *p = user_realloc(thr, pc, ptr, size)
+#define COMMON_MALLOC_CALLOC(count, size) \
+ if (in_symbolizer()) return InternalCalloc(count, size); \
+ SCOPED_INTERCEPTOR_RAW(calloc, size, count); \
+ void *p = user_calloc(thr, pc, size, count)
+#define COMMON_MALLOC_POSIX_MEMALIGN(memptr, alignment, size) \
+ if (in_symbolizer()) { \
+ void *p = InternalAlloc(size, nullptr, alignment); \
+ if (!p) return errno_ENOMEM; \
+ *memptr = p; \
+ return 0; \
+ } \
+ SCOPED_INTERCEPTOR_RAW(posix_memalign, memptr, alignment, size); \
+ int res = user_posix_memalign(thr, pc, memptr, alignment, size);
+#define COMMON_MALLOC_VALLOC(size) \
+ if (in_symbolizer()) \
+ return InternalAlloc(size, nullptr, GetPageSizeCached()); \
+ SCOPED_INTERCEPTOR_RAW(valloc, size); \
+ void *p = user_valloc(thr, pc, size)
+#define COMMON_MALLOC_FREE(ptr) \
+ if (in_symbolizer()) return InternalFree(ptr); \
+ SCOPED_INTERCEPTOR_RAW(free, ptr); \
+ user_free(thr, pc, ptr)
+#define COMMON_MALLOC_SIZE(ptr) uptr size = user_alloc_usable_size(ptr);
+#define COMMON_MALLOC_FILL_STATS(zone, stats)
+#define COMMON_MALLOC_REPORT_UNKNOWN_REALLOC(ptr, zone_ptr, zone_name) \
+ (void)zone_name; \
+ Report("mz_realloc(%p) -- attempting to realloc unallocated memory.\n", ptr);
+#define COMMON_MALLOC_NAMESPACE __tsan
+#define COMMON_MALLOC_HAS_ZONE_ENUMERATOR 0
+#define COMMON_MALLOC_HAS_EXTRA_INTROSPECTION_INIT 0
+
+#include "sanitizer_common/sanitizer_malloc_mac.inc"
+
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_malloc_mac.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_md5.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_md5.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_md5.cc (revision 351984)
@@ -0,0 +1,250 @@
+//===-- tsan_md5.cc -------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_defs.h"
+
+namespace __tsan {
+
+#define F(x, y, z) ((z) ^ ((x) & ((y) ^ (z))))
+#define G(x, y, z) ((y) ^ ((z) & ((x) ^ (y))))
+#define H(x, y, z) ((x) ^ (y) ^ (z))
+#define I(x, y, z) ((y) ^ ((x) | ~(z)))
+
+#define STEP(f, a, b, c, d, x, t, s) \
+ (a) += f((b), (c), (d)) + (x) + (t); \
+ (a) = (((a) << (s)) | (((a) & 0xffffffff) >> (32 - (s)))); \
+ (a) += (b);
+
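+// STEP performs one MD5 round operation,
+//   a = b + rotl32(a + f(b, c, d) + x + t, s);
+// the "& 0xffffffff" keeps the rotate correct even if MD5_u32plus is wider
+// than 32 bits.
+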
+#define SET(n) \
+ (*(const MD5_u32plus *)&ptr[(n) * 4])
+#define GET(n) \
+ SET(n)
+
+typedef unsigned int MD5_u32plus;
+typedef unsigned long ulong_t; // NOLINT
+
+typedef struct {
+ MD5_u32plus lo, hi;
+ MD5_u32plus a, b, c, d;
+ unsigned char buffer[64];
+ MD5_u32plus block[16];
+} MD5_CTX;
+
+static const void *body(MD5_CTX *ctx, const void *data, ulong_t size) {
+ const unsigned char *ptr = (const unsigned char *)data;
+ MD5_u32plus a, b, c, d;
+ MD5_u32plus saved_a, saved_b, saved_c, saved_d;
+
+ a = ctx->a;
+ b = ctx->b;
+ c = ctx->c;
+ d = ctx->d;
+
+ do {
+ saved_a = a;
+ saved_b = b;
+ saved_c = c;
+ saved_d = d;
+
+ STEP(F, a, b, c, d, SET(0), 0xd76aa478, 7)
+ STEP(F, d, a, b, c, SET(1), 0xe8c7b756, 12)
+ STEP(F, c, d, a, b, SET(2), 0x242070db, 17)
+ STEP(F, b, c, d, a, SET(3), 0xc1bdceee, 22)
+ STEP(F, a, b, c, d, SET(4), 0xf57c0faf, 7)
+ STEP(F, d, a, b, c, SET(5), 0x4787c62a, 12)
+ STEP(F, c, d, a, b, SET(6), 0xa8304613, 17)
+ STEP(F, b, c, d, a, SET(7), 0xfd469501, 22)
+ STEP(F, a, b, c, d, SET(8), 0x698098d8, 7)
+ STEP(F, d, a, b, c, SET(9), 0x8b44f7af, 12)
+ STEP(F, c, d, a, b, SET(10), 0xffff5bb1, 17)
+ STEP(F, b, c, d, a, SET(11), 0x895cd7be, 22)
+ STEP(F, a, b, c, d, SET(12), 0x6b901122, 7)
+ STEP(F, d, a, b, c, SET(13), 0xfd987193, 12)
+ STEP(F, c, d, a, b, SET(14), 0xa679438e, 17)
+ STEP(F, b, c, d, a, SET(15), 0x49b40821, 22)
+
+ STEP(G, a, b, c, d, GET(1), 0xf61e2562, 5)
+ STEP(G, d, a, b, c, GET(6), 0xc040b340, 9)
+ STEP(G, c, d, a, b, GET(11), 0x265e5a51, 14)
+ STEP(G, b, c, d, a, GET(0), 0xe9b6c7aa, 20)
+ STEP(G, a, b, c, d, GET(5), 0xd62f105d, 5)
+ STEP(G, d, a, b, c, GET(10), 0x02441453, 9)
+ STEP(G, c, d, a, b, GET(15), 0xd8a1e681, 14)
+ STEP(G, b, c, d, a, GET(4), 0xe7d3fbc8, 20)
+ STEP(G, a, b, c, d, GET(9), 0x21e1cde6, 5)
+ STEP(G, d, a, b, c, GET(14), 0xc33707d6, 9)
+ STEP(G, c, d, a, b, GET(3), 0xf4d50d87, 14)
+ STEP(G, b, c, d, a, GET(8), 0x455a14ed, 20)
+ STEP(G, a, b, c, d, GET(13), 0xa9e3e905, 5)
+ STEP(G, d, a, b, c, GET(2), 0xfcefa3f8, 9)
+ STEP(G, c, d, a, b, GET(7), 0x676f02d9, 14)
+ STEP(G, b, c, d, a, GET(12), 0x8d2a4c8a, 20)
+
+ STEP(H, a, b, c, d, GET(5), 0xfffa3942, 4)
+ STEP(H, d, a, b, c, GET(8), 0x8771f681, 11)
+ STEP(H, c, d, a, b, GET(11), 0x6d9d6122, 16)
+ STEP(H, b, c, d, a, GET(14), 0xfde5380c, 23)
+ STEP(H, a, b, c, d, GET(1), 0xa4beea44, 4)
+ STEP(H, d, a, b, c, GET(4), 0x4bdecfa9, 11)
+ STEP(H, c, d, a, b, GET(7), 0xf6bb4b60, 16)
+ STEP(H, b, c, d, a, GET(10), 0xbebfbc70, 23)
+ STEP(H, a, b, c, d, GET(13), 0x289b7ec6, 4)
+ STEP(H, d, a, b, c, GET(0), 0xeaa127fa, 11)
+ STEP(H, c, d, a, b, GET(3), 0xd4ef3085, 16)
+ STEP(H, b, c, d, a, GET(6), 0x04881d05, 23)
+ STEP(H, a, b, c, d, GET(9), 0xd9d4d039, 4)
+ STEP(H, d, a, b, c, GET(12), 0xe6db99e5, 11)
+ STEP(H, c, d, a, b, GET(15), 0x1fa27cf8, 16)
+ STEP(H, b, c, d, a, GET(2), 0xc4ac5665, 23)
+
+ STEP(I, a, b, c, d, GET(0), 0xf4292244, 6)
+ STEP(I, d, a, b, c, GET(7), 0x432aff97, 10)
+ STEP(I, c, d, a, b, GET(14), 0xab9423a7, 15)
+ STEP(I, b, c, d, a, GET(5), 0xfc93a039, 21)
+ STEP(I, a, b, c, d, GET(12), 0x655b59c3, 6)
+ STEP(I, d, a, b, c, GET(3), 0x8f0ccc92, 10)
+ STEP(I, c, d, a, b, GET(10), 0xffeff47d, 15)
+ STEP(I, b, c, d, a, GET(1), 0x85845dd1, 21)
+ STEP(I, a, b, c, d, GET(8), 0x6fa87e4f, 6)
+ STEP(I, d, a, b, c, GET(15), 0xfe2ce6e0, 10)
+ STEP(I, c, d, a, b, GET(6), 0xa3014314, 15)
+ STEP(I, b, c, d, a, GET(13), 0x4e0811a1, 21)
+ STEP(I, a, b, c, d, GET(4), 0xf7537e82, 6)
+ STEP(I, d, a, b, c, GET(11), 0xbd3af235, 10)
+ STEP(I, c, d, a, b, GET(2), 0x2ad7d2bb, 15)
+ STEP(I, b, c, d, a, GET(9), 0xeb86d391, 21)
+
+ a += saved_a;
+ b += saved_b;
+ c += saved_c;
+ d += saved_d;
+
+ ptr += 64;
+ } while (size -= 64);
+
+ ctx->a = a;
+ ctx->b = b;
+ ctx->c = c;
+ ctx->d = d;
+
+ return ptr;
+}
+
+#undef F
+#undef G
+#undef H
+#undef I
+#undef STEP
+#undef SET
+#undef GET
+
+void MD5_Init(MD5_CTX *ctx) {
+ ctx->a = 0x67452301;
+ ctx->b = 0xefcdab89;
+ ctx->c = 0x98badcfe;
+ ctx->d = 0x10325476;
+
+ ctx->lo = 0;
+ ctx->hi = 0;
+}
+
+void MD5_Update(MD5_CTX *ctx, const void *data, ulong_t size) {
+ MD5_u32plus saved_lo;
+ ulong_t used, free;
+
+ saved_lo = ctx->lo;
+ if ((ctx->lo = (saved_lo + size) & 0x1fffffff) < saved_lo)
+ ctx->hi++;
+ ctx->hi += size >> 29;
+
+ used = saved_lo & 0x3f;
+
+ if (used) {
+ free = 64 - used;
+
+ if (size < free) {
+ internal_memcpy(&ctx->buffer[used], data, size);
+ return;
+ }
+
+ internal_memcpy(&ctx->buffer[used], data, free);
+ data = (const unsigned char *)data + free;
+ size -= free;
+ body(ctx, ctx->buffer, 64);
+ }
+
+ if (size >= 64) {
+ data = body(ctx, data, size & ~(ulong_t)0x3f);
+ size &= 0x3f;
+ }
+
+ internal_memcpy(ctx->buffer, data, size);
+}
+
+void MD5_Final(unsigned char *result, MD5_CTX *ctx) {
+ ulong_t used, free;
+
+ used = ctx->lo & 0x3f;
+
+ ctx->buffer[used++] = 0x80;
+
+ free = 64 - used;
+
+ if (free < 8) {
+ internal_memset(&ctx->buffer[used], 0, free);
+ body(ctx, ctx->buffer, 64);
+ used = 0;
+ free = 64;
+ }
+
+ internal_memset(&ctx->buffer[used], 0, free - 8);
+
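+  // The shift below converts the byte count into a bit count; the 64-bit
+  // message length is then stored little-endian in buffer[56..63], as MD5
+  // padding (RFC 1321) requires.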
+ ctx->lo <<= 3;
+ ctx->buffer[56] = ctx->lo;
+ ctx->buffer[57] = ctx->lo >> 8;
+ ctx->buffer[58] = ctx->lo >> 16;
+ ctx->buffer[59] = ctx->lo >> 24;
+ ctx->buffer[60] = ctx->hi;
+ ctx->buffer[61] = ctx->hi >> 8;
+ ctx->buffer[62] = ctx->hi >> 16;
+ ctx->buffer[63] = ctx->hi >> 24;
+
+ body(ctx, ctx->buffer, 64);
+
+ result[0] = ctx->a;
+ result[1] = ctx->a >> 8;
+ result[2] = ctx->a >> 16;
+ result[3] = ctx->a >> 24;
+ result[4] = ctx->b;
+ result[5] = ctx->b >> 8;
+ result[6] = ctx->b >> 16;
+ result[7] = ctx->b >> 24;
+ result[8] = ctx->c;
+ result[9] = ctx->c >> 8;
+ result[10] = ctx->c >> 16;
+ result[11] = ctx->c >> 24;
+ result[12] = ctx->d;
+ result[13] = ctx->d >> 8;
+ result[14] = ctx->d >> 16;
+ result[15] = ctx->d >> 24;
+
+ internal_memset(ctx, 0, sizeof(*ctx));
+}
+
+MD5Hash md5_hash(const void *data, uptr size) {
+ MD5Hash res;
+ MD5_CTX ctx;
+ MD5_Init(&ctx);
+ MD5_Update(&ctx, data, size);
+ MD5_Final((unsigned char*)&res.hash[0], &ctx);
+ return res;
+}
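+
+// A minimal usage sketch of the helper above (MD5Hash comes from tsan_defs.h
+// and holds the 128-bit digest as two u64 words); kept out of the build:
+#if 0
+static void Md5UsageSketch() {
+  const char data[] = "abc";
+  MD5Hash h = md5_hash(data, sizeof(data) - 1);
+  // h.hash[0] and h.hash[1] now hold the 128-bit MD5 digest of "abc".
+}
+#endif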
+} // namespace __tsan
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_mman.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_mman.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_mman.cc (revision 351984)
@@ -0,0 +1,396 @@
+//===-- tsan_mman.cc ------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_allocator_checks.h"
+#include "sanitizer_common/sanitizer_allocator_interface.h"
+#include "sanitizer_common/sanitizer_allocator_report.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_errno.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "tsan_mman.h"
+#include "tsan_rtl.h"
+#include "tsan_report.h"
+#include "tsan_flags.h"
+
+// May be overridden by the front-end.
+SANITIZER_WEAK_DEFAULT_IMPL
+void __sanitizer_malloc_hook(void *ptr, uptr size) {
+ (void)ptr;
+ (void)size;
+}
+
+SANITIZER_WEAK_DEFAULT_IMPL
+void __sanitizer_free_hook(void *ptr) {
+ (void)ptr;
+}
+
+namespace __tsan {
+
+struct MapUnmapCallback {
+ void OnMap(uptr p, uptr size) const { }
+ void OnUnmap(uptr p, uptr size) const {
+ // We are about to unmap a chunk of user memory.
+ // Mark the corresponding shadow memory as not needed.
+ DontNeedShadowFor(p, size);
+ // Mark the corresponding meta shadow memory as not needed.
+ // Note the block does not contain any meta info at this point
+ // (this happens after free).
+ const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
+ const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
+ // Block came from LargeMmapAllocator, so must be large.
+ // We rely on this in the calculations below.
+ CHECK_GE(size, 2 * kPageSize);
+ uptr diff = RoundUp(p, kPageSize) - p;
+ if (diff != 0) {
+ p += diff;
+ size -= diff;
+ }
+ diff = p + size - RoundDown(p + size, kPageSize);
+ if (diff != 0)
+ size -= diff;
+ uptr p_meta = (uptr)MemToMeta(p);
+ ReleaseMemoryPagesToOS(p_meta, p_meta + size / kMetaRatio);
+ }
+};
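+
+// A minimal sketch of the trimming above, with concrete numbers (using
+// kPageSize == 0x8000 purely for the arithmetic); kept out of the build:
+#if 0
+static void TrimExampleSketch() {
+  uptr p = 0x10100, size = 0x18000;
+  const uptr kPageSize = 0x8000;
+  uptr diff = RoundUp(p, kPageSize) - p;  // 0x7f00: skip partial leading page
+  p += diff;
+  size -= diff;
+  diff = p + size - RoundDown(p + size, kPageSize);  // 0x100: drop the tail
+  size -= diff;
+  // Only whole pages remain: p == 0x18000, size == 0x10000.
+}
+#endif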
+
+static char allocator_placeholder[sizeof(Allocator)] ALIGNED(64);
+Allocator *allocator() {
+ return reinterpret_cast<Allocator*>(&allocator_placeholder);
+}
+
+struct GlobalProc {
+ Mutex mtx;
+ Processor *proc;
+
+ GlobalProc()
+ : mtx(MutexTypeGlobalProc, StatMtxGlobalProc)
+ , proc(ProcCreate()) {
+ }
+};
+
+static char global_proc_placeholder[sizeof(GlobalProc)] ALIGNED(64);
+GlobalProc *global_proc() {
+ return reinterpret_cast<GlobalProc*>(&global_proc_placeholder);
+}
+
+ScopedGlobalProcessor::ScopedGlobalProcessor() {
+ GlobalProc *gp = global_proc();
+ ThreadState *thr = cur_thread();
+ if (thr->proc())
+ return;
+ // If we don't have a proc, use the global one.
+  // There are currently only two known cases where this path is triggered:
+ // __interceptor_free
+ // __nptl_deallocate_tsd
+ // start_thread
+ // clone
+ // and:
+ // ResetRange
+ // __interceptor_munmap
+ // __deallocate_stack
+ // start_thread
+ // clone
+  // Ideally, we destroy thread state (and unwire proc) when a thread actually
+  // exits (i.e. when we join/wait for it); then we would not need the global
+  // proc.
+ gp->mtx.Lock();
+ ProcWire(gp->proc, thr);
+}
+
+ScopedGlobalProcessor::~ScopedGlobalProcessor() {
+ GlobalProc *gp = global_proc();
+ ThreadState *thr = cur_thread();
+ if (thr->proc() != gp->proc)
+ return;
+ ProcUnwire(gp->proc, thr);
+ gp->mtx.Unlock();
+}
+
+void InitializeAllocator() {
+ SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
+ allocator()->Init(common_flags()->allocator_release_to_os_interval_ms);
+}
+
+void InitializeAllocatorLate() {
+ new(global_proc()) GlobalProc();
+}
+
+void AllocatorProcStart(Processor *proc) {
+ allocator()->InitCache(&proc->alloc_cache);
+ internal_allocator()->InitCache(&proc->internal_alloc_cache);
+}
+
+void AllocatorProcFinish(Processor *proc) {
+ allocator()->DestroyCache(&proc->alloc_cache);
+ internal_allocator()->DestroyCache(&proc->internal_alloc_cache);
+}
+
+void AllocatorPrintStats() {
+ allocator()->PrintStats();
+}
+
+static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
+ if (atomic_load_relaxed(&thr->in_signal_handler) == 0 ||
+ !flags()->report_signal_unsafe)
+ return;
+ VarSizeStackTrace stack;
+ ObtainCurrentStack(thr, pc, &stack);
+ if (IsFiredSuppression(ctx, ReportTypeSignalUnsafe, stack))
+ return;
+ ThreadRegistryLock l(ctx->thread_registry);
+ ScopedReport rep(ReportTypeSignalUnsafe);
+ rep.AddStack(stack, true);
+ OutputReport(thr, rep);
+}
+
+static constexpr uptr kMaxAllowedMallocSize = 1ull << 40;
+
+void *user_alloc_internal(ThreadState *thr, uptr pc, uptr sz, uptr align,
+ bool signal) {
+ if (sz >= kMaxAllowedMallocSize || align >= kMaxAllowedMallocSize) {
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ GET_STACK_TRACE_FATAL(thr, pc);
+ ReportAllocationSizeTooBig(sz, kMaxAllowedMallocSize, &stack);
+ }
+ void *p = allocator()->Allocate(&thr->proc()->alloc_cache, sz, align);
+ if (UNLIKELY(!p)) {
+ SetAllocatorOutOfMemory();
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ GET_STACK_TRACE_FATAL(thr, pc);
+ ReportOutOfMemory(sz, &stack);
+ }
+ if (ctx && ctx->initialized)
+ OnUserAlloc(thr, pc, (uptr)p, sz, true);
+ if (signal)
+ SignalUnsafeCall(thr, pc);
+ return p;
+}
+
+void user_free(ThreadState *thr, uptr pc, void *p, bool signal) {
+ ScopedGlobalProcessor sgp;
+ if (ctx && ctx->initialized)
+ OnUserFree(thr, pc, (uptr)p, true);
+ allocator()->Deallocate(&thr->proc()->alloc_cache, p);
+ if (signal)
+ SignalUnsafeCall(thr, pc);
+}
+
+void *user_alloc(ThreadState *thr, uptr pc, uptr sz) {
+ return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, kDefaultAlignment));
+}
+
+void *user_calloc(ThreadState *thr, uptr pc, uptr size, uptr n) {
+ if (UNLIKELY(CheckForCallocOverflow(size, n))) {
+ if (AllocatorMayReturnNull())
+ return SetErrnoOnNull(nullptr);
+ GET_STACK_TRACE_FATAL(thr, pc);
+ ReportCallocOverflow(n, size, &stack);
+ }
+ void *p = user_alloc_internal(thr, pc, n * size);
+ if (p)
+ internal_memset(p, 0, n * size);
+ return SetErrnoOnNull(p);
+}
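+
+// A minimal sketch of the condition CheckForCallocOverflow() tests (the real
+// check lives in sanitizer_allocator_checks.h; this merely restates it and
+// is kept out of the build):
+#if 0
+static bool CallocWouldOverflowSketch(uptr size, uptr n) {
+  if (size == 0 || n == 0)
+    return false;
+  return n > ((uptr)-1) / size;  // n * size would wrap around uptr
+}
+#endif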
+
+void *user_reallocarray(ThreadState *thr, uptr pc, void *p, uptr size, uptr n) {
+ if (UNLIKELY(CheckForCallocOverflow(size, n))) {
+ if (AllocatorMayReturnNull())
+ return SetErrnoOnNull(nullptr);
+ GET_STACK_TRACE_FATAL(thr, pc);
+ ReportReallocArrayOverflow(size, n, &stack);
+ }
+ return user_realloc(thr, pc, p, size * n);
+}
+
+void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write) {
+ DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
+ ctx->metamap.AllocBlock(thr, pc, p, sz);
+ if (write && thr->ignore_reads_and_writes == 0)
+ MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
+ else
+ MemoryResetRange(thr, pc, (uptr)p, sz);
+}
+
+void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write) {
+ CHECK_NE(p, (void*)0);
+ uptr sz = ctx->metamap.FreeBlock(thr->proc(), p);
+ DPrintf("#%d: free(%p, %zu)\n", thr->tid, p, sz);
+ if (write && thr->ignore_reads_and_writes == 0)
+ MemoryRangeFreed(thr, pc, (uptr)p, sz);
+}
+
+void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz) {
+  // FIXME: Handle "shrinking" more efficiently;
+  // it seems that some software actually does this.
+ if (!p)
+ return SetErrnoOnNull(user_alloc_internal(thr, pc, sz));
+ if (!sz) {
+ user_free(thr, pc, p);
+ return nullptr;
+ }
+ void *new_p = user_alloc_internal(thr, pc, sz);
+ if (new_p) {
+ uptr old_sz = user_alloc_usable_size(p);
+ internal_memcpy(new_p, p, min(old_sz, sz));
+ user_free(thr, pc, p);
+ }
+ return SetErrnoOnNull(new_p);
+}
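+
+// As seen by a caller, the dispatch above gives the usual realloc contract
+// (a minimal sketch; the pointer values are illustrative):
+//   p = user_realloc(thr, pc, nullptr, 64);  // acts as malloc(64)
+//   p = user_realloc(thr, pc, p, 128);       // new block, 64 bytes copied
+//   user_realloc(thr, pc, p, 0);             // acts as free(p), returns null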
+
+void *user_memalign(ThreadState *thr, uptr pc, uptr align, uptr sz) {
+ if (UNLIKELY(!IsPowerOfTwo(align))) {
+ errno = errno_EINVAL;
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ GET_STACK_TRACE_FATAL(thr, pc);
+ ReportInvalidAllocationAlignment(align, &stack);
+ }
+ return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
+}
+
+int user_posix_memalign(ThreadState *thr, uptr pc, void **memptr, uptr align,
+ uptr sz) {
+ if (UNLIKELY(!CheckPosixMemalignAlignment(align))) {
+ if (AllocatorMayReturnNull())
+ return errno_EINVAL;
+ GET_STACK_TRACE_FATAL(thr, pc);
+ ReportInvalidPosixMemalignAlignment(align, &stack);
+ }
+ void *ptr = user_alloc_internal(thr, pc, sz, align);
+ if (UNLIKELY(!ptr))
+ // OOM error is already taken care of by user_alloc_internal.
+ return errno_ENOMEM;
+ CHECK(IsAligned((uptr)ptr, align));
+ *memptr = ptr;
+ return 0;
+}
+
+void *user_aligned_alloc(ThreadState *thr, uptr pc, uptr align, uptr sz) {
+ if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(align, sz))) {
+ errno = errno_EINVAL;
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ GET_STACK_TRACE_FATAL(thr, pc);
+ ReportInvalidAlignedAllocAlignment(sz, align, &stack);
+ }
+ return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, align));
+}
+
+void *user_valloc(ThreadState *thr, uptr pc, uptr sz) {
+ return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, GetPageSizeCached()));
+}
+
+void *user_pvalloc(ThreadState *thr, uptr pc, uptr sz) {
+ uptr PageSize = GetPageSizeCached();
+ if (UNLIKELY(CheckForPvallocOverflow(sz, PageSize))) {
+ errno = errno_ENOMEM;
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ GET_STACK_TRACE_FATAL(thr, pc);
+ ReportPvallocOverflow(sz, &stack);
+ }
+ // pvalloc(0) should allocate one page.
+ sz = sz ? RoundUpTo(sz, PageSize) : PageSize;
+ return SetErrnoOnNull(user_alloc_internal(thr, pc, sz, PageSize));
+}
+
+uptr user_alloc_usable_size(const void *p) {
+ if (p == 0)
+ return 0;
+ MBlock *b = ctx->metamap.GetBlock((uptr)p);
+ if (!b)
+ return 0; // Not a valid pointer.
+ if (b->siz == 0)
+ return 1; // Zero-sized allocations are actually 1 byte.
+ return b->siz;
+}
+
+void invoke_malloc_hook(void *ptr, uptr size) {
+ ThreadState *thr = cur_thread();
+ if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
+ return;
+ __sanitizer_malloc_hook(ptr, size);
+ RunMallocHooks(ptr, size);
+}
+
+void invoke_free_hook(void *ptr) {
+ ThreadState *thr = cur_thread();
+ if (ctx == 0 || !ctx->initialized || thr->ignore_interceptors)
+ return;
+ __sanitizer_free_hook(ptr);
+ RunFreeHooks(ptr);
+}
+
+void *internal_alloc(MBlockType typ, uptr sz) {
+ ThreadState *thr = cur_thread();
+ if (thr->nomalloc) {
+ thr->nomalloc = 0; // CHECK calls internal_malloc().
+ CHECK(0);
+ }
+ return InternalAlloc(sz, &thr->proc()->internal_alloc_cache);
+}
+
+void internal_free(void *p) {
+ ThreadState *thr = cur_thread();
+ if (thr->nomalloc) {
+ thr->nomalloc = 0; // CHECK calls internal_malloc().
+ CHECK(0);
+ }
+ InternalFree(p, &thr->proc()->internal_alloc_cache);
+}
+
+} // namespace __tsan
+
+using namespace __tsan;
+
+extern "C" {
+uptr __sanitizer_get_current_allocated_bytes() {
+ uptr stats[AllocatorStatCount];
+ allocator()->GetStats(stats);
+ return stats[AllocatorStatAllocated];
+}
+
+uptr __sanitizer_get_heap_size() {
+ uptr stats[AllocatorStatCount];
+ allocator()->GetStats(stats);
+ return stats[AllocatorStatMapped];
+}
+
+uptr __sanitizer_get_free_bytes() {
+ return 1;
+}
+
+uptr __sanitizer_get_unmapped_bytes() {
+ return 1;
+}
+
+uptr __sanitizer_get_estimated_allocated_size(uptr size) {
+ return size;
+}
+
+int __sanitizer_get_ownership(const void *p) {
+ return allocator()->GetBlockBegin(p) != 0;
+}
+
+uptr __sanitizer_get_allocated_size(const void *p) {
+ return user_alloc_usable_size(p);
+}
+
+void __tsan_on_thread_idle() {
+ ThreadState *thr = cur_thread();
+ thr->clock.ResetCached(&thr->proc()->clock_cache);
+ thr->last_sleep_clock.ResetCached(&thr->proc()->clock_cache);
+ allocator()->SwallowCache(&thr->proc()->alloc_cache);
+ internal_allocator()->SwallowCache(&thr->proc()->internal_alloc_cache);
+ ctx->metamap.OnProcIdle(thr->proc());
+}
+} // extern "C"
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_mman.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_mman.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_mman.h (revision 351984)
@@ -0,0 +1,90 @@
+//===-- tsan_mman.h ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_MMAN_H
+#define TSAN_MMAN_H
+
+#include "tsan_defs.h"
+
+namespace __tsan {
+
+const uptr kDefaultAlignment = 16;
+
+void InitializeAllocator();
+void InitializeAllocatorLate();
+void ReplaceSystemMalloc();
+void AllocatorProcStart(Processor *proc);
+void AllocatorProcFinish(Processor *proc);
+void AllocatorPrintStats();
+
+// For user allocations.
+void *user_alloc_internal(ThreadState *thr, uptr pc, uptr sz,
+ uptr align = kDefaultAlignment, bool signal = true);
+// Does not accept NULL.
+void user_free(ThreadState *thr, uptr pc, void *p, bool signal = true);
+// Interceptor implementations.
+void *user_alloc(ThreadState *thr, uptr pc, uptr sz);
+void *user_calloc(ThreadState *thr, uptr pc, uptr sz, uptr n);
+void *user_realloc(ThreadState *thr, uptr pc, void *p, uptr sz);
+void *user_reallocarray(ThreadState *thr, uptr pc, void *p, uptr sz, uptr n);
+void *user_memalign(ThreadState *thr, uptr pc, uptr align, uptr sz);
+int user_posix_memalign(ThreadState *thr, uptr pc, void **memptr, uptr align,
+ uptr sz);
+void *user_aligned_alloc(ThreadState *thr, uptr pc, uptr align, uptr sz);
+void *user_valloc(ThreadState *thr, uptr pc, uptr sz);
+void *user_pvalloc(ThreadState *thr, uptr pc, uptr sz);
+uptr user_alloc_usable_size(const void *p);
+
+// Invoking malloc/free hooks that may be installed by the user.
+void invoke_malloc_hook(void *ptr, uptr size);
+void invoke_free_hook(void *ptr);
+
+enum MBlockType {
+ MBlockScopedBuf,
+ MBlockString,
+ MBlockStackTrace,
+ MBlockShadowStack,
+ MBlockSync,
+ MBlockClock,
+ MBlockThreadContex,
+ MBlockDeadInfo,
+ MBlockRacyStacks,
+ MBlockRacyAddresses,
+ MBlockAtExit,
+ MBlockFlag,
+ MBlockReport,
+ MBlockReportMop,
+ MBlockReportThread,
+ MBlockReportMutex,
+ MBlockReportLoc,
+ MBlockReportStack,
+ MBlockSuppression,
+ MBlockExpectRace,
+ MBlockSignal,
+ MBlockJmpBuf,
+
+ // This must be the last.
+ MBlockTypeCount
+};
+
+// For internal data structures.
+void *internal_alloc(MBlockType typ, uptr sz);
+void internal_free(void *p);
+
+template<typename T>
+void DestroyAndFree(T *&p) {
+ p->~T();
+ internal_free(p);
+ p = 0;
+}
+
+} // namespace __tsan
+#endif // TSAN_MMAN_H
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_mutex.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_mutex.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_mutex.cc (revision 351984)
@@ -0,0 +1,289 @@
+//===-- tsan_mutex.cc -----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_libc.h"
+#include "tsan_mutex.h"
+#include "tsan_platform.h"
+#include "tsan_rtl.h"
+
+namespace __tsan {
+
+// Simple reader-writer spin-mutex. Optimized for the not-so-contended case.
+// Readers have preference and can potentially starve writers.
+
+// The table fixes what mutexes can be locked under what mutexes.
+// E.g. if the row for MutexTypeThreads contains MutexTypeReport,
+// then Report mutex can be locked while under Threads mutex.
+// The leaf mutexes can be locked under any other mutexes.
+// Recursive locking is not supported.
+#if SANITIZER_DEBUG && !SANITIZER_GO
+const MutexType MutexTypeLeaf = (MutexType)-1;
+static MutexType CanLockTab[MutexTypeCount][MutexTypeCount] = {
+ /*0 MutexTypeInvalid*/ {},
+ /*1 MutexTypeTrace*/ {MutexTypeLeaf},
+ /*2 MutexTypeThreads*/ {MutexTypeReport},
+ /*3 MutexTypeReport*/ {MutexTypeSyncVar,
+ MutexTypeMBlock, MutexTypeJavaMBlock},
+ /*4 MutexTypeSyncVar*/ {MutexTypeDDetector},
+ /*5 MutexTypeSyncTab*/ {}, // unused
+ /*6 MutexTypeSlab*/ {MutexTypeLeaf},
+ /*7 MutexTypeAnnotations*/ {},
+ /*8 MutexTypeAtExit*/ {MutexTypeSyncVar},
+ /*9 MutexTypeMBlock*/ {MutexTypeSyncVar},
+ /*10 MutexTypeJavaMBlock*/ {MutexTypeSyncVar},
+ /*11 MutexTypeDDetector*/ {},
+ /*12 MutexTypeFired*/ {MutexTypeLeaf},
+ /*13 MutexTypeRacy*/ {MutexTypeLeaf},
+ /*14 MutexTypeGlobalProc*/ {},
+};
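+
+// Reading the table (a minimal sketch): the row for MutexTypeThreads lists
+// MutexTypeReport, so
+//   Lock(MutexTypeThreads); Lock(MutexTypeReport);   // allowed
+// is a legal order, while the reverse pair appears in no row and the debug
+// deadlock detector below would flag it:
+//   Lock(MutexTypeReport); Lock(MutexTypeThreads);   // internal deadlock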
+
+static bool CanLockAdj[MutexTypeCount][MutexTypeCount];
+#endif
+
+void InitializeMutex() {
+#if SANITIZER_DEBUG && !SANITIZER_GO
+ // Build the "can lock" adjacency matrix.
+ // If [i][j]==true, then one can lock mutex j while under mutex i.
+ const int N = MutexTypeCount;
+ int cnt[N] = {};
+ bool leaf[N] = {};
+ for (int i = 1; i < N; i++) {
+ for (int j = 0; j < N; j++) {
+ MutexType z = CanLockTab[i][j];
+ if (z == MutexTypeInvalid)
+ continue;
+ if (z == MutexTypeLeaf) {
+ CHECK(!leaf[i]);
+ leaf[i] = true;
+ continue;
+ }
+ CHECK(!CanLockAdj[i][(int)z]);
+ CanLockAdj[i][(int)z] = true;
+ cnt[i]++;
+ }
+ }
+ for (int i = 0; i < N; i++) {
+ CHECK(!leaf[i] || cnt[i] == 0);
+ }
+ // Add leaf mutexes.
+ for (int i = 0; i < N; i++) {
+ if (!leaf[i])
+ continue;
+ for (int j = 0; j < N; j++) {
+ if (i == j || leaf[j] || j == MutexTypeInvalid)
+ continue;
+ CHECK(!CanLockAdj[j][i]);
+ CanLockAdj[j][i] = true;
+ }
+ }
+ // Build the transitive closure.
+ bool CanLockAdj2[MutexTypeCount][MutexTypeCount];
+ for (int i = 0; i < N; i++) {
+ for (int j = 0; j < N; j++) {
+ CanLockAdj2[i][j] = CanLockAdj[i][j];
+ }
+ }
+ for (int k = 0; k < N; k++) {
+ for (int i = 0; i < N; i++) {
+ for (int j = 0; j < N; j++) {
+ if (CanLockAdj2[i][k] && CanLockAdj2[k][j]) {
+ CanLockAdj2[i][j] = true;
+ }
+ }
+ }
+ }
+#if 0
+ Printf("Can lock graph:\n");
+ for (int i = 0; i < N; i++) {
+ for (int j = 0; j < N; j++) {
+ Printf("%d ", CanLockAdj[i][j]);
+ }
+ Printf("\n");
+ }
+ Printf("Can lock graph closure:\n");
+ for (int i = 0; i < N; i++) {
+ for (int j = 0; j < N; j++) {
+ Printf("%d ", CanLockAdj2[i][j]);
+ }
+ Printf("\n");
+ }
+#endif
+ // Verify that the graph is acyclic.
+ for (int i = 0; i < N; i++) {
+ if (CanLockAdj2[i][i]) {
+ Printf("Mutex %d participates in a cycle\n", i);
+ Die();
+ }
+ }
+#endif
+}
+
+InternalDeadlockDetector::InternalDeadlockDetector() {
+ // Rely on zero initialization because some mutexes can be locked before ctor.
+}
+
+#if SANITIZER_DEBUG && !SANITIZER_GO
+void InternalDeadlockDetector::Lock(MutexType t) {
+ // Printf("LOCK %d @%zu\n", t, seq_ + 1);
+ CHECK_GT(t, MutexTypeInvalid);
+ CHECK_LT(t, MutexTypeCount);
+ u64 max_seq = 0;
+ u64 max_idx = MutexTypeInvalid;
+ for (int i = 0; i != MutexTypeCount; i++) {
+ if (locked_[i] == 0)
+ continue;
+ CHECK_NE(locked_[i], max_seq);
+ if (max_seq < locked_[i]) {
+ max_seq = locked_[i];
+ max_idx = i;
+ }
+ }
+ locked_[t] = ++seq_;
+ if (max_idx == MutexTypeInvalid)
+ return;
+ // Printf(" last %d @%zu\n", max_idx, max_seq);
+ if (!CanLockAdj[max_idx][t]) {
+ Printf("ThreadSanitizer: internal deadlock detected\n");
+ Printf("ThreadSanitizer: can't lock %d while under %zu\n",
+ t, (uptr)max_idx);
+ CHECK(0);
+ }
+}
+
+void InternalDeadlockDetector::Unlock(MutexType t) {
+ // Printf("UNLO %d @%zu #%zu\n", t, seq_, locked_[t]);
+ CHECK(locked_[t]);
+ locked_[t] = 0;
+}
+
+void InternalDeadlockDetector::CheckNoLocks() {
+ for (int i = 0; i != MutexTypeCount; i++) {
+ CHECK_EQ(locked_[i], 0);
+ }
+}
+#endif
+
+void CheckNoLocks(ThreadState *thr) {
+#if SANITIZER_DEBUG && !SANITIZER_GO
+ thr->internal_deadlock_detector.CheckNoLocks();
+#endif
+}
+
+const uptr kUnlocked = 0;
+const uptr kWriteLock = 1;
+const uptr kReadLock = 2;
+
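+// State encoding (derived from the constants above): bit 0 (kWriteLock) is
+// set while a writer owns the mutex; otherwise state_ holds the reader count
+// multiplied by kReadLock. E.g. three readers make state_ == 6, and a single
+// writer makes state_ == 1.
+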
+class Backoff {
+ public:
+ Backoff()
+ : iter_() {
+ }
+
+ bool Do() {
+ if (iter_++ < kActiveSpinIters)
+ proc_yield(kActiveSpinCnt);
+ else
+ internal_sched_yield();
+ return true;
+ }
+
+ u64 Contention() const {
+ u64 active = iter_ % kActiveSpinIters;
+ u64 passive = iter_ - active;
+ return active + 10 * passive;
+ }
+
+ private:
+ int iter_;
+ static const int kActiveSpinIters = 10;
+ static const int kActiveSpinCnt = 20;
+};
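+
+// Backoff spins actively (proc_yield) for the first kActiveSpinIters
+// iterations and then falls back to internal_sched_yield(); Contention()
+// weights the passive iterations 10x when attributing contention to the
+// mutex stats.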
+
+Mutex::Mutex(MutexType type, StatType stat_type) {
+ CHECK_GT(type, MutexTypeInvalid);
+ CHECK_LT(type, MutexTypeCount);
+#if SANITIZER_DEBUG
+ type_ = type;
+#endif
+#if TSAN_COLLECT_STATS
+ stat_type_ = stat_type;
+#endif
+ atomic_store(&state_, kUnlocked, memory_order_relaxed);
+}
+
+Mutex::~Mutex() {
+ CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
+}
+
+void Mutex::Lock() {
+#if SANITIZER_DEBUG && !SANITIZER_GO
+ cur_thread()->internal_deadlock_detector.Lock(type_);
+#endif
+ uptr cmp = kUnlocked;
+ if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
+ memory_order_acquire))
+ return;
+ for (Backoff backoff; backoff.Do();) {
+ if (atomic_load(&state_, memory_order_relaxed) == kUnlocked) {
+ cmp = kUnlocked;
+ if (atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
+ memory_order_acquire)) {
+#if TSAN_COLLECT_STATS && !SANITIZER_GO
+ StatInc(cur_thread(), stat_type_, backoff.Contention());
+#endif
+ return;
+ }
+ }
+ }
+}
+
+void Mutex::Unlock() {
+ uptr prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
+ (void)prev;
+ DCHECK_NE(prev & kWriteLock, 0);
+#if SANITIZER_DEBUG && !SANITIZER_GO
+ cur_thread()->internal_deadlock_detector.Unlock(type_);
+#endif
+}
+
+void Mutex::ReadLock() {
+#if SANITIZER_DEBUG && !SANITIZER_GO
+ cur_thread()->internal_deadlock_detector.Lock(type_);
+#endif
+ uptr prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
+ if ((prev & kWriteLock) == 0)
+ return;
+ for (Backoff backoff; backoff.Do();) {
+ prev = atomic_load(&state_, memory_order_acquire);
+ if ((prev & kWriteLock) == 0) {
+#if TSAN_COLLECT_STATS && !SANITIZER_GO
+ StatInc(cur_thread(), stat_type_, backoff.Contention());
+#endif
+ return;
+ }
+ }
+}
+
+void Mutex::ReadUnlock() {
+ uptr prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
+ (void)prev;
+ DCHECK_EQ(prev & kWriteLock, 0);
+ DCHECK_GT(prev & ~kWriteLock, 0);
+#if SANITIZER_DEBUG && !SANITIZER_GO
+ cur_thread()->internal_deadlock_detector.Unlock(type_);
+#endif
+}
+
+void Mutex::CheckLocked() {
+ CHECK_NE(atomic_load(&state_, memory_order_relaxed), 0);
+}
+
+} // namespace __tsan
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_mutex.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_mutex.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_mutex.h (revision 351984)
@@ -0,0 +1,90 @@
+//===-- tsan_mutex.h --------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_MUTEX_H
+#define TSAN_MUTEX_H
+
+#include "sanitizer_common/sanitizer_atomic.h"
+#include "sanitizer_common/sanitizer_mutex.h"
+#include "tsan_defs.h"
+
+namespace __tsan {
+
+enum MutexType {
+ MutexTypeInvalid,
+ MutexTypeTrace,
+ MutexTypeThreads,
+ MutexTypeReport,
+ MutexTypeSyncVar,
+ MutexTypeSyncTab,
+ MutexTypeSlab,
+ MutexTypeAnnotations,
+ MutexTypeAtExit,
+ MutexTypeMBlock,
+ MutexTypeJavaMBlock,
+ MutexTypeDDetector,
+ MutexTypeFired,
+ MutexTypeRacy,
+ MutexTypeGlobalProc,
+
+ // This must be the last.
+ MutexTypeCount
+};
+
+class Mutex {
+ public:
+ explicit Mutex(MutexType type, StatType stat_type);
+ ~Mutex();
+
+ void Lock();
+ void Unlock();
+
+ void ReadLock();
+ void ReadUnlock();
+
+ void CheckLocked();
+
+ private:
+ atomic_uintptr_t state_;
+#if SANITIZER_DEBUG
+ MutexType type_;
+#endif
+#if TSAN_COLLECT_STATS
+ StatType stat_type_;
+#endif
+
+ Mutex(const Mutex&);
+ void operator = (const Mutex&);
+};
+
+typedef GenericScopedLock<Mutex> Lock;
+typedef GenericScopedReadLock<Mutex> ReadLock;
+
+class InternalDeadlockDetector {
+ public:
+ InternalDeadlockDetector();
+ void Lock(MutexType t);
+ void Unlock(MutexType t);
+ void CheckNoLocks();
+ private:
+ u64 seq_;
+ u64 locked_[MutexTypeCount];
+};
+
+void InitializeMutex();
+
+// Checks that the current thread does not hold any runtime locks
+// (e.g. when returning from an interceptor).
+void CheckNoLocks(ThreadState *thr);
+
+} // namespace __tsan
+
+#endif // TSAN_MUTEX_H
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_mutexset.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_mutexset.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_mutexset.cc (revision 351984)
@@ -0,0 +1,88 @@
+//===-- tsan_mutexset.cc --------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_mutexset.h"
+#include "tsan_rtl.h"
+
+namespace __tsan {
+
+const uptr MutexSet::kMaxSize;
+
+MutexSet::MutexSet() {
+ size_ = 0;
+ internal_memset(&descs_, 0, sizeof(descs_));
+}
+
+void MutexSet::Add(u64 id, bool write, u64 epoch) {
+ // Look up existing mutex with the same id.
+ for (uptr i = 0; i < size_; i++) {
+ if (descs_[i].id == id) {
+ descs_[i].count++;
+ descs_[i].epoch = epoch;
+ return;
+ }
+ }
+ // On overflow, find the oldest mutex and drop it.
+ if (size_ == kMaxSize) {
+ u64 minepoch = (u64)-1;
+ u64 mini = (u64)-1;
+ for (uptr i = 0; i < size_; i++) {
+ if (descs_[i].epoch < minepoch) {
+ minepoch = descs_[i].epoch;
+ mini = i;
+ }
+ }
+ RemovePos(mini);
+ CHECK_EQ(size_, kMaxSize - 1);
+ }
+ // Add new mutex descriptor.
+ descs_[size_].id = id;
+ descs_[size_].write = write;
+ descs_[size_].epoch = epoch;
+ descs_[size_].count = 1;
+ size_++;
+}
+
+void MutexSet::Del(u64 id, bool write) {
+ for (uptr i = 0; i < size_; i++) {
+ if (descs_[i].id == id) {
+ if (--descs_[i].count == 0)
+ RemovePos(i);
+ return;
+ }
+ }
+}
+
+void MutexSet::Remove(u64 id) {
+ for (uptr i = 0; i < size_; i++) {
+ if (descs_[i].id == id) {
+ RemovePos(i);
+ return;
+ }
+ }
+}
+
+void MutexSet::RemovePos(uptr i) {
+ CHECK_LT(i, size_);
+ descs_[i] = descs_[size_ - 1];
+ size_--;
+}
+
+uptr MutexSet::Size() const {
+ return size_;
+}
+
+MutexSet::Desc MutexSet::Get(uptr i) const {
+ CHECK_LT(i, size_);
+ return descs_[i];
+}
+
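+// A minimal usage sketch (the id and epochs are illustrative); kept out of
+// the build:
+#if 0
+static void MutexSetUsageSketch(MutexSet *ms) {
+  ms->Add(/*id=*/42, /*write=*/true, /*epoch=*/100);  // new entry, count == 1
+  ms->Add(42, true, 101);  // same id: count == 2, epoch refreshed
+  ms->Del(42, true);       // count drops to 1, entry kept
+  ms->Del(42, true);       // count hits 0, entry removed via RemovePos()
+}
+#endif
+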
+} // namespace __tsan
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_mutexset.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_mutexset.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_mutexset.h (revision 351984)
@@ -0,0 +1,69 @@
+//===-- tsan_mutexset.h -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// MutexSet holds the set of mutexes currently held by a thread.
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_MUTEXSET_H
+#define TSAN_MUTEXSET_H
+
+#include "tsan_defs.h"
+
+namespace __tsan {
+
+class MutexSet {
+ public:
+ // Holds limited number of mutexes.
+ // The oldest mutexes are discarded on overflow.
+ static const uptr kMaxSize = 16;
+ struct Desc {
+ u64 id;
+ u64 epoch;
+ int count;
+ bool write;
+ };
+
+ MutexSet();
+ // The 'id' is obtained from SyncVar::GetId().
+ void Add(u64 id, bool write, u64 epoch);
+ void Del(u64 id, bool write);
+ void Remove(u64 id); // Removes the mutex completely (if it's destroyed).
+ uptr Size() const;
+ Desc Get(uptr i) const;
+
+ void operator=(const MutexSet &other) {
+ internal_memcpy(this, &other, sizeof(*this));
+ }
+
+ private:
+#if !SANITIZER_GO
+ uptr size_;
+ Desc descs_[kMaxSize];
+#endif
+
+ void RemovePos(uptr i);
+ MutexSet(const MutexSet&);
+};
+
+// Go does not have mutexes, so do not spend memory and time on them.
+// (Go sync.Mutex is actually a semaphore -- it can be unlocked
+// in a different goroutine.)
+#if SANITIZER_GO
+MutexSet::MutexSet() {}
+void MutexSet::Add(u64 id, bool write, u64 epoch) {}
+void MutexSet::Del(u64 id, bool write) {}
+void MutexSet::Remove(u64 id) {}
+void MutexSet::RemovePos(uptr i) {}
+uptr MutexSet::Size() const { return 0; }
+MutexSet::Desc MutexSet::Get(uptr i) const { return Desc(); }
+#endif
+
+} // namespace __tsan
+
+#endif // TSAN_MUTEXSET_H
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_new_delete.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_new_delete.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_new_delete.cc (revision 351984)
@@ -0,0 +1,199 @@
+//===-- tsan_new_delete.cc ----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Interceptors for operators new and delete.
+//===----------------------------------------------------------------------===//
+#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_allocator_report.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "tsan_interceptors.h"
+#include "tsan_rtl.h"
+
+using namespace __tsan; // NOLINT
+
+namespace std {
+struct nothrow_t {};
+enum class align_val_t: __sanitizer::uptr {};
+} // namespace std
+
+DECLARE_REAL(void *, malloc, uptr size)
+DECLARE_REAL(void, free, void *ptr)
+
+// TODO(alekseys): throw std::bad_alloc instead of dying on OOM.
+#define OPERATOR_NEW_BODY(mangled_name, nothrow) \
+ if (in_symbolizer()) \
+ return InternalAlloc(size); \
+ void *p = 0; \
+ { \
+ SCOPED_INTERCEPTOR_RAW(mangled_name, size); \
+ p = user_alloc(thr, pc, size); \
+ if (!nothrow && UNLIKELY(!p)) { \
+ GET_STACK_TRACE_FATAL(thr, pc); \
+ ReportOutOfMemory(size, &stack); \
+ } \
+ } \
+ invoke_malloc_hook(p, size); \
+ return p;
+
+#define OPERATOR_NEW_BODY_ALIGN(mangled_name, nothrow) \
+ if (in_symbolizer()) \
+ return InternalAlloc(size, nullptr, (uptr)align); \
+ void *p = 0; \
+ { \
+ SCOPED_INTERCEPTOR_RAW(mangled_name, size); \
+ p = user_memalign(thr, pc, (uptr)align, size); \
+ if (!nothrow && UNLIKELY(!p)) { \
+ GET_STACK_TRACE_FATAL(thr, pc); \
+ ReportOutOfMemory(size, &stack); \
+ } \
+ } \
+ invoke_malloc_hook(p, size); \
+ return p;
+
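+// The mangled names passed to SCOPED_INTERCEPTOR_RAW in the macros above are
+// the Itanium C++ ABI spellings of the replaced operators (e.g. _Znwm is
+// operator new(unsigned long) and _ZdlPv is operator delete(void*)), so
+// reports can name the operator the program actually called.
+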
+SANITIZER_INTERFACE_ATTRIBUTE
+void *operator new(__sanitizer::uptr size);
+void *operator new(__sanitizer::uptr size) {
+ OPERATOR_NEW_BODY(_Znwm, false /*nothrow*/);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *operator new[](__sanitizer::uptr size);
+void *operator new[](__sanitizer::uptr size) {
+ OPERATOR_NEW_BODY(_Znam, false /*nothrow*/);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *operator new(__sanitizer::uptr size, std::nothrow_t const&);
+void *operator new(__sanitizer::uptr size, std::nothrow_t const&) {
+ OPERATOR_NEW_BODY(_ZnwmRKSt9nothrow_t, true /*nothrow*/);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *operator new[](__sanitizer::uptr size, std::nothrow_t const&);
+void *operator new[](__sanitizer::uptr size, std::nothrow_t const&) {
+ OPERATOR_NEW_BODY(_ZnamRKSt9nothrow_t, true /*nothrow*/);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *operator new(__sanitizer::uptr size, std::align_val_t align);
+void *operator new(__sanitizer::uptr size, std::align_val_t align) {
+ OPERATOR_NEW_BODY_ALIGN(_ZnwmSt11align_val_t, false /*nothrow*/);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *operator new[](__sanitizer::uptr size, std::align_val_t align);
+void *operator new[](__sanitizer::uptr size, std::align_val_t align) {
+ OPERATOR_NEW_BODY_ALIGN(_ZnamSt11align_val_t, false /*nothrow*/);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *operator new(__sanitizer::uptr size, std::align_val_t align,
+ std::nothrow_t const&);
+void *operator new(__sanitizer::uptr size, std::align_val_t align,
+ std::nothrow_t const&) {
+ OPERATOR_NEW_BODY_ALIGN(_ZnwmSt11align_val_tRKSt9nothrow_t,
+ true /*nothrow*/);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *operator new[](__sanitizer::uptr size, std::align_val_t align,
+ std::nothrow_t const&);
+void *operator new[](__sanitizer::uptr size, std::align_val_t align,
+ std::nothrow_t const&) {
+ OPERATOR_NEW_BODY_ALIGN(_ZnamSt11align_val_tRKSt9nothrow_t,
+ true /*nothrow*/);
+}
+
+#define OPERATOR_DELETE_BODY(mangled_name) \
+ if (ptr == 0) return; \
+ if (in_symbolizer()) \
+ return InternalFree(ptr); \
+ invoke_free_hook(ptr); \
+ SCOPED_INTERCEPTOR_RAW(mangled_name, ptr); \
+ user_free(thr, pc, ptr);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void operator delete(void *ptr) NOEXCEPT;
+void operator delete(void *ptr) NOEXCEPT {
+ OPERATOR_DELETE_BODY(_ZdlPv);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void operator delete[](void *ptr) NOEXCEPT;
+void operator delete[](void *ptr) NOEXCEPT {
+ OPERATOR_DELETE_BODY(_ZdaPv);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void operator delete(void *ptr, std::nothrow_t const&);
+void operator delete(void *ptr, std::nothrow_t const&) {
+ OPERATOR_DELETE_BODY(_ZdlPvRKSt9nothrow_t);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void operator delete[](void *ptr, std::nothrow_t const&);
+void operator delete[](void *ptr, std::nothrow_t const&) {
+ OPERATOR_DELETE_BODY(_ZdaPvRKSt9nothrow_t);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void operator delete(void *ptr, __sanitizer::uptr size) NOEXCEPT;
+void operator delete(void *ptr, __sanitizer::uptr size) NOEXCEPT {
+ OPERATOR_DELETE_BODY(_ZdlPvm);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void operator delete[](void *ptr, __sanitizer::uptr size) NOEXCEPT;
+void operator delete[](void *ptr, __sanitizer::uptr size) NOEXCEPT {
+ OPERATOR_DELETE_BODY(_ZdaPvm);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void operator delete(void *ptr, std::align_val_t align) NOEXCEPT;
+void operator delete(void *ptr, std::align_val_t align) NOEXCEPT {
+ OPERATOR_DELETE_BODY(_ZdlPvSt11align_val_t);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void operator delete[](void *ptr, std::align_val_t align) NOEXCEPT;
+void operator delete[](void *ptr, std::align_val_t align) NOEXCEPT {
+ OPERATOR_DELETE_BODY(_ZdaPvSt11align_val_t);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void operator delete(void *ptr, std::align_val_t align, std::nothrow_t const&);
+void operator delete(void *ptr, std::align_val_t align, std::nothrow_t const&) {
+ OPERATOR_DELETE_BODY(_ZdlPvSt11align_val_tRKSt9nothrow_t);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void operator delete[](void *ptr, std::align_val_t align,
+ std::nothrow_t const&);
+void operator delete[](void *ptr, std::align_val_t align,
+ std::nothrow_t const&) {
+ OPERATOR_DELETE_BODY(_ZdaPvSt11align_val_tRKSt9nothrow_t);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void operator delete(void *ptr, __sanitizer::uptr size,
+ std::align_val_t align) NOEXCEPT;
+void operator delete(void *ptr, __sanitizer::uptr size,
+ std::align_val_t align) NOEXCEPT {
+ OPERATOR_DELETE_BODY(_ZdlPvmSt11align_val_t);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void operator delete[](void *ptr, __sanitizer::uptr size,
+ std::align_val_t align) NOEXCEPT;
+void operator delete[](void *ptr, __sanitizer::uptr size,
+ std::align_val_t align) NOEXCEPT {
+ OPERATOR_DELETE_BODY(_ZdaPvmSt11align_val_t);
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_new_delete.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_platform.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_platform.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_platform.h (revision 351984)
@@ -0,0 +1,1025 @@
+//===-- tsan_platform.h -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Platform-specific code.
+//===----------------------------------------------------------------------===//
+
+#ifndef TSAN_PLATFORM_H
+#define TSAN_PLATFORM_H
+
+#if !defined(__LP64__) && !defined(_WIN64)
+# error "Only 64-bit is supported"
+#endif
+
+#include "tsan_defs.h"
+#include "tsan_trace.h"
+
+namespace __tsan {
+
+#if !SANITIZER_GO
+
+#if defined(__x86_64__)
+/*
+C/C++ on linux/x86_64 and freebsd/x86_64
+0000 0000 1000 - 0080 0000 0000: main binary and/or MAP_32BIT mappings (512GB)
+0080 0000 0000 - 0100 0000 0000: -
+0100 0000 0000 - 2000 0000 0000: shadow
+2000 0000 0000 - 3000 0000 0000: -
+3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects)
+4000 0000 0000 - 5500 0000 0000: -
+5500 0000 0000 - 5680 0000 0000: pie binaries without ASLR or on 4.1+ kernels
+5680 0000 0000 - 6000 0000 0000: -
+6000 0000 0000 - 6200 0000 0000: traces
+6200 0000 0000 - 7b00 0000 0000: -
+7b00 0000 0000 - 7c00 0000 0000: heap
+7c00 0000 0000 - 7e80 0000 0000: -
+7e80 0000 0000 - 8000 0000 0000: modules and main thread stack
+
+C/C++ on netbsd/amd64 can reuse the same mapping:
+ * The address space starts from 0x1000 (option with 0x0) and ends with
+ 0x7f7ffffff000.
+ * LoAppMem-kHeapMemEnd can be reused as it is.
+ * No VDSO support.
+ * No MidAppMem region.
+ * No additional HeapMem region.
+ * HiAppMem contains the stack, loader, shared libraries and heap.
+ * Stack on NetBSD/amd64 has prereserved 128MB.
+ * Heap grows downwards (top-down).
+ * ASLR must be disabled per-process or globally.
+
+*/
+struct Mapping {
+ static const uptr kMetaShadowBeg = 0x300000000000ull;
+ static const uptr kMetaShadowEnd = 0x340000000000ull;
+ static const uptr kTraceMemBeg = 0x600000000000ull;
+ static const uptr kTraceMemEnd = 0x620000000000ull;
+ static const uptr kShadowBeg = 0x010000000000ull;
+ static const uptr kShadowEnd = 0x200000000000ull;
+ static const uptr kHeapMemBeg = 0x7b0000000000ull;
+ static const uptr kHeapMemEnd = 0x7c0000000000ull;
+ static const uptr kLoAppMemBeg = 0x000000001000ull;
+ static const uptr kLoAppMemEnd = 0x008000000000ull;
+ static const uptr kMidAppMemBeg = 0x550000000000ull;
+ static const uptr kMidAppMemEnd = 0x568000000000ull;
+ static const uptr kHiAppMemBeg = 0x7e8000000000ull;
+ static const uptr kHiAppMemEnd = 0x800000000000ull;
+ static const uptr kAppMemMsk = 0x780000000000ull;
+ static const uptr kAppMemXor = 0x040000000000ull;
+ static const uptr kVdsoBeg = 0xf000000000000000ull;
+};
+
+#define TSAN_MID_APP_RANGE 1
+#elif defined(__mips64)
+/*
+C/C++ on linux/mips64 (40-bit VMA)
+0000 0000 00 - 0100 0000 00: - (4 GB)
+0100 0000 00 - 0200 0000 00: main binary (4 GB)
+0200 0000 00 - 2000 0000 00: - (120 GB)
+2000 0000 00 - 4000 0000 00: shadow (128 GB)
+4000 0000 00 - 5000 0000 00: metainfo (memory blocks and sync objects) (64 GB)
+5000 0000 00 - aa00 0000 00: - (360 GB)
+aa00 0000 00 - ab00 0000 00: main binary (PIE) (4 GB)
+ab00 0000 00 - b000 0000 00: - (20 GB)
+b000 0000 00 - b200 0000 00: traces (8 GB)
+b200 0000 00 - fe00 0000 00: - (304 GB)
+fe00 0000 00 - ff00 0000 00: heap (4 GB)
+ff00 0000 00 - ff80 0000 00: - (2 GB)
+ff80 0000 00 - ffff ffff ff: modules and main thread stack (<2 GB)
+*/
+struct Mapping {
+ static const uptr kMetaShadowBeg = 0x4000000000ull;
+ static const uptr kMetaShadowEnd = 0x5000000000ull;
+ static const uptr kTraceMemBeg = 0xb000000000ull;
+ static const uptr kTraceMemEnd = 0xb200000000ull;
+ static const uptr kShadowBeg = 0x2000000000ull;
+ static const uptr kShadowEnd = 0x4000000000ull;
+ static const uptr kHeapMemBeg = 0xfe00000000ull;
+ static const uptr kHeapMemEnd = 0xff00000000ull;
+ static const uptr kLoAppMemBeg = 0x0100000000ull;
+ static const uptr kLoAppMemEnd = 0x0200000000ull;
+ static const uptr kMidAppMemBeg = 0xaa00000000ull;
+ static const uptr kMidAppMemEnd = 0xab00000000ull;
+ static const uptr kHiAppMemBeg = 0xff80000000ull;
+ static const uptr kHiAppMemEnd = 0xffffffffffull;
+ static const uptr kAppMemMsk = 0xf800000000ull;
+ static const uptr kAppMemXor = 0x0800000000ull;
+ static const uptr kVdsoBeg = 0xfffff00000ull;
+};
+
+#define TSAN_MID_APP_RANGE 1
+#elif defined(__aarch64__) && defined(__APPLE__)
+/*
+C/C++ on Darwin/iOS/ARM64 (36-bit VMA, 64 GB VM)
+0000 0000 00 - 0100 0000 00: - (4 GB)
+0100 0000 00 - 0200 0000 00: main binary, modules, thread stacks (4 GB)
+0200 0000 00 - 0300 0000 00: heap (4 GB)
+0300 0000 00 - 0400 0000 00: - (4 GB)
+0400 0000 00 - 0c00 0000 00: shadow memory (32 GB)
+0c00 0000 00 - 0d00 0000 00: - (4 GB)
+0d00 0000 00 - 0e00 0000 00: metainfo (4 GB)
+0e00 0000 00 - 0f00 0000 00: - (4 GB)
+0f00 0000 00 - 0fc0 0000 00: traces (3 GB)
+0fc0 0000 00 - 1000 0000 00: -
+*/
+struct Mapping {
+ static const uptr kLoAppMemBeg = 0x0100000000ull;
+ static const uptr kLoAppMemEnd = 0x0200000000ull;
+ static const uptr kHeapMemBeg = 0x0200000000ull;
+ static const uptr kHeapMemEnd = 0x0300000000ull;
+ static const uptr kShadowBeg = 0x0400000000ull;
+ static const uptr kShadowEnd = 0x0c00000000ull;
+ static const uptr kMetaShadowBeg = 0x0d00000000ull;
+ static const uptr kMetaShadowEnd = 0x0e00000000ull;
+ static const uptr kTraceMemBeg = 0x0f00000000ull;
+ static const uptr kTraceMemEnd = 0x0fc0000000ull;
+ static const uptr kHiAppMemBeg = 0x0fc0000000ull;
+ static const uptr kHiAppMemEnd = 0x0fc0000000ull;
+ static const uptr kAppMemMsk = 0x0ull;
+ static const uptr kAppMemXor = 0x0ull;
+ static const uptr kVdsoBeg = 0x7000000000000000ull;
+};
+
+#elif defined(__aarch64__)
+// AArch64 supports multiple VMA sizes, which leads to multiple address
+// transformation functions. To support these multiple VMA transformations
+// and mappings, the TSan runtime for AArch64 uses an external memory read
+// (vmaSize) to select which mapping to use. Although slower, this lets the
+// same instrumented binary run on multiple kernels.
+
+/*
+C/C++ on linux/aarch64 (39-bit VMA)
+0000 0010 00 - 0100 0000 00: main binary
+0100 0000 00 - 0800 0000 00: -
+0800 0000 00 - 2000 0000 00: shadow memory
+2000 0000 00 - 3100 0000 00: -
+3100 0000 00 - 3400 0000 00: metainfo
+3400 0000 00 - 5500 0000 00: -
+5500 0000 00 - 5600 0000 00: main binary (PIE)
+5600 0000 00 - 6000 0000 00: -
+6000 0000 00 - 6200 0000 00: traces
+6200 0000 00 - 7c00 0000 00: -
+7c00 0000 00 - 7d00 0000 00: heap
+7d00 0000 00 - 7fff ffff ff: modules and main thread stack
+*/
+struct Mapping39 {
+ static const uptr kLoAppMemBeg = 0x0000001000ull;
+ static const uptr kLoAppMemEnd = 0x0100000000ull;
+ static const uptr kShadowBeg = 0x0800000000ull;
+ static const uptr kShadowEnd = 0x2000000000ull;
+ static const uptr kMetaShadowBeg = 0x3100000000ull;
+ static const uptr kMetaShadowEnd = 0x3400000000ull;
+ static const uptr kMidAppMemBeg = 0x5500000000ull;
+ static const uptr kMidAppMemEnd = 0x5600000000ull;
+ static const uptr kTraceMemBeg = 0x6000000000ull;
+ static const uptr kTraceMemEnd = 0x6200000000ull;
+ static const uptr kHeapMemBeg = 0x7c00000000ull;
+ static const uptr kHeapMemEnd = 0x7d00000000ull;
+ static const uptr kHiAppMemBeg = 0x7e00000000ull;
+ static const uptr kHiAppMemEnd = 0x7fffffffffull;
+ static const uptr kAppMemMsk = 0x7800000000ull;
+ static const uptr kAppMemXor = 0x0200000000ull;
+ static const uptr kVdsoBeg = 0x7f00000000ull;
+};
+
+/*
+C/C++ on linux/aarch64 (42-bit VMA)
+00000 0010 00 - 01000 0000 00: main binary
+01000 0000 00 - 10000 0000 00: -
+10000 0000 00 - 20000 0000 00: shadow memory
+20000 0000 00 - 26000 0000 00: -
+26000 0000 00 - 28000 0000 00: metainfo
+28000 0000 00 - 2aa00 0000 00: -
+2aa00 0000 00 - 2ab00 0000 00: main binary (PIE)
+2ab00 0000 00 - 36200 0000 00: -
+36200 0000 00 - 36400 0000 00: traces
+36400 0000 00 - 3e000 0000 00: -
+3e000 0000 00 - 3f000 0000 00: heap
+3f000 0000 00 - 3ffff ffff ff: modules and main thread stack
+*/
+struct Mapping42 {
+ static const uptr kLoAppMemBeg = 0x00000001000ull;
+ static const uptr kLoAppMemEnd = 0x01000000000ull;
+ static const uptr kShadowBeg = 0x10000000000ull;
+ static const uptr kShadowEnd = 0x20000000000ull;
+ static const uptr kMetaShadowBeg = 0x26000000000ull;
+ static const uptr kMetaShadowEnd = 0x28000000000ull;
+ static const uptr kMidAppMemBeg = 0x2aa00000000ull;
+ static const uptr kMidAppMemEnd = 0x2ab00000000ull;
+ static const uptr kTraceMemBeg = 0x36200000000ull;
+ static const uptr kTraceMemEnd = 0x36400000000ull;
+ static const uptr kHeapMemBeg = 0x3e000000000ull;
+ static const uptr kHeapMemEnd = 0x3f000000000ull;
+ static const uptr kHiAppMemBeg = 0x3f000000000ull;
+ static const uptr kHiAppMemEnd = 0x3ffffffffffull;
+ static const uptr kAppMemMsk = 0x3c000000000ull;
+ static const uptr kAppMemXor = 0x04000000000ull;
+ static const uptr kVdsoBeg = 0x37f00000000ull;
+};
+
+struct Mapping48 {
+ static const uptr kLoAppMemBeg = 0x0000000001000ull;
+ static const uptr kLoAppMemEnd = 0x0000200000000ull;
+ static const uptr kShadowBeg = 0x0002000000000ull;
+ static const uptr kShadowEnd = 0x0004000000000ull;
+ static const uptr kMetaShadowBeg = 0x0005000000000ull;
+ static const uptr kMetaShadowEnd = 0x0006000000000ull;
+ static const uptr kMidAppMemBeg = 0x0aaaa00000000ull;
+ static const uptr kMidAppMemEnd = 0x0aaaf00000000ull;
+ static const uptr kTraceMemBeg = 0x0f06000000000ull;
+ static const uptr kTraceMemEnd = 0x0f06200000000ull;
+ static const uptr kHeapMemBeg = 0x0ffff00000000ull;
+ static const uptr kHeapMemEnd = 0x0ffff00000000ull;
+ static const uptr kHiAppMemBeg = 0x0ffff00000000ull;
+ static const uptr kHiAppMemEnd = 0x1000000000000ull;
+ static const uptr kAppMemMsk = 0x0fff800000000ull;
+ static const uptr kAppMemXor = 0x0000800000000ull;
+ static const uptr kVdsoBeg = 0xffff000000000ull;
+};
+
+// Indicates that the memory regions are selected at run time, based on the
+// detected VMA size.
+#define TSAN_RUNTIME_VMA 1
+// Indicates that the mapping defines a mid-range app memory segment.
+#define TSAN_MID_APP_RANGE 1
+#elif defined(__powerpc64__)
+// PPC64 supports multiple VMA sizes, which lead to multiple address
+// transformation functions. To support these VMA transformations and
+// mappings, the TSan runtime for PPC64 reads a global variable (vmaSize)
+// to select which mapping to use. Although slower, this lets the same
+// instrumented binary run on multiple kernels.
+
+/*
+C/C++ on linux/powerpc64 (44-bit VMA)
+0000 0000 0100 - 0001 0000 0000: main binary
+0001 0000 0000 - 0b00 0000 0000: shadow
+0b00 0000 0000 - 0d00 0000 0000: metainfo (memory blocks and sync objects)
+0d00 0000 0000 - 0f00 0000 0000: traces
+0f00 0000 0000 - 0f50 0000 0000: heap
+0f50 0000 0000 - 0f60 0000 0000: -
+0f60 0000 0000 - 1000 0000 0000: modules and main thread stack
+*/
+struct Mapping44 {
+ static const uptr kMetaShadowBeg = 0x0b0000000000ull;
+ static const uptr kMetaShadowEnd = 0x0d0000000000ull;
+ static const uptr kTraceMemBeg = 0x0d0000000000ull;
+ static const uptr kTraceMemEnd = 0x0f0000000000ull;
+ static const uptr kShadowBeg = 0x000100000000ull;
+ static const uptr kShadowEnd = 0x0b0000000000ull;
+ static const uptr kLoAppMemBeg = 0x000000000100ull;
+ static const uptr kLoAppMemEnd = 0x000100000000ull;
+ static const uptr kHeapMemBeg = 0x0f0000000000ull;
+ static const uptr kHeapMemEnd = 0x0f5000000000ull;
+ static const uptr kHiAppMemBeg = 0x0f6000000000ull;
+ static const uptr kHiAppMemEnd = 0x100000000000ull; // 44 bits
+ static const uptr kAppMemMsk = 0x0f0000000000ull;
+ static const uptr kAppMemXor = 0x002100000000ull;
+ static const uptr kVdsoBeg = 0x3c0000000000000ull;
+};
+
+/*
+C/C++ on linux/powerpc64 (46-bit VMA)
+0000 0000 1000 - 0100 0000 0000: main binary
+0100 0000 0000 - 1000 0000 0000: shadow
+1000 0000 0000 - 2000 0000 0000: metainfo (memory blocks and sync objects)
+2000 0000 0000 - 2200 0000 0000: traces
+2200 0000 0000 - 3d00 0000 0000: -
+3d00 0000 0000 - 3e00 0000 0000: heap
+3e00 0000 0000 - 3e80 0000 0000: -
+3e80 0000 0000 - 4000 0000 0000: modules and main thread stack
+*/
+struct Mapping46 {
+ static const uptr kMetaShadowBeg = 0x100000000000ull;
+ static const uptr kMetaShadowEnd = 0x200000000000ull;
+ static const uptr kTraceMemBeg = 0x200000000000ull;
+ static const uptr kTraceMemEnd = 0x220000000000ull;
+ static const uptr kShadowBeg = 0x010000000000ull;
+ static const uptr kShadowEnd = 0x100000000000ull;
+ static const uptr kHeapMemBeg = 0x3d0000000000ull;
+ static const uptr kHeapMemEnd = 0x3e0000000000ull;
+ static const uptr kLoAppMemBeg = 0x000000001000ull;
+ static const uptr kLoAppMemEnd = 0x010000000000ull;
+ static const uptr kHiAppMemBeg = 0x3e8000000000ull;
+ static const uptr kHiAppMemEnd = 0x400000000000ull; // 46 bits
+ static const uptr kAppMemMsk = 0x3c0000000000ull;
+ static const uptr kAppMemXor = 0x020000000000ull;
+ static const uptr kVdsoBeg = 0x7800000000000000ull;
+};
+
+/*
+C/C++ on linux/powerpc64 (47-bit VMA)
+0000 0000 1000 - 0100 0000 0000: main binary
+0100 0000 0000 - 1000 0000 0000: shadow
+1000 0000 0000 - 2000 0000 0000: metainfo (memory blocks and sync objects)
+2000 0000 0000 - 2200 0000 0000: traces
+2200 0000 0000 - 7d00 0000 0000: -
+7d00 0000 0000 - 7e00 0000 0000: heap
+7e00 0000 0000 - 7e80 0000 0000: -
+7e80 0000 0000 - 8000 0000 0000: modules and main thread stack
+*/
+struct Mapping47 {
+ static const uptr kMetaShadowBeg = 0x100000000000ull;
+ static const uptr kMetaShadowEnd = 0x200000000000ull;
+ static const uptr kTraceMemBeg = 0x200000000000ull;
+ static const uptr kTraceMemEnd = 0x220000000000ull;
+ static const uptr kShadowBeg = 0x010000000000ull;
+ static const uptr kShadowEnd = 0x100000000000ull;
+ static const uptr kHeapMemBeg = 0x7d0000000000ull;
+ static const uptr kHeapMemEnd = 0x7e0000000000ull;
+ static const uptr kLoAppMemBeg = 0x000000001000ull;
+ static const uptr kLoAppMemEnd = 0x010000000000ull;
+ static const uptr kHiAppMemBeg = 0x7e8000000000ull;
+ static const uptr kHiAppMemEnd = 0x800000000000ull; // 47 bits
+ static const uptr kAppMemMsk = 0x7c0000000000ull;
+ static const uptr kAppMemXor = 0x020000000000ull;
+ static const uptr kVdsoBeg = 0x7800000000000000ull;
+};
+
+// Indicates that the memory regions are selected at run time, based on the
+// detected VMA size.
+#define TSAN_RUNTIME_VMA 1
+#endif
+
+#elif SANITIZER_GO && !SANITIZER_WINDOWS && defined(__x86_64__)
+
+/* Go on linux, darwin and freebsd on x86_64
+0000 0000 1000 - 0000 1000 0000: executable
+0000 1000 0000 - 00c0 0000 0000: -
+00c0 0000 0000 - 00e0 0000 0000: heap
+00e0 0000 0000 - 2000 0000 0000: -
+2000 0000 0000 - 2380 0000 0000: shadow
+2380 0000 0000 - 3000 0000 0000: -
+3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects)
+4000 0000 0000 - 6000 0000 0000: -
+6000 0000 0000 - 6200 0000 0000: traces
+6200 0000 0000 - 8000 0000 0000: -
+*/
+
+struct Mapping {
+ static const uptr kMetaShadowBeg = 0x300000000000ull;
+ static const uptr kMetaShadowEnd = 0x400000000000ull;
+ static const uptr kTraceMemBeg = 0x600000000000ull;
+ static const uptr kTraceMemEnd = 0x620000000000ull;
+ static const uptr kShadowBeg = 0x200000000000ull;
+ static const uptr kShadowEnd = 0x238000000000ull;
+ static const uptr kAppMemBeg = 0x000000001000ull;
+ static const uptr kAppMemEnd = 0x00e000000000ull;
+};
+
+#elif SANITIZER_GO && SANITIZER_WINDOWS
+
+/* Go on windows
+0000 0000 1000 - 0000 1000 0000: executable
+0000 1000 0000 - 00c0 0000 0000: -
+00c0 0000 0000 - 00e0 0000 0000: heap
+00e0 0000 0000 - 0100 0000 0000: -
+0100 0000 0000 - 0500 0000 0000: shadow
+0500 0000 0000 - 0560 0000 0000: -
+0560 0000 0000 - 0760 0000 0000: traces
+0760 0000 0000 - 07d0 0000 0000: metainfo (memory blocks and sync objects)
+07d0 0000 0000 - 8000 0000 0000: -
+*/
+
+struct Mapping {
+ static const uptr kMetaShadowBeg = 0x076000000000ull;
+ static const uptr kMetaShadowEnd = 0x07d000000000ull;
+ static const uptr kTraceMemBeg = 0x056000000000ull;
+ static const uptr kTraceMemEnd = 0x076000000000ull;
+ static const uptr kShadowBeg = 0x010000000000ull;
+ static const uptr kShadowEnd = 0x050000000000ull;
+ static const uptr kAppMemBeg = 0x000000001000ull;
+ static const uptr kAppMemEnd = 0x00e000000000ull;
+};
+
+#elif SANITIZER_GO && defined(__powerpc64__)
+
+/* Only Mapping46 and Mapping47 are currently supported for powerpc64 on Go. */
+
+/* Go on linux/powerpc64 (46-bit VMA)
+0000 0000 1000 - 0000 1000 0000: executable
+0000 1000 0000 - 00c0 0000 0000: -
+00c0 0000 0000 - 00e0 0000 0000: heap
+00e0 0000 0000 - 2000 0000 0000: -
+2000 0000 0000 - 2380 0000 0000: shadow
+2380 0000 0000 - 2400 0000 0000: -
+2400 0000 0000 - 3400 0000 0000: metainfo (memory blocks and sync objects)
+3400 0000 0000 - 3600 0000 0000: -
+3600 0000 0000 - 3800 0000 0000: traces
+3800 0000 0000 - 4000 0000 0000: -
+*/
+
+struct Mapping46 {
+ static const uptr kMetaShadowBeg = 0x240000000000ull;
+ static const uptr kMetaShadowEnd = 0x340000000000ull;
+ static const uptr kTraceMemBeg = 0x360000000000ull;
+ static const uptr kTraceMemEnd = 0x380000000000ull;
+ static const uptr kShadowBeg = 0x200000000000ull;
+ static const uptr kShadowEnd = 0x238000000000ull;
+ static const uptr kAppMemBeg = 0x000000001000ull;
+ static const uptr kAppMemEnd = 0x00e000000000ull;
+};
+
+/* Go on linux/powerpc64 (47-bit VMA)
+0000 0000 1000 - 0000 1000 0000: executable
+0000 1000 0000 - 00c0 0000 0000: -
+00c0 0000 0000 - 00e0 0000 0000: heap
+00e0 0000 0000 - 2000 0000 0000: -
+2000 0000 0000 - 3000 0000 0000: shadow
+3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects)
+4000 0000 0000 - 6000 0000 0000: -
+6000 0000 0000 - 6200 0000 0000: traces
+6200 0000 0000 - 8000 0000 0000: -
+*/
+
+struct Mapping47 {
+ static const uptr kMetaShadowBeg = 0x300000000000ull;
+ static const uptr kMetaShadowEnd = 0x400000000000ull;
+ static const uptr kTraceMemBeg = 0x600000000000ull;
+ static const uptr kTraceMemEnd = 0x620000000000ull;
+ static const uptr kShadowBeg = 0x200000000000ull;
+ static const uptr kShadowEnd = 0x300000000000ull;
+ static const uptr kAppMemBeg = 0x000000001000ull;
+ static const uptr kAppMemEnd = 0x00e000000000ull;
+};
+
+#elif SANITIZER_GO && defined(__aarch64__)
+
+/* Go on linux/aarch64 (48-bit VMA)
+0000 0000 1000 - 0000 1000 0000: executable
+0000 1000 0000 - 00c0 0000 0000: -
+00c0 0000 0000 - 00e0 0000 0000: heap
+00e0 0000 0000 - 2000 0000 0000: -
+2000 0000 0000 - 3000 0000 0000: shadow
+3000 0000 0000 - 4000 0000 0000: metainfo (memory blocks and sync objects)
+4000 0000 0000 - 6000 0000 0000: -
+6000 0000 0000 - 6200 0000 0000: traces
+6200 0000 0000 - 8000 0000 0000: -
+*/
+
+struct Mapping {
+ static const uptr kMetaShadowBeg = 0x300000000000ull;
+ static const uptr kMetaShadowEnd = 0x400000000000ull;
+ static const uptr kTraceMemBeg = 0x600000000000ull;
+ static const uptr kTraceMemEnd = 0x620000000000ull;
+ static const uptr kShadowBeg = 0x200000000000ull;
+ static const uptr kShadowEnd = 0x300000000000ull;
+ static const uptr kAppMemBeg = 0x000000001000ull;
+ static const uptr kAppMemEnd = 0x00e000000000ull;
+};
+
+// Indicates that the memory regions are selected at run time, based on the
+// detected VMA size.
+#define TSAN_RUNTIME_VMA 1
+
+#else
+# error "Unknown platform"
+#endif
+
+
+#ifdef TSAN_RUNTIME_VMA
+extern uptr vmaSize;
+#endif
+
+
+enum MappingType {
+ MAPPING_LO_APP_BEG,
+ MAPPING_LO_APP_END,
+ MAPPING_HI_APP_BEG,
+ MAPPING_HI_APP_END,
+#ifdef TSAN_MID_APP_RANGE
+ MAPPING_MID_APP_BEG,
+ MAPPING_MID_APP_END,
+#endif
+ MAPPING_HEAP_BEG,
+ MAPPING_HEAP_END,
+ MAPPING_APP_BEG,
+ MAPPING_APP_END,
+ MAPPING_SHADOW_BEG,
+ MAPPING_SHADOW_END,
+ MAPPING_META_SHADOW_BEG,
+ MAPPING_META_SHADOW_END,
+ MAPPING_TRACE_BEG,
+ MAPPING_TRACE_END,
+ MAPPING_VDSO_BEG,
+};
+
+template<typename Mapping, int Type>
+uptr MappingImpl(void) {
+ switch (Type) {
+#if !SANITIZER_GO
+ case MAPPING_LO_APP_BEG: return Mapping::kLoAppMemBeg;
+ case MAPPING_LO_APP_END: return Mapping::kLoAppMemEnd;
+# ifdef TSAN_MID_APP_RANGE
+ case MAPPING_MID_APP_BEG: return Mapping::kMidAppMemBeg;
+ case MAPPING_MID_APP_END: return Mapping::kMidAppMemEnd;
+# endif
+ case MAPPING_HI_APP_BEG: return Mapping::kHiAppMemBeg;
+ case MAPPING_HI_APP_END: return Mapping::kHiAppMemEnd;
+ case MAPPING_HEAP_BEG: return Mapping::kHeapMemBeg;
+ case MAPPING_HEAP_END: return Mapping::kHeapMemEnd;
+ case MAPPING_VDSO_BEG: return Mapping::kVdsoBeg;
+#else
+ case MAPPING_APP_BEG: return Mapping::kAppMemBeg;
+ case MAPPING_APP_END: return Mapping::kAppMemEnd;
+#endif
+ case MAPPING_SHADOW_BEG: return Mapping::kShadowBeg;
+ case MAPPING_SHADOW_END: return Mapping::kShadowEnd;
+ case MAPPING_META_SHADOW_BEG: return Mapping::kMetaShadowBeg;
+ case MAPPING_META_SHADOW_END: return Mapping::kMetaShadowEnd;
+ case MAPPING_TRACE_BEG: return Mapping::kTraceMemBeg;
+ case MAPPING_TRACE_END: return Mapping::kTraceMemEnd;
+ }
+}
+
+template<int Type>
+uptr MappingArchImpl(void) {
+#if defined(__aarch64__) && !defined(__APPLE__) && !SANITIZER_GO
+ switch (vmaSize) {
+ case 39: return MappingImpl<Mapping39, Type>();
+ case 42: return MappingImpl<Mapping42, Type>();
+ case 48: return MappingImpl<Mapping48, Type>();
+ }
+ DCHECK(0);
+ return 0;
+#elif defined(__powerpc64__)
+ switch (vmaSize) {
+#if !SANITIZER_GO
+ case 44: return MappingImpl<Mapping44, Type>();
+#endif
+ case 46: return MappingImpl<Mapping46, Type>();
+ case 47: return MappingImpl<Mapping47, Type>();
+ }
+ DCHECK(0);
+ return 0;
+#else
+ return MappingImpl<Mapping, Type>();
+#endif
+}
+
+#if !SANITIZER_GO
+ALWAYS_INLINE
+uptr LoAppMemBeg(void) {
+ return MappingArchImpl<MAPPING_LO_APP_BEG>();
+}
+ALWAYS_INLINE
+uptr LoAppMemEnd(void) {
+ return MappingArchImpl<MAPPING_LO_APP_END>();
+}
+
+#ifdef TSAN_MID_APP_RANGE
+ALWAYS_INLINE
+uptr MidAppMemBeg(void) {
+ return MappingArchImpl<MAPPING_MID_APP_BEG>();
+}
+ALWAYS_INLINE
+uptr MidAppMemEnd(void) {
+ return MappingArchImpl<MAPPING_MID_APP_END>();
+}
+#endif
+
+ALWAYS_INLINE
+uptr HeapMemBeg(void) {
+ return MappingArchImpl<MAPPING_HEAP_BEG>();
+}
+ALWAYS_INLINE
+uptr HeapMemEnd(void) {
+ return MappingArchImpl<MAPPING_HEAP_END>();
+}
+
+ALWAYS_INLINE
+uptr HiAppMemBeg(void) {
+ return MappingArchImpl<MAPPING_HI_APP_BEG>();
+}
+ALWAYS_INLINE
+uptr HiAppMemEnd(void) {
+ return MappingArchImpl<MAPPING_HI_APP_END>();
+}
+
+ALWAYS_INLINE
+uptr VdsoBeg(void) {
+ return MappingArchImpl<MAPPING_VDSO_BEG>();
+}
+
+#else
+
+ALWAYS_INLINE
+uptr AppMemBeg(void) {
+ return MappingArchImpl<MAPPING_APP_BEG>();
+}
+ALWAYS_INLINE
+uptr AppMemEnd(void) {
+ return MappingArchImpl<MAPPING_APP_END>();
+}
+
+#endif
+
+static inline
+bool GetUserRegion(int i, uptr *start, uptr *end) {
+ switch (i) {
+ default:
+ return false;
+#if !SANITIZER_GO
+ case 0:
+ *start = LoAppMemBeg();
+ *end = LoAppMemEnd();
+ return true;
+ case 1:
+ *start = HiAppMemBeg();
+ *end = HiAppMemEnd();
+ return true;
+ case 2:
+ *start = HeapMemBeg();
+ *end = HeapMemEnd();
+ return true;
+# ifdef TSAN_MID_APP_RANGE
+ case 3:
+ *start = MidAppMemBeg();
+ *end = MidAppMemEnd();
+ return true;
+# endif
+#else
+ case 0:
+ *start = AppMemBeg();
+ *end = AppMemEnd();
+ return true;
+#endif
+ }
+}
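+
+// A usage sketch (ProcessUserRange below is a hypothetical handler, not part
+// of this interface): callers probe regions by increasing the index until
+// the function returns false.
+//   uptr start, end;
+//   for (int i = 0; GetUserRegion(i, &start, &end); i++)
+//     ProcessUserRange(start, end);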
+
+ALWAYS_INLINE
+uptr ShadowBeg(void) {
+ return MappingArchImpl<MAPPING_SHADOW_BEG>();
+}
+ALWAYS_INLINE
+uptr ShadowEnd(void) {
+ return MappingArchImpl<MAPPING_SHADOW_END>();
+}
+
+ALWAYS_INLINE
+uptr MetaShadowBeg(void) {
+ return MappingArchImpl<MAPPING_META_SHADOW_BEG>();
+}
+ALWAYS_INLINE
+uptr MetaShadowEnd(void) {
+ return MappingArchImpl<MAPPING_META_SHADOW_END>();
+}
+
+ALWAYS_INLINE
+uptr TraceMemBeg(void) {
+ return MappingArchImpl<MAPPING_TRACE_BEG>();
+}
+ALWAYS_INLINE
+uptr TraceMemEnd(void) {
+ return MappingArchImpl<MAPPING_TRACE_END>();
+}
+
+
+template<typename Mapping>
+bool IsAppMemImpl(uptr mem) {
+#if !SANITIZER_GO
+ return (mem >= Mapping::kHeapMemBeg && mem < Mapping::kHeapMemEnd) ||
+# ifdef TSAN_MID_APP_RANGE
+ (mem >= Mapping::kMidAppMemBeg && mem < Mapping::kMidAppMemEnd) ||
+# endif
+ (mem >= Mapping::kLoAppMemBeg && mem < Mapping::kLoAppMemEnd) ||
+ (mem >= Mapping::kHiAppMemBeg && mem < Mapping::kHiAppMemEnd);
+#else
+ return mem >= Mapping::kAppMemBeg && mem < Mapping::kAppMemEnd;
+#endif
+}
+
+ALWAYS_INLINE
+bool IsAppMem(uptr mem) {
+#if defined(__aarch64__) && !defined(__APPLE__) && !SANITIZER_GO
+ switch (vmaSize) {
+ case 39: return IsAppMemImpl<Mapping39>(mem);
+ case 42: return IsAppMemImpl<Mapping42>(mem);
+ case 48: return IsAppMemImpl<Mapping48>(mem);
+ }
+ DCHECK(0);
+ return false;
+#elif defined(__powerpc64__)
+ switch (vmaSize) {
+#if !SANITIZER_GO
+ case 44: return IsAppMemImpl<Mapping44>(mem);
+#endif
+ case 46: return IsAppMemImpl<Mapping46>(mem);
+ case 47: return IsAppMemImpl<Mapping47>(mem);
+ }
+ DCHECK(0);
+ return false;
+#else
+ return IsAppMemImpl<Mapping>(mem);
+#endif
+}
+
+
+template<typename Mapping>
+bool IsShadowMemImpl(uptr mem) {
+ return mem >= Mapping::kShadowBeg && mem <= Mapping::kShadowEnd;
+}
+
+ALWAYS_INLINE
+bool IsShadowMem(uptr mem) {
+#if defined(__aarch64__) && !defined(__APPLE__) && !SANITIZER_GO
+ switch (vmaSize) {
+ case 39: return IsShadowMemImpl<Mapping39>(mem);
+ case 42: return IsShadowMemImpl<Mapping42>(mem);
+ case 48: return IsShadowMemImpl<Mapping48>(mem);
+ }
+ DCHECK(0);
+ return false;
+#elif defined(__powerpc64__)
+ switch (vmaSize) {
+#if !SANITIZER_GO
+ case 44: return IsShadowMemImpl<Mapping44>(mem);
+#endif
+ case 46: return IsShadowMemImpl<Mapping46>(mem);
+ case 47: return IsShadowMemImpl<Mapping47>(mem);
+ }
+ DCHECK(0);
+ return false;
+#else
+ return IsShadowMemImpl<Mapping>(mem);
+#endif
+}
+
+
+template<typename Mapping>
+bool IsMetaMemImpl(uptr mem) {
+ return mem >= Mapping::kMetaShadowBeg && mem <= Mapping::kMetaShadowEnd;
+}
+
+ALWAYS_INLINE
+bool IsMetaMem(uptr mem) {
+#if defined(__aarch64__) && !defined(__APPLE__) && !SANITIZER_GO
+ switch (vmaSize) {
+ case 39: return IsMetaMemImpl<Mapping39>(mem);
+ case 42: return IsMetaMemImpl<Mapping42>(mem);
+ case 48: return IsMetaMemImpl<Mapping48>(mem);
+ }
+ DCHECK(0);
+ return false;
+#elif defined(__powerpc64__)
+ switch (vmaSize) {
+#if !SANITIZER_GO
+ case 44: return IsMetaMemImpl<Mapping44>(mem);
+#endif
+ case 46: return IsMetaMemImpl<Mapping46>(mem);
+ case 47: return IsMetaMemImpl<Mapping47>(mem);
+ }
+ DCHECK(0);
+ return false;
+#else
+ return IsMetaMemImpl<Mapping>(mem);
+#endif
+}
+
+
+template<typename Mapping>
+uptr MemToShadowImpl(uptr x) {
+ DCHECK(IsAppMem(x));
+#if !SANITIZER_GO
+ return (((x) & ~(Mapping::kAppMemMsk | (kShadowCell - 1)))
+ ^ Mapping::kAppMemXor) * kShadowCnt;
+#else
+# if !SANITIZER_WINDOWS
+ return ((x & ~(kShadowCell - 1)) * kShadowCnt) | Mapping::kShadowBeg;
+# else
+ return ((x & ~(kShadowCell - 1)) * kShadowCnt) + Mapping::kShadowBeg;
+# endif
+#endif
+}
+
+ALWAYS_INLINE
+uptr MemToShadow(uptr x) {
+#if defined(__aarch64__) && !defined(__APPLE__) && !SANITIZER_GO
+ switch (vmaSize) {
+ case 39: return MemToShadowImpl<Mapping39>(x);
+ case 42: return MemToShadowImpl<Mapping42>(x);
+ case 48: return MemToShadowImpl<Mapping48>(x);
+ }
+ DCHECK(0);
+ return 0;
+#elif defined(__powerpc64__)
+ switch (vmaSize) {
+#if !SANITIZER_GO
+ case 44: return MemToShadowImpl<Mapping44>(x);
+#endif
+ case 46: return MemToShadowImpl<Mapping46>(x);
+ case 47: return MemToShadowImpl<Mapping47>(x);
+ }
+ DCHECK(0);
+ return 0;
+#else
+ return MemToShadowImpl<Mapping>(x);
+#endif
+}
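+
+// A worked example of the non-Go formula above, assuming kShadowCell == 8 and
+// kShadowCnt == 4 (their values in tsan_defs.h) and the aarch64 Mapping39
+// (kAppMemMsk = 0x7800000000, kAppMemXor = 0x0200000000):
+//   x                     = 0x0000001008 (in the low app range)
+//   x & ~(kAppMemMsk | 7) = 0x0000001008 (no masked bits are set)
+//   ^ kAppMemXor          = 0x0200001008
+//   * kShadowCnt          = 0x0800004020 (inside kShadowBeg..kShadowEnd)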
+
+
+template<typename Mapping>
+u32 *MemToMetaImpl(uptr x) {
+ DCHECK(IsAppMem(x));
+#if !SANITIZER_GO
+ return (u32*)(((((x) & ~(Mapping::kAppMemMsk | (kMetaShadowCell - 1)))) /
+ kMetaShadowCell * kMetaShadowSize) | Mapping::kMetaShadowBeg);
+#else
+# if !SANITIZER_WINDOWS
+ return (u32*)(((x & ~(kMetaShadowCell - 1)) /
+ kMetaShadowCell * kMetaShadowSize) | Mapping::kMetaShadowBeg);
+# else
+ return (u32*)(((x & ~(kMetaShadowCell - 1)) /
+ kMetaShadowCell * kMetaShadowSize) + Mapping::kMetaShadowBeg);
+# endif
+#endif
+}
+
+ALWAYS_INLINE
+u32 *MemToMeta(uptr x) {
+#if defined(__aarch64__) && !defined(__APPLE__) && !SANITIZER_GO
+ switch (vmaSize) {
+ case 39: return MemToMetaImpl<Mapping39>(x);
+ case 42: return MemToMetaImpl<Mapping42>(x);
+ case 48: return MemToMetaImpl<Mapping48>(x);
+ }
+ DCHECK(0);
+ return 0;
+#elif defined(__powerpc64__)
+ switch (vmaSize) {
+#if !SANITIZER_GO
+ case 44: return MemToMetaImpl<Mapping44>(x);
+#endif
+ case 46: return MemToMetaImpl<Mapping46>(x);
+ case 47: return MemToMetaImpl<Mapping47>(x);
+ }
+ DCHECK(0);
+ return 0;
+#else
+ return MemToMetaImpl<Mapping>(x);
+#endif
+}
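+
+// A worked example of the non-Go branch above, assuming kMetaShadowCell == 8
+// and kMetaShadowSize == 4 (their values in tsan_defs.h), again for Mapping39
+// and x = 0x0000001008:
+//   x & ~(kAppMemMsk | 7)               = 0x0000001008
+//   / kMetaShadowCell * kMetaShadowSize = 0x0000000804
+//   | kMetaShadowBeg                    = 0x3100000804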
+
+
+template<typename Mapping>
+uptr ShadowToMemImpl(uptr s) {
+ DCHECK(IsShadowMem(s));
+#if !SANITIZER_GO
+ // The shadow mapping is non-linear and we've lost some bits, so we don't have
+ // an easy way to restore the original app address. But the mapping is a
+ // bijection, so we try to restore the address as belonging to low/mid/high
+ // range consecutively and see if shadow->app->shadow mapping gives us the
+ // same address.
+ uptr p = (s / kShadowCnt) ^ Mapping::kAppMemXor;
+ if (p >= Mapping::kLoAppMemBeg && p < Mapping::kLoAppMemEnd &&
+ MemToShadow(p) == s)
+ return p;
+# ifdef TSAN_MID_APP_RANGE
+ p = ((s / kShadowCnt) ^ Mapping::kAppMemXor) +
+ (Mapping::kMidAppMemBeg & Mapping::kAppMemMsk);
+ if (p >= Mapping::kMidAppMemBeg && p < Mapping::kMidAppMemEnd &&
+ MemToShadow(p) == s)
+ return p;
+# endif
+ return ((s / kShadowCnt) ^ Mapping::kAppMemXor) | Mapping::kAppMemMsk;
+#else // #if !SANITIZER_GO
+# if !SANITIZER_WINDOWS
+ return (s & ~Mapping::kShadowBeg) / kShadowCnt;
+# else
+ return (s - Mapping::kShadowBeg) / kShadowCnt;
+# endif // SANITIZER_WINDOWS
+#endif
+}
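+
+// Continuing the Mapping39 example from MemToShadow above: for the shadow
+// address s = 0x0800004020, (s / kShadowCnt) ^ kAppMemXor = 0x0000001008,
+// which lies in the low app range and maps back to s, so it is returned by
+// the first check; the mid/high candidates are tried only when that fails.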
+
+ALWAYS_INLINE
+uptr ShadowToMem(uptr s) {
+#if defined(__aarch64__) && !defined(__APPLE__) && !SANITIZER_GO
+ switch (vmaSize) {
+ case 39: return ShadowToMemImpl<Mapping39>(s);
+ case 42: return ShadowToMemImpl<Mapping42>(s);
+ case 48: return ShadowToMemImpl<Mapping48>(s);
+ }
+ DCHECK(0);
+ return 0;
+#elif defined(__powerpc64__)
+ switch (vmaSize) {
+#if !SANITIZER_GO
+ case 44: return ShadowToMemImpl<Mapping44>(s);
+#endif
+ case 46: return ShadowToMemImpl<Mapping46>(s);
+ case 47: return ShadowToMemImpl<Mapping47>(s);
+ }
+ DCHECK(0);
+ return 0;
+#else
+ return ShadowToMemImpl<Mapping>(s);
+#endif
+}
+
+
+
+// The additional page is to catch shadow stack overflow as a page fault.
+// Windows wants 64K alignment for mmaps.
+const uptr kTotalTraceSize = (kTraceSize * sizeof(Event) + sizeof(Trace)
+ + (64 << 10) + (64 << 10) - 1) & ~((64 << 10) - 1);
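+
+// That is: take the event buffer plus its Trace header, add one extra 64K
+// block (the "additional page" that turns an overflow into a page fault),
+// and round the total up to a multiple of 64K so that consecutive per-thread
+// slots stay 64K-aligned.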
+
+template<typename Mapping>
+uptr GetThreadTraceImpl(int tid) {
+ uptr p = Mapping::kTraceMemBeg + (uptr)tid * kTotalTraceSize;
+ DCHECK_LT(p, Mapping::kTraceMemEnd);
+ return p;
+}
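+
+// Trace memory is thus an array of fixed-size per-thread slots: thread tid
+// owns [kTraceMemBeg + tid * kTotalTraceSize, kTraceMemBeg + (tid + 1) *
+// kTotalTraceSize), with the Trace header placed right after the event
+// buffer inside the slot (see GetThreadTraceHeaderImpl below).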
+
+ALWAYS_INLINE
+uptr GetThreadTrace(int tid) {
+#if defined(__aarch64__) && !defined(__APPLE__) && !SANITIZER_GO
+ switch (vmaSize) {
+ case 39: return GetThreadTraceImpl<Mapping39>(tid);
+ case 42: return GetThreadTraceImpl<Mapping42>(tid);
+ case 48: return GetThreadTraceImpl<Mapping48>(tid);
+ }
+ DCHECK(0);
+ return 0;
+#elif defined(__powerpc64__)
+ switch (vmaSize) {
+#if !SANITIZER_GO
+ case 44: return GetThreadTraceImpl<Mapping44>(tid);
+#endif
+ case 46: return GetThreadTraceImpl<Mapping46>(tid);
+ case 47: return GetThreadTraceImpl<Mapping47>(tid);
+ }
+ DCHECK(0);
+ return 0;
+#else
+ return GetThreadTraceImpl<Mapping>(tid);
+#endif
+}
+
+
+template<typename Mapping>
+uptr GetThreadTraceHeaderImpl(int tid) {
+ uptr p = Mapping::kTraceMemBeg + (uptr)tid * kTotalTraceSize
+ + kTraceSize * sizeof(Event);
+ DCHECK_LT(p, Mapping::kTraceMemEnd);
+ return p;
+}
+
+ALWAYS_INLINE
+uptr GetThreadTraceHeader(int tid) {
+#if defined(__aarch64__) && !defined(__APPLE__) && !SANITIZER_GO
+ switch (vmaSize) {
+ case 39: return GetThreadTraceHeaderImpl<Mapping39>(tid);
+ case 42: return GetThreadTraceHeaderImpl<Mapping42>(tid);
+ case 48: return GetThreadTraceHeaderImpl<Mapping48>(tid);
+ }
+ DCHECK(0);
+ return 0;
+#elif defined(__powerpc64__)
+ switch (vmaSize) {
+#if !SANITIZER_GO
+ case 44: return GetThreadTraceHeaderImpl<Mapping44>(tid);
+#endif
+ case 46: return GetThreadTraceHeaderImpl<Mapping46>(tid);
+ case 47: return GetThreadTraceHeaderImpl<Mapping47>(tid);
+ }
+ DCHECK(0);
+ return 0;
+#else
+ return GetThreadTraceHeaderImpl<Mapping>(tid);
+#endif
+}
+
+void InitializePlatform();
+void InitializePlatformEarly();
+void CheckAndProtect();
+void InitializeShadowMemoryPlatform();
+void FlushShadowMemory();
+void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive);
+int ExtractResolvFDs(void *state, int *fds, int nfd);
+int ExtractRecvmsgFDs(void *msg, int *fds, int nfd);
+uptr ExtractLongJmpSp(uptr *env);
+void ImitateTlsWrite(ThreadState *thr, uptr tls_addr, uptr tls_size);
+
+int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m,
+ void *abstime), void *c, void *m, void *abstime,
+ void(*cleanup)(void *arg), void *arg);
+
+void DestroyThreadState();
+
+} // namespace __tsan
+
+#endif // TSAN_PLATFORM_H
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_platform_linux.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_platform_linux.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_platform_linux.cc (revision 351984)
@@ -0,0 +1,515 @@
+//===-- tsan_platform_linux.cc --------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Linux-, FreeBSD- and NetBSD-specific code.
+//===----------------------------------------------------------------------===//
+
+
+#include "sanitizer_common/sanitizer_platform.h"
+#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_linux.h"
+#include "sanitizer_common/sanitizer_platform_limits_netbsd.h"
+#include "sanitizer_common/sanitizer_platform_limits_posix.h"
+#include "sanitizer_common/sanitizer_posix.h"
+#include "sanitizer_common/sanitizer_procmaps.h"
+#include "sanitizer_common/sanitizer_stoptheworld.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "tsan_platform.h"
+#include "tsan_rtl.h"
+#include "tsan_flags.h"
+
+#include <fcntl.h>
+#include <pthread.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdarg.h>
+#include <sys/mman.h>
+#if SANITIZER_LINUX
+#include <sys/personality.h>
+#include <setjmp.h>
+#endif
+#include <sys/syscall.h>
+#include <sys/socket.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/resource.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <sched.h>
+#include <dlfcn.h>
+#if SANITIZER_LINUX
+#define __need_res_state
+#include <resolv.h>
+#endif
+
+#ifdef sa_handler
+# undef sa_handler
+#endif
+
+#ifdef sa_sigaction
+# undef sa_sigaction
+#endif
+
+#if SANITIZER_FREEBSD
+extern "C" void *__libc_stack_end;
+void *__libc_stack_end = 0;
+#endif
+
+#if SANITIZER_LINUX && defined(__aarch64__) && !SANITIZER_GO
+# define INIT_LONGJMP_XOR_KEY 1
+#else
+# define INIT_LONGJMP_XOR_KEY 0
+#endif
+
+#if INIT_LONGJMP_XOR_KEY
+#include "interception/interception.h"
+// Must be declared outside of other namespaces.
+DECLARE_REAL(int, _setjmp, void *env)
+#endif
+
+namespace __tsan {
+
+#if INIT_LONGJMP_XOR_KEY
+static void InitializeLongjmpXorKey();
+static uptr longjmp_xor_key;
+#endif
+
+#ifdef TSAN_RUNTIME_VMA
+// Runtime detected VMA size.
+uptr vmaSize;
+#endif
+
+enum {
+ MemTotal = 0,
+ MemShadow = 1,
+ MemMeta = 2,
+ MemFile = 3,
+ MemMmap = 4,
+ MemTrace = 5,
+ MemHeap = 6,
+ MemOther = 7,
+ MemCount = 8,
+};
+
+void FillProfileCallback(uptr p, uptr rss, bool file,
+ uptr *mem, uptr stats_size) {
+ mem[MemTotal] += rss;
+ if (p >= ShadowBeg() && p < ShadowEnd())
+ mem[MemShadow] += rss;
+ else if (p >= MetaShadowBeg() && p < MetaShadowEnd())
+ mem[MemMeta] += rss;
+#if !SANITIZER_GO
+ else if (p >= HeapMemBeg() && p < HeapMemEnd())
+ mem[MemHeap] += rss;
+ else if (p >= LoAppMemBeg() && p < LoAppMemEnd())
+ mem[file ? MemFile : MemMmap] += rss;
+ else if (p >= HiAppMemBeg() && p < HiAppMemEnd())
+ mem[file ? MemFile : MemMmap] += rss;
+#else
+ else if (p >= AppMemBeg() && p < AppMemEnd())
+ mem[file ? MemFile : MemMmap] += rss;
+#endif
+ else if (p >= TraceMemBeg() && p < TraceMemEnd())
+ mem[MemTrace] += rss;
+ else
+ mem[MemOther] += rss;
+}
+
+void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) {
+ uptr mem[MemCount];
+ internal_memset(mem, 0, sizeof(mem[0]) * MemCount);
+ __sanitizer::GetMemoryProfile(FillProfileCallback, mem, 7);
+ StackDepotStats *stacks = StackDepotGetStats();
+ internal_snprintf(buf, buf_size,
+ "RSS %zd MB: shadow:%zd meta:%zd file:%zd mmap:%zd"
+ " trace:%zd heap:%zd other:%zd stacks=%zd[%zd] nthr=%zd/%zd\n",
+ mem[MemTotal] >> 20, mem[MemShadow] >> 20, mem[MemMeta] >> 20,
+ mem[MemFile] >> 20, mem[MemMmap] >> 20, mem[MemTrace] >> 20,
+ mem[MemHeap] >> 20, mem[MemOther] >> 20,
+ stacks->allocated >> 20, stacks->n_uniq_ids,
+ nlive, nthread);
+}
+
+#if SANITIZER_LINUX
+void FlushShadowMemoryCallback(
+ const SuspendedThreadsList &suspended_threads_list,
+ void *argument) {
+ ReleaseMemoryPagesToOS(ShadowBeg(), ShadowEnd());
+}
+#endif
+
+void FlushShadowMemory() {
+#if SANITIZER_LINUX
+ StopTheWorld(FlushShadowMemoryCallback, 0);
+#endif
+}
+
+#if !SANITIZER_GO
+// Mark shadow for .rodata sections with the special kShadowRodata marker.
+// Accesses to .rodata can't race, so this saves time, memory and trace space.
+static void MapRodata() {
+ // First create temp file.
+ const char *tmpdir = GetEnv("TMPDIR");
+ if (tmpdir == 0)
+ tmpdir = GetEnv("TEST_TMPDIR");
+#ifdef P_tmpdir
+ if (tmpdir == 0)
+ tmpdir = P_tmpdir;
+#endif
+ if (tmpdir == 0)
+ return;
+ char name[256];
+ internal_snprintf(name, sizeof(name), "%s/tsan.rodata.%d",
+ tmpdir, (int)internal_getpid());
+ uptr openrv = internal_open(name, O_RDWR | O_CREAT | O_EXCL, 0600);
+ if (internal_iserror(openrv))
+ return;
+ internal_unlink(name); // Unlink it now, so that the 'name' buffer can be reused below.
+ fd_t fd = openrv;
+ // Fill the file with kShadowRodata.
+ const uptr kMarkerSize = 512 * 1024 / sizeof(u64);
+ InternalMmapVector<u64> marker(kMarkerSize);
+ // volatile to prevent insertion of memset
+ for (volatile u64 *p = marker.data(); p < marker.data() + kMarkerSize; p++)
+ *p = kShadowRodata;
+ internal_write(fd, marker.data(), marker.size() * sizeof(u64));
+ // Map one page to check that mmap works (with MAP_ANONYMOUS the fd itself is not mapped).
+ uptr page = internal_mmap(0, GetPageSizeCached(), PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, fd, 0);
+ if (internal_iserror(page)) {
+ internal_close(fd);
+ return;
+ }
+ // Map the file into shadow of .rodata sections.
+ MemoryMappingLayout proc_maps(/*cache_enabled*/true);
+ // Reusing the buffer 'name'.
+ MemoryMappedSegment segment(name, ARRAY_SIZE(name));
+ while (proc_maps.Next(&segment)) {
+ if (segment.filename[0] != 0 && segment.filename[0] != '[' &&
+ segment.IsReadable() && segment.IsExecutable() &&
+ !segment.IsWritable() && IsAppMem(segment.start)) {
+ // Assume it's .rodata
+ char *shadow_start = (char *)MemToShadow(segment.start);
+ char *shadow_end = (char *)MemToShadow(segment.end);
+ for (char *p = shadow_start; p < shadow_end;
+ p += marker.size() * sizeof(u64)) {
+ internal_mmap(p, Min<uptr>(marker.size() * sizeof(u64), shadow_end - p),
+ PROT_READ, MAP_PRIVATE | MAP_FIXED, fd, 0);
+ }
+ }
+ }
+ internal_close(fd);
+}
+
+void InitializeShadowMemoryPlatform() {
+ MapRodata();
+}
+
+#endif // #if !SANITIZER_GO
+
+void InitializePlatformEarly() {
+#ifdef TSAN_RUNTIME_VMA
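+ // The current stack frame sits near the top of the address space, so the
+ // index of its most significant set bit, plus one, gives the VMA width:
+ // e.g. with a 47-bit VMA the frame address is near 0x7fffffffffff, whose
+ // most significant set bit is bit 46, yielding vmaSize == 47.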
+ vmaSize =
+ (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
+#if defined(__aarch64__)
+# if !SANITIZER_GO
+ if (vmaSize != 39 && vmaSize != 42 && vmaSize != 48) {
+ Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
+ Printf("FATAL: Found %zd - Supported 39, 42 and 48\n", vmaSize);
+ Die();
+ }
+#else
+ if (vmaSize != 48) {
+ Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
+ Printf("FATAL: Found %zd - Supported 48\n", vmaSize);
+ Die();
+ }
+#endif
+#elif defined(__powerpc64__)
+# if !SANITIZER_GO
+ if (vmaSize != 44 && vmaSize != 46 && vmaSize != 47) {
+ Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
+ Printf("FATAL: Found %zd - Supported 44, 46, and 47\n", vmaSize);
+ Die();
+ }
+# else
+ if (vmaSize != 46 && vmaSize != 47) {
+ Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
+ Printf("FATAL: Found %zd - Supported 46, and 47\n", vmaSize);
+ Die();
+ }
+# endif
+#endif
+#endif
+}
+
+void InitializePlatform() {
+ DisableCoreDumperIfNecessary();
+
+ // Go maps shadow memory lazily and works fine with limited address space.
+ // Unlimited stack is not a problem as well, because the executable
+ // is not compiled with -pie.
+#if !SANITIZER_GO
+ {
+ bool reexec = false;
+ // TSan doesn't play well with unlimited stack size (as stack
+ // overlaps with shadow memory). If we detect unlimited stack size,
+ // we re-exec the program with limited stack size as a best effort.
+ if (StackSizeIsUnlimited()) {
+ const uptr kMaxStackSize = 32 * 1024 * 1024;
+ VReport(1, "Program is run with unlimited stack size, which wouldn't "
+ "work with ThreadSanitizer.\n"
+ "Re-execing with stack size limited to %zd bytes.\n",
+ kMaxStackSize);
+ SetStackSizeLimitInBytes(kMaxStackSize);
+ reexec = true;
+ }
+
+ if (!AddressSpaceIsUnlimited()) {
+ Report("WARNING: Program is run with limited virtual address space,"
+ " which wouldn't work with ThreadSanitizer.\n");
+ Report("Re-execing with unlimited virtual address space.\n");
+ SetAddressSpaceUnlimited();
+ reexec = true;
+ }
+#if SANITIZER_LINUX && defined(__aarch64__)
+ // After the patch "arm64: mm: support ARCH_MMAP_RND_BITS." was introduced
+ // in the Linux kernel, the random gap between the stack and the mapped area
+ // increased from 128M to 36G on 39-bit aarch64. As it is almost impossible
+ // to cover such a big range, we disable randomized virtual address space on
+ // aarch64.
+ int old_personality = personality(0xffffffff);
+ if (old_personality != -1 && (old_personality & ADDR_NO_RANDOMIZE) == 0) {
+ VReport(1, "WARNING: Program is run with randomized virtual address "
+ "space, which wouldn't work with ThreadSanitizer.\n"
+ "Re-execing with fixed virtual address space.\n");
+ CHECK_NE(personality(old_personality | ADDR_NO_RANDOMIZE), -1);
+ reexec = true;
+ }
+ // Initialize the xor key used in {sig}{set,long}jump.
+ InitializeLongjmpXorKey();
+#endif
+ if (reexec)
+ ReExec();
+ }
+
+ CheckAndProtect();
+ InitTlsSize();
+#endif // !SANITIZER_GO
+}
+
+#if !SANITIZER_GO
+// Extract file descriptors passed to glibc internal __res_iclose function.
+// This is required to properly "close" the fds, because we do not see internal
+// closes within glibc. The code is a pure hack.
+int ExtractResolvFDs(void *state, int *fds, int nfd) {
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+ int cnt = 0;
+ struct __res_state *statp = (struct __res_state*)state;
+ for (int i = 0; i < MAXNS && cnt < nfd; i++) {
+ if (statp->_u._ext.nsaddrs[i] && statp->_u._ext.nssocks[i] != -1)
+ fds[cnt++] = statp->_u._ext.nssocks[i];
+ }
+ return cnt;
+#else
+ return 0;
+#endif
+}
+
+// Extract file descriptors passed via UNIX domain sockets.
+// This is required to properly handle "open" of these fds.
+// see 'man recvmsg' and 'man 3 cmsg'.
+int ExtractRecvmsgFDs(void *msgp, int *fds, int nfd) {
+ int res = 0;
+ msghdr *msg = (msghdr*)msgp;
+ struct cmsghdr *cmsg = CMSG_FIRSTHDR(msg);
+ for (; cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
+ if (cmsg->cmsg_level != SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS)
+ continue;
+ int n = (cmsg->cmsg_len - CMSG_LEN(0)) / sizeof(fds[0]);
+ for (int i = 0; i < n; i++) {
+ fds[res++] = ((int*)CMSG_DATA(cmsg))[i];
+ if (res == nfd)
+ return res;
+ }
+ }
+ return res;
+}
+
+// Reverse operation of libc stack pointer mangling
+static uptr UnmangleLongJmpSp(uptr mangled_sp) {
+#if defined(__x86_64__)
+# if SANITIZER_LINUX
+ // Reverse of:
+ // xor %fs:0x30, %rsi
+ // rol $0x11, %rsi
+ uptr sp;
+ asm("ror $0x11, %0 \n"
+ "xor %%fs:0x30, %0 \n"
+ : "=r" (sp)
+ : "0" (mangled_sp));
+ return sp;
+# else
+ return mangled_sp;
+# endif
+#elif defined(__aarch64__)
+# if SANITIZER_LINUX
+ return mangled_sp ^ longjmp_xor_key;
+# else
+ return mangled_sp;
+# endif
+#elif defined(__powerpc64__)
+ // Reverse of:
+ // ld r4, -28696(r13)
+ // xor r4, r3, r4
+ uptr xor_key;
+ asm("ld %0, -28696(%%r13)" : "=r" (xor_key));
+ return mangled_sp ^ xor_key;
+#elif defined(__mips__)
+ return mangled_sp;
+#else
+ #error "Unknown platform"
+#endif
+}
+
+#ifdef __powerpc__
+# define LONG_JMP_SP_ENV_SLOT 0
+#elif SANITIZER_FREEBSD
+# define LONG_JMP_SP_ENV_SLOT 2
+#elif SANITIZER_NETBSD
+# define LONG_JMP_SP_ENV_SLOT 6
+#elif SANITIZER_LINUX
+# ifdef __aarch64__
+# define LONG_JMP_SP_ENV_SLOT 13
+# elif defined(__mips64)
+# define LONG_JMP_SP_ENV_SLOT 1
+# else
+# define LONG_JMP_SP_ENV_SLOT 6
+# endif
+#endif
+
+uptr ExtractLongJmpSp(uptr *env) {
+ uptr mangled_sp = env[LONG_JMP_SP_ENV_SLOT];
+ return UnmangleLongJmpSp(mangled_sp);
+}
+
+#if INIT_LONGJMP_XOR_KEY
+// GLIBC mangles the function pointers in jmp_buf (used in {set,long}*jmp
+// functions) by XORing them with a random key. For AArch64 it is a global
+// variable rather than a TCB one (as for x86_64/powerpc). We obtain the key by
+// issuing a setjmp and XORing the SP pointer values to derive the key.
+static void InitializeLongjmpXorKey() {
+ // 1. Call REAL(setjmp), which stores the mangled SP in env.
+ jmp_buf env;
+ REAL(_setjmp)(env);
+
+ // 2. Retrieve vanilla/mangled SP.
+ uptr sp;
+ asm("mov %0, sp" : "=r" (sp));
+ uptr mangled_sp = ((uptr *)&env)[LONG_JMP_SP_ENV_SLOT];
+
+ // 3. xor SPs to obtain key.
+ longjmp_xor_key = mangled_sp ^ sp;
+}
+#endif
+
+void ImitateTlsWrite(ThreadState *thr, uptr tls_addr, uptr tls_size) {
+ // Check that the thr object is in tls.
+ const uptr thr_beg = (uptr)thr;
+ const uptr thr_end = (uptr)thr + sizeof(*thr);
+ CHECK_GE(thr_beg, tls_addr);
+ CHECK_LE(thr_beg, tls_addr + tls_size);
+ CHECK_GE(thr_end, tls_addr);
+ CHECK_LE(thr_end, tls_addr + tls_size);
+ // Since the thr object is huge, skip it.
+ MemoryRangeImitateWrite(thr, /*pc=*/2, tls_addr, thr_beg - tls_addr);
+ MemoryRangeImitateWrite(thr, /*pc=*/2, thr_end,
+ tls_addr + tls_size - thr_end);
+}
+
+// Note: this function runs with async signals enabled,
+// so it must not touch any tsan state.
+int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m,
+ void *abstime), void *c, void *m, void *abstime,
+ void(*cleanup)(void *arg), void *arg) {
+ // pthread_cleanup_push/pop are a hardcore macro mess; we can't intercept
+ // them, nor call them, without including pthread.h.
+ int res;
+ pthread_cleanup_push(cleanup, arg);
+ res = fn(c, m, abstime);
+ pthread_cleanup_pop(0);
+ return res;
+}
+#endif // !SANITIZER_GO
+
+#if !SANITIZER_GO
+void ReplaceSystemMalloc() { }
+#endif
+
+#if !SANITIZER_GO
+#if SANITIZER_ANDROID
+// On Android, one thread can call intercepted functions after
+// DestroyThreadState(), so add a fake thread state for "dead" threads.
+static ThreadState *dead_thread_state = nullptr;
+
+ThreadState *cur_thread() {
+ ThreadState* thr = reinterpret_cast<ThreadState*>(*get_android_tls_ptr());
+ if (thr == nullptr) {
+ __sanitizer_sigset_t emptyset;
+ internal_sigfillset(&emptyset);
+ __sanitizer_sigset_t oldset;
+ CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &emptyset, &oldset));
+ thr = reinterpret_cast<ThreadState*>(*get_android_tls_ptr());
+ if (thr == nullptr) {
+ thr = reinterpret_cast<ThreadState*>(MmapOrDie(sizeof(ThreadState),
+ "ThreadState"));
+ *get_android_tls_ptr() = reinterpret_cast<uptr>(thr);
+ if (dead_thread_state == nullptr) {
+ dead_thread_state = reinterpret_cast<ThreadState*>(
+ MmapOrDie(sizeof(ThreadState), "ThreadState"));
+ dead_thread_state->fast_state.SetIgnoreBit();
+ dead_thread_state->ignore_interceptors = 1;
+ dead_thread_state->is_dead = true;
+ *const_cast<int*>(&dead_thread_state->tid) = -1;
+ CHECK_EQ(0, internal_mprotect(dead_thread_state, sizeof(ThreadState),
+ PROT_READ));
+ }
+ }
+ CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &oldset, nullptr));
+ }
+ return thr;
+}
+
+void set_cur_thread(ThreadState *thr) {
+ *get_android_tls_ptr() = reinterpret_cast<uptr>(thr);
+}
+
+void cur_thread_finalize() {
+ __sanitizer_sigset_t emptyset;
+ internal_sigfillset(&emptyset);
+ __sanitizer_sigset_t oldset;
+ CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &emptyset, &oldset));
+ ThreadState* thr = reinterpret_cast<ThreadState*>(*get_android_tls_ptr());
+ if (thr != dead_thread_state) {
+ *get_android_tls_ptr() = reinterpret_cast<uptr>(dead_thread_state);
+ UnmapOrDie(thr, sizeof(ThreadState));
+ }
+ CHECK_EQ(0, internal_sigprocmask(SIG_SETMASK, &oldset, nullptr));
+}
+#endif // SANITIZER_ANDROID
+#endif // if !SANITIZER_GO
+
+} // namespace __tsan
+
+#endif // SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_platform_mac.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_platform_mac.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_platform_mac.cc (revision 351984)
@@ -0,0 +1,316 @@
+//===-- tsan_platform_mac.cc ----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Mac-specific code.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#if SANITIZER_MAC
+
+#include "sanitizer_common/sanitizer_atomic.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_posix.h"
+#include "sanitizer_common/sanitizer_procmaps.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "tsan_platform.h"
+#include "tsan_rtl.h"
+#include "tsan_flags.h"
+
+#include <mach/mach.h>
+#include <pthread.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdarg.h>
+#include <sys/mman.h>
+#include <sys/syscall.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/resource.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <errno.h>
+#include <sched.h>
+
+namespace __tsan {
+
+#if !SANITIZER_GO
+static void *SignalSafeGetOrAllocate(uptr *dst, uptr size) {
+ atomic_uintptr_t *a = (atomic_uintptr_t *)dst;
+ void *val = (void *)atomic_load_relaxed(a);
+ atomic_signal_fence(memory_order_acquire); // Turns the previous load into
+ // acquire wrt signals.
+ if (UNLIKELY(val == nullptr)) {
+ val = (void *)internal_mmap(nullptr, size, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON, -1, 0);
+ CHECK(val);
+ void *cmp = nullptr;
+ if (!atomic_compare_exchange_strong(a, (uintptr_t *)&cmp, (uintptr_t)val,
+ memory_order_acq_rel)) {
+ internal_munmap(val, size);
+ val = cmp;
+ }
+ }
+ return val;
+}
+
+// On OS X, accessing TLVs via __thread or manually by using pthread_key_* is
+// problematic, because there are several places where interceptors are called
+// when TLVs are not accessible (early process startup, thread cleanup, ...).
+// The following provides a "poor man's TLV" implementation, where we use the
+// shadow memory of the pointer returned by pthread_self() to store a pointer to
+// the ThreadState object. The main thread's ThreadState is stored separately
+// in a static variable, because we need to access it even before the
+// shadow memory is set up.
+static uptr main_thread_identity = 0;
+ALIGNED(64) static char main_thread_state[sizeof(ThreadState)];
+static ThreadState *main_thread_state_loc = (ThreadState *)main_thread_state;
+
+static ThreadState **cur_thread_location() {
+ uptr thread_identity = (uptr)pthread_self();
+ if (thread_identity == main_thread_identity || main_thread_identity == 0)
+ return &main_thread_state_loc;
+ return (ThreadState **)MemToShadow(thread_identity);
+}
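+
+// A sketch of the trick: pthread_self() returns a stable per-thread address,
+// and the shadow cells that MemToShadow() maps it to are repurposed as
+// storage for the ThreadState* (the pthread_t structure itself is not
+// expected to be instrumented, so its shadow is free for this use).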
+
+ThreadState *cur_thread() {
+ return (ThreadState *)SignalSafeGetOrAllocate(
+ (uptr *)cur_thread_location(), sizeof(ThreadState));
+}
+
+void set_cur_thread(ThreadState *thr) {
+ *cur_thread_location() = thr;
+}
+
+// TODO(kuba.brecka): This is not async-signal-safe. In particular, we call
+// munmap first and only then clear the thread state location; if we receive
+// a signal in between, the handler will try to access the unmapped
+// ThreadState.
+void cur_thread_finalize() {
+ ThreadState **thr_state_loc = cur_thread_location();
+ if (thr_state_loc == &main_thread_state_loc) {
+ // Calling dispatch_main() or xpc_main() actually invokes pthread_exit to
+ // exit the main thread. Let's keep the main thread's ThreadState.
+ return;
+ }
+ internal_munmap(*thr_state_loc, sizeof(ThreadState));
+ *thr_state_loc = nullptr;
+}
+#endif
+
+void FlushShadowMemory() {
+}
+
+static void RegionMemUsage(uptr start, uptr end, uptr *res, uptr *dirty) {
+ vm_address_t address = start;
+ vm_address_t end_address = end;
+ uptr resident_pages = 0;
+ uptr dirty_pages = 0;
+ while (address < end_address) {
+ vm_size_t vm_region_size;
+ mach_msg_type_number_t count = VM_REGION_EXTENDED_INFO_COUNT;
+ vm_region_extended_info_data_t vm_region_info;
+ mach_port_t object_name;
+ kern_return_t ret = vm_region_64(
+ mach_task_self(), &address, &vm_region_size, VM_REGION_EXTENDED_INFO,
+ (vm_region_info_t)&vm_region_info, &count, &object_name);
+ if (ret != KERN_SUCCESS) break;
+
+ resident_pages += vm_region_info.pages_resident;
+ dirty_pages += vm_region_info.pages_dirtied;
+
+ address += vm_region_size;
+ }
+ *res = resident_pages * GetPageSizeCached();
+ *dirty = dirty_pages * GetPageSizeCached();
+}
+
+void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) {
+ uptr shadow_res, shadow_dirty;
+ uptr meta_res, meta_dirty;
+ uptr trace_res, trace_dirty;
+ RegionMemUsage(ShadowBeg(), ShadowEnd(), &shadow_res, &shadow_dirty);
+ RegionMemUsage(MetaShadowBeg(), MetaShadowEnd(), &meta_res, &meta_dirty);
+ RegionMemUsage(TraceMemBeg(), TraceMemEnd(), &trace_res, &trace_dirty);
+
+#if !SANITIZER_GO
+ uptr low_res, low_dirty;
+ uptr high_res, high_dirty;
+ uptr heap_res, heap_dirty;
+ RegionMemUsage(LoAppMemBeg(), LoAppMemEnd(), &low_res, &low_dirty);
+ RegionMemUsage(HiAppMemBeg(), HiAppMemEnd(), &high_res, &high_dirty);
+ RegionMemUsage(HeapMemBeg(), HeapMemEnd(), &heap_res, &heap_dirty);
+#else // !SANITIZER_GO
+ uptr app_res, app_dirty;
+ RegionMemUsage(AppMemBeg(), AppMemEnd(), &app_res, &app_dirty);
+#endif
+
+ StackDepotStats *stacks = StackDepotGetStats();
+ internal_snprintf(buf, buf_size,
+ "shadow (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
+ "meta (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
+ "traces (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
+#if !SANITIZER_GO
+ "low app (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
+ "high app (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
+ "heap (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
+#else // !SANITIZER_GO
+ "app (0x%016zx-0x%016zx): resident %zd kB, dirty %zd kB\n"
+#endif
+ "stacks: %zd unique IDs, %zd kB allocated\n"
+ "threads: %zd total, %zd live\n"
+ "------------------------------\n",
+ ShadowBeg(), ShadowEnd(), shadow_res / 1024, shadow_dirty / 1024,
+ MetaShadowBeg(), MetaShadowEnd(), meta_res / 1024, meta_dirty / 1024,
+ TraceMemBeg(), TraceMemEnd(), trace_res / 1024, trace_dirty / 1024,
+#if !SANITIZER_GO
+ LoAppMemBeg(), LoAppMemEnd(), low_res / 1024, low_dirty / 1024,
+ HiAppMemBeg(), HiAppMemEnd(), high_res / 1024, high_dirty / 1024,
+ HeapMemBeg(), HeapMemEnd(), heap_res / 1024, heap_dirty / 1024,
+#else // !SANITIZER_GO
+ AppMemBeg(), AppMemEnd(), app_res / 1024, app_dirty / 1024,
+#endif
+ stacks->n_uniq_ids, stacks->allocated / 1024,
+ nthread, nlive);
+}
+
+#if !SANITIZER_GO
+void InitializeShadowMemoryPlatform() { }
+
+// On OS X, GCD worker threads are created without a call to pthread_create. We
+// need to properly register these threads with ThreadCreate and ThreadStart.
+// These threads don't have a parent thread, as they are created "spuriously".
+// We're using a libpthread API that notifies us about a newly created thread.
+// The `thread == pthread_self()` check indicates this is actually a worker
+// thread. If it's just a regular thread, this hook is called on the parent
+// thread.
+typedef void (*pthread_introspection_hook_t)(unsigned int event,
+ pthread_t thread, void *addr,
+ size_t size);
+extern "C" pthread_introspection_hook_t pthread_introspection_hook_install(
+ pthread_introspection_hook_t hook);
+static const uptr PTHREAD_INTROSPECTION_THREAD_CREATE = 1;
+static const uptr PTHREAD_INTROSPECTION_THREAD_TERMINATE = 3;
+static pthread_introspection_hook_t prev_pthread_introspection_hook;
+static void my_pthread_introspection_hook(unsigned int event, pthread_t thread,
+ void *addr, size_t size) {
+ if (event == PTHREAD_INTROSPECTION_THREAD_CREATE) {
+ if (thread == pthread_self()) {
+ // The current thread is a newly created GCD worker thread.
+ ThreadState *thr = cur_thread();
+ Processor *proc = ProcCreate();
+ ProcWire(proc, thr);
+ ThreadState *parent_thread_state = nullptr; // No parent.
+ int tid = ThreadCreate(parent_thread_state, 0, (uptr)thread, true);
+ CHECK_NE(tid, 0);
+ ThreadStart(thr, tid, GetTid(), ThreadType::Worker);
+ }
+ } else if (event == PTHREAD_INTROSPECTION_THREAD_TERMINATE) {
+ if (thread == pthread_self()) {
+ ThreadState *thr = cur_thread();
+ if (thr->tctx) {
+ DestroyThreadState();
+ }
+ }
+ }
+
+ if (prev_pthread_introspection_hook != nullptr)
+ prev_pthread_introspection_hook(event, thread, addr, size);
+}
+#endif
+
+void InitializePlatformEarly() {
+#if defined(__aarch64__)
+ uptr max_vm = GetMaxUserVirtualAddress() + 1;
+ if (max_vm != Mapping::kHiAppMemEnd) {
+ Printf("ThreadSanitizer: unsupported vm address limit %p, expected %p.\n",
+ max_vm, Mapping::kHiAppMemEnd);
+ Die();
+ }
+#endif
+}
+
+static uptr longjmp_xor_key = 0;
+
+void InitializePlatform() {
+ DisableCoreDumperIfNecessary();
+#if !SANITIZER_GO
+ CheckAndProtect();
+
+ CHECK_EQ(main_thread_identity, 0);
+ main_thread_identity = (uptr)pthread_self();
+
+ prev_pthread_introspection_hook =
+ pthread_introspection_hook_install(&my_pthread_introspection_hook);
+#endif
+
+ if (GetMacosVersion() >= MACOS_VERSION_MOJAVE) {
+ // Libsystem currently uses a process-global key; this might change.
+ const unsigned kTLSLongjmpXorKeySlot = 0x7;
+ longjmp_xor_key = (uptr)pthread_getspecific(kTLSLongjmpXorKeySlot);
+ }
+}
+
+#ifdef __aarch64__
+# define LONG_JMP_SP_ENV_SLOT \
+ ((GetMacosVersion() >= MACOS_VERSION_MOJAVE) ? 12 : 13)
+#else
+# define LONG_JMP_SP_ENV_SLOT 2
+#endif
+
+uptr ExtractLongJmpSp(uptr *env) {
+ uptr mangled_sp = env[LONG_JMP_SP_ENV_SLOT];
+ uptr sp = mangled_sp ^ longjmp_xor_key;
+ return sp;
+}
+
+#if !SANITIZER_GO
+void ImitateTlsWrite(ThreadState *thr, uptr tls_addr, uptr tls_size) {
+ // The pointer to the ThreadState object is stored in the shadow memory
+ // of the tls.
+ uptr tls_end = tls_addr + tls_size;
+ uptr thread_identity = (uptr)pthread_self();
+ if (thread_identity == main_thread_identity) {
+ MemoryRangeImitateWrite(thr, /*pc=*/2, tls_addr, tls_size);
+ } else {
+ uptr thr_state_start = thread_identity;
+ uptr thr_state_end = thr_state_start + sizeof(uptr);
+ CHECK_GE(thr_state_start, tls_addr);
+ CHECK_LE(thr_state_start, tls_addr + tls_size);
+ CHECK_GE(thr_state_end, tls_addr);
+ CHECK_LE(thr_state_end, tls_addr + tls_size);
+ MemoryRangeImitateWrite(thr, /*pc=*/2, tls_addr,
+ thr_state_start - tls_addr);
+ MemoryRangeImitateWrite(thr, /*pc=*/2, thr_state_end,
+ tls_end - thr_state_end);
+ }
+}
+#endif
+
+#if !SANITIZER_GO
+// Note: this function runs with async signals enabled,
+// so it must not touch any tsan state.
+int call_pthread_cancel_with_cleanup(int(*fn)(void *c, void *m,
+ void *abstime), void *c, void *m, void *abstime,
+ void(*cleanup)(void *arg), void *arg) {
+ // pthread_cleanup_push/pop are a hardcore macro mess; we can't intercept
+ // them, nor call them, without including pthread.h.
+ int res;
+ pthread_cleanup_push(cleanup, arg);
+ res = fn(c, m, abstime);
+ pthread_cleanup_pop(0);
+ return res;
+}
+#endif
+
+} // namespace __tsan
+
+#endif // SANITIZER_MAC
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_platform_posix.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_platform_posix.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_platform_posix.cc (revision 351984)
@@ -0,0 +1,174 @@
+//===-- tsan_platform_posix.cc --------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// POSIX-specific code.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#if SANITIZER_POSIX
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_errno.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_procmaps.h"
+#include "tsan_platform.h"
+#include "tsan_rtl.h"
+
+namespace __tsan {
+
+static const char kShadowMemoryMappingWarning[] =
+ "FATAL: %s can not madvise shadow region [%zx, %zx] with %s (errno: %d)\n";
+static const char kShadowMemoryMappingHint[] =
+ "HINT: if %s is not supported in your environment, you may set "
+ "TSAN_OPTIONS=%s=0\n";
+
+static void NoHugePagesInShadow(uptr addr, uptr size) {
+ if (common_flags()->no_huge_pages_for_shadow)
+ if (!NoHugePagesInRegion(addr, size)) {
+ Printf(kShadowMemoryMappingWarning, SanitizerToolName, addr, addr + size,
+ "MADV_NOHUGEPAGE", errno);
+ Printf(kShadowMemoryMappingHint, "MADV_NOHUGEPAGE",
+ "no_huge_pages_for_shadow");
+ Die();
+ }
+}
+
+static void DontDumpShadow(uptr addr, uptr size) {
+ if (common_flags()->use_madv_dontdump)
+ if (!DontDumpShadowMemory(addr, size)) {
+ Printf(kShadowMemoryMappingWarning, SanitizerToolName, addr, addr + size,
+ "MADV_DONTDUMP", errno);
+ Printf(kShadowMemoryMappingHint, "MADV_DONTDUMP", "use_madv_dontdump");
+ Die();
+ }
+}
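+
+// Sketch of what the helpers above are expected to reduce to on Linux
+// (hedged; the actual definitions live in sanitizer_common):
+//   NoHugePagesInRegion(addr, size)  ~ madvise((void *)addr, size,
+//                                              MADV_NOHUGEPAGE);
+//   DontDumpShadowMemory(addr, size) ~ madvise((void *)addr, size,
+//                                              MADV_DONTDUMP);
+// which is why the failure messages above name the MADV_* constants.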
+
+#if !SANITIZER_GO
+void InitializeShadowMemory() {
+ // Map memory shadow.
+ if (!MmapFixedNoReserve(ShadowBeg(), ShadowEnd() - ShadowBeg(), "shadow")) {
+ Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
+ Printf("FATAL: Make sure to compile with -fPIE and to link with -pie.\n");
+ Die();
+ }
+ // This memory range is used for thread stacks and large user mmaps.
+ // Frequently a thread uses only a small part of its stack, and similarly
+ // a program uses only a small part of a large mmap. On some programs
+ // we see a 20% memory usage reduction without huge pages for this range.
+ // FIXME: don't use constants here.
+#if defined(__x86_64__)
+ const uptr kMadviseRangeBeg = 0x7f0000000000ull;
+ const uptr kMadviseRangeSize = 0x010000000000ull;
+#elif defined(__mips64)
+ const uptr kMadviseRangeBeg = 0xff00000000ull;
+ const uptr kMadviseRangeSize = 0x0100000000ull;
+#elif defined(__aarch64__) && defined(__APPLE__)
+ uptr kMadviseRangeBeg = LoAppMemBeg();
+ uptr kMadviseRangeSize = LoAppMemEnd() - LoAppMemBeg();
+#elif defined(__aarch64__)
+ uptr kMadviseRangeBeg = 0;
+ uptr kMadviseRangeSize = 0;
+ if (vmaSize == 39) {
+ kMadviseRangeBeg = 0x7d00000000ull;
+ kMadviseRangeSize = 0x0300000000ull;
+ } else if (vmaSize == 42) {
+ kMadviseRangeBeg = 0x3f000000000ull;
+ kMadviseRangeSize = 0x01000000000ull;
+ } else {
+ DCHECK(0);
+ }
+#elif defined(__powerpc64__)
+ uptr kMadviseRangeBeg = 0;
+ uptr kMadviseRangeSize = 0;
+ if (vmaSize == 44) {
+ kMadviseRangeBeg = 0x0f60000000ull;
+ kMadviseRangeSize = 0x0010000000ull;
+ } else if (vmaSize == 46) {
+ kMadviseRangeBeg = 0x3f0000000000ull;
+ kMadviseRangeSize = 0x010000000000ull;
+ } else {
+ DCHECK(0);
+ }
+#endif
+ NoHugePagesInShadow(MemToShadow(kMadviseRangeBeg),
+ kMadviseRangeSize * kShadowMultiplier);
+ DontDumpShadow(ShadowBeg(), ShadowEnd() - ShadowBeg());
+ DPrintf("memory shadow: %zx-%zx (%zuGB)\n",
+ ShadowBeg(), ShadowEnd(),
+ (ShadowEnd() - ShadowBeg()) >> 30);
+
+ // Map meta shadow.
+ const uptr meta = MetaShadowBeg();
+ const uptr meta_size = MetaShadowEnd() - meta;
+ if (!MmapFixedNoReserve(meta, meta_size, "meta shadow")) {
+ Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
+ Printf("FATAL: Make sure to compile with -fPIE and to link with -pie.\n");
+ Die();
+ }
+ NoHugePagesInShadow(meta, meta_size);
+ DontDumpShadow(meta, meta_size);
+ DPrintf("meta shadow: %zx-%zx (%zuGB)\n",
+ meta, meta + meta_size, meta_size >> 30);
+
+ InitializeShadowMemoryPlatform();
+}
+
+static void ProtectRange(uptr beg, uptr end) {
+ CHECK_LE(beg, end);
+ if (beg == end)
+ return;
+ if (beg != (uptr)MmapFixedNoAccess(beg, end - beg)) {
+ Printf("FATAL: ThreadSanitizer can not protect [%zx,%zx]\n", beg, end);
+ Printf("FATAL: Make sure you are not using unlimited stack\n");
+ Die();
+ }
+}
+
+void CheckAndProtect() {
+ // Ensure that the binary is indeed compiled with -pie.
+ MemoryMappingLayout proc_maps(true);
+ MemoryMappedSegment segment;
+ while (proc_maps.Next(&segment)) {
+ if (IsAppMem(segment.start)) continue;
+ if (segment.start >= HeapMemEnd() && segment.start < HeapEnd()) continue;
+ if (segment.protection == 0) // Zero page or mprotected.
+ continue;
+ if (segment.start >= VdsoBeg()) // vdso
+ break;
+ Printf("FATAL: ThreadSanitizer: unexpected memory mapping %p-%p\n",
+ segment.start, segment.end);
+ Die();
+ }
+
+#if defined(__aarch64__) && defined(__APPLE__)
+ ProtectRange(HeapMemEnd(), ShadowBeg());
+ ProtectRange(ShadowEnd(), MetaShadowBeg());
+ ProtectRange(MetaShadowEnd(), TraceMemBeg());
+#else
+ ProtectRange(LoAppMemEnd(), ShadowBeg());
+ ProtectRange(ShadowEnd(), MetaShadowBeg());
+#ifdef TSAN_MID_APP_RANGE
+ ProtectRange(MetaShadowEnd(), MidAppMemBeg());
+ ProtectRange(MidAppMemEnd(), TraceMemBeg());
+#else
+ ProtectRange(MetaShadowEnd(), TraceMemBeg());
+#endif
+ // Memory for traces is mapped lazily in MapThreadTrace.
+ // Protect the whole range for now, so that user does not map something here.
+ ProtectRange(TraceMemBeg(), TraceMemEnd());
+ ProtectRange(TraceMemEnd(), HeapMemBeg());
+ ProtectRange(HeapEnd(), HiAppMemBeg());
+#endif
+}
+#endif
+
+} // namespace __tsan
+
+#endif // SANITIZER_POSIX
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_platform_posix.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_platform_windows.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_platform_windows.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_platform_windows.cc (revision 351984)
@@ -0,0 +1,37 @@
+//===-- tsan_platform_windows.cc ------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Windows-specific code.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#if SANITIZER_WINDOWS
+
+#include "tsan_platform.h"
+
+#include <stdlib.h>
+
+namespace __tsan {
+
+void FlushShadowMemory() {
+}
+
+void WriteMemoryProfile(char *buf, uptr buf_size, uptr nthread, uptr nlive) {
+}
+
+void InitializePlatformEarly() {
+}
+
+void InitializePlatform() {
+}
+
+} // namespace __tsan
+
+#endif // SANITIZER_WINDOWS
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_preinit.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_preinit.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_preinit.cc (revision 351984)
@@ -0,0 +1,26 @@
+//===-- tsan_preinit.cc ---------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer.
+//
+// Call __tsan_init at the very early stage of process startup.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "tsan_interface.h"
+
+#if SANITIZER_CAN_USE_PREINIT_ARRAY
+
+// The symbol is called __local_tsan_preinit because it is not intended to be
+// exported.
+// This code is linked into the main executable when -fsanitize=thread is in
+// the link flags. It can only use exported interface functions.
+__attribute__((section(".preinit_array"), used))
+void (*__local_tsan_preinit)(void) = __tsan_init;
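+
+// Illustrative sketch (assumption, not part of the upstream source): the
+// dynamic loader runs every function pointer placed in .preinit_array of the
+// main executable before any ELF constructors, which is how the hook above
+// gets to run before static initializers. A hypothetical standalone use:
+//
+//   static void early_hook(void) { /* runs before all constructors */ }
+//   __attribute__((section(".preinit_array"), used))
+//   static void (*hypothetical_preinit)(void) = early_hook;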
+
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_preinit.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_report.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_report.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_report.cc (revision 351984)
@@ -0,0 +1,486 @@
+//===-- tsan_report.cc ----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_report.h"
+#include "tsan_platform.h"
+#include "tsan_rtl.h"
+#include "sanitizer_common/sanitizer_file.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_report_decorator.h"
+#include "sanitizer_common/sanitizer_stacktrace_printer.h"
+
+namespace __tsan {
+
+ReportStack::ReportStack() : frames(nullptr), suppressable(false) {}
+
+ReportStack *ReportStack::New() {
+ void *mem = internal_alloc(MBlockReportStack, sizeof(ReportStack));
+ return new(mem) ReportStack();
+}
+
+ReportLocation::ReportLocation(ReportLocationType type)
+ : type(type), global(), heap_chunk_start(0), heap_chunk_size(0), tid(0),
+ fd(0), suppressable(false), stack(nullptr) {}
+
+ReportLocation *ReportLocation::New(ReportLocationType type) {
+ void *mem = internal_alloc(MBlockReportStack, sizeof(ReportLocation));
+ return new(mem) ReportLocation(type);
+}
+
+class Decorator: public __sanitizer::SanitizerCommonDecorator {
+ public:
+ Decorator() : SanitizerCommonDecorator() { }
+ const char *Access() { return Blue(); }
+ const char *ThreadDescription() { return Cyan(); }
+ const char *Location() { return Green(); }
+ const char *Sleep() { return Yellow(); }
+ const char *Mutex() { return Magenta(); }
+};
+
+ReportDesc::ReportDesc()
+ : tag(kExternalTagNone)
+ , stacks()
+ , mops()
+ , locs()
+ , mutexes()
+ , threads()
+ , unique_tids()
+ , sleep()
+ , count() {
+}
+
+ReportMop::ReportMop()
+ : mset() {
+}
+
+ReportDesc::~ReportDesc() {
+ // FIXME(dvyukov): it must be leaking a lot of memory.
+}
+
+#if !SANITIZER_GO
+
+const int kThreadBufSize = 32;
+const char *thread_name(char *buf, int tid) {
+ if (tid == 0)
+ return "main thread";
+ internal_snprintf(buf, kThreadBufSize, "thread T%d", tid);
+ return buf;
+}
+
+static const char *ReportTypeString(ReportType typ, uptr tag) {
+ switch (typ) {
+ case ReportTypeRace:
+ return "data race";
+ case ReportTypeVptrRace:
+ return "data race on vptr (ctor/dtor vs virtual call)";
+ case ReportTypeUseAfterFree:
+ return "heap-use-after-free";
+ case ReportTypeVptrUseAfterFree:
+ return "heap-use-after-free (virtual call vs free)";
+ case ReportTypeExternalRace: {
+ const char *str = GetReportHeaderFromTag(tag);
+ return str ? str : "race on external object";
+ }
+ case ReportTypeThreadLeak:
+ return "thread leak";
+ case ReportTypeMutexDestroyLocked:
+ return "destroy of a locked mutex";
+ case ReportTypeMutexDoubleLock:
+ return "double lock of a mutex";
+ case ReportTypeMutexInvalidAccess:
+ return "use of an invalid mutex (e.g. uninitialized or destroyed)";
+ case ReportTypeMutexBadUnlock:
+ return "unlock of an unlocked mutex (or by a wrong thread)";
+ case ReportTypeMutexBadReadLock:
+ return "read lock of a write locked mutex";
+ case ReportTypeMutexBadReadUnlock:
+ return "read unlock of a write locked mutex";
+ case ReportTypeSignalUnsafe:
+ return "signal-unsafe call inside of a signal";
+ case ReportTypeErrnoInSignal:
+ return "signal handler spoils errno";
+ case ReportTypeDeadlock:
+ return "lock-order-inversion (potential deadlock)";
+ // No default case so compiler warns us if we miss one
+ }
+ UNREACHABLE("missing case");
+}
+
+#if SANITIZER_MAC
+static const char *const kInterposedFunctionPrefix = "wrap_";
+#else
+static const char *const kInterposedFunctionPrefix = "__interceptor_";
+#endif
+
+void PrintStack(const ReportStack *ent) {
+ if (ent == 0 || ent->frames == 0) {
+ Printf(" [failed to restore the stack]\n\n");
+ return;
+ }
+ SymbolizedStack *frame = ent->frames;
+ for (int i = 0; frame && frame->info.address; frame = frame->next, i++) {
+ InternalScopedString res(2 * GetPageSizeCached());
+ RenderFrame(&res, common_flags()->stack_trace_format, i, frame->info,
+ common_flags()->symbolize_vs_style,
+ common_flags()->strip_path_prefix, kInterposedFunctionPrefix);
+ Printf("%s\n", res.data());
+ }
+ Printf("\n");
+}
+
+static void PrintMutexSet(Vector<ReportMopMutex> const& mset) {
+ for (uptr i = 0; i < mset.Size(); i++) {
+ if (i == 0)
+ Printf(" (mutexes:");
+ const ReportMopMutex m = mset[i];
+ Printf(" %s M%llu", m.write ? "write" : "read", m.id);
+ Printf(i == mset.Size() - 1 ? ")" : ",");
+ }
+}
+
+static const char *MopDesc(bool first, bool write, bool atomic) {
+ return atomic ? (first ? (write ? "Atomic write" : "Atomic read")
+ : (write ? "Previous atomic write" : "Previous atomic read"))
+ : (first ? (write ? "Write" : "Read")
+ : (write ? "Previous write" : "Previous read"));
+}
+
+static const char *ExternalMopDesc(bool first, bool write) {
+ return first ? (write ? "Modifying" : "Read-only")
+ : (write ? "Previous modifying" : "Previous read-only");
+}
+
+static void PrintMop(const ReportMop *mop, bool first) {
+ Decorator d;
+ char thrbuf[kThreadBufSize];
+ Printf("%s", d.Access());
+ if (mop->external_tag == kExternalTagNone) {
+ Printf(" %s of size %d at %p by %s",
+ MopDesc(first, mop->write, mop->atomic), mop->size,
+ (void *)mop->addr, thread_name(thrbuf, mop->tid));
+ } else {
+ const char *object_type = GetObjectTypeFromTag(mop->external_tag);
+ if (object_type == nullptr)
+ object_type = "external object";
+ Printf(" %s access of %s at %p by %s",
+ ExternalMopDesc(first, mop->write), object_type,
+ (void *)mop->addr, thread_name(thrbuf, mop->tid));
+ }
+ PrintMutexSet(mop->mset);
+ Printf(":\n");
+ Printf("%s", d.Default());
+ PrintStack(mop->stack);
+}
+
+static void PrintLocation(const ReportLocation *loc) {
+ Decorator d;
+ char thrbuf[kThreadBufSize];
+ bool print_stack = false;
+ Printf("%s", d.Location());
+ if (loc->type == ReportLocationGlobal) {
+ const DataInfo &global = loc->global;
+ if (global.size != 0)
+ Printf(" Location is global '%s' of size %zu at %p (%s+%p)\n\n",
+ global.name, global.size, global.start,
+ StripModuleName(global.module), global.module_offset);
+ else
+ Printf(" Location is global '%s' at %p (%s+%p)\n\n", global.name,
+ global.start, StripModuleName(global.module),
+ global.module_offset);
+ } else if (loc->type == ReportLocationHeap) {
+ char thrbuf[kThreadBufSize];
+ const char *object_type = GetObjectTypeFromTag(loc->external_tag);
+ if (!object_type) {
+ Printf(" Location is heap block of size %zu at %p allocated by %s:\n",
+ loc->heap_chunk_size, loc->heap_chunk_start,
+ thread_name(thrbuf, loc->tid));
+ } else {
+ Printf(" Location is %s of size %zu at %p allocated by %s:\n",
+ object_type, loc->heap_chunk_size, loc->heap_chunk_start,
+ thread_name(thrbuf, loc->tid));
+ }
+ print_stack = true;
+ } else if (loc->type == ReportLocationStack) {
+ Printf(" Location is stack of %s.\n\n", thread_name(thrbuf, loc->tid));
+ } else if (loc->type == ReportLocationTLS) {
+ Printf(" Location is TLS of %s.\n\n", thread_name(thrbuf, loc->tid));
+ } else if (loc->type == ReportLocationFD) {
+ Printf(" Location is file descriptor %d created by %s at:\n",
+ loc->fd, thread_name(thrbuf, loc->tid));
+ print_stack = true;
+ }
+ Printf("%s", d.Default());
+ if (print_stack)
+ PrintStack(loc->stack);
+}
+
+static void PrintMutexShort(const ReportMutex *rm, const char *after) {
+ Decorator d;
+ Printf("%sM%zd%s%s", d.Mutex(), rm->id, d.Default(), after);
+}
+
+static void PrintMutexShortWithAddress(const ReportMutex *rm,
+ const char *after) {
+ Decorator d;
+ Printf("%sM%zd (%p)%s%s", d.Mutex(), rm->id, rm->addr, d.Default(), after);
+}
+
+static void PrintMutex(const ReportMutex *rm) {
+ Decorator d;
+ if (rm->destroyed) {
+ Printf("%s", d.Mutex());
+ Printf(" Mutex M%llu is already destroyed.\n\n", rm->id);
+ Printf("%s", d.Default());
+ } else {
+ Printf("%s", d.Mutex());
+ Printf(" Mutex M%llu (%p) created at:\n", rm->id, rm->addr);
+ Printf("%s", d.Default());
+ PrintStack(rm->stack);
+ }
+}
+
+static void PrintThread(const ReportThread *rt) {
+ Decorator d;
+ if (rt->id == 0) // Little sense in describing the main thread.
+ return;
+ Printf("%s", d.ThreadDescription());
+ Printf(" Thread T%d", rt->id);
+ if (rt->name && rt->name[0] != '\0')
+ Printf(" '%s'", rt->name);
+ char thrbuf[kThreadBufSize];
+ const char *thread_status = rt->running ? "running" : "finished";
+ if (rt->thread_type == ThreadType::Worker) {
+ Printf(" (tid=%zu, %s) is a GCD worker thread\n", rt->os_id, thread_status);
+ Printf("\n");
+ Printf("%s", d.Default());
+ return;
+ }
+ Printf(" (tid=%zu, %s) created by %s", rt->os_id, thread_status,
+ thread_name(thrbuf, rt->parent_tid));
+ if (rt->stack)
+ Printf(" at:");
+ Printf("\n");
+ Printf("%s", d.Default());
+ PrintStack(rt->stack);
+}
+
+static void PrintSleep(const ReportStack *s) {
+ Decorator d;
+ Printf("%s", d.Sleep());
+ Printf(" As if synchronized via sleep:\n");
+ Printf("%s", d.Default());
+ PrintStack(s);
+}
+
+static ReportStack *ChooseSummaryStack(const ReportDesc *rep) {
+ if (rep->mops.Size())
+ return rep->mops[0]->stack;
+ if (rep->stacks.Size())
+ return rep->stacks[0];
+ if (rep->mutexes.Size())
+ return rep->mutexes[0]->stack;
+ if (rep->threads.Size())
+ return rep->threads[0]->stack;
+ return 0;
+}
+
+static bool FrameIsInternal(const SymbolizedStack *frame) {
+ if (frame == 0)
+ return false;
+ const char *file = frame->info.file;
+ const char *module = frame->info.module;
+ if (file != 0 &&
+ (internal_strstr(file, "tsan_interceptors.cc") ||
+ internal_strstr(file, "sanitizer_common_interceptors.inc") ||
+ internal_strstr(file, "tsan_interface_")))
+ return true;
+ if (module != 0 && (internal_strstr(module, "libclang_rt.tsan_")))
+ return true;
+ return false;
+}
+
+static SymbolizedStack *SkipTsanInternalFrames(SymbolizedStack *frames) {
+ while (FrameIsInternal(frames) && frames->next)
+ frames = frames->next;
+ return frames;
+}
+
+void PrintReport(const ReportDesc *rep) {
+ Decorator d;
+ Printf("==================\n");
+ const char *rep_typ_str = ReportTypeString(rep->typ, rep->tag);
+ Printf("%s", d.Warning());
+ Printf("WARNING: ThreadSanitizer: %s (pid=%d)\n", rep_typ_str,
+ (int)internal_getpid());
+ Printf("%s", d.Default());
+
+ if (rep->typ == ReportTypeDeadlock) {
+ char thrbuf[kThreadBufSize];
+ Printf(" Cycle in lock order graph: ");
+ for (uptr i = 0; i < rep->mutexes.Size(); i++)
+ PrintMutexShortWithAddress(rep->mutexes[i], " => ");
+ PrintMutexShort(rep->mutexes[0], "\n\n");
+ CHECK_GT(rep->mutexes.Size(), 0U);
+ CHECK_EQ(rep->mutexes.Size() * (flags()->second_deadlock_stack ? 2 : 1),
+ rep->stacks.Size());
+ for (uptr i = 0; i < rep->mutexes.Size(); i++) {
+ Printf(" Mutex ");
+ PrintMutexShort(rep->mutexes[(i + 1) % rep->mutexes.Size()],
+ " acquired here while holding mutex ");
+ PrintMutexShort(rep->mutexes[i], " in ");
+ Printf("%s", d.ThreadDescription());
+ Printf("%s:\n", thread_name(thrbuf, rep->unique_tids[i]));
+ Printf("%s", d.Default());
+ if (flags()->second_deadlock_stack) {
+ PrintStack(rep->stacks[2*i]);
+ Printf(" Mutex ");
+ PrintMutexShort(rep->mutexes[i],
+ " previously acquired by the same thread here:\n");
+ PrintStack(rep->stacks[2*i+1]);
+ } else {
+ PrintStack(rep->stacks[i]);
+ if (i == 0)
+ Printf(" Hint: use TSAN_OPTIONS=second_deadlock_stack=1 "
+ "to get more informative warning message\n\n");
+ }
+ }
+ } else {
+ for (uptr i = 0; i < rep->stacks.Size(); i++) {
+ if (i)
+ Printf(" and:\n");
+ PrintStack(rep->stacks[i]);
+ }
+ }
+
+ for (uptr i = 0; i < rep->mops.Size(); i++)
+ PrintMop(rep->mops[i], i == 0);
+
+ if (rep->sleep)
+ PrintSleep(rep->sleep);
+
+ for (uptr i = 0; i < rep->locs.Size(); i++)
+ PrintLocation(rep->locs[i]);
+
+ if (rep->typ != ReportTypeDeadlock) {
+ for (uptr i = 0; i < rep->mutexes.Size(); i++)
+ PrintMutex(rep->mutexes[i]);
+ }
+
+ for (uptr i = 0; i < rep->threads.Size(); i++)
+ PrintThread(rep->threads[i]);
+
+ if (rep->typ == ReportTypeThreadLeak && rep->count > 1)
+ Printf(" And %d more similar thread leaks.\n\n", rep->count - 1);
+
+ if (ReportStack *stack = ChooseSummaryStack(rep)) {
+ if (SymbolizedStack *frame = SkipTsanInternalFrames(stack->frames))
+ ReportErrorSummary(rep_typ_str, frame->info);
+ }
+
+ if (common_flags()->print_module_map == 2) PrintModuleMap();
+
+ Printf("==================\n");
+}
+
+#else // #if !SANITIZER_GO
+
+const int kMainThreadId = 1;
+
+void PrintStack(const ReportStack *ent) {
+ if (ent == 0 || ent->frames == 0) {
+ Printf(" [failed to restore the stack]\n");
+ return;
+ }
+ SymbolizedStack *frame = ent->frames;
+ for (int i = 0; frame; frame = frame->next, i++) {
+ const AddressInfo &info = frame->info;
+ Printf(" %s()\n %s:%d +0x%zx\n", info.function,
+ StripPathPrefix(info.file, common_flags()->strip_path_prefix),
+ info.line, (void *)info.module_offset);
+ }
+}
+
+static void PrintMop(const ReportMop *mop, bool first) {
+ Printf("\n");
+ Printf("%s at %p by ",
+ (first ? (mop->write ? "Write" : "Read")
+ : (mop->write ? "Previous write" : "Previous read")), mop->addr);
+ if (mop->tid == kMainThreadId)
+ Printf("main goroutine:\n");
+ else
+ Printf("goroutine %d:\n", mop->tid);
+ PrintStack(mop->stack);
+}
+
+static void PrintLocation(const ReportLocation *loc) {
+ switch (loc->type) {
+ case ReportLocationHeap: {
+ Printf("\n");
+ Printf("Heap block of size %zu at %p allocated by ",
+ loc->heap_chunk_size, loc->heap_chunk_start);
+ if (loc->tid == kMainThreadId)
+ Printf("main goroutine:\n");
+ else
+ Printf("goroutine %d:\n", loc->tid);
+ PrintStack(loc->stack);
+ break;
+ }
+ case ReportLocationGlobal: {
+ Printf("\n");
+ Printf("Global var %s of size %zu at %p declared at %s:%zu\n",
+ loc->global.name, loc->global.size, loc->global.start,
+ loc->global.file, loc->global.line);
+ break;
+ }
+ default:
+ break;
+ }
+}
+
+static void PrintThread(const ReportThread *rt) {
+ if (rt->id == kMainThreadId)
+ return;
+ Printf("\n");
+ Printf("Goroutine %d (%s) created at:\n",
+ rt->id, rt->running ? "running" : "finished");
+ PrintStack(rt->stack);
+}
+
+void PrintReport(const ReportDesc *rep) {
+ Printf("==================\n");
+ if (rep->typ == ReportTypeRace) {
+ Printf("WARNING: DATA RACE");
+ for (uptr i = 0; i < rep->mops.Size(); i++)
+ PrintMop(rep->mops[i], i == 0);
+ for (uptr i = 0; i < rep->locs.Size(); i++)
+ PrintLocation(rep->locs[i]);
+ for (uptr i = 0; i < rep->threads.Size(); i++)
+ PrintThread(rep->threads[i]);
+ } else if (rep->typ == ReportTypeDeadlock) {
+ Printf("WARNING: DEADLOCK\n");
+ for (uptr i = 0; i < rep->mutexes.Size(); i++) {
+ Printf("Goroutine %d lock mutex %d while holding mutex %d:\n",
+ 999, rep->mutexes[i]->id,
+ rep->mutexes[(i+1) % rep->mutexes.Size()]->id);
+ PrintStack(rep->stacks[2*i]);
+ Printf("\n");
+ Printf("Mutex %d was previously locked here:\n",
+ rep->mutexes[(i+1) % rep->mutexes.Size()]->id);
+ PrintStack(rep->stacks[2*i + 1]);
+ Printf("\n");
+ }
+ }
+ Printf("==================\n");
+}
+
+#endif
+
+} // namespace __tsan
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_report.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_report.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_report.h (revision 351984)
@@ -0,0 +1,135 @@
+//===-- tsan_report.h -------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_REPORT_H
+#define TSAN_REPORT_H
+
+#include "sanitizer_common/sanitizer_symbolizer.h"
+#include "sanitizer_common/sanitizer_thread_registry.h"
+#include "sanitizer_common/sanitizer_vector.h"
+#include "tsan_defs.h"
+
+namespace __tsan {
+
+enum ReportType {
+ ReportTypeRace,
+ ReportTypeVptrRace,
+ ReportTypeUseAfterFree,
+ ReportTypeVptrUseAfterFree,
+ ReportTypeExternalRace,
+ ReportTypeThreadLeak,
+ ReportTypeMutexDestroyLocked,
+ ReportTypeMutexDoubleLock,
+ ReportTypeMutexInvalidAccess,
+ ReportTypeMutexBadUnlock,
+ ReportTypeMutexBadReadLock,
+ ReportTypeMutexBadReadUnlock,
+ ReportTypeSignalUnsafe,
+ ReportTypeErrnoInSignal,
+ ReportTypeDeadlock
+};
+
+struct ReportStack {
+ SymbolizedStack *frames;
+ bool suppressable;
+ static ReportStack *New();
+
+ private:
+ ReportStack();
+};
+
+struct ReportMopMutex {
+ u64 id;
+ bool write;
+};
+
+struct ReportMop {
+ int tid;
+ uptr addr;
+ int size;
+ bool write;
+ bool atomic;
+ uptr external_tag;
+ Vector<ReportMopMutex> mset;
+ ReportStack *stack;
+
+ ReportMop();
+};
+
+enum ReportLocationType {
+ ReportLocationGlobal,
+ ReportLocationHeap,
+ ReportLocationStack,
+ ReportLocationTLS,
+ ReportLocationFD
+};
+
+struct ReportLocation {
+ ReportLocationType type;
+ DataInfo global;
+ uptr heap_chunk_start;
+ uptr heap_chunk_size;
+ uptr external_tag;
+ int tid;
+ int fd;
+ bool suppressable;
+ ReportStack *stack;
+
+ static ReportLocation *New(ReportLocationType type);
+ private:
+ explicit ReportLocation(ReportLocationType type);
+};
+
+struct ReportThread {
+ int id;
+ tid_t os_id;
+ bool running;
+ ThreadType thread_type;
+ char *name;
+ u32 parent_tid;
+ ReportStack *stack;
+};
+
+struct ReportMutex {
+ u64 id;
+ uptr addr;
+ bool destroyed;
+ ReportStack *stack;
+};
+
+class ReportDesc {
+ public:
+ ReportType typ;
+ uptr tag;
+ Vector<ReportStack*> stacks;
+ Vector<ReportMop*> mops;
+ Vector<ReportLocation*> locs;
+ Vector<ReportMutex*> mutexes;
+ Vector<ReportThread*> threads;
+ Vector<int> unique_tids;
+ ReportStack *sleep;
+ int count;
+
+ ReportDesc();
+ ~ReportDesc();
+
+ private:
+ ReportDesc(const ReportDesc&);
+ void operator = (const ReportDesc&);
+};
+
+// Format and output the report to the console/log. No additional logic.
+void PrintReport(const ReportDesc *rep);
+void PrintStack(const ReportStack *stack);
+
+} // namespace __tsan
+
+#endif // TSAN_REPORT_H
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_rtl.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_rtl.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_rtl.h (revision 351984)
@@ -0,0 +1,888 @@
+//===-- tsan_rtl.h ----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Main internal TSan header file.
+//
+// Ground rules:
+// - C++ run-time should not be used (static CTORs, RTTI, exceptions, static
+// function-scope locals)
+// - All functions/classes/etc reside in namespace __tsan, except for those
+// declared in tsan_interface.h.
+// - Platform-specific files should be used instead of ifdefs (*).
+// - No system headers included in header files (*).
+// - Platform-specific headers are included only in platform-specific files (*).
+//
+// (*) Except when inlining is critical for performance.
+//===----------------------------------------------------------------------===//
+
+#ifndef TSAN_RTL_H
+#define TSAN_RTL_H
+
+#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_allocator_internal.h"
+#include "sanitizer_common/sanitizer_asm.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
+#include "sanitizer_common/sanitizer_libignore.h"
+#include "sanitizer_common/sanitizer_suppressions.h"
+#include "sanitizer_common/sanitizer_thread_registry.h"
+#include "sanitizer_common/sanitizer_vector.h"
+#include "tsan_clock.h"
+#include "tsan_defs.h"
+#include "tsan_flags.h"
+#include "tsan_mman.h"
+#include "tsan_sync.h"
+#include "tsan_trace.h"
+#include "tsan_report.h"
+#include "tsan_platform.h"
+#include "tsan_mutexset.h"
+#include "tsan_ignoreset.h"
+#include "tsan_stack_trace.h"
+
+#if SANITIZER_WORDSIZE != 64
+# error "ThreadSanitizer is supported only on 64-bit platforms"
+#endif
+
+namespace __tsan {
+
+#if !SANITIZER_GO
+struct MapUnmapCallback;
+#if defined(__mips64) || defined(__aarch64__) || defined(__powerpc__)
+
+struct AP32 {
+ static const uptr kSpaceBeg = 0;
+ static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
+ static const uptr kMetadataSize = 0;
+ typedef __sanitizer::CompactSizeClassMap SizeClassMap;
+ static const uptr kRegionSizeLog = 20;
+ using AddressSpaceView = LocalAddressSpaceView;
+ typedef __tsan::MapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+};
+typedef SizeClassAllocator32<AP32> PrimaryAllocator;
+#else
+struct AP64 { // Allocator64 parameters. Deliberately using a short name.
+ static const uptr kSpaceBeg = Mapping::kHeapMemBeg;
+ static const uptr kSpaceSize = Mapping::kHeapMemEnd - Mapping::kHeapMemBeg;
+ static const uptr kMetadataSize = 0;
+ typedef DefaultSizeClassMap SizeClassMap;
+ typedef __tsan::MapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+ using AddressSpaceView = LocalAddressSpaceView;
+};
+typedef SizeClassAllocator64<AP64> PrimaryAllocator;
+#endif
+typedef CombinedAllocator<PrimaryAllocator> Allocator;
+typedef Allocator::AllocatorCache AllocatorCache;
+Allocator *allocator();
+#endif
+
+void TsanCheckFailed(const char *file, int line, const char *cond,
+ u64 v1, u64 v2);
+
+const u64 kShadowRodata = (u64)-1; // .rodata shadow marker
+
+// FastState (from most significant bit):
+// ignore : 1
+// tid : kTidBits
+// unused : -
+// history_size : 3
+// epoch : kClkBits
+class FastState {
+ public:
+ FastState(u64 tid, u64 epoch) {
+ x_ = tid << kTidShift;
+ x_ |= epoch;
+ DCHECK_EQ(tid, this->tid());
+ DCHECK_EQ(epoch, this->epoch());
+ DCHECK_EQ(GetIgnoreBit(), false);
+ }
+
+ explicit FastState(u64 x)
+ : x_(x) {
+ }
+
+ u64 raw() const {
+ return x_;
+ }
+
+ u64 tid() const {
+ u64 res = (x_ & ~kIgnoreBit) >> kTidShift;
+ return res;
+ }
+
+ u64 TidWithIgnore() const {
+ u64 res = x_ >> kTidShift;
+ return res;
+ }
+
+ u64 epoch() const {
+ u64 res = x_ & ((1ull << kClkBits) - 1);
+ return res;
+ }
+
+ void IncrementEpoch() {
+ u64 old_epoch = epoch();
+ x_ += 1;
+ DCHECK_EQ(old_epoch + 1, epoch());
+ (void)old_epoch;
+ }
+
+ void SetIgnoreBit() { x_ |= kIgnoreBit; }
+ void ClearIgnoreBit() { x_ &= ~kIgnoreBit; }
+ bool GetIgnoreBit() const { return (s64)x_ < 0; }
+
+ void SetHistorySize(int hs) {
+ CHECK_GE(hs, 0);
+ CHECK_LE(hs, 7);
+ x_ = (x_ & ~(kHistoryMask << kHistoryShift)) | (u64(hs) << kHistoryShift);
+ }
+
+ ALWAYS_INLINE
+ int GetHistorySize() const {
+ return (int)((x_ >> kHistoryShift) & kHistoryMask);
+ }
+
+ void ClearHistorySize() {
+ SetHistorySize(0);
+ }
+
+ ALWAYS_INLINE
+ u64 GetTracePos() const {
+ const int hs = GetHistorySize();
+ // When hs == 0, the trace consists of 2 parts.
+ const u64 mask = (1ull << (kTracePartSizeBits + hs + 1)) - 1;
+ return epoch() & mask;
+ }
+
+ private:
+ friend class Shadow;
+ static const int kTidShift = 64 - kTidBits - 1;
+ static const u64 kIgnoreBit = 1ull << 63;
+ static const u64 kFreedBit = 1ull << 63;
+ static const u64 kHistoryShift = kClkBits;
+ static const u64 kHistoryMask = 7;
+ u64 x_;
+};
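+
+// Worked example (illustrative; assumes kTidBits == 13 and kClkBits == 42, so
+// kTidShift == 64 - 13 - 1 == 50):
+//   FastState s(/*tid=*/5, /*epoch=*/7);  // x_ == (5ull << 50) | 7
+//   s.tid()   == 5;   // tid occupies bits 50..62, ignore bit is bit 63
+//   s.epoch() == 7;   // epoch occupies bits 0..41
+//   s.SetHistorySize(2);  // 3-bit history field at bits 42..44
+//   // GetTracePos() then masks epoch() down to
+//   // (1ull << (kTracePartSizeBits + 2 + 1)) - 1.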
+
+// Shadow (from most significant bit):
+// freed : 1
+// tid : kTidBits
+// is_atomic : 1
+// is_read : 1
+// size_log : 2
+// addr0 : 3
+// epoch : kClkBits
+class Shadow : public FastState {
+ public:
+ explicit Shadow(u64 x)
+ : FastState(x) {
+ }
+
+ explicit Shadow(const FastState &s)
+ : FastState(s.x_) {
+ ClearHistorySize();
+ }
+
+ void SetAddr0AndSizeLog(u64 addr0, unsigned kAccessSizeLog) {
+ DCHECK_EQ((x_ >> kClkBits) & 31, 0);
+ DCHECK_LE(addr0, 7);
+ DCHECK_LE(kAccessSizeLog, 3);
+ x_ |= ((kAccessSizeLog << 3) | addr0) << kClkBits;
+ DCHECK_EQ(kAccessSizeLog, size_log());
+ DCHECK_EQ(addr0, this->addr0());
+ }
+
+ void SetWrite(unsigned kAccessIsWrite) {
+ DCHECK_EQ(x_ & kReadBit, 0);
+ if (!kAccessIsWrite)
+ x_ |= kReadBit;
+ DCHECK_EQ(kAccessIsWrite, IsWrite());
+ }
+
+ void SetAtomic(bool kIsAtomic) {
+ DCHECK(!IsAtomic());
+ if (kIsAtomic)
+ x_ |= kAtomicBit;
+ DCHECK_EQ(IsAtomic(), kIsAtomic);
+ }
+
+ bool IsAtomic() const {
+ return x_ & kAtomicBit;
+ }
+
+ bool IsZero() const {
+ return x_ == 0;
+ }
+
+ static inline bool TidsAreEqual(const Shadow s1, const Shadow s2) {
+ u64 shifted_xor = (s1.x_ ^ s2.x_) >> kTidShift;
+ DCHECK_EQ(shifted_xor == 0, s1.TidWithIgnore() == s2.TidWithIgnore());
+ return shifted_xor == 0;
+ }
+
+ static ALWAYS_INLINE
+ bool Addr0AndSizeAreEqual(const Shadow s1, const Shadow s2) {
+ u64 masked_xor = ((s1.x_ ^ s2.x_) >> kClkBits) & 31;
+ return masked_xor == 0;
+ }
+
+ static ALWAYS_INLINE bool TwoRangesIntersect(Shadow s1, Shadow s2,
+ unsigned kS2AccessSize) {
+ bool res = false;
+ u64 diff = s1.addr0() - s2.addr0();
+ if ((s64)diff < 0) { // s1.addr0 < s2.addr0 // NOLINT
+ // if (s1.addr0() + size1) > s2.addr0()) return true;
+ if (s1.size() > -diff)
+ res = true;
+ } else {
+ // if (s2.addr0() + kS2AccessSize > s1.addr0()) return true;
+ if (kS2AccessSize > diff)
+ res = true;
+ }
+ DCHECK_EQ(res, TwoRangesIntersectSlow(s1, s2));
+ DCHECK_EQ(res, TwoRangesIntersectSlow(s2, s1));
+ return res;
+ }
+
+ u64 ALWAYS_INLINE addr0() const { return (x_ >> kClkBits) & 7; }
+ u64 ALWAYS_INLINE size() const { return 1ull << size_log(); }
+ bool ALWAYS_INLINE IsWrite() const { return !IsRead(); }
+ bool ALWAYS_INLINE IsRead() const { return x_ & kReadBit; }
+
+ // The idea behind the freed bit is as follows.
+ // When the memory is freed (or otherwise becomes inaccessible) we write
+ // shadow values with the tid/epoch of the free and with the freed bit set.
+ // During memory access processing the freed bit is considered
+ // as the msb of the tid. So any access races with a shadow value that has
+ // the freed bit set (it is as if the write came from a thread with which we
+ // have never synchronized before). This allows us to detect accesses to
+ // freed memory w/o additional overhead in memory access processing and at
+ // the same time restore the tid/epoch of the free.
+ void MarkAsFreed() {
+ x_ |= kFreedBit;
+ }
+
+ bool IsFreed() const {
+ return x_ & kFreedBit;
+ }
+
+ bool GetFreedAndReset() {
+ bool res = x_ & kFreedBit;
+ x_ &= ~kFreedBit;
+ return res;
+ }
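+
+ // Worked example (illustrative, not upstream code): TidsAreEqual() compares
+ // x_ >> kTidShift, a range that includes the freed bit (bit 63). So after
+ //   Shadow freed(thr->fast_state); freed.MarkAsFreed();
+ // no genuine access from any live thread can have an equal tid field, and
+ // every later read/write of the range is flagged as racing with the free,
+ // while the shadow word still carries the tid/epoch of the freeing thread
+ // for the report.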
+
+ bool ALWAYS_INLINE IsBothReadsOrAtomic(bool kIsWrite, bool kIsAtomic) const {
+ bool v = x_ & ((u64(kIsWrite ^ 1) << kReadShift)
+ | (u64(kIsAtomic) << kAtomicShift));
+ DCHECK_EQ(v, (!IsWrite() && !kIsWrite) || (IsAtomic() && kIsAtomic));
+ return v;
+ }
+
+ bool ALWAYS_INLINE IsRWNotWeaker(bool kIsWrite, bool kIsAtomic) const {
+ bool v = ((x_ >> kReadShift) & 3)
+ <= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
+ DCHECK_EQ(v, (IsAtomic() < kIsAtomic) ||
+ (IsAtomic() == kIsAtomic && !IsWrite() <= !kIsWrite));
+ return v;
+ }
+
+ bool ALWAYS_INLINE IsRWWeakerOrEqual(bool kIsWrite, bool kIsAtomic) const {
+ bool v = ((x_ >> kReadShift) & 3)
+ >= u64((kIsWrite ^ 1) | (kIsAtomic << 1));
+ DCHECK_EQ(v, (IsAtomic() > kIsAtomic) ||
+ (IsAtomic() == kIsAtomic && !IsWrite() >= !kIsWrite));
+ return v;
+ }
+
+ private:
+ static const u64 kReadShift = 5 + kClkBits;
+ static const u64 kReadBit = 1ull << kReadShift;
+ static const u64 kAtomicShift = 6 + kClkBits;
+ static const u64 kAtomicBit = 1ull << kAtomicShift;
+
+ u64 size_log() const { return (x_ >> (3 + kClkBits)) & 3; }
+
+ static bool TwoRangesIntersectSlow(const Shadow s1, const Shadow s2) {
+ if (s1.addr0() == s2.addr0()) return true;
+ if (s1.addr0() < s2.addr0() && s1.addr0() + s1.size() > s2.addr0())
+ return true;
+ if (s2.addr0() < s1.addr0() && s2.addr0() + s2.size() > s1.addr0())
+ return true;
+ return false;
+ }
+};
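+
+// Worked example for TwoRangesIntersect() (illustrative, not upstream code):
+// let s1 cover addresses 2..5 (s1.addr0() == 2, s1.size() == 4) and let s2
+// start at s2.addr0() == 4 with kS2AccessSize == 2. Then diff == 2 - 4, the
+// (s64)diff < 0 branch fires, and s1.size() == 4 > -diff == 2, so the ranges
+// [2,6) and [4,6) are reported as intersecting -- which matches
+// TwoRangesIntersectSlow(): s1.addr0() < s2.addr0() and
+// s1.addr0() + s1.size() > s2.addr0().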
+
+struct ThreadSignalContext;
+
+struct JmpBuf {
+ uptr sp;
+ int int_signal_send;
+ bool in_blocking_func;
+ uptr in_signal_handler;
+ uptr *shadow_stack_pos;
+};
+
+// A Processor represents a physical thread, or a P for Go.
+// It is used to store internal resources like the allocator cache, and does
+// not participate in race-detection logic (it is invisible to the end user).
+// In C++ it is tied to an OS thread just like ThreadState; however, ideally
+// it should be tied to a CPU (this way we would have fewer allocator caches).
+// In Go it is tied to a P, so there are significantly fewer Processors than
+// ThreadStates (which are tied to Gs).
+// A ThreadState must be wired with a Processor to handle events.
+struct Processor {
+ ThreadState *thr; // currently wired thread, or nullptr
+#if !SANITIZER_GO
+ AllocatorCache alloc_cache;
+ InternalAllocatorCache internal_alloc_cache;
+#endif
+ DenseSlabAllocCache block_cache;
+ DenseSlabAllocCache sync_cache;
+ DenseSlabAllocCache clock_cache;
+ DDPhysicalThread *dd_pt;
+};
+
+#if !SANITIZER_GO
+// ScopedGlobalProcessor temporarily sets up a global processor for the
+// current thread, if it does not have one. Intended for interceptors that
+// can run at the very end of a thread's lifetime, when its processor has
+// already been destroyed.
+struct ScopedGlobalProcessor {
+ ScopedGlobalProcessor();
+ ~ScopedGlobalProcessor();
+};
+#endif
+
+// This struct is stored in TLS.
+struct ThreadState {
+ FastState fast_state;
+ // Synch epoch represents the thread's epoch before the last synchronization
+ // action. It allows us to reduce the number of shadow state updates.
+ // For example, if fast_synch_epoch=100 and the last write to addr X was at
+ // epoch=150, and we are processing a write to X from the same thread at
+ // epoch=200, we do nothing, because both writes happen in the same
+ // 'synch epoch'. That is, if another memory access does not race with the
+ // former write, it does not race with the latter as well.
+ // QUESTION: can we squeeze this into ThreadState::Fast?
+ // E.g. ThreadState::Fast is 44 bits: 32 are taken by synch_epoch and 12 by
+ // the epoch between synchs. This way we can save one load from TLS.
+ u64 fast_synch_epoch;
+ // Technically `current` should be a separate THREADLOCAL variable,
+ // but it is placed here in order to share a cache line with previous fields.
+ ThreadState* current;
+ // This is a slow path flag. On fast path, fast_state.GetIgnoreBit() is read.
+ // We do not distinguish between ignoring reads and writes
+ // for better performance.
+ int ignore_reads_and_writes;
+ int ignore_sync;
+ int suppress_reports;
+ // Go does not support ignores.
+#if !SANITIZER_GO
+ IgnoreSet mop_ignore_set;
+ IgnoreSet sync_ignore_set;
+#endif
+ // C/C++ uses a fixed-size shadow stack embedded into Trace.
+ // Go uses a malloc-allocated shadow stack with dynamic size.
+ uptr *shadow_stack;
+ uptr *shadow_stack_end;
+ uptr *shadow_stack_pos;
+ u64 *racy_shadow_addr;
+ u64 racy_state[2];
+ MutexSet mset;
+ ThreadClock clock;
+#if !SANITIZER_GO
+ Vector<JmpBuf> jmp_bufs;
+ int ignore_interceptors;
+#endif
+#if TSAN_COLLECT_STATS
+ u64 stat[StatCnt];
+#endif
+ const int tid;
+ const int unique_id;
+ bool in_symbolizer;
+ bool in_ignored_lib;
+ bool is_inited;
+ bool is_dead;
+ bool is_freeing;
+ bool is_vptr_access;
+ const uptr stk_addr;
+ const uptr stk_size;
+ const uptr tls_addr;
+ const uptr tls_size;
+ ThreadContext *tctx;
+
+#if SANITIZER_DEBUG && !SANITIZER_GO
+ InternalDeadlockDetector internal_deadlock_detector;
+#endif
+ DDLogicalThread *dd_lt;
+
+ // Current wired Processor, or nullptr. Required to handle any events.
+ Processor *proc1;
+#if !SANITIZER_GO
+ Processor *proc() { return proc1; }
+#else
+ Processor *proc();
+#endif
+
+ atomic_uintptr_t in_signal_handler;
+ ThreadSignalContext *signal_ctx;
+
+#if !SANITIZER_GO
+ u32 last_sleep_stack_id;
+ ThreadClock last_sleep_clock;
+#endif
+
+ // Set in regions of runtime that must be signal-safe and fork-safe.
+ // If set, malloc must not be called.
+ int nomalloc;
+
+ const ReportDesc *current_report;
+
+ explicit ThreadState(Context *ctx, int tid, int unique_id, u64 epoch,
+ unsigned reuse_count,
+ uptr stk_addr, uptr stk_size,
+ uptr tls_addr, uptr tls_size);
+};
+
+#if !SANITIZER_GO
+#if SANITIZER_MAC || SANITIZER_ANDROID
+ThreadState *cur_thread();
+void set_cur_thread(ThreadState *thr);
+void cur_thread_finalize();
+INLINE void cur_thread_init() { }
+#else
+__attribute__((tls_model("initial-exec")))
+extern THREADLOCAL char cur_thread_placeholder[];
+INLINE ThreadState *cur_thread() {
+ return reinterpret_cast<ThreadState *>(cur_thread_placeholder)->current;
+}
+INLINE void cur_thread_init() {
+ ThreadState *thr = reinterpret_cast<ThreadState *>(cur_thread_placeholder);
+ if (UNLIKELY(!thr->current))
+ thr->current = thr;
+}
+INLINE void set_cur_thread(ThreadState *thr) {
+ reinterpret_cast<ThreadState *>(cur_thread_placeholder)->current = thr;
+}
+INLINE void cur_thread_finalize() { }
+#endif // SANITIZER_MAC || SANITIZER_ANDROID
+#endif // SANITIZER_GO
+
+class ThreadContext : public ThreadContextBase {
+ public:
+ explicit ThreadContext(int tid);
+ ~ThreadContext();
+ ThreadState *thr;
+ u32 creation_stack_id;
+ SyncClock sync;
+ // Epoch at which the thread started.
+ // If we see an event from the thread stamped by an older epoch,
+ // the event is from a dead thread that shared tid with this thread.
+ u64 epoch0;
+ u64 epoch1;
+
+ // Override superclass callbacks.
+ void OnDead() override;
+ void OnJoined(void *arg) override;
+ void OnFinished() override;
+ void OnStarted(void *arg) override;
+ void OnCreated(void *arg) override;
+ void OnReset() override;
+ void OnDetached(void *arg) override;
+};
+
+struct RacyStacks {
+ MD5Hash hash[2];
+ bool operator==(const RacyStacks &other) const {
+ if (hash[0] == other.hash[0] && hash[1] == other.hash[1])
+ return true;
+ if (hash[0] == other.hash[1] && hash[1] == other.hash[0])
+ return true;
+ return false;
+ }
+};
+
+struct RacyAddress {
+ uptr addr_min;
+ uptr addr_max;
+};
+
+struct FiredSuppression {
+ ReportType type;
+ uptr pc_or_addr;
+ Suppression *supp;
+};
+
+struct Context {
+ Context();
+
+ bool initialized;
+#if !SANITIZER_GO
+ bool after_multithreaded_fork;
+#endif
+
+ MetaMap metamap;
+
+ Mutex report_mtx;
+ int nreported;
+ int nmissed_expected;
+ atomic_uint64_t last_symbolize_time_ns;
+
+ void *background_thread;
+ atomic_uint32_t stop_background_thread;
+
+ ThreadRegistry *thread_registry;
+
+ Mutex racy_mtx;
+ Vector<RacyStacks> racy_stacks;
+ Vector<RacyAddress> racy_addresses;
+ // The number of fired suppressions may be large.
+ Mutex fired_suppressions_mtx;
+ InternalMmapVector<FiredSuppression> fired_suppressions;
+ DDetector *dd;
+
+ ClockAlloc clock_alloc;
+
+ Flags flags;
+
+ u64 stat[StatCnt];
+ u64 int_alloc_cnt[MBlockTypeCount];
+ u64 int_alloc_siz[MBlockTypeCount];
+};
+
+extern Context *ctx; // The one and the only global runtime context.
+
+ALWAYS_INLINE Flags *flags() {
+ return &ctx->flags;
+}
+
+struct ScopedIgnoreInterceptors {
+ ScopedIgnoreInterceptors() {
+#if !SANITIZER_GO
+ cur_thread()->ignore_interceptors++;
+#endif
+ }
+
+ ~ScopedIgnoreInterceptors() {
+#if !SANITIZER_GO
+ cur_thread()->ignore_interceptors--;
+#endif
+ }
+};
+
+const char *GetObjectTypeFromTag(uptr tag);
+const char *GetReportHeaderFromTag(uptr tag);
+uptr TagFromShadowStackFrame(uptr pc);
+
+class ScopedReportBase {
+ public:
+ void AddMemoryAccess(uptr addr, uptr external_tag, Shadow s, StackTrace stack,
+ const MutexSet *mset);
+ void AddStack(StackTrace stack, bool suppressable = false);
+ void AddThread(const ThreadContext *tctx, bool suppressable = false);
+ void AddThread(int unique_tid, bool suppressable = false);
+ void AddUniqueTid(int unique_tid);
+ void AddMutex(const SyncVar *s);
+ u64 AddMutex(u64 id);
+ void AddLocation(uptr addr, uptr size);
+ void AddSleep(u32 stack_id);
+ void SetCount(int count);
+
+ const ReportDesc *GetReport() const;
+
+ protected:
+ ScopedReportBase(ReportType typ, uptr tag);
+ ~ScopedReportBase();
+
+ private:
+ ReportDesc *rep_;
+ // Symbolizer makes lots of intercepted calls. If we try to process them,
+ // at best it will cause deadlocks on internal mutexes.
+ ScopedIgnoreInterceptors ignore_interceptors_;
+
+ void AddDeadMutex(u64 id);
+
+ ScopedReportBase(const ScopedReportBase &) = delete;
+ void operator=(const ScopedReportBase &) = delete;
+};
+
+class ScopedReport : public ScopedReportBase {
+ public:
+ explicit ScopedReport(ReportType typ, uptr tag = kExternalTagNone);
+ ~ScopedReport();
+
+ private:
+ ScopedErrorReportLock lock_;
+};
+
+ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack);
+void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
+ MutexSet *mset, uptr *tag = nullptr);
+
+// The stack could look like:
+// <start> | <main> | <foo> | tag | <bar>
+// This will extract the tag and keep:
+// <start> | <main> | <foo> | <bar>
+template<typename StackTraceTy>
+void ExtractTagFromStack(StackTraceTy *stack, uptr *tag = nullptr) {
+ if (stack->size < 2) return;
+ uptr possible_tag_pc = stack->trace[stack->size - 2];
+ uptr possible_tag = TagFromShadowStackFrame(possible_tag_pc);
+ if (possible_tag == kExternalTagNone) return;
+ stack->trace_buffer[stack->size - 2] = stack->trace_buffer[stack->size - 1];
+ stack->size -= 1;
+ if (tag) *tag = possible_tag;
+}
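+
+// Worked example (illustrative, hypothetical pcs): with trace ==
+// {pc_main, pc_foo, tag_pc, pc_bar} and size == 4, a non-kExternalTagNone
+// result for trace[2] shifts trace_buffer[2] = trace_buffer[3] and drops
+// size to 3, leaving {pc_main, pc_foo, pc_bar} with *tag set to the
+// extracted tag.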
+
+template<typename StackTraceTy>
+void ObtainCurrentStack(ThreadState *thr, uptr toppc, StackTraceTy *stack,
+ uptr *tag = nullptr) {
+ uptr size = thr->shadow_stack_pos - thr->shadow_stack;
+ uptr start = 0;
+ if (size + !!toppc > kStackTraceMax) {
+ start = size + !!toppc - kStackTraceMax;
+ size = kStackTraceMax - !!toppc;
+ }
+ stack->Init(&thr->shadow_stack[start], size, toppc);
+ ExtractTagFromStack(stack, tag);
+}
+
+#define GET_STACK_TRACE_FATAL(thr, pc) \
+ VarSizeStackTrace stack; \
+ ObtainCurrentStack(thr, pc, &stack); \
+ stack.ReverseOrder();
+
+#if TSAN_COLLECT_STATS
+void StatAggregate(u64 *dst, u64 *src);
+void StatOutput(u64 *stat);
+#endif
+
+void ALWAYS_INLINE StatInc(ThreadState *thr, StatType typ, u64 n = 1) {
+#if TSAN_COLLECT_STATS
+ thr->stat[typ] += n;
+#endif
+}
+void ALWAYS_INLINE StatSet(ThreadState *thr, StatType typ, u64 n) {
+#if TSAN_COLLECT_STATS
+ thr->stat[typ] = n;
+#endif
+}
+
+void MapShadow(uptr addr, uptr size);
+void MapThreadTrace(uptr addr, uptr size, const char *name);
+void DontNeedShadowFor(uptr addr, uptr size);
+void InitializeShadowMemory();
+void InitializeInterceptors();
+void InitializeLibIgnore();
+void InitializeDynamicAnnotations();
+
+void ForkBefore(ThreadState *thr, uptr pc);
+void ForkParentAfter(ThreadState *thr, uptr pc);
+void ForkChildAfter(ThreadState *thr, uptr pc);
+
+void ReportRace(ThreadState *thr);
+bool OutputReport(ThreadState *thr, const ScopedReport &srep);
+bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace);
+bool IsExpectedReport(uptr addr, uptr size);
+void PrintMatchedBenignRaces();
+
+#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 1
+# define DPrintf Printf
+#else
+# define DPrintf(...)
+#endif
+
+#if defined(TSAN_DEBUG_OUTPUT) && TSAN_DEBUG_OUTPUT >= 2
+# define DPrintf2 Printf
+#else
+# define DPrintf2(...)
+#endif
+
+u32 CurrentStackId(ThreadState *thr, uptr pc);
+ReportStack *SymbolizeStackId(u32 stack_id);
+void PrintCurrentStack(ThreadState *thr, uptr pc);
+void PrintCurrentStackSlow(uptr pc); // uses libunwind
+
+void Initialize(ThreadState *thr);
+void MaybeSpawnBackgroundThread();
+int Finalize(ThreadState *thr);
+
+void OnUserAlloc(ThreadState *thr, uptr pc, uptr p, uptr sz, bool write);
+void OnUserFree(ThreadState *thr, uptr pc, uptr p, bool write);
+
+void MemoryAccess(ThreadState *thr, uptr pc, uptr addr,
+ int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic);
+void MemoryAccessImpl(ThreadState *thr, uptr addr,
+ int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic,
+ u64 *shadow_mem, Shadow cur);
+void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
+ uptr size, bool is_write);
+void MemoryAccessRangeStep(ThreadState *thr, uptr pc, uptr addr,
+ uptr size, uptr step, bool is_write);
+void UnalignedMemoryAccess(ThreadState *thr, uptr pc, uptr addr,
+ int size, bool kAccessIsWrite, bool kIsAtomic);
+
+const int kSizeLog1 = 0;
+const int kSizeLog2 = 1;
+const int kSizeLog4 = 2;
+const int kSizeLog8 = 3;
+
+void ALWAYS_INLINE MemoryRead(ThreadState *thr, uptr pc,
+ uptr addr, int kAccessSizeLog) {
+ MemoryAccess(thr, pc, addr, kAccessSizeLog, false, false);
+}
+
+void ALWAYS_INLINE MemoryWrite(ThreadState *thr, uptr pc,
+ uptr addr, int kAccessSizeLog) {
+ MemoryAccess(thr, pc, addr, kAccessSizeLog, true, false);
+}
+
+void ALWAYS_INLINE MemoryReadAtomic(ThreadState *thr, uptr pc,
+ uptr addr, int kAccessSizeLog) {
+ MemoryAccess(thr, pc, addr, kAccessSizeLog, false, true);
+}
+
+void ALWAYS_INLINE MemoryWriteAtomic(ThreadState *thr, uptr pc,
+ uptr addr, int kAccessSizeLog) {
+ MemoryAccess(thr, pc, addr, kAccessSizeLog, true, true);
+}
+
+void MemoryResetRange(ThreadState *thr, uptr pc, uptr addr, uptr size);
+void MemoryRangeFreed(ThreadState *thr, uptr pc, uptr addr, uptr size);
+void MemoryRangeImitateWrite(ThreadState *thr, uptr pc, uptr addr, uptr size);
+
+void ThreadIgnoreBegin(ThreadState *thr, uptr pc, bool save_stack = true);
+void ThreadIgnoreEnd(ThreadState *thr, uptr pc);
+void ThreadIgnoreSyncBegin(ThreadState *thr, uptr pc, bool save_stack = true);
+void ThreadIgnoreSyncEnd(ThreadState *thr, uptr pc);
+
+void FuncEntry(ThreadState *thr, uptr pc);
+void FuncExit(ThreadState *thr);
+
+int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached);
+void ThreadStart(ThreadState *thr, int tid, tid_t os_id,
+ ThreadType thread_type);
+void ThreadFinish(ThreadState *thr);
+int ThreadTid(ThreadState *thr, uptr pc, uptr uid);
+void ThreadJoin(ThreadState *thr, uptr pc, int tid);
+void ThreadDetach(ThreadState *thr, uptr pc, int tid);
+void ThreadFinalize(ThreadState *thr);
+void ThreadSetName(ThreadState *thr, const char *name);
+int ThreadCount(ThreadState *thr);
+void ProcessPendingSignals(ThreadState *thr);
+void ThreadNotJoined(ThreadState *thr, uptr pc, int tid, uptr uid);
+
+Processor *ProcCreate();
+void ProcDestroy(Processor *proc);
+void ProcWire(Processor *proc, ThreadState *thr);
+void ProcUnwire(Processor *proc, ThreadState *thr);
+
+// Note: the parameter is called flagz, because flags is already taken
+// by the global function that returns flags.
+void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
+void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
+void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
+void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0,
+ int rec = 1);
+int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
+void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
+void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz = 0);
+void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr);
+void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr);
+void MutexRepair(ThreadState *thr, uptr pc, uptr addr); // call on EOWNERDEAD
+void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr);
+
+void Acquire(ThreadState *thr, uptr pc, uptr addr);
+// AcquireGlobal synchronizes the current thread with all other threads.
+// In terms of happens-before relation, it draws a HB edge from all threads
+// (where they happen to execute right now) to the current thread. We use it to
+// handle Go finalizers. Namely, the finalizer goroutine executes
+// AcquireGlobal right before executing finalizers. This provides a coarse but
+// simple approximation of the actual required synchronization.
+void AcquireGlobal(ThreadState *thr, uptr pc);
+void Release(ThreadState *thr, uptr pc, uptr addr);
+void ReleaseStore(ThreadState *thr, uptr pc, uptr addr);
+void AfterSleep(ThreadState *thr, uptr pc);
+void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c);
+void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);
+void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c);
+void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c);
+
+// The hacky call uses a custom calling convention and an assembly thunk.
+// It is considerably faster than a normal call for the caller
+// if it is not executed (it is intended for slow paths of hot functions).
+// The trick is that the call preserves all registers and the compiler
+// does not treat it as a call.
+// If it does not work for you, use a normal call.
+#if !SANITIZER_DEBUG && defined(__x86_64__) && !SANITIZER_MAC
+// The caller may not create the stack frame for itself at all,
+// so we create a reserve stack frame for it (1024b must be enough).
+#define HACKY_CALL(f) \
+ __asm__ __volatile__("sub $1024, %%rsp;" \
+ CFI_INL_ADJUST_CFA_OFFSET(1024) \
+ ".hidden " #f "_thunk;" \
+ "call " #f "_thunk;" \
+ "add $1024, %%rsp;" \
+ CFI_INL_ADJUST_CFA_OFFSET(-1024) \
+ ::: "memory", "cc");
+#else
+#define HACKY_CALL(f) f()
+#endif
+
+void TraceSwitch(ThreadState *thr);
+uptr TraceTopPC(ThreadState *thr);
+uptr TraceSize();
+uptr TraceParts();
+Trace *ThreadTrace(int tid);
+
+extern "C" void __tsan_trace_switch();
+void ALWAYS_INLINE TraceAddEvent(ThreadState *thr, FastState fs,
+ EventType typ, u64 addr) {
+ if (!kCollectHistory)
+ return;
+ DCHECK_GE((int)typ, 0);
+ DCHECK_LE((int)typ, 7);
+ DCHECK_EQ(GetLsb(addr, kEventPCBits), addr);
+ StatInc(thr, StatEvents);
+ u64 pos = fs.GetTracePos();
+ if (UNLIKELY((pos % kTracePartSize) == 0)) {
+#if !SANITIZER_GO
+ HACKY_CALL(__tsan_trace_switch);
+#else
+ TraceSwitch(thr);
+#endif
+ }
+ Event *trace = (Event*)GetThreadTrace(fs.tid());
+ Event *evp = &trace[pos];
+ Event ev = (u64)addr | ((u64)typ << kEventPCBits);
+ *evp = ev;
+}
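+
+// Event encoding sketch (illustrative; assumes kEventPCBits == 61, leaving
+// 3 bits for the EventType): an event packs as
+//   ev = (u64)addr | ((u64)typ << 61);
+// so typ == 2 with addr == 0x1000 yields 0x4000000000001000ull; the DCHECKs
+// above guarantee typ <= 7 and that addr fits in the low 61 bits.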
+
+#if !SANITIZER_GO
+uptr ALWAYS_INLINE HeapEnd() {
+ return HeapMemEnd() + PrimaryAllocator::AdditionalSize();
+}
+#endif
+
+ThreadState *FiberCreate(ThreadState *thr, uptr pc, unsigned flags);
+void FiberDestroy(ThreadState *thr, uptr pc, ThreadState *fiber);
+void FiberSwitch(ThreadState *thr, uptr pc, ThreadState *fiber, unsigned flags);
+
+// These need to match __tsan_switch_to_fiber_* flags defined in
+// tsan_interface.h. See documentation there as well.
+enum FiberSwitchFlags {
+ FiberSwitchFlagNoSync = 1 << 0, // __tsan_switch_to_fiber_no_sync
+};
+
+} // namespace __tsan
+
+#endif // TSAN_RTL_H
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_rtl_aarch64.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_rtl_aarch64.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_rtl_aarch64.S (revision 351984)
@@ -0,0 +1,245 @@
+// The content of this file is AArch64-only:
+#if defined(__aarch64__)
+
+#include "sanitizer_common/sanitizer_asm.h"
+
+#if defined(__APPLE__)
+.align 2
+
+.section __DATA,__nl_symbol_ptr,non_lazy_symbol_pointers
+.long _setjmp$non_lazy_ptr
+_setjmp$non_lazy_ptr:
+.indirect_symbol _setjmp
+.long 0
+
+.section __DATA,__nl_symbol_ptr,non_lazy_symbol_pointers
+.long __setjmp$non_lazy_ptr
+__setjmp$non_lazy_ptr:
+.indirect_symbol __setjmp
+.long 0
+
+.section __DATA,__nl_symbol_ptr,non_lazy_symbol_pointers
+.long _sigsetjmp$non_lazy_ptr
+_sigsetjmp$non_lazy_ptr:
+.indirect_symbol _sigsetjmp
+.long 0
+#endif
+
+#if !defined(__APPLE__)
+.section .text
+#else
+.section __TEXT,__text
+.align 3
+#endif
+
+ASM_HIDDEN(__tsan_setjmp)
+.comm _ZN14__interception11real_setjmpE,8,8
+.globl ASM_SYMBOL_INTERCEPTOR(setjmp)
+ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(setjmp))
+ASM_SYMBOL_INTERCEPTOR(setjmp):
+ CFI_STARTPROC
+
+ // Save frame/link register
+ stp x29, x30, [sp, -32]!
+ CFI_DEF_CFA_OFFSET (32)
+ CFI_OFFSET (29, -32)
+ CFI_OFFSET (30, -24)
+
+ // Adjust the SP for previous frame
+ add x29, sp, 0
+ CFI_DEF_CFA_REGISTER (29)
+
+ // Save env parameter
+ str x0, [sp, 16]
+ CFI_OFFSET (0, -16)
+
+ // Obtain SP, first argument to `void __tsan_setjmp(uptr sp)`
+ add x0, x29, 32
+
+ // call tsan interceptor
+ bl ASM_SYMBOL(__tsan_setjmp)
+
+ // Restore env parameter
+ ldr x0, [sp, 16]
+ CFI_RESTORE (0)
+
+ // Restore frame/link register
+ ldp x29, x30, [sp], 32
+ CFI_RESTORE (29)
+ CFI_RESTORE (30)
+ CFI_DEF_CFA (31, 0)
+
+ // tail jump to libc setjmp
+#if !defined(__APPLE__)
+ adrp x1, :got:_ZN14__interception11real_setjmpE
+ ldr x1, [x1, #:got_lo12:_ZN14__interception11real_setjmpE]
+ ldr x1, [x1]
+#else
+ adrp x1, _setjmp$non_lazy_ptr@page
+ add x1, x1, _setjmp$non_lazy_ptr@pageoff
+ ldr x1, [x1]
+#endif
+ br x1
+
+ CFI_ENDPROC
+ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(setjmp))
+
+.comm _ZN14__interception12real__setjmpE,8,8
+.globl ASM_SYMBOL_INTERCEPTOR(_setjmp)
+ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(_setjmp))
+ASM_SYMBOL_INTERCEPTOR(_setjmp):
+ CFI_STARTPROC
+
+ // Save frame/link register
+ stp x29, x30, [sp, -32]!
+ CFI_DEF_CFA_OFFSET (32)
+ CFI_OFFSET (29, -32)
+ CFI_OFFSET (30, -24)
+
+  // Set up the frame pointer (x29) for this frame
+ add x29, sp, 0
+ CFI_DEF_CFA_REGISTER (29)
+
+ // Save env parameter
+ str x0, [sp, 16]
+ CFI_OFFSET (0, -16)
+
+ // Obtain SP, first argument to `void __tsan_setjmp(uptr sp)`
+ add x0, x29, 32
+
+ // call tsan interceptor
+ bl ASM_SYMBOL(__tsan_setjmp)
+
+ // Restore env parameter
+ ldr x0, [sp, 16]
+ CFI_RESTORE (0)
+
+ // Restore frame/link register
+ ldp x29, x30, [sp], 32
+ CFI_RESTORE (29)
+ CFI_RESTORE (30)
+ CFI_DEF_CFA (31, 0)
+
+ // tail jump to libc setjmp
+#if !defined(__APPLE__)
+ adrp x1, :got:_ZN14__interception12real__setjmpE
+ ldr x1, [x1, #:got_lo12:_ZN14__interception12real__setjmpE]
+ ldr x1, [x1]
+#else
+ adrp x1, __setjmp$non_lazy_ptr@page
+ add x1, x1, __setjmp$non_lazy_ptr@pageoff
+ ldr x1, [x1]
+#endif
+ br x1
+
+ CFI_ENDPROC
+ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(_setjmp))
+
+.comm _ZN14__interception14real_sigsetjmpE,8,8
+.globl ASM_SYMBOL_INTERCEPTOR(sigsetjmp)
+ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(sigsetjmp))
+ASM_SYMBOL_INTERCEPTOR(sigsetjmp):
+ CFI_STARTPROC
+
+ // Save frame/link register
+ stp x29, x30, [sp, -32]!
+ CFI_DEF_CFA_OFFSET (32)
+ CFI_OFFSET (29, -32)
+ CFI_OFFSET (30, -24)
+
+  // Set up the frame pointer (x29) for this frame
+ add x29, sp, 0
+ CFI_DEF_CFA_REGISTER (29)
+
+ // Save env and savesigs parameter
+ stp x0, x1, [sp, 16]
+ CFI_OFFSET (0, -16)
+ CFI_OFFSET (1, -8)
+
+ // Obtain SP, first argument to `void __tsan_setjmp(uptr sp)`
+ add x0, x29, 32
+
+ // call tsan interceptor
+ bl ASM_SYMBOL(__tsan_setjmp)
+
+ // Restore env and savesigs parameter
+ ldp x0, x1, [sp, 16]
+ CFI_RESTORE (0)
+ CFI_RESTORE (1)
+
+ // Restore frame/link register
+ ldp x29, x30, [sp], 32
+ CFI_RESTORE (29)
+ CFI_RESTORE (30)
+ CFI_DEF_CFA (31, 0)
+
+ // tail jump to libc sigsetjmp
+#if !defined(__APPLE__)
+ adrp x2, :got:_ZN14__interception14real_sigsetjmpE
+ ldr x2, [x2, #:got_lo12:_ZN14__interception14real_sigsetjmpE]
+ ldr x2, [x2]
+#else
+ adrp x2, _sigsetjmp$non_lazy_ptr@page
+ add x2, x2, _sigsetjmp$non_lazy_ptr@pageoff
+ ldr x2, [x2]
+#endif
+ br x2
+ CFI_ENDPROC
+ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(sigsetjmp))
+
+#if !defined(__APPLE__)
+.comm _ZN14__interception16real___sigsetjmpE,8,8
+.globl ASM_SYMBOL_INTERCEPTOR(__sigsetjmp)
+ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(__sigsetjmp))
+ASM_SYMBOL_INTERCEPTOR(__sigsetjmp):
+ CFI_STARTPROC
+
+ // Save frame/link register
+ stp x29, x30, [sp, -32]!
+ CFI_DEF_CFA_OFFSET (32)
+ CFI_OFFSET (29, -32)
+ CFI_OFFSET (30, -24)
+
+  // Set up the frame pointer (x29) for this frame
+ add x29, sp, 0
+ CFI_DEF_CFA_REGISTER (29)
+
+ // Save env and savesigs parameter
+ stp x0, x1, [sp, 16]
+ CFI_OFFSET (0, -16)
+ CFI_OFFSET (1, -8)
+
+ // Obtain SP, first argument to `void __tsan_setjmp(uptr sp)`
+ add x0, x29, 32
+
+ // call tsan interceptor
+ bl ASM_SYMBOL(__tsan_setjmp)
+
+ // Restore env and savesigs parameter
+ ldp x0, x1, [sp, 16]
+ CFI_RESTORE (0)
+ CFI_RESTORE (1)
+
+ // Restore frame/link register
+ ldp x29, x30, [sp], 32
+ CFI_RESTORE (29)
+ CFI_RESTORE (30)
+ CFI_DEF_CFA (31, 0)
+
+ // tail jump to libc __sigsetjmp
+#if !defined(__APPLE__)
+ adrp x2, :got:_ZN14__interception16real___sigsetjmpE
+ ldr x2, [x2, #:got_lo12:_ZN14__interception16real___sigsetjmpE]
+ ldr x2, [x2]
+#else
+ adrp x2, ASM_SYMBOL(__sigsetjmp)@page
+ add x2, x2, ASM_SYMBOL(__sigsetjmp)@pageoff
+#endif
+ br x2
+ CFI_ENDPROC
+ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(__sigsetjmp))
+#endif
+
+NO_EXEC_STACK_DIRECTIVE
+
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_rtl_aarch64.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_rtl_amd64.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_rtl_amd64.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_rtl_amd64.S (revision 351984)
@@ -0,0 +1,366 @@
+// The content of this file is x86_64-only:
+#if defined(__x86_64__)
+
+#include "sanitizer_common/sanitizer_asm.h"
+
+#if !defined(__APPLE__)
+.section .text
+#else
+.section __TEXT,__text
+#endif
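+
+# The two thunks below are the call targets of HACKY_CALL in tsan_rtl.h.
+# They preserve every scratch register and realign the stack, which is what
+# lets the compiler treat a HACKY_CALL site as if no call happened at all.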
+
+ASM_HIDDEN(__tsan_trace_switch)
+.globl ASM_SYMBOL(__tsan_trace_switch_thunk)
+ASM_SYMBOL(__tsan_trace_switch_thunk):
+ CFI_STARTPROC
+ # Save scratch registers.
+ push %rax
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rax, 0)
+ push %rcx
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rcx, 0)
+ push %rdx
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rdx, 0)
+ push %rsi
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rsi, 0)
+ push %rdi
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rdi, 0)
+ push %r8
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%r8, 0)
+ push %r9
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%r9, 0)
+ push %r10
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%r10, 0)
+ push %r11
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%r11, 0)
+ # Align stack frame.
+ push %rbx # non-scratch
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rbx, 0)
+ mov %rsp, %rbx # save current rsp
+ CFI_DEF_CFA_REGISTER(%rbx)
+ shr $4, %rsp # clear 4 lsb, align to 16
+ shl $4, %rsp
+
+ call ASM_SYMBOL(__tsan_trace_switch)
+
+ # Unalign stack frame back.
+ mov %rbx, %rsp # restore the original rsp
+ CFI_DEF_CFA_REGISTER(%rsp)
+ pop %rbx
+ CFI_ADJUST_CFA_OFFSET(-8)
+ # Restore scratch registers.
+ pop %r11
+ CFI_ADJUST_CFA_OFFSET(-8)
+ pop %r10
+ CFI_ADJUST_CFA_OFFSET(-8)
+ pop %r9
+ CFI_ADJUST_CFA_OFFSET(-8)
+ pop %r8
+ CFI_ADJUST_CFA_OFFSET(-8)
+ pop %rdi
+ CFI_ADJUST_CFA_OFFSET(-8)
+ pop %rsi
+ CFI_ADJUST_CFA_OFFSET(-8)
+ pop %rdx
+ CFI_ADJUST_CFA_OFFSET(-8)
+ pop %rcx
+ CFI_ADJUST_CFA_OFFSET(-8)
+ pop %rax
+ CFI_ADJUST_CFA_OFFSET(-8)
+ CFI_RESTORE(%rax)
+ CFI_RESTORE(%rbx)
+ CFI_RESTORE(%rcx)
+ CFI_RESTORE(%rdx)
+ CFI_RESTORE(%rsi)
+ CFI_RESTORE(%rdi)
+ CFI_RESTORE(%r8)
+ CFI_RESTORE(%r9)
+ CFI_RESTORE(%r10)
+ CFI_RESTORE(%r11)
+ ret
+ CFI_ENDPROC
+
+ASM_HIDDEN(__tsan_report_race)
+.globl ASM_SYMBOL(__tsan_report_race_thunk)
+ASM_SYMBOL(__tsan_report_race_thunk):
+ CFI_STARTPROC
+ # Save scratch registers.
+ push %rax
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rax, 0)
+ push %rcx
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rcx, 0)
+ push %rdx
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rdx, 0)
+ push %rsi
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rsi, 0)
+ push %rdi
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rdi, 0)
+ push %r8
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%r8, 0)
+ push %r9
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%r9, 0)
+ push %r10
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%r10, 0)
+ push %r11
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%r11, 0)
+ # Align stack frame.
+ push %rbx # non-scratch
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rbx, 0)
+ mov %rsp, %rbx # save current rsp
+ CFI_DEF_CFA_REGISTER(%rbx)
+ shr $4, %rsp # clear 4 lsb, align to 16
+ shl $4, %rsp
+
+ call ASM_SYMBOL(__tsan_report_race)
+
+ # Unalign stack frame back.
+ mov %rbx, %rsp # restore the original rsp
+ CFI_DEF_CFA_REGISTER(%rsp)
+ pop %rbx
+ CFI_ADJUST_CFA_OFFSET(-8)
+ # Restore scratch registers.
+ pop %r11
+ CFI_ADJUST_CFA_OFFSET(-8)
+ pop %r10
+ CFI_ADJUST_CFA_OFFSET(-8)
+ pop %r9
+ CFI_ADJUST_CFA_OFFSET(-8)
+ pop %r8
+ CFI_ADJUST_CFA_OFFSET(-8)
+ pop %rdi
+ CFI_ADJUST_CFA_OFFSET(-8)
+ pop %rsi
+ CFI_ADJUST_CFA_OFFSET(-8)
+ pop %rdx
+ CFI_ADJUST_CFA_OFFSET(-8)
+ pop %rcx
+ CFI_ADJUST_CFA_OFFSET(-8)
+ pop %rax
+ CFI_ADJUST_CFA_OFFSET(-8)
+ CFI_RESTORE(%rax)
+ CFI_RESTORE(%rbx)
+ CFI_RESTORE(%rcx)
+ CFI_RESTORE(%rdx)
+ CFI_RESTORE(%rsi)
+ CFI_RESTORE(%rdi)
+ CFI_RESTORE(%r8)
+ CFI_RESTORE(%r9)
+ CFI_RESTORE(%r10)
+ CFI_RESTORE(%r11)
+ ret
+ CFI_ENDPROC
+
+ASM_HIDDEN(__tsan_setjmp)
+#if defined(__NetBSD__)
+.comm _ZN14__interception15real___setjmp14E,8,8
+#elif !defined(__APPLE__)
+.comm _ZN14__interception11real_setjmpE,8,8
+#endif
+#if defined(__NetBSD__)
+.globl ASM_SYMBOL_INTERCEPTOR(__setjmp14)
+ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(__setjmp14))
+ASM_SYMBOL_INTERCEPTOR(__setjmp14):
+#else
+.globl ASM_SYMBOL_INTERCEPTOR(setjmp)
+ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(setjmp))
+ASM_SYMBOL_INTERCEPTOR(setjmp):
+#endif
+ CFI_STARTPROC
+ // save env parameter
+ push %rdi
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rdi, 0)
+ // obtain SP, store in %rdi, first argument to `void __tsan_setjmp(uptr sp)`
+#if defined(__FreeBSD__) || defined(__NetBSD__)
+ lea 8(%rsp), %rdi
+#elif defined(__linux__) || defined(__APPLE__)
+ lea 16(%rsp), %rdi
+#else
+# error "Unknown platform"
+#endif
+ // call tsan interceptor
+ call ASM_SYMBOL(__tsan_setjmp)
+ // restore env parameter
+ pop %rdi
+ CFI_ADJUST_CFA_OFFSET(-8)
+ CFI_RESTORE(%rdi)
+ // tail jump to libc setjmp
+ movl $0, %eax
+#if defined(__NetBSD__)
+ movq _ZN14__interception15real___setjmp14E@GOTPCREL(%rip), %rdx
+ jmp *(%rdx)
+#elif !defined(__APPLE__)
+ movq _ZN14__interception11real_setjmpE@GOTPCREL(%rip), %rdx
+ jmp *(%rdx)
+#else
+ jmp ASM_SYMBOL(setjmp)
+#endif
+ CFI_ENDPROC
+#if defined(__NetBSD__)
+ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(__setjmp14))
+#else
+ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(setjmp))
+#endif
+
+.comm _ZN14__interception12real__setjmpE,8,8
+.globl ASM_SYMBOL_INTERCEPTOR(_setjmp)
+ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(_setjmp))
+ASM_SYMBOL_INTERCEPTOR(_setjmp):
+ CFI_STARTPROC
+ // save env parameter
+ push %rdi
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rdi, 0)
+ // obtain SP, store in %rdi, first argument to `void __tsan_setjmp(uptr sp)`
+#if defined(__FreeBSD__) || defined(__NetBSD__)
+ lea 8(%rsp), %rdi
+#elif defined(__linux__) || defined(__APPLE__)
+ lea 16(%rsp), %rdi
+#else
+# error "Unknown platform"
+#endif
+ // call tsan interceptor
+ call ASM_SYMBOL(__tsan_setjmp)
+ // restore env parameter
+ pop %rdi
+ CFI_ADJUST_CFA_OFFSET(-8)
+ CFI_RESTORE(%rdi)
+ // tail jump to libc setjmp
+ movl $0, %eax
+#if !defined(__APPLE__)
+ movq _ZN14__interception12real__setjmpE@GOTPCREL(%rip), %rdx
+ jmp *(%rdx)
+#else
+ jmp ASM_SYMBOL(_setjmp)
+#endif
+ CFI_ENDPROC
+ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(_setjmp))
+
+#if defined(__NetBSD__)
+.comm _ZN14__interception18real___sigsetjmp14E,8,8
+.globl ASM_SYMBOL_INTERCEPTOR(__sigsetjmp14)
+ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(__sigsetjmp14))
+ASM_SYMBOL_INTERCEPTOR(__sigsetjmp14):
+#else
+.comm _ZN14__interception14real_sigsetjmpE,8,8
+.globl ASM_SYMBOL_INTERCEPTOR(sigsetjmp)
+ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(sigsetjmp))
+ASM_SYMBOL_INTERCEPTOR(sigsetjmp):
+#endif
+ CFI_STARTPROC
+ // save env parameter
+ push %rdi
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rdi, 0)
+ // save savesigs parameter
+ push %rsi
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rsi, 0)
+ // align stack frame
+ sub $8, %rsp
+ CFI_ADJUST_CFA_OFFSET(8)
+ // obtain SP, store in %rdi, first argument to `void __tsan_setjmp(uptr sp)`
+#if defined(__FreeBSD__) || defined(__NetBSD__)
+ lea 24(%rsp), %rdi
+#elif defined(__linux__) || defined(__APPLE__)
+ lea 32(%rsp), %rdi
+#else
+# error "Unknown platform"
+#endif
+ // call tsan interceptor
+ call ASM_SYMBOL(__tsan_setjmp)
+ // unalign stack frame
+ add $8, %rsp
+ CFI_ADJUST_CFA_OFFSET(-8)
+ // restore savesigs parameter
+ pop %rsi
+ CFI_ADJUST_CFA_OFFSET(-8)
+ CFI_RESTORE(%rsi)
+ // restore env parameter
+ pop %rdi
+ CFI_ADJUST_CFA_OFFSET(-8)
+ CFI_RESTORE(%rdi)
+ // tail jump to libc sigsetjmp
+ movl $0, %eax
+#if defined(__NetBSD__)
+ movq _ZN14__interception18real___sigsetjmp14E@GOTPCREL(%rip), %rdx
+ jmp *(%rdx)
+#elif !defined(__APPLE__)
+ movq _ZN14__interception14real_sigsetjmpE@GOTPCREL(%rip), %rdx
+ jmp *(%rdx)
+#else
+ jmp ASM_SYMBOL(sigsetjmp)
+#endif
+ CFI_ENDPROC
+#if defined(__NetBSD__)
+ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(__sigsetjmp14))
+#else
+ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(sigsetjmp))
+#endif
+
+#if !defined(__APPLE__) && !defined(__NetBSD__)
+.comm _ZN14__interception16real___sigsetjmpE,8,8
+.globl ASM_SYMBOL_INTERCEPTOR(__sigsetjmp)
+ASM_TYPE_FUNCTION(ASM_SYMBOL_INTERCEPTOR(__sigsetjmp))
+ASM_SYMBOL_INTERCEPTOR(__sigsetjmp):
+ CFI_STARTPROC
+ // save env parameter
+ push %rdi
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rdi, 0)
+ // save savesigs parameter
+ push %rsi
+ CFI_ADJUST_CFA_OFFSET(8)
+ CFI_REL_OFFSET(%rsi, 0)
+ // align stack frame
+ sub $8, %rsp
+ CFI_ADJUST_CFA_OFFSET(8)
+ // obtain SP, store in %rdi, first argument to `void __tsan_setjmp(uptr sp)`
+#if defined(__FreeBSD__)
+ lea 24(%rsp), %rdi
+#else
+ lea 32(%rsp), %rdi
+#endif
+ // call tsan interceptor
+ call ASM_SYMBOL(__tsan_setjmp)
+ // unalign stack frame
+ add $8, %rsp
+ CFI_ADJUST_CFA_OFFSET(-8)
+ // restore savesigs parameter
+ pop %rsi
+ CFI_ADJUST_CFA_OFFSET(-8)
+ CFI_RESTORE(%rsi)
+ // restore env parameter
+ pop %rdi
+ CFI_ADJUST_CFA_OFFSET(-8)
+ CFI_RESTORE(%rdi)
+ // tail jump to libc sigsetjmp
+ movl $0, %eax
+ movq _ZN14__interception16real___sigsetjmpE@GOTPCREL(%rip), %rdx
+ jmp *(%rdx)
+ CFI_ENDPROC
+ASM_SIZE(ASM_SYMBOL_INTERCEPTOR(__sigsetjmp))
+#endif // !defined(__APPLE__) && !defined(__NetBSD__)
+
+NO_EXEC_STACK_DIRECTIVE
+
+#endif
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_rtl_mutex.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_rtl_mutex.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_rtl_mutex.cc (revision 351984)
@@ -0,0 +1,539 @@
+//===-- tsan_rtl_mutex.cc -------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include <sanitizer_common/sanitizer_deadlock_detector_interface.h>
+#include <sanitizer_common/sanitizer_stackdepot.h>
+
+#include "tsan_rtl.h"
+#include "tsan_flags.h"
+#include "tsan_sync.h"
+#include "tsan_report.h"
+#include "tsan_symbolize.h"
+#include "tsan_platform.h"
+
+namespace __tsan {
+
+void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r);
+
+struct Callback : DDCallback {
+ ThreadState *thr;
+ uptr pc;
+
+ Callback(ThreadState *thr, uptr pc)
+ : thr(thr)
+ , pc(pc) {
+ DDCallback::pt = thr->proc()->dd_pt;
+ DDCallback::lt = thr->dd_lt;
+ }
+
+ u32 Unwind() override { return CurrentStackId(thr, pc); }
+ int UniqueTid() override { return thr->unique_id; }
+};
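+
+// Note: dd_pt is the deadlock detector's physical-thread handle (created in
+// ProcCreate() in tsan_rtl_proc.cc); dd_lt is the per-thread logical-thread
+// handle stored on the ThreadState.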
+
+void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s) {
+ Callback cb(thr, pc);
+ ctx->dd->MutexInit(&cb, &s->dd);
+ s->dd.ctx = s->GetId();
+}
+
+static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
+ uptr addr, u64 mid) {
+ // In Go, these misuses are either impossible, or detected by std lib,
+ // or false positives (e.g. unlock in a different thread).
+ if (SANITIZER_GO)
+ return;
+ ThreadRegistryLock l(ctx->thread_registry);
+ ScopedReport rep(typ);
+ rep.AddMutex(mid);
+ VarSizeStackTrace trace;
+ ObtainCurrentStack(thr, pc, &trace);
+ rep.AddStack(trace, true);
+ rep.AddLocation(addr, 1);
+ OutputReport(thr, rep);
+}
+
+void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
+ DPrintf("#%d: MutexCreate %zx flagz=0x%x\n", thr->tid, addr, flagz);
+ StatInc(thr, StatMutexCreate);
+ if (!(flagz & MutexFlagLinkerInit) && IsAppMem(addr)) {
+ CHECK(!thr->is_freeing);
+ thr->is_freeing = true;
+ MemoryWrite(thr, pc, addr, kSizeLog1);
+ thr->is_freeing = false;
+ }
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
+ s->SetFlags(flagz & MutexCreationFlagMask);
+ if (!SANITIZER_GO && s->creation_stack_id == 0)
+ s->creation_stack_id = CurrentStackId(thr, pc);
+ s->mtx.Unlock();
+}
+
+void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
+ DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr);
+ StatInc(thr, StatMutexDestroy);
+ SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
+ if (s == 0)
+ return;
+ if ((flagz & MutexFlagLinkerInit)
+ || s->IsFlagSet(MutexFlagLinkerInit)
+ || ((flagz & MutexFlagNotStatic) && !s->IsFlagSet(MutexFlagNotStatic))) {
+ // Destroy is no-op for linker-initialized mutexes.
+ s->mtx.Unlock();
+ return;
+ }
+ if (common_flags()->detect_deadlocks) {
+ Callback cb(thr, pc);
+ ctx->dd->MutexDestroy(&cb, &s->dd);
+ ctx->dd->MutexInit(&cb, &s->dd);
+ }
+ bool unlock_locked = false;
+ if (flags()->report_destroy_locked
+ && s->owner_tid != SyncVar::kInvalidTid
+ && !s->IsFlagSet(MutexFlagBroken)) {
+ s->SetFlags(MutexFlagBroken);
+ unlock_locked = true;
+ }
+ u64 mid = s->GetId();
+ u64 last_lock = s->last_lock;
+ if (!unlock_locked)
+ s->Reset(thr->proc()); // must not reset it before the report is printed
+ s->mtx.Unlock();
+ if (unlock_locked) {
+ ThreadRegistryLock l(ctx->thread_registry);
+ ScopedReport rep(ReportTypeMutexDestroyLocked);
+ rep.AddMutex(mid);
+ VarSizeStackTrace trace;
+ ObtainCurrentStack(thr, pc, &trace);
+ rep.AddStack(trace, true);
+ FastState last(last_lock);
+ RestoreStack(last.tid(), last.epoch(), &trace, 0);
+ rep.AddStack(trace, true);
+ rep.AddLocation(addr, 1);
+ OutputReport(thr, rep);
+
+ SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
+ if (s != 0) {
+ s->Reset(thr->proc());
+ s->mtx.Unlock();
+ }
+ }
+ thr->mset.Remove(mid);
+  // Imitate a memory write to catch unlock-destroy races.
+  // Do this outside of the sync mutex lock, because reporting a race can
+  // itself lock sync mutexes.
+ if (IsAppMem(addr)) {
+ CHECK(!thr->is_freeing);
+ thr->is_freeing = true;
+ MemoryWrite(thr, pc, addr, kSizeLog1);
+ thr->is_freeing = false;
+ }
+ // s will be destroyed and freed in MetaMap::FreeBlock.
+}
+
+void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
+ DPrintf("#%d: MutexPreLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
+ if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) {
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
+ s->UpdateFlags(flagz);
+ if (s->owner_tid != thr->tid) {
+ Callback cb(thr, pc);
+ ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
+ s->mtx.ReadUnlock();
+ ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
+ } else {
+ s->mtx.ReadUnlock();
+ }
+ }
+}
+
+void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz, int rec) {
+ DPrintf("#%d: MutexPostLock %zx flag=0x%x rec=%d\n",
+ thr->tid, addr, flagz, rec);
+ if (flagz & MutexFlagRecursiveLock)
+ CHECK_GT(rec, 0);
+ else
+ rec = 1;
+ if (IsAppMem(addr))
+ MemoryReadAtomic(thr, pc, addr, kSizeLog1);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
+ s->UpdateFlags(flagz);
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId());
+ bool report_double_lock = false;
+ if (s->owner_tid == SyncVar::kInvalidTid) {
+ CHECK_EQ(s->recursion, 0);
+ s->owner_tid = thr->tid;
+ s->last_lock = thr->fast_state.raw();
+ } else if (s->owner_tid == thr->tid) {
+ CHECK_GT(s->recursion, 0);
+ } else if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
+ s->SetFlags(MutexFlagBroken);
+ report_double_lock = true;
+ }
+ const bool first = s->recursion == 0;
+ s->recursion += rec;
+ if (first) {
+ StatInc(thr, StatMutexLock);
+ AcquireImpl(thr, pc, &s->clock);
+ AcquireImpl(thr, pc, &s->read_clock);
+ } else if (!s->IsFlagSet(MutexFlagWriteReentrant)) {
+ StatInc(thr, StatMutexRecLock);
+ }
+ thr->mset.Add(s->GetId(), true, thr->fast_state.epoch());
+ bool pre_lock = false;
+ if (first && common_flags()->detect_deadlocks) {
+ pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) &&
+ !(flagz & MutexFlagTryLock);
+ Callback cb(thr, pc);
+ if (pre_lock)
+ ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
+ ctx->dd->MutexAfterLock(&cb, &s->dd, true, flagz & MutexFlagTryLock);
+ }
+ u64 mid = s->GetId();
+ s->mtx.Unlock();
+ // Can't touch s after this point.
+ s = 0;
+ if (report_double_lock)
+ ReportMutexMisuse(thr, pc, ReportTypeMutexDoubleLock, addr, mid);
+ if (first && pre_lock && common_flags()->detect_deadlocks) {
+ Callback cb(thr, pc);
+ ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
+ }
+}
+
+int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
+ DPrintf("#%d: MutexUnlock %zx flagz=0x%x\n", thr->tid, addr, flagz);
+ if (IsAppMem(addr))
+ MemoryReadAtomic(thr, pc, addr, kSizeLog1);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
+ int rec = 0;
+ bool report_bad_unlock = false;
+ if (!SANITIZER_GO && (s->recursion == 0 || s->owner_tid != thr->tid)) {
+ if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
+ s->SetFlags(MutexFlagBroken);
+ report_bad_unlock = true;
+ }
+ } else {
+ rec = (flagz & MutexFlagRecursiveUnlock) ? s->recursion : 1;
+ s->recursion -= rec;
+ if (s->recursion == 0) {
+ StatInc(thr, StatMutexUnlock);
+ s->owner_tid = SyncVar::kInvalidTid;
+ ReleaseStoreImpl(thr, pc, &s->clock);
+ } else {
+ StatInc(thr, StatMutexRecUnlock);
+ }
+ }
+ thr->mset.Del(s->GetId(), true);
+ if (common_flags()->detect_deadlocks && s->recursion == 0 &&
+ !report_bad_unlock) {
+ Callback cb(thr, pc);
+ ctx->dd->MutexBeforeUnlock(&cb, &s->dd, true);
+ }
+ u64 mid = s->GetId();
+ s->mtx.Unlock();
+ // Can't touch s after this point.
+ if (report_bad_unlock)
+ ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
+ if (common_flags()->detect_deadlocks && !report_bad_unlock) {
+ Callback cb(thr, pc);
+ ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
+ }
+ return rec;
+}
+
+void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
+ DPrintf("#%d: MutexPreReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
+ if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) {
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
+ s->UpdateFlags(flagz);
+ Callback cb(thr, pc);
+ ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
+ s->mtx.ReadUnlock();
+ ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
+ }
+}
+
+void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
+ DPrintf("#%d: MutexPostReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
+ StatInc(thr, StatMutexReadLock);
+ if (IsAppMem(addr))
+ MemoryReadAtomic(thr, pc, addr, kSizeLog1);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
+ s->UpdateFlags(flagz);
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId());
+ bool report_bad_lock = false;
+ if (s->owner_tid != SyncVar::kInvalidTid) {
+ if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
+ s->SetFlags(MutexFlagBroken);
+ report_bad_lock = true;
+ }
+ }
+ AcquireImpl(thr, pc, &s->clock);
+ s->last_lock = thr->fast_state.raw();
+ thr->mset.Add(s->GetId(), false, thr->fast_state.epoch());
+ bool pre_lock = false;
+ if (common_flags()->detect_deadlocks) {
+ pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) &&
+ !(flagz & MutexFlagTryLock);
+ Callback cb(thr, pc);
+ if (pre_lock)
+ ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
+ ctx->dd->MutexAfterLock(&cb, &s->dd, false, flagz & MutexFlagTryLock);
+ }
+ u64 mid = s->GetId();
+ s->mtx.ReadUnlock();
+ // Can't touch s after this point.
+ s = 0;
+ if (report_bad_lock)
+ ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadLock, addr, mid);
+ if (pre_lock && common_flags()->detect_deadlocks) {
+ Callback cb(thr, pc);
+ ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
+ }
+}
+
+void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
+ DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr);
+ StatInc(thr, StatMutexReadUnlock);
+ if (IsAppMem(addr))
+ MemoryReadAtomic(thr, pc, addr, kSizeLog1);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
+ bool report_bad_unlock = false;
+ if (s->owner_tid != SyncVar::kInvalidTid) {
+ if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
+ s->SetFlags(MutexFlagBroken);
+ report_bad_unlock = true;
+ }
+ }
+ ReleaseImpl(thr, pc, &s->read_clock);
+ if (common_flags()->detect_deadlocks && s->recursion == 0) {
+ Callback cb(thr, pc);
+ ctx->dd->MutexBeforeUnlock(&cb, &s->dd, false);
+ }
+ u64 mid = s->GetId();
+ s->mtx.Unlock();
+ // Can't touch s after this point.
+ thr->mset.Del(mid, false);
+ if (report_bad_unlock)
+ ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadUnlock, addr, mid);
+ if (common_flags()->detect_deadlocks) {
+ Callback cb(thr, pc);
+ ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
+ }
+}
+
+void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
+ DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
+ if (IsAppMem(addr))
+ MemoryReadAtomic(thr, pc, addr, kSizeLog1);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
+ bool write = true;
+ bool report_bad_unlock = false;
+ if (s->owner_tid == SyncVar::kInvalidTid) {
+ // Seems to be read unlock.
+ write = false;
+ StatInc(thr, StatMutexReadUnlock);
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
+ ReleaseImpl(thr, pc, &s->read_clock);
+ } else if (s->owner_tid == thr->tid) {
+ // Seems to be write unlock.
+ thr->fast_state.IncrementEpoch();
+ TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
+ CHECK_GT(s->recursion, 0);
+ s->recursion--;
+ if (s->recursion == 0) {
+ StatInc(thr, StatMutexUnlock);
+ s->owner_tid = SyncVar::kInvalidTid;
+ ReleaseStoreImpl(thr, pc, &s->clock);
+ } else {
+ StatInc(thr, StatMutexRecUnlock);
+ }
+ } else if (!s->IsFlagSet(MutexFlagBroken)) {
+ s->SetFlags(MutexFlagBroken);
+ report_bad_unlock = true;
+ }
+ thr->mset.Del(s->GetId(), write);
+ if (common_flags()->detect_deadlocks && s->recursion == 0) {
+ Callback cb(thr, pc);
+ ctx->dd->MutexBeforeUnlock(&cb, &s->dd, write);
+ }
+ u64 mid = s->GetId();
+ s->mtx.Unlock();
+ // Can't touch s after this point.
+ if (report_bad_unlock)
+ ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
+ if (common_flags()->detect_deadlocks) {
+ Callback cb(thr, pc);
+ ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
+ }
+}
+
+void MutexRepair(ThreadState *thr, uptr pc, uptr addr) {
+ DPrintf("#%d: MutexRepair %zx\n", thr->tid, addr);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
+ s->owner_tid = SyncVar::kInvalidTid;
+ s->recursion = 0;
+ s->mtx.Unlock();
+}
+
+void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr) {
+ DPrintf("#%d: MutexInvalidAccess %zx\n", thr->tid, addr);
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
+ u64 mid = s->GetId();
+ s->mtx.Unlock();
+ ReportMutexMisuse(thr, pc, ReportTypeMutexInvalidAccess, addr, mid);
+}
+
+void Acquire(ThreadState *thr, uptr pc, uptr addr) {
+ DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
+ if (thr->ignore_sync)
+ return;
+ SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, false);
+ if (!s)
+ return;
+ AcquireImpl(thr, pc, &s->clock);
+ s->mtx.ReadUnlock();
+}
+
+static void UpdateClockCallback(ThreadContextBase *tctx_base, void *arg) {
+ ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
+ ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
+ u64 epoch = tctx->epoch1;
+ if (tctx->status == ThreadStatusRunning)
+ epoch = tctx->thr->fast_state.epoch();
+ thr->clock.set(&thr->proc()->clock_cache, tctx->tid, epoch);
+}
+
+void AcquireGlobal(ThreadState *thr, uptr pc) {
+ DPrintf("#%d: AcquireGlobal\n", thr->tid);
+ if (thr->ignore_sync)
+ return;
+ ThreadRegistryLock l(ctx->thread_registry);
+ ctx->thread_registry->RunCallbackForEachThreadLocked(
+ UpdateClockCallback, thr);
+}
+
+void Release(ThreadState *thr, uptr pc, uptr addr) {
+ DPrintf("#%d: Release %zx\n", thr->tid, addr);
+ if (thr->ignore_sync)
+ return;
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
+ thr->fast_state.IncrementEpoch();
+ // Can't increment epoch w/o writing to the trace as well.
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
+ ReleaseImpl(thr, pc, &s->clock);
+ s->mtx.Unlock();
+}
+
+void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
+ DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
+ if (thr->ignore_sync)
+ return;
+ SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
+ thr->fast_state.IncrementEpoch();
+ // Can't increment epoch w/o writing to the trace as well.
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
+ ReleaseStoreImpl(thr, pc, &s->clock);
+ s->mtx.Unlock();
+}
+
+#if !SANITIZER_GO
+static void UpdateSleepClockCallback(ThreadContextBase *tctx_base, void *arg) {
+ ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
+ ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
+ u64 epoch = tctx->epoch1;
+ if (tctx->status == ThreadStatusRunning)
+ epoch = tctx->thr->fast_state.epoch();
+ thr->last_sleep_clock.set(&thr->proc()->clock_cache, tctx->tid, epoch);
+}
+
+void AfterSleep(ThreadState *thr, uptr pc) {
+ DPrintf("#%d: AfterSleep %zx\n", thr->tid);
+ if (thr->ignore_sync)
+ return;
+ thr->last_sleep_stack_id = CurrentStackId(thr, pc);
+ ThreadRegistryLock l(ctx->thread_registry);
+ ctx->thread_registry->RunCallbackForEachThreadLocked(
+ UpdateSleepClockCallback, thr);
+}
+#endif
+
+void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
+ if (thr->ignore_sync)
+ return;
+ thr->clock.set(thr->fast_state.epoch());
+ thr->clock.acquire(&thr->proc()->clock_cache, c);
+ StatInc(thr, StatSyncAcquire);
+}
+
+void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
+ if (thr->ignore_sync)
+ return;
+ thr->clock.set(thr->fast_state.epoch());
+ thr->fast_synch_epoch = thr->fast_state.epoch();
+ thr->clock.release(&thr->proc()->clock_cache, c);
+ StatInc(thr, StatSyncRelease);
+}
+
+void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c) {
+ if (thr->ignore_sync)
+ return;
+ thr->clock.set(thr->fast_state.epoch());
+ thr->fast_synch_epoch = thr->fast_state.epoch();
+ thr->clock.ReleaseStore(&thr->proc()->clock_cache, c);
+ StatInc(thr, StatSyncRelease);
+}
+
+void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
+ if (thr->ignore_sync)
+ return;
+ thr->clock.set(thr->fast_state.epoch());
+ thr->fast_synch_epoch = thr->fast_state.epoch();
+ thr->clock.acq_rel(&thr->proc()->clock_cache, c);
+ StatInc(thr, StatSyncAcquire);
+ StatInc(thr, StatSyncRelease);
+}
+
+void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
+ if (r == 0)
+ return;
+ ThreadRegistryLock l(ctx->thread_registry);
+ ScopedReport rep(ReportTypeDeadlock);
+ for (int i = 0; i < r->n; i++) {
+ rep.AddMutex(r->loop[i].mtx_ctx0);
+ rep.AddUniqueTid((int)r->loop[i].thr_ctx);
+ rep.AddThread((int)r->loop[i].thr_ctx);
+ }
+ uptr dummy_pc = 0x42;
+ for (int i = 0; i < r->n; i++) {
+ for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) {
+ u32 stk = r->loop[i].stk[j];
+ if (stk && stk != 0xffffffff) {
+ rep.AddStack(StackDepotGet(stk), true);
+ } else {
+ // Sometimes we fail to extract the stack trace (FIXME: investigate),
+ // but we should still produce some stack trace in the report.
+ rep.AddStack(StackTrace(&dummy_pc, 1), true);
+ }
+ }
+ }
+ OutputReport(thr, rep);
+}
+
+} // namespace __tsan
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_rtl_proc.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_rtl_proc.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_rtl_proc.cc (revision 351984)
@@ -0,0 +1,60 @@
+//===-- tsan_rtl_proc.cc ------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "tsan_rtl.h"
+#include "tsan_mman.h"
+#include "tsan_flags.h"
+
+namespace __tsan {
+
+Processor *ProcCreate() {
+ void *mem = InternalAlloc(sizeof(Processor));
+ internal_memset(mem, 0, sizeof(Processor));
+ Processor *proc = new(mem) Processor;
+ proc->thr = nullptr;
+#if !SANITIZER_GO
+ AllocatorProcStart(proc);
+#endif
+ if (common_flags()->detect_deadlocks)
+ proc->dd_pt = ctx->dd->CreatePhysicalThread();
+ return proc;
+}
+
+void ProcDestroy(Processor *proc) {
+ CHECK_EQ(proc->thr, nullptr);
+#if !SANITIZER_GO
+ AllocatorProcFinish(proc);
+#endif
+ ctx->clock_alloc.FlushCache(&proc->clock_cache);
+ ctx->metamap.OnProcIdle(proc);
+ if (common_flags()->detect_deadlocks)
+ ctx->dd->DestroyPhysicalThread(proc->dd_pt);
+ proc->~Processor();
+ InternalFree(proc);
+}
+
+void ProcWire(Processor *proc, ThreadState *thr) {
+ CHECK_EQ(thr->proc1, nullptr);
+ CHECK_EQ(proc->thr, nullptr);
+ thr->proc1 = proc;
+ proc->thr = thr;
+}
+
+void ProcUnwire(Processor *proc, ThreadState *thr) {
+ CHECK_EQ(thr->proc1, proc);
+ CHECK_EQ(proc->thr, thr);
+ thr->proc1 = nullptr;
+ proc->thr = nullptr;
+}
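+
+// Typical lifecycle, as a sketch (only the CHECKs above enforce it): a
+// worker creates a Processor once with ProcCreate(), wires it to a running
+// ThreadState with ProcWire(), unwires it with ProcUnwire() when the thread
+// stops running, and finally releases it with ProcDestroy().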
+
+} // namespace __tsan
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_rtl_proc.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_rtl_report.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_rtl_report.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_rtl_report.cc (revision 351984)
@@ -0,0 +1,756 @@
+//===-- tsan_rtl_report.cc ------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "tsan_platform.h"
+#include "tsan_rtl.h"
+#include "tsan_suppressions.h"
+#include "tsan_symbolize.h"
+#include "tsan_report.h"
+#include "tsan_sync.h"
+#include "tsan_mman.h"
+#include "tsan_flags.h"
+#include "tsan_fd.h"
+
+namespace __tsan {
+
+using namespace __sanitizer; // NOLINT
+
+static ReportStack *SymbolizeStack(StackTrace trace);
+
+void TsanCheckFailed(const char *file, int line, const char *cond,
+ u64 v1, u64 v2) {
+  // There is a high probability that interceptors will check-fail as well;
+  // besides, there is no sense in processing interceptors
+  // since we are going to die soon.
+ ScopedIgnoreInterceptors ignore;
+#if !SANITIZER_GO
+ cur_thread()->ignore_sync++;
+ cur_thread()->ignore_reads_and_writes++;
+#endif
+ Printf("FATAL: ThreadSanitizer CHECK failed: "
+ "%s:%d \"%s\" (0x%zx, 0x%zx)\n",
+ file, line, cond, (uptr)v1, (uptr)v2);
+ PrintCurrentStackSlow(StackTrace::GetCurrentPc());
+ Die();
+}
+
+// Can be overridden by an application/test to intercept reports.
+#ifdef TSAN_EXTERNAL_HOOKS
+bool OnReport(const ReportDesc *rep, bool suppressed);
+#else
+SANITIZER_WEAK_CXX_DEFAULT_IMPL
+bool OnReport(const ReportDesc *rep, bool suppressed) {
+ (void)rep;
+ return suppressed;
+}
+#endif
+
+SANITIZER_WEAK_DEFAULT_IMPL
+void __tsan_on_report(const ReportDesc *rep) {
+ (void)rep;
+}
+
+static void StackStripMain(SymbolizedStack *frames) {
+ SymbolizedStack *last_frame = nullptr;
+ SymbolizedStack *last_frame2 = nullptr;
+ for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
+ last_frame2 = last_frame;
+ last_frame = cur;
+ }
+
+ if (last_frame2 == 0)
+ return;
+#if !SANITIZER_GO
+ const char *last = last_frame->info.function;
+ const char *last2 = last_frame2->info.function;
+ // Strip frame above 'main'
+ if (last2 && 0 == internal_strcmp(last2, "main")) {
+ last_frame->ClearAll();
+ last_frame2->next = nullptr;
+ // Strip our internal thread start routine.
+ } else if (last && 0 == internal_strcmp(last, "__tsan_thread_start_func")) {
+ last_frame->ClearAll();
+ last_frame2->next = nullptr;
+ // Strip global ctors init.
+ } else if (last && 0 == internal_strcmp(last, "__do_global_ctors_aux")) {
+ last_frame->ClearAll();
+ last_frame2->next = nullptr;
+ // If both are 0, then we probably just failed to symbolize.
+ } else if (last || last2) {
+    // Ensure that we recovered the stack completely. A trimmed stack
+    // can legitimately happen if we do not instrument some code,
+    // so this is only a debug print. However, we must try hard not to
+    // miss it due to our own fault.
+    DPrintf("Bottom stack frame is missing\n");
+ }
+#else
+  // The last frame always points into the runtime (gosched0, goexit0, runtime.main).
+ last_frame->ClearAll();
+ last_frame2->next = nullptr;
+#endif
+}
+
+ReportStack *SymbolizeStackId(u32 stack_id) {
+ if (stack_id == 0)
+ return 0;
+ StackTrace stack = StackDepotGet(stack_id);
+ if (stack.trace == nullptr)
+ return nullptr;
+ return SymbolizeStack(stack);
+}
+
+static ReportStack *SymbolizeStack(StackTrace trace) {
+ if (trace.size == 0)
+ return 0;
+ SymbolizedStack *top = nullptr;
+ for (uptr si = 0; si < trace.size; si++) {
+ const uptr pc = trace.trace[si];
+ uptr pc1 = pc;
+ // We obtain the return address, but we're interested in the previous
+ // instruction.
+ if ((pc & kExternalPCBit) == 0)
+ pc1 = StackTrace::GetPreviousInstructionPc(pc);
+ SymbolizedStack *ent = SymbolizeCode(pc1);
+ CHECK_NE(ent, 0);
+ SymbolizedStack *last = ent;
+ while (last->next) {
+ last->info.address = pc; // restore original pc for report
+ last = last->next;
+ }
+ last->info.address = pc; // restore original pc for report
+ last->next = top;
+ top = ent;
+ }
+ StackStripMain(top);
+
+ ReportStack *stack = ReportStack::New();
+ stack->frames = top;
+ return stack;
+}
+
+ScopedReportBase::ScopedReportBase(ReportType typ, uptr tag) {
+ ctx->thread_registry->CheckLocked();
+ void *mem = internal_alloc(MBlockReport, sizeof(ReportDesc));
+ rep_ = new(mem) ReportDesc;
+ rep_->typ = typ;
+ rep_->tag = tag;
+ ctx->report_mtx.Lock();
+}
+
+ScopedReportBase::~ScopedReportBase() {
+ ctx->report_mtx.Unlock();
+ DestroyAndFree(rep_);
+}
+
+void ScopedReportBase::AddStack(StackTrace stack, bool suppressable) {
+ ReportStack **rs = rep_->stacks.PushBack();
+ *rs = SymbolizeStack(stack);
+ (*rs)->suppressable = suppressable;
+}
+
+void ScopedReportBase::AddMemoryAccess(uptr addr, uptr external_tag, Shadow s,
+ StackTrace stack, const MutexSet *mset) {
+ void *mem = internal_alloc(MBlockReportMop, sizeof(ReportMop));
+ ReportMop *mop = new(mem) ReportMop;
+ rep_->mops.PushBack(mop);
+ mop->tid = s.tid();
+ mop->addr = addr + s.addr0();
+ mop->size = s.size();
+ mop->write = s.IsWrite();
+ mop->atomic = s.IsAtomic();
+ mop->stack = SymbolizeStack(stack);
+ mop->external_tag = external_tag;
+ if (mop->stack)
+ mop->stack->suppressable = true;
+ for (uptr i = 0; i < mset->Size(); i++) {
+ MutexSet::Desc d = mset->Get(i);
+ u64 mid = this->AddMutex(d.id);
+ ReportMopMutex mtx = {mid, d.write};
+ mop->mset.PushBack(mtx);
+ }
+}
+
+void ScopedReportBase::AddUniqueTid(int unique_tid) {
+ rep_->unique_tids.PushBack(unique_tid);
+}
+
+void ScopedReportBase::AddThread(const ThreadContext *tctx, bool suppressable) {
+ for (uptr i = 0; i < rep_->threads.Size(); i++) {
+ if ((u32)rep_->threads[i]->id == tctx->tid)
+ return;
+ }
+ void *mem = internal_alloc(MBlockReportThread, sizeof(ReportThread));
+ ReportThread *rt = new(mem) ReportThread;
+ rep_->threads.PushBack(rt);
+ rt->id = tctx->tid;
+ rt->os_id = tctx->os_id;
+ rt->running = (tctx->status == ThreadStatusRunning);
+ rt->name = internal_strdup(tctx->name);
+ rt->parent_tid = tctx->parent_tid;
+ rt->thread_type = tctx->thread_type;
+ rt->stack = 0;
+ rt->stack = SymbolizeStackId(tctx->creation_stack_id);
+ if (rt->stack)
+ rt->stack->suppressable = suppressable;
+}
+
+#if !SANITIZER_GO
+static bool FindThreadByUidLockedCallback(ThreadContextBase *tctx, void *arg) {
+ int unique_id = *(int *)arg;
+ return tctx->unique_id == (u32)unique_id;
+}
+
+static ThreadContext *FindThreadByUidLocked(int unique_id) {
+ ctx->thread_registry->CheckLocked();
+ return static_cast<ThreadContext *>(
+ ctx->thread_registry->FindThreadContextLocked(
+ FindThreadByUidLockedCallback, &unique_id));
+}
+
+static ThreadContext *FindThreadByTidLocked(int tid) {
+ ctx->thread_registry->CheckLocked();
+ return static_cast<ThreadContext*>(
+ ctx->thread_registry->GetThreadLocked(tid));
+}
+
+static bool IsInStackOrTls(ThreadContextBase *tctx_base, void *arg) {
+ uptr addr = (uptr)arg;
+ ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
+ if (tctx->status != ThreadStatusRunning)
+ return false;
+ ThreadState *thr = tctx->thr;
+ CHECK(thr);
+ return ((addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size) ||
+ (addr >= thr->tls_addr && addr < thr->tls_addr + thr->tls_size));
+}
+
+ThreadContext *IsThreadStackOrTls(uptr addr, bool *is_stack) {
+ ctx->thread_registry->CheckLocked();
+ ThreadContext *tctx = static_cast<ThreadContext*>(
+ ctx->thread_registry->FindThreadContextLocked(IsInStackOrTls,
+ (void*)addr));
+ if (!tctx)
+ return 0;
+ ThreadState *thr = tctx->thr;
+ CHECK(thr);
+ *is_stack = (addr >= thr->stk_addr && addr < thr->stk_addr + thr->stk_size);
+ return tctx;
+}
+#endif
+
+void ScopedReportBase::AddThread(int unique_tid, bool suppressable) {
+#if !SANITIZER_GO
+ if (const ThreadContext *tctx = FindThreadByUidLocked(unique_tid))
+ AddThread(tctx, suppressable);
+#endif
+}
+
+void ScopedReportBase::AddMutex(const SyncVar *s) {
+ for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
+ if (rep_->mutexes[i]->id == s->uid)
+ return;
+ }
+ void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
+ ReportMutex *rm = new(mem) ReportMutex;
+ rep_->mutexes.PushBack(rm);
+ rm->id = s->uid;
+ rm->addr = s->addr;
+ rm->destroyed = false;
+ rm->stack = SymbolizeStackId(s->creation_stack_id);
+}
+
+u64 ScopedReportBase::AddMutex(u64 id) {
+ u64 uid = 0;
+ u64 mid = id;
+ uptr addr = SyncVar::SplitId(id, &uid);
+ SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
+ // Check that the mutex is still alive.
+ // Another mutex can be created at the same address,
+ // so check uid as well.
+ if (s && s->CheckId(uid)) {
+ mid = s->uid;
+ AddMutex(s);
+ } else {
+ AddDeadMutex(id);
+ }
+ if (s)
+ s->mtx.Unlock();
+ return mid;
+}
+
+void ScopedReportBase::AddDeadMutex(u64 id) {
+ for (uptr i = 0; i < rep_->mutexes.Size(); i++) {
+ if (rep_->mutexes[i]->id == id)
+ return;
+ }
+ void *mem = internal_alloc(MBlockReportMutex, sizeof(ReportMutex));
+ ReportMutex *rm = new(mem) ReportMutex;
+ rep_->mutexes.PushBack(rm);
+ rm->id = id;
+ rm->addr = 0;
+ rm->destroyed = true;
+ rm->stack = 0;
+}
+
+void ScopedReportBase::AddLocation(uptr addr, uptr size) {
+ if (addr == 0)
+ return;
+#if !SANITIZER_GO
+ int fd = -1;
+ int creat_tid = kInvalidTid;
+ u32 creat_stack = 0;
+ if (FdLocation(addr, &fd, &creat_tid, &creat_stack)) {
+ ReportLocation *loc = ReportLocation::New(ReportLocationFD);
+ loc->fd = fd;
+ loc->tid = creat_tid;
+ loc->stack = SymbolizeStackId(creat_stack);
+ rep_->locs.PushBack(loc);
+ ThreadContext *tctx = FindThreadByUidLocked(creat_tid);
+ if (tctx)
+ AddThread(tctx);
+ return;
+ }
+ MBlock *b = 0;
+ Allocator *a = allocator();
+ if (a->PointerIsMine((void*)addr)) {
+ void *block_begin = a->GetBlockBegin((void*)addr);
+ if (block_begin)
+ b = ctx->metamap.GetBlock((uptr)block_begin);
+ }
+ if (b != 0) {
+ ThreadContext *tctx = FindThreadByTidLocked(b->tid);
+ ReportLocation *loc = ReportLocation::New(ReportLocationHeap);
+ loc->heap_chunk_start = (uptr)allocator()->GetBlockBegin((void *)addr);
+ loc->heap_chunk_size = b->siz;
+ loc->external_tag = b->tag;
+ loc->tid = tctx ? tctx->tid : b->tid;
+ loc->stack = SymbolizeStackId(b->stk);
+ rep_->locs.PushBack(loc);
+ if (tctx)
+ AddThread(tctx);
+ return;
+ }
+ bool is_stack = false;
+ if (ThreadContext *tctx = IsThreadStackOrTls(addr, &is_stack)) {
+ ReportLocation *loc =
+ ReportLocation::New(is_stack ? ReportLocationStack : ReportLocationTLS);
+ loc->tid = tctx->tid;
+ rep_->locs.PushBack(loc);
+ AddThread(tctx);
+ }
+#endif
+ if (ReportLocation *loc = SymbolizeData(addr)) {
+ loc->suppressable = true;
+ rep_->locs.PushBack(loc);
+ return;
+ }
+}
+
+#if !SANITIZER_GO
+void ScopedReportBase::AddSleep(u32 stack_id) {
+ rep_->sleep = SymbolizeStackId(stack_id);
+}
+#endif
+
+void ScopedReportBase::SetCount(int count) { rep_->count = count; }
+
+const ReportDesc *ScopedReportBase::GetReport() const { return rep_; }
+
+ScopedReport::ScopedReport(ReportType typ, uptr tag)
+ : ScopedReportBase(typ, tag) {}
+
+ScopedReport::~ScopedReport() {}
+
+void RestoreStack(int tid, const u64 epoch, VarSizeStackTrace *stk,
+ MutexSet *mset, uptr *tag) {
+  // This function restores the stack trace and mutex set for the
+  // thread/epoch. It does so by taking the stack trace and mutex set at the
+  // beginning of the trace part, and then replaying the trace up to the
+  // given epoch.
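+  // Layout reminder: the trace is a circular buffer of TraceParts() parts,
+  // each holding kTracePartSize events; every part's TraceHeader snapshots
+  // the stack (stack0) and mutex set (mset0) at the part's first event.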
+ Trace* trace = ThreadTrace(tid);
+ ReadLock l(&trace->mtx);
+ const int partidx = (epoch / kTracePartSize) % TraceParts();
+ TraceHeader* hdr = &trace->headers[partidx];
+ if (epoch < hdr->epoch0 || epoch >= hdr->epoch0 + kTracePartSize)
+ return;
+ CHECK_EQ(RoundDown(epoch, kTracePartSize), hdr->epoch0);
+ const u64 epoch0 = RoundDown(epoch, TraceSize());
+ const u64 eend = epoch % TraceSize();
+ const u64 ebegin = RoundDown(eend, kTracePartSize);
+ DPrintf("#%d: RestoreStack epoch=%zu ebegin=%zu eend=%zu partidx=%d\n",
+ tid, (uptr)epoch, (uptr)ebegin, (uptr)eend, partidx);
+ Vector<uptr> stack;
+ stack.Resize(hdr->stack0.size + 64);
+ for (uptr i = 0; i < hdr->stack0.size; i++) {
+ stack[i] = hdr->stack0.trace[i];
+ DPrintf2(" #%02zu: pc=%zx\n", i, stack[i]);
+ }
+ if (mset)
+ *mset = hdr->mset0;
+ uptr pos = hdr->stack0.size;
+ Event *events = (Event*)GetThreadTrace(tid);
+ for (uptr i = ebegin; i <= eend; i++) {
+ Event ev = events[i];
+ EventType typ = (EventType)(ev >> kEventPCBits);
+ uptr pc = (uptr)(ev & ((1ull << kEventPCBits) - 1));
+ DPrintf2(" %zu typ=%d pc=%zx\n", i, typ, pc);
+ if (typ == EventTypeMop) {
+ stack[pos] = pc;
+ } else if (typ == EventTypeFuncEnter) {
+ if (stack.Size() < pos + 2)
+ stack.Resize(pos + 2);
+ stack[pos++] = pc;
+ } else if (typ == EventTypeFuncExit) {
+ if (pos > 0)
+ pos--;
+ }
+ if (mset) {
+ if (typ == EventTypeLock) {
+ mset->Add(pc, true, epoch0 + i);
+ } else if (typ == EventTypeUnlock) {
+ mset->Del(pc, true);
+ } else if (typ == EventTypeRLock) {
+ mset->Add(pc, false, epoch0 + i);
+ } else if (typ == EventTypeRUnlock) {
+ mset->Del(pc, false);
+ }
+ }
+ for (uptr j = 0; j <= pos; j++)
+ DPrintf2(" #%zu: %zx\n", j, stack[j]);
+ }
+ if (pos == 0 && stack[0] == 0)
+ return;
+ pos++;
+ stk->Init(&stack[0], pos);
+ ExtractTagFromStack(stk, tag);
+}
+
+static bool HandleRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2],
+ uptr addr_min, uptr addr_max) {
+ bool equal_stack = false;
+ RacyStacks hash;
+ bool equal_address = false;
+ RacyAddress ra0 = {addr_min, addr_max};
+ {
+ ReadLock lock(&ctx->racy_mtx);
+ if (flags()->suppress_equal_stacks) {
+ hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
+ hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
+ for (uptr i = 0; i < ctx->racy_stacks.Size(); i++) {
+ if (hash == ctx->racy_stacks[i]) {
+ VPrintf(2,
+ "ThreadSanitizer: suppressing report as doubled (stack)\n");
+ equal_stack = true;
+ break;
+ }
+ }
+ }
+ if (flags()->suppress_equal_addresses) {
+ for (uptr i = 0; i < ctx->racy_addresses.Size(); i++) {
+ RacyAddress ra2 = ctx->racy_addresses[i];
+ uptr maxbeg = max(ra0.addr_min, ra2.addr_min);
+ uptr minend = min(ra0.addr_max, ra2.addr_max);
+ if (maxbeg < minend) {
+ VPrintf(2, "ThreadSanitizer: suppressing report as doubled (addr)\n");
+ equal_address = true;
+ break;
+ }
+ }
+ }
+ }
+ if (!equal_stack && !equal_address)
+ return false;
+ if (!equal_stack) {
+ Lock lock(&ctx->racy_mtx);
+ ctx->racy_stacks.PushBack(hash);
+ }
+ if (!equal_address) {
+ Lock lock(&ctx->racy_mtx);
+ ctx->racy_addresses.PushBack(ra0);
+ }
+ return true;
+}
+
+static void AddRacyStacks(ThreadState *thr, VarSizeStackTrace traces[2],
+ uptr addr_min, uptr addr_max) {
+ Lock lock(&ctx->racy_mtx);
+ if (flags()->suppress_equal_stacks) {
+ RacyStacks hash;
+ hash.hash[0] = md5_hash(traces[0].trace, traces[0].size * sizeof(uptr));
+ hash.hash[1] = md5_hash(traces[1].trace, traces[1].size * sizeof(uptr));
+ ctx->racy_stacks.PushBack(hash);
+ }
+ if (flags()->suppress_equal_addresses) {
+ RacyAddress ra0 = {addr_min, addr_max};
+ ctx->racy_addresses.PushBack(ra0);
+ }
+}
+
+bool OutputReport(ThreadState *thr, const ScopedReport &srep) {
+ if (!flags()->report_bugs || thr->suppress_reports)
+ return false;
+ atomic_store_relaxed(&ctx->last_symbolize_time_ns, NanoTime());
+ const ReportDesc *rep = srep.GetReport();
+ CHECK_EQ(thr->current_report, nullptr);
+ thr->current_report = rep;
+ Suppression *supp = 0;
+ uptr pc_or_addr = 0;
+ for (uptr i = 0; pc_or_addr == 0 && i < rep->mops.Size(); i++)
+ pc_or_addr = IsSuppressed(rep->typ, rep->mops[i]->stack, &supp);
+ for (uptr i = 0; pc_or_addr == 0 && i < rep->stacks.Size(); i++)
+ pc_or_addr = IsSuppressed(rep->typ, rep->stacks[i], &supp);
+ for (uptr i = 0; pc_or_addr == 0 && i < rep->threads.Size(); i++)
+ pc_or_addr = IsSuppressed(rep->typ, rep->threads[i]->stack, &supp);
+ for (uptr i = 0; pc_or_addr == 0 && i < rep->locs.Size(); i++)
+ pc_or_addr = IsSuppressed(rep->typ, rep->locs[i], &supp);
+ if (pc_or_addr != 0) {
+ Lock lock(&ctx->fired_suppressions_mtx);
+ FiredSuppression s = {srep.GetReport()->typ, pc_or_addr, supp};
+ ctx->fired_suppressions.push_back(s);
+ }
+ {
+ bool old_is_freeing = thr->is_freeing;
+ thr->is_freeing = false;
+ bool suppressed = OnReport(rep, pc_or_addr != 0);
+ thr->is_freeing = old_is_freeing;
+ if (suppressed) {
+ thr->current_report = nullptr;
+ return false;
+ }
+ }
+ PrintReport(rep);
+ __tsan_on_report(rep);
+ ctx->nreported++;
+ if (flags()->halt_on_error)
+ Die();
+ thr->current_report = nullptr;
+ return true;
+}
+
+bool IsFiredSuppression(Context *ctx, ReportType type, StackTrace trace) {
+ ReadLock lock(&ctx->fired_suppressions_mtx);
+ for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
+ if (ctx->fired_suppressions[k].type != type)
+ continue;
+ for (uptr j = 0; j < trace.size; j++) {
+ FiredSuppression *s = &ctx->fired_suppressions[k];
+ if (trace.trace[j] == s->pc_or_addr) {
+ if (s->supp)
+ atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed);
+ return true;
+ }
+ }
+ }
+ return false;
+}
+
+static bool IsFiredSuppression(Context *ctx, ReportType type, uptr addr) {
+ ReadLock lock(&ctx->fired_suppressions_mtx);
+ for (uptr k = 0; k < ctx->fired_suppressions.size(); k++) {
+ if (ctx->fired_suppressions[k].type != type)
+ continue;
+ FiredSuppression *s = &ctx->fired_suppressions[k];
+ if (addr == s->pc_or_addr) {
+ if (s->supp)
+ atomic_fetch_add(&s->supp->hit_count, 1, memory_order_relaxed);
+ return true;
+ }
+ }
+ return false;
+}
+
+static bool RaceBetweenAtomicAndFree(ThreadState *thr) {
+ Shadow s0(thr->racy_state[0]);
+ Shadow s1(thr->racy_state[1]);
+ CHECK(!(s0.IsAtomic() && s1.IsAtomic()));
+ if (!s0.IsAtomic() && !s1.IsAtomic())
+ return true;
+ if (s0.IsAtomic() && s1.IsFreed())
+ return true;
+ if (s1.IsAtomic() && thr->is_freeing)
+ return true;
+ return false;
+}
+
+void ReportRace(ThreadState *thr) {
+ CheckNoLocks(thr);
+
+ // Symbolizer makes lots of intercepted calls. If we try to process them,
+ // at best it will cause deadlocks on internal mutexes.
+ ScopedIgnoreInterceptors ignore;
+
+ if (!flags()->report_bugs)
+ return;
+ if (!flags()->report_atomic_races && !RaceBetweenAtomicAndFree(thr))
+ return;
+
+ bool freed = false;
+ {
+ Shadow s(thr->racy_state[1]);
+ freed = s.GetFreedAndReset();
+ thr->racy_state[1] = s.raw();
+ }
+
+ uptr addr = ShadowToMem((uptr)thr->racy_shadow_addr);
+ uptr addr_min = 0;
+ uptr addr_max = 0;
+ {
+ uptr a0 = addr + Shadow(thr->racy_state[0]).addr0();
+ uptr a1 = addr + Shadow(thr->racy_state[1]).addr0();
+ uptr e0 = a0 + Shadow(thr->racy_state[0]).size();
+ uptr e1 = a1 + Shadow(thr->racy_state[1]).size();
+ addr_min = min(a0, a1);
+ addr_max = max(e0, e1);
+ if (IsExpectedReport(addr_min, addr_max - addr_min))
+ return;
+ }
+
+ ReportType typ = ReportTypeRace;
+ if (thr->is_vptr_access && freed)
+ typ = ReportTypeVptrUseAfterFree;
+ else if (thr->is_vptr_access)
+ typ = ReportTypeVptrRace;
+ else if (freed)
+ typ = ReportTypeUseAfterFree;
+
+ if (IsFiredSuppression(ctx, typ, addr))
+ return;
+
+ const uptr kMop = 2;
+ VarSizeStackTrace traces[kMop];
+ uptr tags[kMop] = {kExternalTagNone};
+ uptr toppc = TraceTopPC(thr);
+ if (toppc >> kEventPCBits) {
+ // This is a work-around for a known issue.
+ // The scenario where this happens is rather elaborate and requires
+ // an instrumented __sanitizer_report_error_summary callback and
+ // a __tsan_symbolize_external callback and a race during a range memory
+ // access larger than 8 bytes. MemoryAccessRange adds the current PC to
+ // the trace and starts processing memory accesses. A first memory access
+ // triggers a race, we report it and call the instrumented
+ // __sanitizer_report_error_summary, which adds more stuff to the trace
+    // since it is instrumented. Then a second memory access in
+    // MemoryAccessRange also triggers a race and we get here and call
+    // TraceTopPC to get the current PC, however now it contains some
+    // unrelated events from the callback. Most likely, TraceTopPC will now
+    // return an EventTypeFuncExit event. Later we subtract 1 from it (in
+    // GetPreviousInstructionPc)
+ // and the resulting PC has kExternalPCBit set, so we pass it to
+ // __tsan_symbolize_external_ex. __tsan_symbolize_external_ex is within its
+ // rights to crash since the PC is completely bogus.
+ // test/tsan/double_race.cc contains a test case for this.
+ toppc = 0;
+ }
+ ObtainCurrentStack(thr, toppc, &traces[0], &tags[0]);
+ if (IsFiredSuppression(ctx, typ, traces[0]))
+ return;
+
+ // MutexSet is too large to live on stack.
+ Vector<u64> mset_buffer;
+ mset_buffer.Resize(sizeof(MutexSet) / sizeof(u64) + 1);
+ MutexSet *mset2 = new(&mset_buffer[0]) MutexSet();
+
+ Shadow s2(thr->racy_state[1]);
+ RestoreStack(s2.tid(), s2.epoch(), &traces[1], mset2, &tags[1]);
+ if (IsFiredSuppression(ctx, typ, traces[1]))
+ return;
+
+ if (HandleRacyStacks(thr, traces, addr_min, addr_max))
+ return;
+
+ // If any of the accesses has a tag, treat this as an "external" race.
+ uptr tag = kExternalTagNone;
+ for (uptr i = 0; i < kMop; i++) {
+ if (tags[i] != kExternalTagNone) {
+ typ = ReportTypeExternalRace;
+ tag = tags[i];
+ break;
+ }
+ }
+
+ ThreadRegistryLock l0(ctx->thread_registry);
+ ScopedReport rep(typ, tag);
+ for (uptr i = 0; i < kMop; i++) {
+ Shadow s(thr->racy_state[i]);
+ rep.AddMemoryAccess(addr, tags[i], s, traces[i],
+ i == 0 ? &thr->mset : mset2);
+ }
+
+ for (uptr i = 0; i < kMop; i++) {
+ FastState s(thr->racy_state[i]);
+ ThreadContext *tctx = static_cast<ThreadContext*>(
+ ctx->thread_registry->GetThreadLocked(s.tid()));
+ if (s.epoch() < tctx->epoch0 || s.epoch() > tctx->epoch1)
+ continue;
+ rep.AddThread(tctx);
+ }
+
+ rep.AddLocation(addr_min, addr_max - addr_min);
+
+#if !SANITIZER_GO
+ { // NOLINT
+ Shadow s(thr->racy_state[1]);
+ if (s.epoch() <= thr->last_sleep_clock.get(s.tid()))
+ rep.AddSleep(thr->last_sleep_stack_id);
+ }
+#endif
+
+ if (!OutputReport(thr, rep))
+ return;
+
+ AddRacyStacks(thr, traces, addr_min, addr_max);
+}
+
+void PrintCurrentStack(ThreadState *thr, uptr pc) {
+ VarSizeStackTrace trace;
+ ObtainCurrentStack(thr, pc, &trace);
+ PrintStack(SymbolizeStack(trace));
+}
+
+// Always inline PrintCurrentStackSlow, because LocatePcInTrace assumes that
+// __sanitizer_print_stack_trace exists in the actual unwound stack, but a
+// tail-call to PrintCurrentStackSlow breaks this assumption because
+// __sanitizer_print_stack_trace disappears after the tail-call.
+// However, this solution is not reliable enough; please see dvyukov's comment
+// http://reviews.llvm.org/D19148#406208
+// Also see PR27280 comments 2 and 3 for breaking examples and analysis.
+ALWAYS_INLINE
+void PrintCurrentStackSlow(uptr pc) {
+#if !SANITIZER_GO
+ uptr bp = GET_CURRENT_FRAME();
+ BufferedStackTrace *ptrace =
+ new(internal_alloc(MBlockStackTrace, sizeof(BufferedStackTrace)))
+ BufferedStackTrace();
+ ptrace->Unwind(pc, bp, nullptr, false);
+
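+ // BufferedStackTrace records frames innermost-first, while TSan keeps
+ // stacks innermost-last, so reverse the buffer in place before symbolizing.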
+ for (uptr i = 0; i < ptrace->size / 2; i++) {
+ uptr tmp = ptrace->trace_buffer[i];
+ ptrace->trace_buffer[i] = ptrace->trace_buffer[ptrace->size - i - 1];
+ ptrace->trace_buffer[ptrace->size - i - 1] = tmp;
+ }
+ PrintStack(SymbolizeStack(*ptrace));
+#endif
+}
+
+} // namespace __tsan
+
+using namespace __tsan;
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_print_stack_trace() {
+ PrintCurrentStackSlow(StackTrace::GetCurrentPc());
+}
+} // extern "C"
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_rtl_thread.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_rtl_thread.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_rtl_thread.cc (revision 351984)
@@ -0,0 +1,444 @@
+//===-- tsan_rtl_thread.cc ------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "tsan_rtl.h"
+#include "tsan_mman.h"
+#include "tsan_platform.h"
+#include "tsan_report.h"
+#include "tsan_sync.h"
+
+namespace __tsan {
+
+// ThreadContext implementation.
+
+ThreadContext::ThreadContext(int tid)
+ : ThreadContextBase(tid)
+ , thr()
+ , sync()
+ , epoch0()
+ , epoch1() {
+}
+
+#if !SANITIZER_GO
+ThreadContext::~ThreadContext() {
+}
+#endif
+
+void ThreadContext::OnDead() {
+ CHECK_EQ(sync.size(), 0);
+}
+
+void ThreadContext::OnJoined(void *arg) {
+ ThreadState *caller_thr = static_cast<ThreadState *>(arg);
+ AcquireImpl(caller_thr, 0, &sync);
+ sync.Reset(&caller_thr->proc()->clock_cache);
+}
+
+struct OnCreatedArgs {
+ ThreadState *thr;
+ uptr pc;
+};
+
+void ThreadContext::OnCreated(void *arg) {
+ thr = 0;
+ if (tid == 0)
+ return;
+ OnCreatedArgs *args = static_cast<OnCreatedArgs *>(arg);
+ if (!args->thr) // GCD workers don't have a parent thread.
+ return;
+ args->thr->fast_state.IncrementEpoch();
+ // Can't increment epoch w/o writing to the trace as well.
+ TraceAddEvent(args->thr, args->thr->fast_state, EventTypeMop, 0);
+ ReleaseImpl(args->thr, 0, &sync);
+ creation_stack_id = CurrentStackId(args->thr, args->pc);
+ if (reuse_count == 0)
+ StatInc(args->thr, StatThreadMaxTid);
+}
+
+void ThreadContext::OnReset() {
+ CHECK_EQ(sync.size(), 0);
+ uptr trace_p = GetThreadTrace(tid);
+ ReleaseMemoryPagesToOS(trace_p, trace_p + TraceSize() * sizeof(Event));
+ //!!! ReleaseMemoryToOS(GetThreadTraceHeader(tid), sizeof(Trace));
+}
+
+void ThreadContext::OnDetached(void *arg) {
+ ThreadState *thr1 = static_cast<ThreadState*>(arg);
+ sync.Reset(&thr1->proc()->clock_cache);
+}
+
+struct OnStartedArgs {
+ ThreadState *thr;
+ uptr stk_addr;
+ uptr stk_size;
+ uptr tls_addr;
+ uptr tls_size;
+};
+
+void ThreadContext::OnStarted(void *arg) {
+ OnStartedArgs *args = static_cast<OnStartedArgs*>(arg);
+ thr = args->thr;
+ // RoundUp so that one trace part does not contain events
+ // from different threads.
+ epoch0 = RoundUp(epoch1 + 1, kTracePartSize);
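+ // Illustrative arithmetic (toy value; the real kTracePartSize is defined
+ // in tsan_trace.h): if the previous incarnation of this context ended at
+ // epoch1 = 100 and kTracePartSize were 64, the new thread would restart at
+ // RoundUp(101, 64) = 128, so its events never share a trace part with the
+ // old thread's events.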
+ epoch1 = (u64)-1;
+ new(thr) ThreadState(ctx, tid, unique_id, epoch0, reuse_count,
+ args->stk_addr, args->stk_size, args->tls_addr, args->tls_size);
+#if !SANITIZER_GO
+ thr->shadow_stack = &ThreadTrace(thr->tid)->shadow_stack[0];
+ thr->shadow_stack_pos = thr->shadow_stack;
+ thr->shadow_stack_end = thr->shadow_stack + kShadowStackSize;
+#else
+ // Setup dynamic shadow stack.
+ const int kInitStackSize = 8;
+ thr->shadow_stack = (uptr*)internal_alloc(MBlockShadowStack,
+ kInitStackSize * sizeof(uptr));
+ thr->shadow_stack_pos = thr->shadow_stack;
+ thr->shadow_stack_end = thr->shadow_stack + kInitStackSize;
+#endif
+ if (common_flags()->detect_deadlocks)
+ thr->dd_lt = ctx->dd->CreateLogicalThread(unique_id);
+ thr->fast_state.SetHistorySize(flags()->history_size);
+ // Commit switch to the new part of the trace.
+ // TraceAddEvent will reset stack0/mset0 in the new part for us.
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
+
+ thr->fast_synch_epoch = epoch0;
+ AcquireImpl(thr, 0, &sync);
+ StatInc(thr, StatSyncAcquire);
+ sync.Reset(&thr->proc()->clock_cache);
+ thr->is_inited = true;
+ DPrintf("#%d: ThreadStart epoch=%zu stk_addr=%zx stk_size=%zx "
+ "tls_addr=%zx tls_size=%zx\n",
+ tid, (uptr)epoch0, args->stk_addr, args->stk_size,
+ args->tls_addr, args->tls_size);
+}
+
+void ThreadContext::OnFinished() {
+#if SANITIZER_GO
+ internal_free(thr->shadow_stack);
+ thr->shadow_stack = nullptr;
+ thr->shadow_stack_pos = nullptr;
+ thr->shadow_stack_end = nullptr;
+#endif
+ if (!detached) {
+ thr->fast_state.IncrementEpoch();
+ // Can't increment epoch w/o writing to the trace as well.
+ TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
+ ReleaseImpl(thr, 0, &sync);
+ }
+ epoch1 = thr->fast_state.epoch();
+
+ if (common_flags()->detect_deadlocks)
+ ctx->dd->DestroyLogicalThread(thr->dd_lt);
+ thr->clock.ResetCached(&thr->proc()->clock_cache);
+#if !SANITIZER_GO
+ thr->last_sleep_clock.ResetCached(&thr->proc()->clock_cache);
+#endif
+ thr->~ThreadState();
+#if TSAN_COLLECT_STATS
+ StatAggregate(ctx->stat, thr->stat);
+#endif
+ thr = 0;
+}
+
+#if !SANITIZER_GO
+struct ThreadLeak {
+ ThreadContext *tctx;
+ int count;
+};
+
+static void MaybeReportThreadLeak(ThreadContextBase *tctx_base, void *arg) {
+ Vector<ThreadLeak> &leaks = *(Vector<ThreadLeak>*)arg;
+ ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
+ if (tctx->detached || tctx->status != ThreadStatusFinished)
+ return;
+ for (uptr i = 0; i < leaks.Size(); i++) {
+ if (leaks[i].tctx->creation_stack_id == tctx->creation_stack_id) {
+ leaks[i].count++;
+ return;
+ }
+ }
+ ThreadLeak leak = {tctx, 1};
+ leaks.PushBack(leak);
+}
+#endif
+
+#if !SANITIZER_GO
+static void ReportIgnoresEnabled(ThreadContext *tctx, IgnoreSet *set) {
+ if (tctx->tid == 0) {
+ Printf("ThreadSanitizer: main thread finished with ignores enabled\n");
+ } else {
+ Printf("ThreadSanitizer: thread T%d %s finished with ignores enabled,"
+ " created at:\n", tctx->tid, tctx->name);
+ PrintStack(SymbolizeStackId(tctx->creation_stack_id));
+ }
+ Printf(" One of the following ignores was not ended"
+ " (in order of probability)\n");
+ for (uptr i = 0; i < set->Size(); i++) {
+ Printf(" Ignore was enabled at:\n");
+ PrintStack(SymbolizeStackId(set->At(i)));
+ }
+ Die();
+}
+
+static void ThreadCheckIgnore(ThreadState *thr) {
+ if (ctx->after_multithreaded_fork)
+ return;
+ if (thr->ignore_reads_and_writes)
+ ReportIgnoresEnabled(thr->tctx, &thr->mop_ignore_set);
+ if (thr->ignore_sync)
+ ReportIgnoresEnabled(thr->tctx, &thr->sync_ignore_set);
+}
+#else
+static void ThreadCheckIgnore(ThreadState *thr) {}
+#endif
+
+void ThreadFinalize(ThreadState *thr) {
+ ThreadCheckIgnore(thr);
+#if !SANITIZER_GO
+ if (!flags()->report_thread_leaks)
+ return;
+ ThreadRegistryLock l(ctx->thread_registry);
+ Vector<ThreadLeak> leaks;
+ ctx->thread_registry->RunCallbackForEachThreadLocked(
+ MaybeReportThreadLeak, &leaks);
+ for (uptr i = 0; i < leaks.Size(); i++) {
+ ScopedReport rep(ReportTypeThreadLeak);
+ rep.AddThread(leaks[i].tctx, true);
+ rep.SetCount(leaks[i].count);
+ OutputReport(thr, rep);
+ }
+#endif
+}
+
+int ThreadCount(ThreadState *thr) {
+ uptr result;
+ ctx->thread_registry->GetNumberOfThreads(0, 0, &result);
+ return (int)result;
+}
+
+int ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
+ StatInc(thr, StatThreadCreate);
+ OnCreatedArgs args = { thr, pc };
+ u32 parent_tid = thr ? thr->tid : kInvalidTid; // No parent for GCD workers.
+ int tid =
+ ctx->thread_registry->CreateThread(uid, detached, parent_tid, &args);
+ DPrintf("#%d: ThreadCreate tid=%d uid=%zu\n", parent_tid, tid, uid);
+ StatSet(thr, StatThreadMaxAlive, ctx->thread_registry->GetMaxAliveThreads());
+ return tid;
+}
+
+void ThreadStart(ThreadState *thr, int tid, tid_t os_id,
+ ThreadType thread_type) {
+ uptr stk_addr = 0;
+ uptr stk_size = 0;
+ uptr tls_addr = 0;
+ uptr tls_size = 0;
+#if !SANITIZER_GO
+ if (thread_type != ThreadType::Fiber)
+ GetThreadStackAndTls(tid == 0, &stk_addr, &stk_size, &tls_addr, &tls_size);
+
+ if (tid) {
+ if (stk_addr && stk_size)
+ MemoryRangeImitateWrite(thr, /*pc=*/ 1, stk_addr, stk_size);
+
+ if (tls_addr && tls_size) ImitateTlsWrite(thr, tls_addr, tls_size);
+ }
+#endif
+
+ ThreadRegistry *tr = ctx->thread_registry;
+ OnStartedArgs args = { thr, stk_addr, stk_size, tls_addr, tls_size };
+ tr->StartThread(tid, os_id, thread_type, &args);
+
+ tr->Lock();
+ thr->tctx = (ThreadContext*)tr->GetThreadLocked(tid);
+ tr->Unlock();
+
+#if !SANITIZER_GO
+ if (ctx->after_multithreaded_fork) {
+ thr->ignore_interceptors++;
+ ThreadIgnoreBegin(thr, 0);
+ ThreadIgnoreSyncBegin(thr, 0);
+ }
+#endif
+}
+
+void ThreadFinish(ThreadState *thr) {
+ ThreadCheckIgnore(thr);
+ StatInc(thr, StatThreadFinish);
+ if (thr->stk_addr && thr->stk_size)
+ DontNeedShadowFor(thr->stk_addr, thr->stk_size);
+ if (thr->tls_addr && thr->tls_size)
+ DontNeedShadowFor(thr->tls_addr, thr->tls_size);
+ thr->is_dead = true;
+ ctx->thread_registry->FinishThread(thr->tid);
+}
+
+static bool FindThreadByUid(ThreadContextBase *tctx, void *arg) {
+ uptr uid = (uptr)arg;
+ if (tctx->user_id == uid && tctx->status != ThreadStatusInvalid) {
+ tctx->user_id = 0;
+ return true;
+ }
+ return false;
+}
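+// Note that the predicate clears tctx->user_id as a side effect, so a second
+// FindThread() call for the same uid will not match this context again.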
+
+int ThreadTid(ThreadState *thr, uptr pc, uptr uid) {
+ int res = ctx->thread_registry->FindThread(FindThreadByUid, (void*)uid);
+ DPrintf("#%d: ThreadTid uid=%zu tid=%d\n", thr->tid, uid, res);
+ return res;
+}
+
+void ThreadJoin(ThreadState *thr, uptr pc, int tid) {
+ CHECK_GT(tid, 0);
+ CHECK_LT(tid, kMaxTid);
+ DPrintf("#%d: ThreadJoin tid=%d\n", thr->tid, tid);
+ ctx->thread_registry->JoinThread(tid, thr);
+}
+
+void ThreadDetach(ThreadState *thr, uptr pc, int tid) {
+ CHECK_GT(tid, 0);
+ CHECK_LT(tid, kMaxTid);
+ ctx->thread_registry->DetachThread(tid, thr);
+}
+
+void ThreadNotJoined(ThreadState *thr, uptr pc, int tid, uptr uid) {
+ CHECK_GT(tid, 0);
+ CHECK_LT(tid, kMaxTid);
+ ctx->thread_registry->SetThreadUserId(tid, uid);
+}
+
+void ThreadSetName(ThreadState *thr, const char *name) {
+ ctx->thread_registry->SetThreadName(thr->tid, name);
+}
+
+void MemoryAccessRange(ThreadState *thr, uptr pc, uptr addr,
+ uptr size, bool is_write) {
+ if (size == 0)
+ return;
+
+ u64 *shadow_mem = (u64*)MemToShadow(addr);
+ DPrintf2("#%d: MemoryAccessRange: @%p %p size=%d is_write=%d\n",
+ thr->tid, (void*)pc, (void*)addr,
+ (int)size, is_write);
+
+#if SANITIZER_DEBUG
+ if (!IsAppMem(addr)) {
+ Printf("Access to non app mem %zx\n", addr);
+ DCHECK(IsAppMem(addr));
+ }
+ if (!IsAppMem(addr + size - 1)) {
+ Printf("Access to non app mem %zx\n", addr + size - 1);
+ DCHECK(IsAppMem(addr + size - 1));
+ }
+ if (!IsShadowMem((uptr)shadow_mem)) {
+ Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr);
+ DCHECK(IsShadowMem((uptr)shadow_mem));
+ }
+ if (!IsShadowMem((uptr)(shadow_mem + size * kShadowCnt / 8 - 1))) {
+ Printf("Bad shadow addr %p (%zx)\n",
+ shadow_mem + size * kShadowCnt / 8 - 1, addr + size - 1);
+ DCHECK(IsShadowMem((uptr)(shadow_mem + size * kShadowCnt / 8 - 1)));
+ }
+#endif
+
+ StatInc(thr, StatMopRange);
+
+ if (*shadow_mem == kShadowRodata) {
+ DCHECK(!is_write);
+ // Access to .rodata section, no races here.
+ // Measurements show that it can be 10-20% of all memory accesses.
+ StatInc(thr, StatMopRangeRodata);
+ return;
+ }
+
+ FastState fast_state = thr->fast_state;
+ if (fast_state.GetIgnoreBit())
+ return;
+
+ fast_state.IncrementEpoch();
+ thr->fast_state = fast_state;
+ TraceAddEvent(thr, fast_state, EventTypeMop, pc);
+
+ bool unaligned = (addr % kShadowCell) != 0;
+
+ // Handle unaligned beginning, if any.
+ for (; addr % kShadowCell && size; addr++, size--) {
+ int const kAccessSizeLog = 0;
+ Shadow cur(fast_state);
+ cur.SetWrite(is_write);
+ cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog);
+ MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
+ shadow_mem, cur);
+ }
+ if (unaligned)
+ shadow_mem += kShadowCnt;
+ // Handle middle part, if any.
+ for (; size >= kShadowCell; addr += kShadowCell, size -= kShadowCell) {
+ int const kAccessSizeLog = 3;
+ Shadow cur(fast_state);
+ cur.SetWrite(is_write);
+ cur.SetAddr0AndSizeLog(0, kAccessSizeLog);
+ MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
+ shadow_mem, cur);
+ shadow_mem += kShadowCnt;
+ }
+ // Handle ending, if any.
+ for (; size; addr++, size--) {
+ int const kAccessSizeLog = 0;
+ Shadow cur(fast_state);
+ cur.SetWrite(is_write);
+ cur.SetAddr0AndSizeLog(addr & (kShadowCell - 1), kAccessSizeLog);
+ MemoryAccessImpl(thr, addr, kAccessSizeLog, is_write, false,
+ shadow_mem, cur);
+ }
+}
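+// Worked example for the three loops above (assuming kShadowCell == 8): a
+// 13-byte access at address 0x1003 becomes five 1-byte head accesses at
+// 0x1003..0x1007 plus one aligned 8-byte access for the cell at 0x1008, with
+// no tail bytes, so the loops together cover every byte of the range once.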
+
+#if !SANITIZER_GO
+void FiberSwitchImpl(ThreadState *from, ThreadState *to) {
+ Processor *proc = from->proc();
+ ProcUnwire(proc, from);
+ ProcWire(proc, to);
+ set_cur_thread(to);
+}
+
+ThreadState *FiberCreate(ThreadState *thr, uptr pc, unsigned flags) {
+ void *mem = internal_alloc(MBlockThreadContex, sizeof(ThreadState));
+ ThreadState *fiber = static_cast<ThreadState *>(mem);
+ internal_memset(fiber, 0, sizeof(*fiber));
+ int tid = ThreadCreate(thr, pc, 0, true);
+ FiberSwitchImpl(thr, fiber);
+ ThreadStart(fiber, tid, 0, ThreadType::Fiber);
+ FiberSwitchImpl(fiber, thr);
+ return fiber;
+}
+
+void FiberDestroy(ThreadState *thr, uptr pc, ThreadState *fiber) {
+ FiberSwitchImpl(thr, fiber);
+ ThreadFinish(fiber);
+ FiberSwitchImpl(fiber, thr);
+ internal_free(fiber);
+}
+
+void FiberSwitch(ThreadState *thr, uptr pc,
+ ThreadState *fiber, unsigned flags) {
+ if (!(flags & FiberSwitchFlagNoSync))
+ Release(thr, pc, (uptr)fiber);
+ FiberSwitchImpl(thr, fiber);
+ if (!(flags & FiberSwitchFlagNoSync))
+ Acquire(fiber, pc, (uptr)fiber);
+}
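+// Unless FiberSwitchFlagNoSync is passed, the Release/Acquire pair above
+// establishes a happens-before edge from the suspending fiber to the resumed
+// one, keyed on the fiber's own address as the synchronization object.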
+#endif
+
+} // namespace __tsan
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_stack_trace.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_stack_trace.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_stack_trace.cc (revision 351984)
@@ -0,0 +1,63 @@
+//===-- tsan_stack_trace.cc -----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_stack_trace.h"
+#include "tsan_rtl.h"
+#include "tsan_mman.h"
+
+namespace __tsan {
+
+VarSizeStackTrace::VarSizeStackTrace()
+ : StackTrace(nullptr, 0), trace_buffer(nullptr) {}
+
+VarSizeStackTrace::~VarSizeStackTrace() {
+ ResizeBuffer(0);
+}
+
+void VarSizeStackTrace::ResizeBuffer(uptr new_size) {
+ if (trace_buffer) {
+ internal_free(trace_buffer);
+ }
+ trace_buffer =
+ (new_size > 0)
+ ? (uptr *)internal_alloc(MBlockStackTrace,
+ new_size * sizeof(trace_buffer[0]))
+ : nullptr;
+ trace = trace_buffer;
+ size = new_size;
+}
+
+void VarSizeStackTrace::Init(const uptr *pcs, uptr cnt, uptr extra_top_pc) {
+ ResizeBuffer(cnt + !!extra_top_pc);
+ internal_memcpy(trace_buffer, pcs, cnt * sizeof(trace_buffer[0]));
+ if (extra_top_pc)
+ trace_buffer[cnt] = extra_top_pc;
+}
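+// Usage sketch (hypothetical values): Init(pcs, 3) copies exactly three PCs,
+// while Init(pcs, 3, extra_pc) with a non-zero extra_pc allocates four slots
+// and appends extra_pc on top; !!extra_top_pc contributes 1 only when set.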
+
+void VarSizeStackTrace::ReverseOrder() {
+ for (u32 i = 0; i < (size >> 1); i++)
+ Swap(trace_buffer[i], trace_buffer[size - 1 - i]);
+}
+
+} // namespace __tsan
+
+#if !SANITIZER_GO
+void __sanitizer::BufferedStackTrace::UnwindImpl(
+ uptr pc, uptr bp, void *context, bool request_fast, u32 max_depth) {
+ uptr top = 0;
+ uptr bottom = 0;
+ if (StackTrace::WillUseFastUnwind(request_fast)) {
+ GetThreadStackTopAndBottom(false, &top, &bottom);
+ Unwind(max_depth, pc, bp, nullptr, top, bottom, true);
+ } else
+ Unwind(max_depth, pc, 0, context, 0, 0, false);
+}
+#endif // SANITIZER_GO
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_stack_trace.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_stack_trace.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_stack_trace.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_stack_trace.h (revision 351984)
@@ -0,0 +1,42 @@
+//===-- tsan_stack_trace.h --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_STACK_TRACE_H
+#define TSAN_STACK_TRACE_H
+
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "tsan_defs.h"
+
+namespace __tsan {
+
+// StackTrace which calls malloc/free to allocate the buffer for
+// addresses in stack traces.
+struct VarSizeStackTrace : public StackTrace {
+ uptr *trace_buffer; // Owned.
+
+ VarSizeStackTrace();
+ ~VarSizeStackTrace();
+ void Init(const uptr *pcs, uptr cnt, uptr extra_top_pc = 0);
+
+ // Reverses the current stack trace order: the top frame goes to the bottom,
+ // the last frame goes to the top.
+ void ReverseOrder();
+
+ private:
+ void ResizeBuffer(uptr new_size);
+
+ VarSizeStackTrace(const VarSizeStackTrace &);
+ void operator=(const VarSizeStackTrace &);
+};
+
+} // namespace __tsan
+
+#endif // TSAN_STACK_TRACE_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_stack_trace.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_stat.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_stat.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_stat.cc (revision 351984)
@@ -0,0 +1,186 @@
+//===-- tsan_stat.cc ------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "tsan_stat.h"
+#include "tsan_rtl.h"
+
+namespace __tsan {
+
+#if TSAN_COLLECT_STATS
+
+void StatAggregate(u64 *dst, u64 *src) {
+ for (int i = 0; i < StatCnt; i++)
+ dst[i] += src[i];
+}
+
+void StatOutput(u64 *stat) {
+ stat[StatShadowNonZero] = stat[StatShadowProcessed] - stat[StatShadowZero];
+
+ static const char *name[StatCnt] = {};
+ name[StatMop] = "Memory accesses ";
+ name[StatMopRead] = " Including reads ";
+ name[StatMopWrite] = " writes ";
+ name[StatMop1] = " Including size 1 ";
+ name[StatMop2] = " size 2 ";
+ name[StatMop4] = " size 4 ";
+ name[StatMop8] = " size 8 ";
+ name[StatMopSame] = " Including same ";
+ name[StatMopIgnored] = " Including ignored ";
+ name[StatMopRange] = " Including range ";
+ name[StatMopRodata] = " Including .rodata ";
+ name[StatMopRangeRodata] = " Including .rodata range ";
+ name[StatShadowProcessed] = "Shadow processed ";
+ name[StatShadowZero] = " Including empty ";
+ name[StatShadowNonZero] = " Including non empty ";
+ name[StatShadowSameSize] = " Including same size ";
+ name[StatShadowIntersect] = " intersect ";
+ name[StatShadowNotIntersect] = " not intersect ";
+ name[StatShadowSameThread] = " Including same thread ";
+ name[StatShadowAnotherThread] = " another thread ";
+ name[StatShadowReplace] = " Including evicted ";
+
+ name[StatFuncEnter] = "Function entries ";
+ name[StatFuncExit] = "Function exits ";
+ name[StatEvents] = "Events collected ";
+
+ name[StatThreadCreate] = "Total threads created ";
+ name[StatThreadFinish] = " threads finished ";
+ name[StatThreadReuse] = " threads reused ";
+ name[StatThreadMaxTid] = " max tid ";
+ name[StatThreadMaxAlive] = " max alive threads ";
+
+ name[StatMutexCreate] = "Mutexes created ";
+ name[StatMutexDestroy] = " destroyed ";
+ name[StatMutexLock] = " lock ";
+ name[StatMutexUnlock] = " unlock ";
+ name[StatMutexRecLock] = " recursive lock ";
+ name[StatMutexRecUnlock] = " recursive unlock ";
+ name[StatMutexReadLock] = " read lock ";
+ name[StatMutexReadUnlock] = " read unlock ";
+
+ name[StatSyncCreated] = "Sync objects created ";
+ name[StatSyncDestroyed] = " destroyed ";
+ name[StatSyncAcquire] = " acquired ";
+ name[StatSyncRelease] = " released ";
+
+ name[StatClockAcquire] = "Clock acquire ";
+ name[StatClockAcquireEmpty] = " empty clock ";
+ name[StatClockAcquireFastRelease] = " fast from release-store ";
+ name[StatClockAcquireFull] = " full (slow) ";
+ name[StatClockAcquiredSomething] = " acquired something ";
+ name[StatClockRelease] = "Clock release ";
+ name[StatClockReleaseResize] = " resize ";
+ name[StatClockReleaseFast] = " fast ";
+ name[StatClockReleaseSlow] = " dirty overflow (slow) ";
+ name[StatClockReleaseFull] = " full (slow) ";
+ name[StatClockReleaseAcquired] = " was acquired ";
+ name[StatClockReleaseClearTail] = " clear tail ";
+ name[StatClockStore] = "Clock release store ";
+ name[StatClockStoreResize] = " resize ";
+ name[StatClockStoreFast] = " fast ";
+ name[StatClockStoreFull] = " slow ";
+ name[StatClockStoreTail] = " clear tail ";
+ name[StatClockAcquireRelease] = "Clock acquire-release ";
+
+ name[StatAtomic] = "Atomic operations ";
+ name[StatAtomicLoad] = " Including load ";
+ name[StatAtomicStore] = " store ";
+ name[StatAtomicExchange] = " exchange ";
+ name[StatAtomicFetchAdd] = " fetch_add ";
+ name[StatAtomicFetchSub] = " fetch_sub ";
+ name[StatAtomicFetchAnd] = " fetch_and ";
+ name[StatAtomicFetchOr] = " fetch_or ";
+ name[StatAtomicFetchXor] = " fetch_xor ";
+ name[StatAtomicFetchNand] = " fetch_nand ";
+ name[StatAtomicCAS] = " compare_exchange ";
+ name[StatAtomicFence] = " fence ";
+ name[StatAtomicRelaxed] = " Including relaxed ";
+ name[StatAtomicConsume] = " consume ";
+ name[StatAtomicAcquire] = " acquire ";
+ name[StatAtomicRelease] = " release ";
+ name[StatAtomicAcq_Rel] = " acq_rel ";
+ name[StatAtomicSeq_Cst] = " seq_cst ";
+ name[StatAtomic1] = " Including size 1 ";
+ name[StatAtomic2] = " size 2 ";
+ name[StatAtomic4] = " size 4 ";
+ name[StatAtomic8] = " size 8 ";
+ name[StatAtomic16] = " size 16 ";
+
+ name[StatAnnotation] = "Dynamic annotations ";
+ name[StatAnnotateHappensBefore] = " HappensBefore ";
+ name[StatAnnotateHappensAfter] = " HappensAfter ";
+ name[StatAnnotateCondVarSignal] = " CondVarSignal ";
+ name[StatAnnotateCondVarSignalAll] = " CondVarSignalAll ";
+ name[StatAnnotateMutexIsNotPHB] = " MutexIsNotPHB ";
+ name[StatAnnotateCondVarWait] = " CondVarWait ";
+ name[StatAnnotateRWLockCreate] = " RWLockCreate ";
+ name[StatAnnotateRWLockCreateStatic] = " RWLockCreateStatic ";
+ name[StatAnnotateRWLockDestroy] = " RWLockDestroy ";
+ name[StatAnnotateRWLockAcquired] = " RWLockAcquired ";
+ name[StatAnnotateRWLockReleased] = " RWLockReleased ";
+ name[StatAnnotateTraceMemory] = " TraceMemory ";
+ name[StatAnnotateFlushState] = " FlushState ";
+ name[StatAnnotateNewMemory] = " NewMemory ";
+ name[StatAnnotateNoOp] = " NoOp ";
+ name[StatAnnotateFlushExpectedRaces] = " FlushExpectedRaces ";
+ name[StatAnnotateEnableRaceDetection] = " EnableRaceDetection ";
+ name[StatAnnotateMutexIsUsedAsCondVar] = " MutexIsUsedAsCondVar ";
+ name[StatAnnotatePCQGet] = " PCQGet ";
+ name[StatAnnotatePCQPut] = " PCQPut ";
+ name[StatAnnotatePCQDestroy] = " PCQDestroy ";
+ name[StatAnnotatePCQCreate] = " PCQCreate ";
+ name[StatAnnotateExpectRace] = " ExpectRace ";
+ name[StatAnnotateBenignRaceSized] = " BenignRaceSized ";
+ name[StatAnnotateBenignRace] = " BenignRace ";
+ name[StatAnnotateIgnoreReadsBegin] = " IgnoreReadsBegin ";
+ name[StatAnnotateIgnoreReadsEnd] = " IgnoreReadsEnd ";
+ name[StatAnnotateIgnoreWritesBegin] = " IgnoreWritesBegin ";
+ name[StatAnnotateIgnoreWritesEnd] = " IgnoreWritesEnd ";
+ name[StatAnnotateIgnoreSyncBegin] = " IgnoreSyncBegin ";
+ name[StatAnnotateIgnoreSyncEnd] = " IgnoreSyncEnd ";
+ name[StatAnnotatePublishMemoryRange] = " PublishMemoryRange ";
+ name[StatAnnotateUnpublishMemoryRange] = " UnpublishMemoryRange ";
+ name[StatAnnotateThreadName] = " ThreadName ";
+ name[Stat__tsan_mutex_create] = " __tsan_mutex_create ";
+ name[Stat__tsan_mutex_destroy] = " __tsan_mutex_destroy ";
+ name[Stat__tsan_mutex_pre_lock] = " __tsan_mutex_pre_lock ";
+ name[Stat__tsan_mutex_post_lock] = " __tsan_mutex_post_lock ";
+ name[Stat__tsan_mutex_pre_unlock] = " __tsan_mutex_pre_unlock ";
+ name[Stat__tsan_mutex_post_unlock] = " __tsan_mutex_post_unlock ";
+ name[Stat__tsan_mutex_pre_signal] = " __tsan_mutex_pre_signal ";
+ name[Stat__tsan_mutex_post_signal] = " __tsan_mutex_post_signal ";
+ name[Stat__tsan_mutex_pre_divert] = " __tsan_mutex_pre_divert ";
+ name[Stat__tsan_mutex_post_divert] = " __tsan_mutex_post_divert ";
+
+ name[StatMtxTotal] = "Contentionz ";
+ name[StatMtxTrace] = " Trace ";
+ name[StatMtxThreads] = " Threads ";
+ name[StatMtxReport] = " Report ";
+ name[StatMtxSyncVar] = " SyncVar ";
+ name[StatMtxSyncTab] = " SyncTab ";
+ name[StatMtxSlab] = " Slab ";
+ name[StatMtxAtExit] = " Atexit ";
+ name[StatMtxAnnotations] = " Annotations ";
+ name[StatMtxMBlock] = " MBlock ";
+ name[StatMtxDeadlockDetector] = " DeadlockDetector ";
+ name[StatMtxFired] = " FiredSuppressions ";
+ name[StatMtxRacy] = " RacyStacks ";
+ name[StatMtxFD] = " FD ";
+ name[StatMtxGlobalProc] = " GlobalProc ";
+
+ Printf("Statistics:\n");
+ for (int i = 0; i < StatCnt; i++)
+ Printf("%s: %16zu\n", name[i], (uptr)stat[i]);
+}
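+// Output sketch (counter values invented): each named counter prints on its
+// own line via Printf("%s: %16zu\n", ...), e.g.
+//   Memory accesses :        123456789
+//   Including reads :         98765432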
+
+#endif
+
+} // namespace __tsan
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_stat.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_stat.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_stat.h (revision 351984)
@@ -0,0 +1,190 @@
+//===-- tsan_stat.h ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef TSAN_STAT_H
+#define TSAN_STAT_H
+
+namespace __tsan {
+
+enum StatType {
+ // Memory access processing related stuff.
+ StatMop,
+ StatMopRead,
+ StatMopWrite,
+ StatMop1, // These must be consecutive.
+ StatMop2,
+ StatMop4,
+ StatMop8,
+ StatMopSame,
+ StatMopIgnored,
+ StatMopRange,
+ StatMopRodata,
+ StatMopRangeRodata,
+ StatShadowProcessed,
+ StatShadowZero,
+ StatShadowNonZero, // Derived.
+ StatShadowSameSize,
+ StatShadowIntersect,
+ StatShadowNotIntersect,
+ StatShadowSameThread,
+ StatShadowAnotherThread,
+ StatShadowReplace,
+
+ // Func processing.
+ StatFuncEnter,
+ StatFuncExit,
+
+ // Trace processing.
+ StatEvents,
+
+ // Threads.
+ StatThreadCreate,
+ StatThreadFinish,
+ StatThreadReuse,
+ StatThreadMaxTid,
+ StatThreadMaxAlive,
+
+ // Mutexes.
+ StatMutexCreate,
+ StatMutexDestroy,
+ StatMutexLock,
+ StatMutexUnlock,
+ StatMutexRecLock,
+ StatMutexRecUnlock,
+ StatMutexReadLock,
+ StatMutexReadUnlock,
+
+ // Synchronization.
+ StatSyncCreated,
+ StatSyncDestroyed,
+ StatSyncAcquire,
+ StatSyncRelease,
+
+ // Clocks - acquire.
+ StatClockAcquire,
+ StatClockAcquireEmpty,
+ StatClockAcquireFastRelease,
+ StatClockAcquireFull,
+ StatClockAcquiredSomething,
+ // Clocks - release.
+ StatClockRelease,
+ StatClockReleaseResize,
+ StatClockReleaseFast,
+ StatClockReleaseSlow,
+ StatClockReleaseFull,
+ StatClockReleaseAcquired,
+ StatClockReleaseClearTail,
+ // Clocks - release store.
+ StatClockStore,
+ StatClockStoreResize,
+ StatClockStoreFast,
+ StatClockStoreFull,
+ StatClockStoreTail,
+ // Clocks - acquire-release.
+ StatClockAcquireRelease,
+
+ // Atomics.
+ StatAtomic,
+ StatAtomicLoad,
+ StatAtomicStore,
+ StatAtomicExchange,
+ StatAtomicFetchAdd,
+ StatAtomicFetchSub,
+ StatAtomicFetchAnd,
+ StatAtomicFetchOr,
+ StatAtomicFetchXor,
+ StatAtomicFetchNand,
+ StatAtomicCAS,
+ StatAtomicFence,
+ StatAtomicRelaxed,
+ StatAtomicConsume,
+ StatAtomicAcquire,
+ StatAtomicRelease,
+ StatAtomicAcq_Rel,
+ StatAtomicSeq_Cst,
+ StatAtomic1,
+ StatAtomic2,
+ StatAtomic4,
+ StatAtomic8,
+ StatAtomic16,
+
+ // Dynamic annotations.
+ StatAnnotation,
+ StatAnnotateHappensBefore,
+ StatAnnotateHappensAfter,
+ StatAnnotateCondVarSignal,
+ StatAnnotateCondVarSignalAll,
+ StatAnnotateMutexIsNotPHB,
+ StatAnnotateCondVarWait,
+ StatAnnotateRWLockCreate,
+ StatAnnotateRWLockCreateStatic,
+ StatAnnotateRWLockDestroy,
+ StatAnnotateRWLockAcquired,
+ StatAnnotateRWLockReleased,
+ StatAnnotateTraceMemory,
+ StatAnnotateFlushState,
+ StatAnnotateNewMemory,
+ StatAnnotateNoOp,
+ StatAnnotateFlushExpectedRaces,
+ StatAnnotateEnableRaceDetection,
+ StatAnnotateMutexIsUsedAsCondVar,
+ StatAnnotatePCQGet,
+ StatAnnotatePCQPut,
+ StatAnnotatePCQDestroy,
+ StatAnnotatePCQCreate,
+ StatAnnotateExpectRace,
+ StatAnnotateBenignRaceSized,
+ StatAnnotateBenignRace,
+ StatAnnotateIgnoreReadsBegin,
+ StatAnnotateIgnoreReadsEnd,
+ StatAnnotateIgnoreWritesBegin,
+ StatAnnotateIgnoreWritesEnd,
+ StatAnnotateIgnoreSyncBegin,
+ StatAnnotateIgnoreSyncEnd,
+ StatAnnotatePublishMemoryRange,
+ StatAnnotateUnpublishMemoryRange,
+ StatAnnotateThreadName,
+ Stat__tsan_mutex_create,
+ Stat__tsan_mutex_destroy,
+ Stat__tsan_mutex_pre_lock,
+ Stat__tsan_mutex_post_lock,
+ Stat__tsan_mutex_pre_unlock,
+ Stat__tsan_mutex_post_unlock,
+ Stat__tsan_mutex_pre_signal,
+ Stat__tsan_mutex_post_signal,
+ Stat__tsan_mutex_pre_divert,
+ Stat__tsan_mutex_post_divert,
+
+ // Internal mutex contentionz.
+ StatMtxTotal,
+ StatMtxTrace,
+ StatMtxThreads,
+ StatMtxReport,
+ StatMtxSyncVar,
+ StatMtxSyncTab,
+ StatMtxSlab,
+ StatMtxAnnotations,
+ StatMtxAtExit,
+ StatMtxMBlock,
+ StatMtxDeadlockDetector,
+ StatMtxFired,
+ StatMtxRacy,
+ StatMtxFD,
+ StatMtxGlobalProc,
+
+ // This must be the last.
+ StatCnt
+};
+
+} // namespace __tsan
+
+#endif // TSAN_STAT_H
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_suppressions.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_suppressions.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_suppressions.cc (revision 351984)
@@ -0,0 +1,161 @@
+//===-- tsan_suppressions.cc ----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_suppressions.h"
+#include "tsan_suppressions.h"
+#include "tsan_rtl.h"
+#include "tsan_flags.h"
+#include "tsan_mman.h"
+#include "tsan_platform.h"
+
+#if !SANITIZER_GO
+// Suppressions for true/false positives in standard libraries.
+static const char *const std_suppressions =
+// Libstdc++ 4.4 has data races in std::string.
+// See http://crbug.com/181502 for an example.
+"race:^_M_rep$\n"
+"race:^_M_is_leaked$\n"
+// False positive when using std <thread>.
+// Happens because we miss atomic synchronization in libstdc++.
+// See http://llvm.org/bugs/show_bug.cgi?id=17066 for details.
+"race:std::_Sp_counted_ptr_inplace<std::thread::_Impl\n";
+
+// Can be overridden in the frontend.
+SANITIZER_WEAK_DEFAULT_IMPL
+const char *__tsan_default_suppressions() {
+ return 0;
+}
+#endif
+
+namespace __tsan {
+
+ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)];
+static SuppressionContext *suppression_ctx = nullptr;
+static const char *kSuppressionTypes[] = {
+ kSuppressionRace, kSuppressionRaceTop, kSuppressionMutex,
+ kSuppressionThread, kSuppressionSignal, kSuppressionLib,
+ kSuppressionDeadlock};
+
+void InitializeSuppressions() {
+ CHECK_EQ(nullptr, suppression_ctx);
+ suppression_ctx = new (suppression_placeholder) // NOLINT
+ SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
+ suppression_ctx->ParseFromFile(flags()->suppressions);
+#if !SANITIZER_GO
+ suppression_ctx->Parse(__tsan_default_suppressions());
+ suppression_ctx->Parse(std_suppressions);
+#endif
+}
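+// A suppressions file consumed by ParseFromFile() holds one "type:pattern"
+// rule per line, using the kSuppression* types declared above, e.g. (the
+// patterns here are hypothetical):
+//   race:MyLowLevelQueue::push
+//   thread:detached_worker_main
+//   deadlock:LockPairInTests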
+
+SuppressionContext *Suppressions() {
+ CHECK(suppression_ctx);
+ return suppression_ctx;
+}
+
+static const char *conv(ReportType typ) {
+ switch (typ) {
+ case ReportTypeRace:
+ case ReportTypeVptrRace:
+ case ReportTypeUseAfterFree:
+ case ReportTypeVptrUseAfterFree:
+ case ReportTypeExternalRace:
+ return kSuppressionRace;
+ case ReportTypeThreadLeak:
+ return kSuppressionThread;
+ case ReportTypeMutexDestroyLocked:
+ case ReportTypeMutexDoubleLock:
+ case ReportTypeMutexInvalidAccess:
+ case ReportTypeMutexBadUnlock:
+ case ReportTypeMutexBadReadLock:
+ case ReportTypeMutexBadReadUnlock:
+ return kSuppressionMutex;
+ case ReportTypeSignalUnsafe:
+ case ReportTypeErrnoInSignal:
+ return kSuppressionSignal;
+ case ReportTypeDeadlock:
+ return kSuppressionDeadlock;
+ // No default case so compiler warns us if we miss one
+ }
+ UNREACHABLE("missing case");
+}
+
+static uptr IsSuppressed(const char *stype, const AddressInfo &info,
+ Suppression **sp) {
+ if (suppression_ctx->Match(info.function, stype, sp) ||
+ suppression_ctx->Match(info.file, stype, sp) ||
+ suppression_ctx->Match(info.module, stype, sp)) {
+ VPrintf(2, "ThreadSanitizer: matched suppression '%s'\n", (*sp)->templ);
+ atomic_fetch_add(&(*sp)->hit_count, 1, memory_order_relaxed);
+ return info.address;
+ }
+ return 0;
+}
+
+uptr IsSuppressed(ReportType typ, const ReportStack *stack, Suppression **sp) {
+ CHECK(suppression_ctx);
+ if (!suppression_ctx->SuppressionCount() || stack == 0 ||
+ !stack->suppressable)
+ return 0;
+ const char *stype = conv(typ);
+ if (0 == internal_strcmp(stype, kSuppressionNone))
+ return 0;
+ for (const SymbolizedStack *frame = stack->frames; frame;
+ frame = frame->next) {
+ uptr pc = IsSuppressed(stype, frame->info, sp);
+ if (pc != 0)
+ return pc;
+ }
+ if (0 == internal_strcmp(stype, kSuppressionRace) && stack->frames != nullptr)
+ return IsSuppressed(kSuppressionRaceTop, stack->frames->info, sp);
+ return 0;
+}
+
+uptr IsSuppressed(ReportType typ, const ReportLocation *loc, Suppression **sp) {
+ CHECK(suppression_ctx);
+ if (!suppression_ctx->SuppressionCount() || loc == 0 ||
+ loc->type != ReportLocationGlobal || !loc->suppressable)
+ return 0;
+ const char *stype = conv(typ);
+ if (0 == internal_strcmp(stype, kSuppressionNone))
+ return 0;
+ Suppression *s;
+ const DataInfo &global = loc->global;
+ if (suppression_ctx->Match(global.name, stype, &s) ||
+ suppression_ctx->Match(global.module, stype, &s)) {
+ VPrintf(2, "ThreadSanitizer: matched suppression '%s'\n", s->templ);
+ atomic_fetch_add(&s->hit_count, 1, memory_order_relaxed);
+ *sp = s;
+ return global.start;
+ }
+ return 0;
+}
+
+void PrintMatchedSuppressions() {
+ InternalMmapVector<Suppression *> matched;
+ CHECK(suppression_ctx);
+ suppression_ctx->GetMatched(&matched);
+ if (!matched.size())
+ return;
+ int hit_count = 0;
+ for (uptr i = 0; i < matched.size(); i++)
+ hit_count += atomic_load_relaxed(&matched[i]->hit_count);
+ Printf("ThreadSanitizer: Matched %d suppressions (pid=%d):\n", hit_count,
+ (int)internal_getpid());
+ for (uptr i = 0; i < matched.size(); i++) {
+ Printf("%d %s:%s\n", atomic_load_relaxed(&matched[i]->hit_count),
+ matched[i]->type, matched[i]->templ);
+ }
+}
+} // namespace __tsan
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_suppressions.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_suppressions.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_suppressions.h (revision 351984)
@@ -0,0 +1,37 @@
+//===-- tsan_suppressions.h -------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_SUPPRESSIONS_H
+#define TSAN_SUPPRESSIONS_H
+
+#include "sanitizer_common/sanitizer_suppressions.h"
+#include "tsan_report.h"
+
+namespace __tsan {
+
+const char kSuppressionNone[] = "none";
+const char kSuppressionRace[] = "race";
+const char kSuppressionRaceTop[] = "race_top";
+const char kSuppressionMutex[] = "mutex";
+const char kSuppressionThread[] = "thread";
+const char kSuppressionSignal[] = "signal";
+const char kSuppressionLib[] = "called_from_lib";
+const char kSuppressionDeadlock[] = "deadlock";
+
+void InitializeSuppressions();
+SuppressionContext *Suppressions();
+void PrintMatchedSuppressions();
+uptr IsSuppressed(ReportType typ, const ReportStack *stack, Suppression **sp);
+uptr IsSuppressed(ReportType typ, const ReportLocation *loc, Suppression **sp);
+
+} // namespace __tsan
+
+#endif // TSAN_SUPPRESSIONS_H
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_symbolize.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_symbolize.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_symbolize.cc (revision 351984)
@@ -0,0 +1,122 @@
+//===-- tsan_symbolize.cc -------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+
+#include "tsan_symbolize.h"
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_symbolizer.h"
+#include "tsan_flags.h"
+#include "tsan_report.h"
+#include "tsan_rtl.h"
+
+namespace __tsan {
+
+void EnterSymbolizer() {
+ ThreadState *thr = cur_thread();
+ CHECK(!thr->in_symbolizer);
+ thr->in_symbolizer = true;
+ thr->ignore_interceptors++;
+}
+
+void ExitSymbolizer() {
+ ThreadState *thr = cur_thread();
+ CHECK(thr->in_symbolizer);
+ thr->in_symbolizer = false;
+ thr->ignore_interceptors--;
+}
+
+// Legacy API.
+// May be overridden by JIT/JAVA/etc,
+// whatever produces PCs marked with kExternalPCBit.
+SANITIZER_WEAK_DEFAULT_IMPL
+bool __tsan_symbolize_external(uptr pc, char *func_buf, uptr func_siz,
+ char *file_buf, uptr file_siz, int *line,
+ int *col) {
+ return false;
+}
+
+// New API: call __tsan_symbolize_external_ex only when it exists.
+// Once old clients are gone, provide a dummy implementation.
+SANITIZER_WEAK_DEFAULT_IMPL
+void __tsan_symbolize_external_ex(uptr pc,
+ void (*add_frame)(void *, const char *,
+ const char *, int, int),
+ void *ctx) {}
+
+struct SymbolizedStackBuilder {
+ SymbolizedStack *head;
+ SymbolizedStack *tail;
+ uptr addr;
+};
+
+static void AddFrame(void *ctx, const char *function_name, const char *file,
+ int line, int column) {
+ SymbolizedStackBuilder *ssb = (struct SymbolizedStackBuilder *)ctx;
+ if (ssb->tail) {
+ ssb->tail->next = SymbolizedStack::New(ssb->addr);
+ ssb->tail = ssb->tail->next;
+ } else {
+ ssb->head = ssb->tail = SymbolizedStack::New(ssb->addr);
+ }
+ AddressInfo *info = &ssb->tail->info;
+ if (function_name) {
+ info->function = internal_strdup(function_name);
+ }
+ if (file) {
+ info->file = internal_strdup(file);
+ }
+ info->line = line;
+ info->column = column;
+}
+
+SymbolizedStack *SymbolizeCode(uptr addr) {
+ // Check if PC comes from non-native land.
+ if (addr & kExternalPCBit) {
+ SymbolizedStackBuilder ssb = {nullptr, nullptr, addr};
+ __tsan_symbolize_external_ex(addr, AddFrame, &ssb);
+ if (ssb.head)
+ return ssb.head;
+ // Legacy code: remove along with the declaration above
+ // once all clients using this API are gone.
+ // Declare static to not consume too much stack space.
+ // We symbolize reports in a single thread, so this is fine.
+ static char func_buf[1024];
+ static char file_buf[1024];
+ int line, col;
+ SymbolizedStack *frame = SymbolizedStack::New(addr);
+ if (__tsan_symbolize_external(addr, func_buf, sizeof(func_buf), file_buf,
+ sizeof(file_buf), &line, &col)) {
+ frame->info.function = internal_strdup(func_buf);
+ frame->info.file = internal_strdup(file_buf);
+ frame->info.line = line;
+ frame->info.column = col;
+ }
+ return frame;
+ }
+ return Symbolizer::GetOrInit()->SymbolizePC(addr);
+}
+
+ReportLocation *SymbolizeData(uptr addr) {
+ DataInfo info;
+ if (!Symbolizer::GetOrInit()->SymbolizeData(addr, &info))
+ return 0;
+ ReportLocation *ent = ReportLocation::New(ReportLocationGlobal);
+ internal_memcpy(&ent->global, &info, sizeof(info));
+ return ent;
+}
+
+void SymbolizeFlush() {
+ Symbolizer::GetOrInit()->Flush();
+}
+
+} // namespace __tsan
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_symbolize.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_symbolize.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_symbolize.h (revision 351984)
@@ -0,0 +1,30 @@
+//===-- tsan_symbolize.h ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_SYMBOLIZE_H
+#define TSAN_SYMBOLIZE_H
+
+#include "tsan_defs.h"
+#include "tsan_report.h"
+
+namespace __tsan {
+
+void EnterSymbolizer();
+void ExitSymbolizer();
+SymbolizedStack *SymbolizeCode(uptr addr);
+ReportLocation *SymbolizeData(uptr addr);
+void SymbolizeFlush();
+
+ReportStack *NewReportStackEntry(uptr addr);
+
+} // namespace __tsan
+
+#endif // TSAN_SYMBOLIZE_H
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_sync.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_sync.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_sync.cc (revision 351984)
@@ -0,0 +1,296 @@
+//===-- tsan_sync.cc ------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "tsan_sync.h"
+#include "tsan_rtl.h"
+#include "tsan_mman.h"
+
+namespace __tsan {
+
+void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s);
+
+SyncVar::SyncVar()
+ : mtx(MutexTypeSyncVar, StatMtxSyncVar) {
+ Reset(0);
+}
+
+void SyncVar::Init(ThreadState *thr, uptr pc, uptr addr, u64 uid) {
+ this->addr = addr;
+ this->uid = uid;
+ this->next = 0;
+
+ creation_stack_id = 0;
+ if (!SANITIZER_GO) // Go does not use them
+ creation_stack_id = CurrentStackId(thr, pc);
+ if (common_flags()->detect_deadlocks)
+ DDMutexInit(thr, pc, this);
+}
+
+void SyncVar::Reset(Processor *proc) {
+ uid = 0;
+ creation_stack_id = 0;
+ owner_tid = kInvalidTid;
+ last_lock = 0;
+ recursion = 0;
+ atomic_store_relaxed(&flags, 0);
+
+ if (proc == 0) {
+ CHECK_EQ(clock.size(), 0);
+ CHECK_EQ(read_clock.size(), 0);
+ } else {
+ clock.Reset(&proc->clock_cache);
+ read_clock.Reset(&proc->clock_cache);
+ }
+}
+
+MetaMap::MetaMap()
+ : block_alloc_("heap block allocator")
+ , sync_alloc_("sync allocator") {
+ atomic_store(&uid_gen_, 0, memory_order_relaxed);
+}
+
+void MetaMap::AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz) {
+ u32 idx = block_alloc_.Alloc(&thr->proc()->block_cache);
+ MBlock *b = block_alloc_.Map(idx);
+ b->siz = sz;
+ b->tag = 0;
+ b->tid = thr->tid;
+ b->stk = CurrentStackId(thr, pc);
+ u32 *meta = MemToMeta(p);
+ DCHECK_EQ(*meta, 0);
+ *meta = idx | kFlagBlock;
+}
+
+uptr MetaMap::FreeBlock(Processor *proc, uptr p) {
+ MBlock* b = GetBlock(p);
+ if (b == 0)
+ return 0;
+ uptr sz = RoundUpTo(b->siz, kMetaShadowCell);
+ FreeRange(proc, p, sz);
+ return sz;
+}
+
+bool MetaMap::FreeRange(Processor *proc, uptr p, uptr sz) {
+ bool has_something = false;
+ u32 *meta = MemToMeta(p);
+ u32 *end = MemToMeta(p + sz);
+ if (end == meta)
+ end++;
+ for (; meta < end; meta++) {
+ u32 idx = *meta;
+ if (idx == 0) {
+ // Note: don't write to meta in this case -- the block can be huge.
+ continue;
+ }
+ *meta = 0;
+ has_something = true;
+ while (idx != 0) {
+ if (idx & kFlagBlock) {
+ block_alloc_.Free(&proc->block_cache, idx & ~kFlagMask);
+ break;
+ } else if (idx & kFlagSync) {
+ DCHECK(idx & kFlagSync);
+ SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
+ u32 next = s->next;
+ s->Reset(proc);
+ sync_alloc_.Free(&proc->sync_cache, idx & ~kFlagMask);
+ idx = next;
+ } else {
+ CHECK(0);
+ }
+ }
+ }
+ return has_something;
+}
+
+// ResetRange removes all meta objects from the range.
+// It is called for large mmap-ed regions. The function is best-effort with
+// respect to freeing meta objects, because we don't want to page in the whole
+// range, which can be huge. The function probes pages one-by-one until it
+// finds a page without meta objects; at that point it stops freeing them.
+// Because thread stacks grow top-down, we do the same starting from the end.
+void MetaMap::ResetRange(Processor *proc, uptr p, uptr sz) {
+ if (SANITIZER_GO) {
+ // UnmapOrDie/MmapFixedNoReserve does not work on Windows,
+ // so we do the optimization only for C/C++.
+ FreeRange(proc, p, sz);
+ return;
+ }
+ const uptr kMetaRatio = kMetaShadowCell / kMetaShadowSize;
+ const uptr kPageSize = GetPageSizeCached() * kMetaRatio;
+ if (sz <= 4 * kPageSize) {
+ // If the range is small, just do the normal free procedure.
+ FreeRange(proc, p, sz);
+ return;
+ }
+ // First, round both ends of the range to page size.
+ uptr diff = RoundUp(p, kPageSize) - p;
+ if (diff != 0) {
+ FreeRange(proc, p, diff);
+ p += diff;
+ sz -= diff;
+ }
+ diff = p + sz - RoundDown(p + sz, kPageSize);
+ if (diff != 0) {
+ FreeRange(proc, p + sz - diff, diff);
+ sz -= diff;
+ }
+ // Now we must have a non-empty page-aligned range.
+ CHECK_GT(sz, 0);
+ CHECK_EQ(p, RoundUp(p, kPageSize));
+ CHECK_EQ(sz, RoundUp(sz, kPageSize));
+ const uptr p0 = p;
+ const uptr sz0 = sz;
+ // Probe start of the range.
+ for (uptr checked = 0; sz > 0; checked += kPageSize) {
+ bool has_something = FreeRange(proc, p, kPageSize);
+ p += kPageSize;
+ sz -= kPageSize;
+ if (!has_something && checked > (128 << 10))
+ break;
+ }
+ // Probe end of the range.
+ for (uptr checked = 0; sz > 0; checked += kPageSize) {
+ bool has_something = FreeRange(proc, p + sz - kPageSize, kPageSize);
+ sz -= kPageSize;
+ // Stacks grow down, so sync objects are most likely at the end of the region
+ // (if it is a stack). The very end of the stack is TLS and tsan increases
+ // TLS by at least 256K, so check at least 512K.
+ if (!has_something && checked > (512 << 10))
+ break;
+ }
+ // Finally, page out the whole range (including the parts that we've just
+ // freed). Note: we can't simply madvise, because we need to leave a zeroed
+ // range (otherwise __tsan_java_move can crash if it encounters left-over
+ // meta objects in the Java heap).
+ uptr metap = (uptr)MemToMeta(p0);
+ uptr metasz = sz0 / kMetaRatio;
+ UnmapOrDie((void*)metap, metasz);
+ if (!MmapFixedNoReserve(metap, metasz))
+ Die();
+}
+
+MBlock* MetaMap::GetBlock(uptr p) {
+ u32 *meta = MemToMeta(p);
+ u32 idx = *meta;
+ for (;;) {
+ if (idx == 0)
+ return 0;
+ if (idx & kFlagBlock)
+ return block_alloc_.Map(idx & ~kFlagMask);
+ DCHECK(idx & kFlagSync);
+ SyncVar * s = sync_alloc_.Map(idx & ~kFlagMask);
+ idx = s->next;
+ }
+}
+
+SyncVar* MetaMap::GetOrCreateAndLock(ThreadState *thr, uptr pc,
+ uptr addr, bool write_lock) {
+ return GetAndLock(thr, pc, addr, write_lock, true);
+}
+
+SyncVar* MetaMap::GetIfExistsAndLock(uptr addr, bool write_lock) {
+ return GetAndLock(0, 0, addr, write_lock, false);
+}
+
+SyncVar* MetaMap::GetAndLock(ThreadState *thr, uptr pc,
+ uptr addr, bool write_lock, bool create) {
+ u32 *meta = MemToMeta(addr);
+ u32 idx0 = *meta;
+ u32 myidx = 0;
+ SyncVar *mys = 0;
+ for (;;) {
+ u32 idx = idx0;
+ for (;;) {
+ if (idx == 0)
+ break;
+ if (idx & kFlagBlock)
+ break;
+ DCHECK(idx & kFlagSync);
+ SyncVar * s = sync_alloc_.Map(idx & ~kFlagMask);
+ if (s->addr == addr) {
+ if (myidx != 0) {
+ mys->Reset(thr->proc());
+ sync_alloc_.Free(&thr->proc()->sync_cache, myidx);
+ }
+ if (write_lock)
+ s->mtx.Lock();
+ else
+ s->mtx.ReadLock();
+ return s;
+ }
+ idx = s->next;
+ }
+ if (!create)
+ return 0;
+ if (*meta != idx0) {
+ idx0 = *meta;
+ continue;
+ }
+
+ if (myidx == 0) {
+ const u64 uid = atomic_fetch_add(&uid_gen_, 1, memory_order_relaxed);
+ myidx = sync_alloc_.Alloc(&thr->proc()->sync_cache);
+ mys = sync_alloc_.Map(myidx);
+ mys->Init(thr, pc, addr, uid);
+ }
+ mys->next = idx0;
+ if (atomic_compare_exchange_strong((atomic_uint32_t*)meta, &idx0,
+ myidx | kFlagSync, memory_order_release)) {
+ if (write_lock)
+ mys->mtx.Lock();
+ else
+ mys->mtx.ReadLock();
+ return mys;
+ }
+ }
+}
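+// GetAndLock publishes a newly created SyncVar with a single compare-and-swap
+// on the meta cell. If the CAS fails, it rereads the list head and rescans;
+// the already-initialized SyncVar (myidx/mys) is reused on the next attempt,
+// or returned to the allocator if another thread won the race for this addr.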
+
+void MetaMap::MoveMemory(uptr src, uptr dst, uptr sz) {
+ // src and dst can overlap; the caller must ensure that there are no
+ // concurrent accesses to the regions (e.g. via stop-the-world).
+ CHECK_NE(src, dst);
+ CHECK_NE(sz, 0);
+ uptr diff = dst - src;
+ u32 *src_meta = MemToMeta(src);
+ u32 *dst_meta = MemToMeta(dst);
+ u32 *src_meta_end = MemToMeta(src + sz);
+ uptr inc = 1;
+ if (dst > src) {
+ src_meta = MemToMeta(src + sz) - 1;
+ dst_meta = MemToMeta(dst + sz) - 1;
+ src_meta_end = MemToMeta(src) - 1;
+ inc = -1;
+ }
+ for (; src_meta != src_meta_end; src_meta += inc, dst_meta += inc) {
+ CHECK_EQ(*dst_meta, 0);
+ u32 idx = *src_meta;
+ *src_meta = 0;
+ *dst_meta = idx;
+ // Patch the addresses in sync objects.
+ while (idx != 0) {
+ if (idx & kFlagBlock)
+ break;
+ CHECK(idx & kFlagSync);
+ SyncVar *s = sync_alloc_.Map(idx & ~kFlagMask);
+ s->addr += diff;
+ idx = s->next;
+ }
+ }
+}
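+// Like memmove, MoveMemory copies backwards (inc == -1) when dst > src, so an
+// overlapping destination never overwrites meta cells that are not yet read.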
+
+void MetaMap::OnProcIdle(Processor *proc) {
+ block_alloc_.FlushCache(&proc->block_cache);
+ sync_alloc_.FlushCache(&proc->sync_cache);
+}
+
+} // namespace __tsan
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_sync.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_sync.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_sync.h (revision 351984)
@@ -0,0 +1,145 @@
+//===-- tsan_sync.h ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_SYNC_H
+#define TSAN_SYNC_H
+
+#include "sanitizer_common/sanitizer_atomic.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
+#include "tsan_defs.h"
+#include "tsan_clock.h"
+#include "tsan_mutex.h"
+#include "tsan_dense_alloc.h"
+
+namespace __tsan {
+
+// These need to match __tsan_mutex_* flags defined in tsan_interface.h.
+// See documentation there as well.
+enum MutexFlags {
+ MutexFlagLinkerInit = 1 << 0, // __tsan_mutex_linker_init
+ MutexFlagWriteReentrant = 1 << 1, // __tsan_mutex_write_reentrant
+ MutexFlagReadReentrant = 1 << 2, // __tsan_mutex_read_reentrant
+ MutexFlagReadLock = 1 << 3, // __tsan_mutex_read_lock
+ MutexFlagTryLock = 1 << 4, // __tsan_mutex_try_lock
+ MutexFlagTryLockFailed = 1 << 5, // __tsan_mutex_try_lock_failed
+ MutexFlagRecursiveLock = 1 << 6, // __tsan_mutex_recursive_lock
+ MutexFlagRecursiveUnlock = 1 << 7, // __tsan_mutex_recursive_unlock
+ MutexFlagNotStatic = 1 << 8, // __tsan_mutex_not_static
+
+ // The following flags are runtime private.
+ // Mutex API misuse was detected, so don't report any more.
+ MutexFlagBroken = 1 << 30,
+  // We did not intercept the pre-lock event, so handle it on post-lock.
+ MutexFlagDoPreLockOnPostLock = 1 << 29,
+ // Must list all mutex creation flags.
+ MutexCreationFlagMask = MutexFlagLinkerInit |
+ MutexFlagWriteReentrant |
+ MutexFlagReadReentrant |
+ MutexFlagNotStatic,
+};
+
+struct SyncVar {
+ SyncVar();
+
+ static const int kInvalidTid = -1;
+
+ uptr addr; // overwritten by DenseSlabAlloc freelist
+ Mutex mtx;
+ u64 uid; // Globally unique id.
+ u32 creation_stack_id;
+ int owner_tid; // Set only by exclusive owners.
+ u64 last_lock;
+ int recursion;
+ atomic_uint32_t flags;
+ u32 next; // in MetaMap
+ DDMutex dd;
+ SyncClock read_clock; // Used for rw mutexes only.
+  // The clock is placed last, so that it is situated on a different cache
+  // line than the mtx. This reduces contention for hot sync objects.
+ SyncClock clock;
+
+ void Init(ThreadState *thr, uptr pc, uptr addr, u64 uid);
+ void Reset(Processor *proc);
+
+ u64 GetId() const {
+    // The low 48 bits are the addr, then 14 bits are the low part of the uid,
+    // then 2 zero bits.
+ return GetLsb((u64)addr | (uid << 48), 60);
+ }
+ bool CheckId(u64 uid) const {
+ CHECK_EQ(uid, GetLsb(uid, 14));
+ return GetLsb(this->uid, 14) == uid;
+ }
+ static uptr SplitId(u64 id, u64 *uid) {
+ *uid = id >> 48;
+ return (uptr)GetLsb(id, 48);
+ }
+
+ bool IsFlagSet(u32 f) const {
+ return atomic_load_relaxed(&flags) & f;
+ }
+
+ void SetFlags(u32 f) {
+ atomic_store_relaxed(&flags, atomic_load_relaxed(&flags) | f);
+ }
+
+ void UpdateFlags(u32 flagz) {
+ // Filter out operation flags.
+ if (!(flagz & MutexCreationFlagMask))
+ return;
+ u32 current = atomic_load_relaxed(&flags);
+ if (current & MutexCreationFlagMask)
+ return;
+ // Note: this can be called from MutexPostReadLock which holds only read
+ // lock on the SyncVar.
+ atomic_store_relaxed(&flags, current | (flagz & MutexCreationFlagMask));
+ }
+};
+
+/* MetaMap allows mapping arbitrary user pointers onto various descriptors.
+   Currently it maps pointers to heap block descriptors and sync var
+   descriptors. It uses 1/2 direct shadow, see tsan_platform.h.
+*/
+class MetaMap {
+ public:
+ MetaMap();
+
+ void AllocBlock(ThreadState *thr, uptr pc, uptr p, uptr sz);
+ uptr FreeBlock(Processor *proc, uptr p);
+ bool FreeRange(Processor *proc, uptr p, uptr sz);
+ void ResetRange(Processor *proc, uptr p, uptr sz);
+ MBlock* GetBlock(uptr p);
+
+ SyncVar* GetOrCreateAndLock(ThreadState *thr, uptr pc,
+ uptr addr, bool write_lock);
+ SyncVar* GetIfExistsAndLock(uptr addr, bool write_lock);
+
+ void MoveMemory(uptr src, uptr dst, uptr sz);
+
+ void OnProcIdle(Processor *proc);
+
+ private:
+ static const u32 kFlagMask = 3u << 30;
+ static const u32 kFlagBlock = 1u << 30;
+ static const u32 kFlagSync = 2u << 30;
+ typedef DenseSlabAlloc<MBlock, 1<<16, 1<<12> BlockAlloc;
+ typedef DenseSlabAlloc<SyncVar, 1<<16, 1<<10> SyncAlloc;
+ BlockAlloc block_alloc_;
+ SyncAlloc sync_alloc_;
+ atomic_uint64_t uid_gen_;
+
+ SyncVar* GetAndLock(ThreadState *thr, uptr pc, uptr addr, bool write_lock,
+ bool create);
+};
+
+} // namespace __tsan
+
+#endif // TSAN_SYNC_H
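
GetId and SplitId pack a sync object's address and the low bits of its uid into one 64-bit value: the low 48 bits hold the address and the bits above hold the uid (the real code additionally truncates the packed value to 60 bits with GetLsb). A self-contained sketch of that packing, with hypothetical helper names:

#include <cassert>
#include <cstdint>

// Pack: the low 48 bits carry the address, the upper bits carry the uid.
uint64_t PackId(uint64_t addr, uint64_t uid) {
  return (addr & ((1ull << 48) - 1)) | (uid << 48);
}

// Unpack: the mirror of SyncVar::SplitId.
uint64_t SplitId(uint64_t id, uint64_t *uid) {
  *uid = id >> 48;
  return id & ((1ull << 48) - 1);
}

int main() {
  uint64_t uid = 0;
  uint64_t addr = SplitId(PackId(0x7fffdeadbeefull, 5), &uid);
  assert(addr == 0x7fffdeadbeefull && uid == 5);
}
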
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_trace.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_trace.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_trace.h (revision 351984)
@@ -0,0 +1,75 @@
+//===-- tsan_trace.h --------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_TRACE_H
+#define TSAN_TRACE_H
+
+#include "tsan_defs.h"
+#include "tsan_mutex.h"
+#include "tsan_stack_trace.h"
+#include "tsan_mutexset.h"
+
+namespace __tsan {
+
+const int kTracePartSizeBits = 13;
+const int kTracePartSize = 1 << kTracePartSizeBits;
+const int kTraceParts = 2 * 1024 * 1024 / kTracePartSize;
+const int kTraceSize = kTracePartSize * kTraceParts;
+
+// Must fit into 3 bits.
+enum EventType {
+ EventTypeMop,
+ EventTypeFuncEnter,
+ EventTypeFuncExit,
+ EventTypeLock,
+ EventTypeUnlock,
+ EventTypeRLock,
+ EventTypeRUnlock
+};
+
+// Represents a thread event (from most significant bit):
+// u64 typ : 3; // EventType.
+// u64 addr : 61; // Associated pc.
+typedef u64 Event;
+
+const uptr kEventPCBits = 61;
+
+struct TraceHeader {
+#if !SANITIZER_GO
+ BufferedStackTrace stack0; // Start stack for the trace.
+#else
+ VarSizeStackTrace stack0;
+#endif
+ u64 epoch0; // Start epoch for the trace.
+ MutexSet mset0;
+
+ TraceHeader() : stack0(), epoch0() {}
+};
+
+struct Trace {
+ Mutex mtx;
+#if !SANITIZER_GO
+  // Must be last to catch overflow as a paging fault.
+ // Go shadow stack is dynamically allocated.
+ uptr shadow_stack[kShadowStackSize];
+#endif
+ // Must be the last field, because we unmap the unused part in
+ // CreateThreadContext.
+ TraceHeader headers[kTraceParts];
+
+ Trace()
+ : mtx(MutexTypeTrace, StatMtxTrace) {
+ }
+};
+
+} // namespace __tsan
+
+#endif // TSAN_TRACE_H
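
Each Event is one u64: the top 3 bits carry the EventType and the low 61 bits carry the pc. With kTracePartSizeBits = 13, a trace part holds 8192 events, kTraceParts works out to 256, and the full per-thread trace holds 2M events. A small sketch of the encoding, assuming only the constants above (helper names are illustrative):

#include <cstdint>

const int kEventPCBits = 61;

// Type in the top 3 bits, pc in the low 61 bits.
uint64_t MakeEvent(uint64_t typ, uint64_t pc) {
  return (typ << kEventPCBits) | (pc & ((1ull << kEventPCBits) - 1));
}

uint64_t EventTypeOf(uint64_t ev) { return ev >> kEventPCBits; }
uint64_t EventPCOf(uint64_t ev) { return ev & ((1ull << kEventPCBits) - 1); }
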
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_update_shadow_word_inl.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_update_shadow_word_inl.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_update_shadow_word_inl.h (revision 351984)
@@ -0,0 +1,69 @@
+//===-- tsan_update_shadow_word_inl.h ---------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Body of the hottest inner loop.
+// If we wrap this body into a function, compilers (both gcc and clang)
+// produce slightly less efficient code.
+//===----------------------------------------------------------------------===//
+do {
+ StatInc(thr, StatShadowProcessed);
+ const unsigned kAccessSize = 1 << kAccessSizeLog;
+ u64 *sp = &shadow_mem[idx];
+ old = LoadShadow(sp);
+ if (LIKELY(old.IsZero())) {
+ StatInc(thr, StatShadowZero);
+ if (!stored) {
+ StoreIfNotYetStored(sp, &store_word);
+ stored = true;
+ }
+ break;
+ }
+  // Is the memory access equal to the previous one?
+ if (LIKELY(Shadow::Addr0AndSizeAreEqual(cur, old))) {
+ StatInc(thr, StatShadowSameSize);
+ // same thread?
+ if (LIKELY(Shadow::TidsAreEqual(old, cur))) {
+ StatInc(thr, StatShadowSameThread);
+ if (LIKELY(old.IsRWWeakerOrEqual(kAccessIsWrite, kIsAtomic))) {
+ StoreIfNotYetStored(sp, &store_word);
+ stored = true;
+ }
+ break;
+ }
+ StatInc(thr, StatShadowAnotherThread);
+ if (HappensBefore(old, thr)) {
+ if (old.IsRWWeakerOrEqual(kAccessIsWrite, kIsAtomic)) {
+ StoreIfNotYetStored(sp, &store_word);
+ stored = true;
+ }
+ break;
+ }
+ if (LIKELY(old.IsBothReadsOrAtomic(kAccessIsWrite, kIsAtomic)))
+ break;
+ goto RACE;
+ }
+  // Do the memory accesses intersect?
+ if (Shadow::TwoRangesIntersect(old, cur, kAccessSize)) {
+ StatInc(thr, StatShadowIntersect);
+ if (Shadow::TidsAreEqual(old, cur)) {
+ StatInc(thr, StatShadowSameThread);
+ break;
+ }
+ StatInc(thr, StatShadowAnotherThread);
+ if (old.IsBothReadsOrAtomic(kAccessIsWrite, kIsAtomic))
+ break;
+ if (LIKELY(HappensBefore(old, thr)))
+ break;
+ goto RACE;
+ }
+ // The accesses do not intersect.
+ StatInc(thr, StatShadowNotIntersect);
+ break;
+} while (0);
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_ppc_regs.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_ppc_regs.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_ppc_regs.h (revision 351984)
@@ -0,0 +1,96 @@
+#define r0 0
+#define r1 1
+#define r2 2
+#define r3 3
+#define r4 4
+#define r5 5
+#define r6 6
+#define r7 7
+#define r8 8
+#define r9 9
+#define r10 10
+#define r11 11
+#define r12 12
+#define r13 13
+#define r14 14
+#define r15 15
+#define r16 16
+#define r17 17
+#define r18 18
+#define r19 19
+#define r20 20
+#define r21 21
+#define r22 22
+#define r23 23
+#define r24 24
+#define r25 25
+#define r26 26
+#define r27 27
+#define r28 28
+#define r29 29
+#define r30 30
+#define r31 31
+#define f0 0
+#define f1 1
+#define f2 2
+#define f3 3
+#define f4 4
+#define f5 5
+#define f6 6
+#define f7 7
+#define f8 8
+#define f9 9
+#define f10 10
+#define f11 11
+#define f12 12
+#define f13 13
+#define f14 14
+#define f15 15
+#define f16 16
+#define f17 17
+#define f18 18
+#define f19 19
+#define f20 20
+#define f21 21
+#define f22 22
+#define f23 23
+#define f24 24
+#define f25 25
+#define f26 26
+#define f27 27
+#define f28 28
+#define f29 29
+#define f30 30
+#define f31 31
+#define v0 0
+#define v1 1
+#define v2 2
+#define v3 3
+#define v4 4
+#define v5 5
+#define v6 6
+#define v7 7
+#define v8 8
+#define v9 9
+#define v10 10
+#define v11 11
+#define v12 12
+#define v13 13
+#define v14 14
+#define v15 15
+#define v16 16
+#define v17 17
+#define v18 18
+#define v19 19
+#define v20 20
+#define v21 21
+#define v22 22
+#define v23 23
+#define v24 24
+#define v25 25
+#define v26 26
+#define v27 27
+#define v28 28
+#define v29 29
+#define v30 30
+#define v31 31
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_ppc_regs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_rtl_mips64.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_rtl_mips64.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_rtl_mips64.S (revision 351984)
@@ -0,0 +1,214 @@
+.section .text
+.set noreorder
+
+.hidden __tsan_setjmp
+.comm _ZN14__interception11real_setjmpE,8,8
+.globl setjmp
+.type setjmp, @function
+setjmp:
+
+ // save env parameters
+ daddiu $sp,$sp,-40
+ sd $s0,32($sp)
+ sd $ra,24($sp)
+ sd $fp,16($sp)
+ sd $gp,8($sp)
+
+ // calculate and save pointer to GOT
+ lui $gp,%hi(%neg(%gp_rel(setjmp)))
+ daddu $gp,$gp,$t9
+ daddiu $gp,$gp,%lo(%neg(%gp_rel(setjmp)))
+ move $s0,$gp
+
+ // save jmp_buf
+ sd $a0,0($sp)
+
+ // obtain $sp
+ dadd $a0,$zero,$sp
+
+ // call tsan interceptor
+ jal __tsan_setjmp
+ daddiu $a1,$a0,40
+
+ // restore jmp_buf
+ ld $a0,0($sp)
+
+ // restore gp
+ move $gp,$s0
+
+ // load pointer of libc setjmp to t9
+ dla $t9,(_ZN14__interception11real_setjmpE)
+
+ // restore env parameters
+ ld $gp,8($sp)
+ ld $fp,16($sp)
+ ld $ra,24($sp)
+ ld $s0,32($sp)
+ daddiu $sp,$sp,40
+
+ // tail jump to libc setjmp
+ ld $t9,0($t9)
+ jr $t9
+ nop
+
+.size setjmp, .-setjmp
+
+.hidden __tsan_setjmp
+.globl _setjmp
+.comm _ZN14__interception12real__setjmpE,8,8
+.type _setjmp, @function
+_setjmp:
+
+ // Save env parameters
+ daddiu $sp,$sp,-40
+ sd $s0,32($sp)
+ sd $ra,24($sp)
+ sd $fp,16($sp)
+ sd $gp,8($sp)
+
+ // calculate and save pointer to GOT
+ lui $gp,%hi(%neg(%gp_rel(_setjmp)))
+ daddu $gp,$gp,$t9
+ daddiu $gp,$gp,%lo(%neg(%gp_rel(_setjmp)))
+ move $s0,$gp
+
+ // save jmp_buf
+ sd $a0,0($sp)
+
+ // obtain $sp
+ dadd $a0,$zero,$sp
+
+ // call tsan interceptor
+ jal __tsan_setjmp
+ daddiu $a1,$a0,40
+
+ // restore jmp_buf
+ ld $a0,0($sp)
+
+ // restore gp
+ move $gp,$s0
+
+ // load pointer of libc _setjmp to t9
+ dla $t9,(_ZN14__interception12real__setjmpE)
+
+ // restore env parameters
+ ld $gp,8($sp)
+ ld $fp,16($sp)
+ ld $ra,24($sp)
+ ld $s0,32($sp)
+ daddiu $sp,$sp,40
+
+ // tail jump to libc _setjmp
+ ld $t9,0($t9)
+ jr $t9
+ nop
+
+.size _setjmp, .-_setjmp
+
+.hidden __tsan_setjmp
+.globl sigsetjmp
+.comm _ZN14__interception14real_sigsetjmpE,8,8
+.type sigsetjmp, @function
+sigsetjmp:
+
+ // Save env parameters
+ daddiu $sp,$sp,-48
+ sd $s0,40($sp)
+ sd $ra,32($sp)
+ sd $fp,24($sp)
+ sd $gp,16($sp)
+
+ // calculate and save pointer to GOT
+ lui $gp,%hi(%neg(%gp_rel(sigsetjmp)))
+ daddu $gp,$gp,$t9
+ daddiu $gp,$gp,%lo(%neg(%gp_rel(sigsetjmp)))
+ move $s0,$gp
+
+ // save jmp_buf and savesig
+ sd $a0,0($sp)
+ sd $a1,8($sp)
+
+ // obtain $sp
+ dadd $a0,$zero,$sp
+
+ // call tsan interceptor
+ jal __tsan_setjmp
+ daddiu $a1,$a0,48
+
+ // restore jmp_buf and savesig
+ ld $a0,0($sp)
+ ld $a1,8($sp)
+
+ // restore gp
+ move $gp,$s0
+
+ // load pointer of libc sigsetjmp to t9
+ dla $t9,(_ZN14__interception14real_sigsetjmpE)
+
+ // restore env parameters
+ ld $gp,16($sp)
+ ld $fp,24($sp)
+ ld $ra,32($sp)
+ ld $s0,40($sp)
+ daddiu $sp,$sp,48
+
+ // tail jump to libc sigsetjmp
+ ld $t9,0($t9)
+ jr $t9
+ nop
+
+.size sigsetjmp, .-sigsetjmp
+
+.hidden __tsan_setjmp
+.comm _ZN14__interception16real___sigsetjmpE,8,8
+.globl __sigsetjmp
+.type __sigsetjmp, @function
+__sigsetjmp:
+
+ // Save env parameters
+ daddiu $sp,$sp,-48
+ sd $s0,40($sp)
+ sd $ra,32($sp)
+ sd $fp,24($sp)
+ sd $gp,16($sp)
+
+ // calculate and save pointer to GOT
+ lui $gp,%hi(%neg(%gp_rel(__sigsetjmp)))
+ daddu $gp,$gp,$t9
+ daddiu $gp,$gp,%lo(%neg(%gp_rel(__sigsetjmp)))
+ move $s0,$gp
+
+ // save jmp_buf and savesig
+ sd $a0,0($sp)
+ sd $a1,8($sp)
+
+ // obtain $sp
+ dadd $a0,$zero,$sp
+
+ // call tsan interceptor
+ jal __tsan_setjmp
+ daddiu $a1,$a0,48
+
+ // restore jmp_buf and savesig
+ ld $a0,0($sp)
+ ld $a1,8($sp)
+
+ // restore gp
+ move $gp,$s0
+
+ // load pointer to libc __sigsetjmp in t9
+ dla $t9,(_ZN14__interception16real___sigsetjmpE)
+
+ // restore env parameters
+ ld $gp,16($sp)
+ ld $fp,24($sp)
+ ld $ra,32($sp)
+ ld $s0,40($sp)
+ daddiu $sp,$sp,48
+
+ // tail jump to libc __sigsetjmp
+ ld $t9,0($t9)
+ jr $t9
+ nop
+
+.size __sigsetjmp, .-__sigsetjmp
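
All four stubs above follow the same recipe: spill the registers the real function will need, call __tsan_setjmp with the caller's original stack pointer so the runtime can later unwind its shadow stack to that depth on the matching longjmp, restore everything, and tail-jump into libc so the jmp_buf is filled in by the genuine implementation. The equivalent control flow in C++, as a hedged sketch only (a real interceptor must be assembly, since a plain wrapper would capture its own frame; the hook name below is hypothetical):

#include <setjmp.h>
#include <cstdio>

// Stand-in for __tsan_setjmp: receives the caller's stack pointer so the
// runtime can record the shadow-stack depth to restore on longjmp.
static void TsanSetjmpHook(unsigned long caller_sp) {
  std::printf("setjmp observed, caller sp=%#lx\n", caller_sp);
}

int main() {
  jmp_buf env;
  // What the stub does first: report the caller's frame to the runtime...
  TsanSetjmpHook((unsigned long)__builtin_frame_address(0));
  // ...then let the real setjmp capture the environment.
  if (setjmp(env) == 0) {
    std::printf("direct return\n");
    longjmp(env, 1);
  } else {
    std::printf("return via longjmp\n");
  }
  return 0;
}
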
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_rtl_mips64.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_rtl_ppc64.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_rtl_ppc64.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_rtl_ppc64.S (revision 351984)
@@ -0,0 +1,288 @@
+#include "tsan_ppc_regs.h"
+
+ .section .text
+ .hidden __tsan_setjmp
+ .globl _setjmp
+ .type _setjmp, @function
+ .align 4
+#if _CALL_ELF == 2
+_setjmp:
+#else
+ .section ".opd","aw"
+ .align 3
+_setjmp:
+ .quad .L._setjmp,.TOC.@tocbase,0
+ .previous
+#endif
+.L._setjmp:
+ mflr r0
+ stdu r1,-48(r1)
+ std r2,24(r1)
+ std r3,32(r1)
+ std r0,40(r1)
+ // r3 is the original stack pointer.
+ addi r3,r1,48
+ // r4 is the mangled stack pointer (see glibc)
+ ld r4,-28696(r13)
+ xor r4,r3,r4
+ // Materialize a TOC in case we were called from libc.
+ // For big-endian, we load the TOC from the OPD. For little-
+ // endian, we use the .TOC. symbol to find it.
+ nop
+ bcl 20,31,0f
+0:
+ mflr r2
+#if _CALL_ELF == 2
+ addis r2,r2,.TOC.-0b@ha
+ addi r2,r2,.TOC.-0b@l
+#else
+ addis r2,r2,_setjmp-0b@ha
+ addi r2,r2,_setjmp-0b@l
+ ld r2,8(r2)
+#endif
+ // Call the interceptor.
+ bl __tsan_setjmp
+ nop
+ // Restore regs needed for setjmp.
+ ld r3,32(r1)
+ ld r0,40(r1)
+ // Emulate the real setjmp function. We do this because we can't
+ // perform a sibcall: The real setjmp function trashes the TOC
+ // pointer, and with a sibcall we have no way to restore it.
+ // This way we can make sure our caller's stack pointer and
+ // link register are saved correctly in the jmpbuf.
+ ld r6,-28696(r13)
+ addi r5,r1,48 // original stack ptr of caller
+ xor r5,r6,r5
+ std r5,0(r3) // mangled stack ptr of caller
+ ld r5,24(r1)
+ std r5,8(r3) // caller's saved TOC pointer
+ xor r0,r6,r0
+ std r0,16(r3) // caller's mangled return address
+ mfcr r0
+ // Nonvolatiles.
+ std r14,24(r3)
+ stfd f14,176(r3)
+ stw r0,172(r3) // CR
+ std r15,32(r3)
+ stfd f15,184(r3)
+ std r16,40(r3)
+ stfd f16,192(r3)
+ std r17,48(r3)
+ stfd f17,200(r3)
+ std r18,56(r3)
+ stfd f18,208(r3)
+ std r19,64(r3)
+ stfd f19,216(r3)
+ std r20,72(r3)
+ stfd f20,224(r3)
+ std r21,80(r3)
+ stfd f21,232(r3)
+ std r22,88(r3)
+ stfd f22,240(r3)
+ std r23,96(r3)
+ stfd f23,248(r3)
+ std r24,104(r3)
+ stfd f24,256(r3)
+ std r25,112(r3)
+ stfd f25,264(r3)
+ std r26,120(r3)
+ stfd f26,272(r3)
+ std r27,128(r3)
+ stfd f27,280(r3)
+ std r28,136(r3)
+ stfd f28,288(r3)
+ std r29,144(r3)
+ stfd f29,296(r3)
+ std r30,152(r3)
+ stfd f30,304(r3)
+ std r31,160(r3)
+ stfd f31,312(r3)
+ addi r5,r3,320
+ mfspr r0,256
+ stw r0,168(r3) // VRSAVE
+ addi r6,r5,16
+ stvx v20,0,r5
+ addi r5,r5,32
+ stvx v21,0,r6
+ addi r6,r6,32
+ stvx v22,0,r5
+ addi r5,r5,32
+ stvx v23,0,r6
+ addi r6,r6,32
+ stvx v24,0,r5
+ addi r5,r5,32
+ stvx v25,0,r6
+ addi r6,r6,32
+ stvx v26,0,r5
+ addi r5,r5,32
+ stvx v27,0,r6
+ addi r6,r6,32
+ stvx v28,0,r5
+ addi r5,r5,32
+ stvx v29,0,r6
+ addi r6,r6,32
+ stvx v30,0,r5
+ stvx v31,0,r6
+ // Clear the "mask-saved" slot.
+ li r4,0
+ stw r4,512(r3)
+ // Restore TOC, LR, and stack and return to caller.
+ ld r2,24(r1)
+ ld r0,40(r1)
+ addi r1,r1,48
+ li r3,0 // This is the setjmp return path
+ mtlr r0
+ blr
+ .size _setjmp, .-.L._setjmp
+
+ .globl setjmp
+ .type setjmp, @function
+ .align 4
+setjmp:
+ b _setjmp
+ .size setjmp, .-setjmp
+
+ // sigsetjmp is like setjmp, except that the mask in r4 needs
+ // to be saved at offset 512 of the jump buffer.
+ .globl __sigsetjmp
+ .type __sigsetjmp, @function
+ .align 4
+#if _CALL_ELF == 2
+__sigsetjmp:
+#else
+ .section ".opd","aw"
+ .align 3
+__sigsetjmp:
+ .quad .L.__sigsetjmp,.TOC.@tocbase,0
+ .previous
+#endif
+.L.__sigsetjmp:
+ mflr r0
+ stdu r1,-64(r1)
+ std r2,24(r1)
+ std r3,32(r1)
+ std r4,40(r1)
+ std r0,48(r1)
+ // r3 is the original stack pointer.
+ addi r3,r1,64
+ // r4 is the mangled stack pointer (see glibc)
+ ld r4,-28696(r13)
+ xor r4,r3,r4
+ // Materialize a TOC in case we were called from libc.
+ // For big-endian, we load the TOC from the OPD. For little-
+ // endian, we use the .TOC. symbol to find it.
+ nop
+ bcl 20,31,1f
+1:
+ mflr r2
+#if _CALL_ELF == 2
+ addis r2,r2,.TOC.-1b@ha
+ addi r2,r2,.TOC.-1b@l
+#else
+ addis r2,r2,_setjmp-1b@ha
+ addi r2,r2,_setjmp-1b@l
+ ld r2,8(r2)
+#endif
+ // Call the interceptor.
+ bl __tsan_setjmp
+ nop
+ // Restore regs needed for __sigsetjmp.
+ ld r3,32(r1)
+ ld r4,40(r1)
+ ld r0,48(r1)
+ // Emulate the real sigsetjmp function. We do this because we can't
+ // perform a sibcall: The real sigsetjmp function trashes the TOC
+ // pointer, and with a sibcall we have no way to restore it.
+ // This way we can make sure our caller's stack pointer and
+ // link register are saved correctly in the jmpbuf.
+ ld r6,-28696(r13)
+ addi r5,r1,64 // original stack ptr of caller
+ xor r5,r6,r5
+ std r5,0(r3) // mangled stack ptr of caller
+ ld r5,24(r1)
+ std r5,8(r3) // caller's saved TOC pointer
+ xor r0,r6,r0
+ std r0,16(r3) // caller's mangled return address
+ mfcr r0
+ // Nonvolatiles.
+ std r14,24(r3)
+ stfd f14,176(r3)
+ stw r0,172(r3) // CR
+ std r15,32(r3)
+ stfd f15,184(r3)
+ std r16,40(r3)
+ stfd f16,192(r3)
+ std r17,48(r3)
+ stfd f17,200(r3)
+ std r18,56(r3)
+ stfd f18,208(r3)
+ std r19,64(r3)
+ stfd f19,216(r3)
+ std r20,72(r3)
+ stfd f20,224(r3)
+ std r21,80(r3)
+ stfd f21,232(r3)
+ std r22,88(r3)
+ stfd f22,240(r3)
+ std r23,96(r3)
+ stfd f23,248(r3)
+ std r24,104(r3)
+ stfd f24,256(r3)
+ std r25,112(r3)
+ stfd f25,264(r3)
+ std r26,120(r3)
+ stfd f26,272(r3)
+ std r27,128(r3)
+ stfd f27,280(r3)
+ std r28,136(r3)
+ stfd f28,288(r3)
+ std r29,144(r3)
+ stfd f29,296(r3)
+ std r30,152(r3)
+ stfd f30,304(r3)
+ std r31,160(r3)
+ stfd f31,312(r3)
+ addi r5,r3,320
+ mfspr r0,256
+ stw r0,168(r3) // VRSAVE
+ addi r6,r5,16
+ stvx v20,0,r5
+ addi r5,r5,32
+ stvx v21,0,r6
+ addi r6,r6,32
+ stvx v22,0,r5
+ addi r5,r5,32
+ stvx v23,0,r6
+ addi r6,r6,32
+ stvx v24,0,r5
+ addi r5,r5,32
+ stvx v25,0,r6
+ addi r6,r6,32
+ stvx v26,0,r5
+ addi r5,r5,32
+ stvx v27,0,r6
+ addi r6,r6,32
+ stvx v28,0,r5
+ addi r5,r5,32
+ stvx v29,0,r6
+ addi r6,r6,32
+ stvx v30,0,r5
+ stvx v31,0,r6
+ // Save into the "mask-saved" slot.
+ stw r4,512(r3)
+ // Restore TOC, LR, and stack and return to caller.
+ ld r2,24(r1)
+ ld r0,48(r1)
+ addi r1,r1,64
+ li r3,0 // This is the sigsetjmp return path
+ mtlr r0
+ blr
+ .size __sigsetjmp, .-.L.__sigsetjmp
+
+ .globl sigsetjmp
+ .type sigsetjmp, @function
+ .align 4
+sigsetjmp:
+ b __sigsetjmp
+ .size sigsetjmp, .-sigsetjmp
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan_rtl_ppc64.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan.syms.extra
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan.syms.extra (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/rtl/tsan.syms.extra (revision 351984)
@@ -0,0 +1,26 @@
+__tsan_init
+__tsan_flush_memory
+__tsan_read*
+__tsan_write*
+__tsan_vptr*
+__tsan_func*
+__tsan_atomic*
+__tsan_java*
+__tsan_unaligned*
+__tsan_release
+__tsan_acquire
+__tsan_mutex_create
+__tsan_mutex_destroy
+__tsan_mutex_pre_lock
+__tsan_mutex_post_lock
+__tsan_mutex_pre_unlock
+__tsan_mutex_post_unlock
+__tsan_mutex_pre_signal
+__tsan_mutex_post_signal
+__tsan_mutex_pre_divert
+__tsan_mutex_post_divert
+__ubsan_*
+Annotate*
+WTFAnnotate*
+RunningOnValgrind
+ValgrindSlowdown
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/benchmarks/func_entry_exit.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/benchmarks/func_entry_exit.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/benchmarks/func_entry_exit.cc (revision 351984)
@@ -0,0 +1,20 @@
+// Synthetic benchmark for __tsan_func_entry/exit (spends ~75% there).
+
+void foo(bool x);
+
+int main() {
+ volatile int kRepeat1 = 1 << 30;
+ const int kRepeat = kRepeat1;
+ for (int i = 0; i < kRepeat; i++)
+ foo(false);
+}
+
+__attribute__((noinline)) void bar(volatile bool x) {
+ if (x)
+ foo(x);
+}
+
+__attribute__((noinline)) void foo(bool x) {
+ if (__builtin_expect(x, false))
+ bar(x);
+}
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/benchmarks/mop.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/benchmarks/mop.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/benchmarks/mop.cc (revision 351984)
@@ -0,0 +1,80 @@
+// Synthetic benchmark for __tsan_read/write{1,2,4,8}.
+// As compared to mini_bench_local/shared.cc this benchmark passes through
+// deduplication logic (ContainsSameAccess).
+// First argument is access size (1, 2, 4, 8). Second optional arg switches
+// from writes to reads.
+
+#include <pthread.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <linux/futex.h>
+#include <sys/syscall.h>
+#include <sys/time.h>
+
+template<typename T, bool write>
+void* thread(void *arg) {
+ const int kSize = 2 << 10;
+ static volatile long data[kSize];
+ static volatile long turn;
+ const int kRepeat = 1 << 17;
+ const int id = !!arg;
+ for (int i = 0; i < kRepeat; i++) {
+ for (;;) {
+ int t = __atomic_load_n(&turn, __ATOMIC_ACQUIRE);
+ if (t == id)
+ break;
+ syscall(SYS_futex, &turn, FUTEX_WAIT, t, 0, 0, 0);
+ }
+ for (int j = 0; j < kSize; j++) {
+ if (write) {
+ ((volatile T*)&data[j])[0] = 1;
+ ((volatile T*)&data[j])[sizeof(T) == 8 ? 0 : 1] = 1;
+ } else {
+ T v0 = ((volatile T*)&data[j])[0];
+ T v1 = ((volatile T*)&data[j])[sizeof(T) == 8 ? 0 : 1];
+ (void)v0;
+ (void)v1;
+ }
+ }
+ __atomic_store_n(&turn, 1 - id, __ATOMIC_RELEASE);
+ syscall(SYS_futex, &turn, FUTEX_WAKE, 0, 0, 0, 0);
+ }
+ return 0;
+}
+
+template<typename T, bool write>
+void test() {
+ pthread_t th;
+ pthread_create(&th, 0, thread<T, write>, (void*)1);
+ thread<T, write>(0);
+ pthread_join(th, 0);
+}
+
+template<bool write>
+void testw(int size) {
+ switch (size) {
+ case 1: return test<char, write>();
+ case 2: return test<short, write>();
+ case 4: return test<int, write>();
+ case 8: return test<long long, write>();
+ }
+}
+
+int main(int argc, char** argv) {
+ int size = 8;
+ bool write = true;
+ if (argc > 1) {
+ size = atoi(argv[1]);
+ if (size != 1 && size != 2 && size != 4 && size != 8)
+ size = 8;
+ }
+ if (argc > 2)
+ write = false;
+ printf("%s%d\n", write ? "write" : "read", size);
+ if (write)
+ testw<true>(size);
+ else
+ testw<false>(size);
+ return 0;
+}
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/benchmarks/mini_bench_local.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/benchmarks/mini_bench_local.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/benchmarks/mini_bench_local.cc (revision 351984)
@@ -0,0 +1,49 @@
+// Mini-benchmark for tsan: non-shared memory writes.
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <assert.h>
+
+int len;
+int *a;
+const int kNumIter = 1000;
+
+__attribute__((noinline))
+void Run(int idx) {
+ for (int i = 0, n = len; i < n; i++)
+ a[i + idx * n] = i;
+}
+
+void *Thread(void *arg) {
+ long idx = (long)arg;
+ printf("Thread %ld started\n", idx);
+ for (int i = 0; i < kNumIter; i++)
+ Run(idx);
+ printf("Thread %ld done\n", idx);
+ return 0;
+}
+
+int main(int argc, char **argv) {
+ int n_threads = 0;
+ if (argc != 3) {
+ n_threads = 4;
+ len = 1000000;
+ } else {
+ n_threads = atoi(argv[1]);
+ assert(n_threads > 0 && n_threads <= 32);
+ len = atoi(argv[2]);
+ }
+ printf("%s: n_threads=%d len=%d iter=%d\n",
+ __FILE__, n_threads, len, kNumIter);
+ a = new int[n_threads * len];
+ pthread_t *t = new pthread_t[n_threads];
+ for (int i = 0; i < n_threads; i++) {
+ pthread_create(&t[i], 0, Thread, (void*)i);
+ }
+ for (int i = 0; i < n_threads; i++) {
+ pthread_join(t[i], 0);
+ }
+ delete [] t;
+ delete [] a;
+ return 0;
+}
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/benchmarks/mini_bench_shared.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/benchmarks/mini_bench_shared.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/benchmarks/mini_bench_shared.cc (revision 351984)
@@ -0,0 +1,51 @@
+// Mini-benchmark for tsan: shared memory reads.
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <assert.h>
+
+int len;
+int *a;
+const int kNumIter = 1000;
+
+__attribute__((noinline))
+void Run(int idx) {
+ for (int i = 0, n = len; i < n; i++)
+ if (a[i] != i) abort();
+}
+
+void *Thread(void *arg) {
+ long idx = (long)arg;
+ printf("Thread %ld started\n", idx);
+ for (int i = 0; i < kNumIter; i++)
+ Run(idx);
+ printf("Thread %ld done\n", idx);
+ return 0;
+}
+
+int main(int argc, char **argv) {
+ int n_threads = 0;
+ if (argc != 3) {
+ n_threads = 4;
+ len = 1000000;
+ } else {
+ n_threads = atoi(argv[1]);
+ assert(n_threads > 0 && n_threads <= 32);
+ len = atoi(argv[2]);
+ }
+ printf("%s: n_threads=%d len=%d iter=%d\n",
+ __FILE__, n_threads, len, kNumIter);
+ a = new int[len];
+ for (int i = 0, n = len; i < n; i++)
+ a[i] = i;
+ pthread_t *t = new pthread_t[n_threads];
+ for (int i = 0; i < n_threads; i++) {
+ pthread_create(&t[i], 0, Thread, (void*)i);
+ }
+ for (int i = 0; i < n_threads; i++) {
+ pthread_join(t[i], 0);
+ }
+ delete [] t;
+ delete [] a;
+ return 0;
+}
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/benchmarks/start_many_threads.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/benchmarks/start_many_threads.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/benchmarks/start_many_threads.cc (revision 351984)
@@ -0,0 +1,52 @@
+// Mini-benchmark for creating a lot of threads.
+//
+// Some facts:
+// a) clang -O1 takes <15ms to start N=500 threads,
+// consuming ~4MB more RAM than N=1.
+// b) clang -O1 -ftsan takes ~26s to start N=500 threads,
+// eats 5GB more RAM than N=1 (which is somewhat expected but still a lot)
+// but then it consumes ~4GB of extra memory when the threads shut down!
+// (definitely not in the barrier_wait interceptor)
+// Also, it takes 26s to run with N=500 vs just 1.1s to run with N=1.
+#include <assert.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+pthread_barrier_t all_threads_ready;
+
+void* Thread(void *unused) {
+ pthread_barrier_wait(&all_threads_ready);
+ return 0;
+}
+
+int main(int argc, char **argv) {
+ int n_threads;
+ if (argc == 1) {
+ n_threads = 100;
+ } else if (argc == 2) {
+ n_threads = atoi(argv[1]);
+ } else {
+ printf("Usage: %s n_threads\n", argv[0]);
+ return 1;
+ }
+ printf("%s: n_threads=%d\n", __FILE__, n_threads);
+
+ pthread_barrier_init(&all_threads_ready, NULL, n_threads + 1);
+
+ pthread_t *t = new pthread_t[n_threads];
+ for (int i = 0; i < n_threads; i++) {
+ int status = pthread_create(&t[i], 0, Thread, (void*)i);
+ assert(status == 0);
+ }
+ // sleep(5); // FIXME: simplify measuring the memory usage.
+ pthread_barrier_wait(&all_threads_ready);
+ for (int i = 0; i < n_threads; i++) {
+ pthread_join(t[i], 0);
+ }
+ // sleep(5); // FIXME: simplify measuring the memory usage.
+ delete [] t;
+
+ return 0;
+}
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/benchmarks/vts_many_threads_bench.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/benchmarks/vts_many_threads_bench.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/benchmarks/vts_many_threads_bench.cc (revision 351984)
@@ -0,0 +1,120 @@
+// Mini-benchmark for tsan VTS worst case performance
+// Idea:
+// 1) Spawn M + N threads (M >> N)
+//    We'll call the M threads 'garbage threads'.
+// 2) Make sure all threads have been created, so that no TIDs were reused
+// 3) Join the garbage threads
+// 4) Do many sync operations on the remaining N threads
+//
+// It turns out that, due to the O(M+N) VTS complexity, step (4) is much
+// slower when M is large.
+//
+// Some numbers:
+// a) clang++ native O1 with n_iterations=200kk takes
+// 5s regardless of M
+// clang++ tsanv2 O1 with n_iterations=20kk takes
+// 23.5s with M=200
+// 11.5s with M=1
+// i.e. tsanv2 is ~23x to ~47x slower than native, depends on M.
+// b) g++ native O1 with n_iterations=200kk takes
+// 5.5s regardless of M
+// g++ tsanv1 O1 with n_iterations=2kk takes
+// 39.5s with M=200
+// 20.5s with M=1
+// i.e. tsanv1 is ~370x to ~720x slower than native, depends on M.
+
+#include <assert.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+class __attribute__((aligned(64))) Mutex {
+ public:
+ Mutex() { pthread_mutex_init(&m_, NULL); }
+ ~Mutex() { pthread_mutex_destroy(&m_); }
+ void Lock() { pthread_mutex_lock(&m_); }
+ void Unlock() { pthread_mutex_unlock(&m_); }
+
+ private:
+ pthread_mutex_t m_;
+};
+
+const int kNumMutexes = 1024;
+Mutex mutexes[kNumMutexes];
+
+int n_threads, n_iterations;
+
+pthread_barrier_t all_threads_ready, main_threads_ready;
+
+void* GarbageThread(void *unused) {
+ pthread_barrier_wait(&all_threads_ready);
+ return 0;
+}
+
+void *Thread(void *arg) {
+ long idx = (long)arg;
+ pthread_barrier_wait(&all_threads_ready);
+
+ // Wait for the main thread to join the garbage threads.
+ pthread_barrier_wait(&main_threads_ready);
+
+ printf("Thread %ld go!\n", idx);
+ int offset = idx * kNumMutexes / n_threads;
+ for (int i = 0; i < n_iterations; i++) {
+ mutexes[(offset + i) % kNumMutexes].Lock();
+ mutexes[(offset + i) % kNumMutexes].Unlock();
+ }
+ printf("Thread %ld done\n", idx);
+ return 0;
+}
+
+int main(int argc, char **argv) {
+ int n_garbage_threads;
+ if (argc == 1) {
+ n_threads = 2;
+ n_garbage_threads = 200;
+ n_iterations = 20000000;
+ } else if (argc == 4) {
+ n_threads = atoi(argv[1]);
+ assert(n_threads > 0 && n_threads <= 32);
+ n_garbage_threads = atoi(argv[2]);
+ assert(n_garbage_threads > 0 && n_garbage_threads <= 16000);
+ n_iterations = atoi(argv[3]);
+ } else {
+ printf("Usage: %s n_threads n_garbage_threads n_iterations\n", argv[0]);
+ return 1;
+ }
+ printf("%s: n_threads=%d n_garbage_threads=%d n_iterations=%d\n",
+ __FILE__, n_threads, n_garbage_threads, n_iterations);
+
+ pthread_barrier_init(&all_threads_ready, NULL, n_garbage_threads + n_threads + 1);
+ pthread_barrier_init(&main_threads_ready, NULL, n_threads + 1);
+
+ pthread_t *t = new pthread_t[n_threads];
+ {
+ pthread_t *g_t = new pthread_t[n_garbage_threads];
+ for (int i = 0; i < n_garbage_threads; i++) {
+ int status = pthread_create(&g_t[i], 0, GarbageThread, NULL);
+ assert(status == 0);
+ }
+ for (int i = 0; i < n_threads; i++) {
+ int status = pthread_create(&t[i], 0, Thread, (void*)i);
+ assert(status == 0);
+ }
+ pthread_barrier_wait(&all_threads_ready);
+ printf("All threads started! Killing the garbage threads.\n");
+ for (int i = 0; i < n_garbage_threads; i++) {
+ pthread_join(g_t[i], 0);
+ }
+ delete [] g_t;
+ }
+ printf("Resuming the main threads.\n");
+ pthread_barrier_wait(&main_threads_ready);
+
+
+ for (int i = 0; i < n_threads; i++) {
+ pthread_join(t[i], 0);
+ }
+ delete [] t;
+ return 0;
+}
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/dd/dd_interceptors.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/dd/dd_interceptors.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/dd/dd_interceptors.cc (revision 351984)
@@ -0,0 +1,328 @@
+//===-- dd_interceptors.cc ------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "dd_rtl.h"
+#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_procmaps.h"
+#include <pthread.h>
+#include <stdlib.h>
+
+using namespace __dsan;
+
+__attribute__((tls_model("initial-exec")))
+static __thread Thread *thr;
+__attribute__((tls_model("initial-exec")))
+static __thread volatile int initing;
+static bool inited;
+static uptr g_data_start;
+static uptr g_data_end;
+
+static bool InitThread() {
+ if (initing)
+ return false;
+ if (thr != 0)
+ return true;
+ initing = true;
+ if (!inited) {
+ inited = true;
+ Initialize();
+ }
+ thr = (Thread*)InternalAlloc(sizeof(*thr));
+ internal_memset(thr, 0, sizeof(*thr));
+ ThreadInit(thr);
+ initing = false;
+ return true;
+}
+
+INTERCEPTOR(int, pthread_mutex_destroy, pthread_mutex_t *m) {
+ InitThread();
+ MutexDestroy(thr, (uptr)m);
+ return REAL(pthread_mutex_destroy)(m);
+}
+
+INTERCEPTOR(int, pthread_mutex_lock, pthread_mutex_t *m) {
+ InitThread();
+ MutexBeforeLock(thr, (uptr)m, true);
+ int res = REAL(pthread_mutex_lock)(m);
+ MutexAfterLock(thr, (uptr)m, true, false);
+ return res;
+}
+
+INTERCEPTOR(int, pthread_mutex_trylock, pthread_mutex_t *m) {
+ InitThread();
+ int res = REAL(pthread_mutex_trylock)(m);
+ if (res == 0)
+ MutexAfterLock(thr, (uptr)m, true, true);
+ return res;
+}
+
+INTERCEPTOR(int, pthread_mutex_unlock, pthread_mutex_t *m) {
+ InitThread();
+ MutexBeforeUnlock(thr, (uptr)m, true);
+ return REAL(pthread_mutex_unlock)(m);
+}
+
+INTERCEPTOR(int, pthread_spin_destroy, pthread_spinlock_t *m) {
+ InitThread();
+ int res = REAL(pthread_spin_destroy)(m);
+ MutexDestroy(thr, (uptr)m);
+ return res;
+}
+
+INTERCEPTOR(int, pthread_spin_lock, pthread_spinlock_t *m) {
+ InitThread();
+ MutexBeforeLock(thr, (uptr)m, true);
+ int res = REAL(pthread_spin_lock)(m);
+ MutexAfterLock(thr, (uptr)m, true, false);
+ return res;
+}
+
+INTERCEPTOR(int, pthread_spin_trylock, pthread_spinlock_t *m) {
+ InitThread();
+ int res = REAL(pthread_spin_trylock)(m);
+ if (res == 0)
+ MutexAfterLock(thr, (uptr)m, true, true);
+ return res;
+}
+
+INTERCEPTOR(int, pthread_spin_unlock, pthread_spinlock_t *m) {
+ InitThread();
+ MutexBeforeUnlock(thr, (uptr)m, true);
+ return REAL(pthread_spin_unlock)(m);
+}
+
+INTERCEPTOR(int, pthread_rwlock_destroy, pthread_rwlock_t *m) {
+ InitThread();
+ MutexDestroy(thr, (uptr)m);
+ return REAL(pthread_rwlock_destroy)(m);
+}
+
+INTERCEPTOR(int, pthread_rwlock_rdlock, pthread_rwlock_t *m) {
+ InitThread();
+ MutexBeforeLock(thr, (uptr)m, false);
+ int res = REAL(pthread_rwlock_rdlock)(m);
+ MutexAfterLock(thr, (uptr)m, false, false);
+ return res;
+}
+
+INTERCEPTOR(int, pthread_rwlock_tryrdlock, pthread_rwlock_t *m) {
+ InitThread();
+ int res = REAL(pthread_rwlock_tryrdlock)(m);
+ if (res == 0)
+ MutexAfterLock(thr, (uptr)m, false, true);
+ return res;
+}
+
+INTERCEPTOR(int, pthread_rwlock_timedrdlock, pthread_rwlock_t *m,
+ const timespec *abstime) {
+ InitThread();
+ int res = REAL(pthread_rwlock_timedrdlock)(m, abstime);
+ if (res == 0)
+ MutexAfterLock(thr, (uptr)m, false, true);
+ return res;
+}
+
+INTERCEPTOR(int, pthread_rwlock_wrlock, pthread_rwlock_t *m) {
+ InitThread();
+ MutexBeforeLock(thr, (uptr)m, true);
+ int res = REAL(pthread_rwlock_wrlock)(m);
+ MutexAfterLock(thr, (uptr)m, true, false);
+ return res;
+}
+
+INTERCEPTOR(int, pthread_rwlock_trywrlock, pthread_rwlock_t *m) {
+ InitThread();
+ int res = REAL(pthread_rwlock_trywrlock)(m);
+ if (res == 0)
+ MutexAfterLock(thr, (uptr)m, true, true);
+ return res;
+}
+
+INTERCEPTOR(int, pthread_rwlock_timedwrlock, pthread_rwlock_t *m,
+ const timespec *abstime) {
+ InitThread();
+ int res = REAL(pthread_rwlock_timedwrlock)(m, abstime);
+ if (res == 0)
+ MutexAfterLock(thr, (uptr)m, true, true);
+ return res;
+}
+
+INTERCEPTOR(int, pthread_rwlock_unlock, pthread_rwlock_t *m) {
+ InitThread();
+  MutexBeforeUnlock(thr, (uptr)m, true);  // note: not necessarily a write unlock
+ return REAL(pthread_rwlock_unlock)(m);
+}
+
+static pthread_cond_t *init_cond(pthread_cond_t *c, bool force = false) {
+ atomic_uintptr_t *p = (atomic_uintptr_t*)c;
+ uptr cond = atomic_load(p, memory_order_acquire);
+ if (!force && cond != 0)
+ return (pthread_cond_t*)cond;
+ void *newcond = malloc(sizeof(pthread_cond_t));
+ internal_memset(newcond, 0, sizeof(pthread_cond_t));
+ if (atomic_compare_exchange_strong(p, &cond, (uptr)newcond,
+ memory_order_acq_rel))
+ return (pthread_cond_t*)newcond;
+ free(newcond);
+ return (pthread_cond_t*)cond;
+}
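
init_cond is a classic double-checked, CAS-published lazy initializer: the fast path is a single acquire load, and when two threads race to create the object, the loser frees its copy and adopts the winner's, so exactly one pthread_cond_t survives per user condition variable. The same pattern in generic form, as a sketch (template name assumed, error handling omitted; not vendor code):

#include <atomic>
#include <cstdlib>
#include <cstring>

// CAS-based lazy initialization: at most one T is ever published.
template <typename T>
T *LazyInit(std::atomic<T *> &slot) {
  T *cur = slot.load(std::memory_order_acquire);
  if (cur)
    return cur;  // fast path: already initialized
  T *fresh = static_cast<T *>(std::malloc(sizeof(T)));
  std::memset(fresh, 0, sizeof(T));
  if (slot.compare_exchange_strong(cur, fresh, std::memory_order_acq_rel))
    return fresh;  // we won the race; our object is published
  std::free(fresh);  // we lost; cur now holds the winner's object
  return cur;
}
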
+
+INTERCEPTOR(int, pthread_cond_init, pthread_cond_t *c,
+ const pthread_condattr_t *a) {
+ InitThread();
+ pthread_cond_t *cond = init_cond(c, true);
+ return REAL(pthread_cond_init)(cond, a);
+}
+
+INTERCEPTOR(int, pthread_cond_wait, pthread_cond_t *c, pthread_mutex_t *m) {
+ InitThread();
+ pthread_cond_t *cond = init_cond(c);
+ MutexBeforeUnlock(thr, (uptr)m, true);
+ MutexBeforeLock(thr, (uptr)m, true);
+ int res = REAL(pthread_cond_wait)(cond, m);
+ MutexAfterLock(thr, (uptr)m, true, false);
+ return res;
+}
+
+INTERCEPTOR(int, pthread_cond_timedwait, pthread_cond_t *c, pthread_mutex_t *m,
+ const timespec *abstime) {
+ InitThread();
+ pthread_cond_t *cond = init_cond(c);
+ MutexBeforeUnlock(thr, (uptr)m, true);
+ MutexBeforeLock(thr, (uptr)m, true);
+ int res = REAL(pthread_cond_timedwait)(cond, m, abstime);
+ MutexAfterLock(thr, (uptr)m, true, false);
+ return res;
+}
+
+INTERCEPTOR(int, pthread_cond_signal, pthread_cond_t *c) {
+ InitThread();
+ pthread_cond_t *cond = init_cond(c);
+ return REAL(pthread_cond_signal)(cond);
+}
+
+INTERCEPTOR(int, pthread_cond_broadcast, pthread_cond_t *c) {
+ InitThread();
+ pthread_cond_t *cond = init_cond(c);
+ return REAL(pthread_cond_broadcast)(cond);
+}
+
+INTERCEPTOR(int, pthread_cond_destroy, pthread_cond_t *c) {
+ InitThread();
+ pthread_cond_t *cond = init_cond(c);
+ int res = REAL(pthread_cond_destroy)(cond);
+ free(cond);
+ atomic_store((atomic_uintptr_t*)c, 0, memory_order_relaxed);
+ return res;
+}
+
+// for symbolizer
+INTERCEPTOR(char*, realpath, const char *path, char *resolved_path) {
+ InitThread();
+ return REAL(realpath)(path, resolved_path);
+}
+
+INTERCEPTOR(SSIZE_T, read, int fd, void *ptr, SIZE_T count) {
+ InitThread();
+ return REAL(read)(fd, ptr, count);
+}
+
+INTERCEPTOR(SSIZE_T, pread, int fd, void *ptr, SIZE_T count, OFF_T offset) {
+ InitThread();
+ return REAL(pread)(fd, ptr, count, offset);
+}
+
+extern "C" {
+void __dsan_before_mutex_lock(uptr m, int writelock) {
+ if (!InitThread())
+ return;
+ MutexBeforeLock(thr, m, writelock);
+}
+
+void __dsan_after_mutex_lock(uptr m, int writelock, int trylock) {
+ if (!InitThread())
+ return;
+ MutexAfterLock(thr, m, writelock, trylock);
+}
+
+void __dsan_before_mutex_unlock(uptr m, int writelock) {
+ if (!InitThread())
+ return;
+ MutexBeforeUnlock(thr, m, writelock);
+}
+
+void __dsan_mutex_destroy(uptr m) {
+ if (!InitThread())
+ return;
+ // if (m >= g_data_start && m < g_data_end)
+ // return;
+ MutexDestroy(thr, m);
+}
+} // extern "C"
+
+namespace __dsan {
+
+static void InitDataSeg() {
+ MemoryMappingLayout proc_maps(true);
+ char name[128];
+ MemoryMappedSegment segment(name, ARRAY_SIZE(name));
+ bool prev_is_data = false;
+ while (proc_maps.Next(&segment)) {
+ bool is_data = segment.offset != 0 && segment.filename[0] != 0;
+ // BSS may get merged with [heap] in /proc/self/maps. This is not very
+ // reliable.
+ bool is_bss = segment.offset == 0 &&
+ (segment.filename[0] == 0 ||
+ internal_strcmp(segment.filename, "[heap]") == 0) &&
+ prev_is_data;
+ if (g_data_start == 0 && is_data) g_data_start = segment.start;
+ if (is_bss) g_data_end = segment.end;
+ prev_is_data = is_data;
+ }
+ VPrintf(1, "guessed data_start=%p data_end=%p\n", g_data_start, g_data_end);
+ CHECK_LT(g_data_start, g_data_end);
+ CHECK_GE((uptr)&g_data_start, g_data_start);
+ CHECK_LT((uptr)&g_data_start, g_data_end);
+}
+
+void InitializeInterceptors() {
+ INTERCEPT_FUNCTION(pthread_mutex_destroy);
+ INTERCEPT_FUNCTION(pthread_mutex_lock);
+ INTERCEPT_FUNCTION(pthread_mutex_trylock);
+ INTERCEPT_FUNCTION(pthread_mutex_unlock);
+
+ INTERCEPT_FUNCTION(pthread_spin_destroy);
+ INTERCEPT_FUNCTION(pthread_spin_lock);
+ INTERCEPT_FUNCTION(pthread_spin_trylock);
+ INTERCEPT_FUNCTION(pthread_spin_unlock);
+
+ INTERCEPT_FUNCTION(pthread_rwlock_destroy);
+ INTERCEPT_FUNCTION(pthread_rwlock_rdlock);
+ INTERCEPT_FUNCTION(pthread_rwlock_tryrdlock);
+ INTERCEPT_FUNCTION(pthread_rwlock_timedrdlock);
+ INTERCEPT_FUNCTION(pthread_rwlock_wrlock);
+ INTERCEPT_FUNCTION(pthread_rwlock_trywrlock);
+ INTERCEPT_FUNCTION(pthread_rwlock_timedwrlock);
+ INTERCEPT_FUNCTION(pthread_rwlock_unlock);
+
+ INTERCEPT_FUNCTION_VER(pthread_cond_init, "GLIBC_2.3.2");
+ INTERCEPT_FUNCTION_VER(pthread_cond_signal, "GLIBC_2.3.2");
+ INTERCEPT_FUNCTION_VER(pthread_cond_broadcast, "GLIBC_2.3.2");
+ INTERCEPT_FUNCTION_VER(pthread_cond_wait, "GLIBC_2.3.2");
+ INTERCEPT_FUNCTION_VER(pthread_cond_timedwait, "GLIBC_2.3.2");
+ INTERCEPT_FUNCTION_VER(pthread_cond_destroy, "GLIBC_2.3.2");
+
+ // for symbolizer
+ INTERCEPT_FUNCTION(realpath);
+ INTERCEPT_FUNCTION(read);
+ INTERCEPT_FUNCTION(pread);
+
+ InitDataSeg();
+}
+
+} // namespace __dsan
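
The __dsan_* entry points above let a custom synchronization primitive feed the deadlock detector directly, mirroring what the pthread interceptors do automatically. A hedged usage sketch for a hand-rolled spinlock (the wrapper class is hypothetical; the extern declarations match the entry points defined above, with uptr spelled unsigned long as an LP64 assumption):

#include <atomic>

extern "C" {
void __dsan_before_mutex_lock(unsigned long m, int writelock);
void __dsan_after_mutex_lock(unsigned long m, int writelock, int trylock);
void __dsan_before_mutex_unlock(unsigned long m, int writelock);
void __dsan_mutex_destroy(unsigned long m);
}

// A toy spinlock that reports itself to the detector.
class AnnotatedSpinLock {
 public:
  ~AnnotatedSpinLock() { __dsan_mutex_destroy((unsigned long)this); }
  void Lock() {
    __dsan_before_mutex_lock((unsigned long)this, /*writelock=*/1);
    while (locked_.exchange(true, std::memory_order_acquire)) {
    }
    __dsan_after_mutex_lock((unsigned long)this, 1, /*trylock=*/0);
  }
  void Unlock() {
    __dsan_before_mutex_unlock((unsigned long)this, 1);
    locked_.store(false, std::memory_order_release);
  }

 private:
  std::atomic<bool> locked_{false};
};

Two threads locking a pair of such objects in opposite orders would trigger the lock-order-inversion report printed by ReportDeadlock in dd_rtl.cc.
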
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/dd/dd_interceptors.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/dd/dd_rtl.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/dd/dd_rtl.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/dd/dd_rtl.cc (revision 351984)
@@ -0,0 +1,158 @@
+//===-- dd_rtl.cc ---------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "dd_rtl.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_flag_parser.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+
+namespace __dsan {
+
+static Context *ctx;
+
+static u32 CurrentStackTrace(Thread *thr, uptr skip) {
+ BufferedStackTrace stack;
+ thr->ignore_interceptors = true;
+ stack.Unwind(1000, 0, 0, 0, 0, 0, false);
+ thr->ignore_interceptors = false;
+ if (stack.size <= skip)
+ return 0;
+ return StackDepotPut(StackTrace(stack.trace + skip, stack.size - skip));
+}
+
+static void PrintStackTrace(Thread *thr, u32 stk) {
+ StackTrace stack = StackDepotGet(stk);
+ thr->ignore_interceptors = true;
+ stack.Print();
+ thr->ignore_interceptors = false;
+}
+
+static void ReportDeadlock(Thread *thr, DDReport *rep) {
+ if (rep == 0)
+ return;
+ BlockingMutexLock lock(&ctx->report_mutex);
+ Printf("==============================\n");
+ Printf("WARNING: lock-order-inversion (potential deadlock)\n");
+ for (int i = 0; i < rep->n; i++) {
+ Printf("Thread %d locks mutex %llu while holding mutex %llu:\n",
+ rep->loop[i].thr_ctx, rep->loop[i].mtx_ctx1, rep->loop[i].mtx_ctx0);
+ PrintStackTrace(thr, rep->loop[i].stk[1]);
+ if (rep->loop[i].stk[0]) {
+ Printf("Mutex %llu was acquired here:\n",
+ rep->loop[i].mtx_ctx0);
+ PrintStackTrace(thr, rep->loop[i].stk[0]);
+ }
+ }
+ Printf("==============================\n");
+}
+
+Callback::Callback(Thread *thr)
+ : thr(thr) {
+ lt = thr->dd_lt;
+ pt = thr->dd_pt;
+}
+
+u32 Callback::Unwind() {
+ return CurrentStackTrace(thr, 3);
+}
+
+static void InitializeFlags() {
+ Flags *f = flags();
+
+ // Default values.
+ f->second_deadlock_stack = false;
+
+ SetCommonFlagsDefaults();
+ {
+ // Override some common flags defaults.
+ CommonFlags cf;
+ cf.CopyFrom(*common_flags());
+ cf.allow_addr2line = true;
+ OverrideCommonFlags(cf);
+ }
+
+ // Override from command line.
+ FlagParser parser;
+ RegisterFlag(&parser, "second_deadlock_stack", "", &f->second_deadlock_stack);
+ RegisterCommonFlags(&parser);
+ parser.ParseStringFromEnv("DSAN_OPTIONS");
+ SetVerbosity(common_flags()->verbosity);
+}
+
+void Initialize() {
+ static u64 ctx_mem[sizeof(Context) / sizeof(u64) + 1];
+ ctx = new(ctx_mem) Context();
+
+ InitializeInterceptors();
+ InitializeFlags();
+ ctx->dd = DDetector::Create(flags());
+}
+
+void ThreadInit(Thread *thr) {
+ static atomic_uintptr_t id_gen;
+ uptr id = atomic_fetch_add(&id_gen, 1, memory_order_relaxed);
+ thr->dd_pt = ctx->dd->CreatePhysicalThread();
+ thr->dd_lt = ctx->dd->CreateLogicalThread(id);
+}
+
+void ThreadDestroy(Thread *thr) {
+ ctx->dd->DestroyPhysicalThread(thr->dd_pt);
+ ctx->dd->DestroyLogicalThread(thr->dd_lt);
+}
+
+void MutexBeforeLock(Thread *thr, uptr m, bool writelock) {
+ if (thr->ignore_interceptors)
+ return;
+ Callback cb(thr);
+ {
+ MutexHashMap::Handle h(&ctx->mutex_map, m);
+ if (h.created())
+ ctx->dd->MutexInit(&cb, &h->dd);
+ ctx->dd->MutexBeforeLock(&cb, &h->dd, writelock);
+ }
+ ReportDeadlock(thr, ctx->dd->GetReport(&cb));
+}
+
+void MutexAfterLock(Thread *thr, uptr m, bool writelock, bool trylock) {
+ if (thr->ignore_interceptors)
+ return;
+ Callback cb(thr);
+ {
+ MutexHashMap::Handle h(&ctx->mutex_map, m);
+ if (h.created())
+ ctx->dd->MutexInit(&cb, &h->dd);
+ ctx->dd->MutexAfterLock(&cb, &h->dd, writelock, trylock);
+ }
+ ReportDeadlock(thr, ctx->dd->GetReport(&cb));
+}
+
+void MutexBeforeUnlock(Thread *thr, uptr m, bool writelock) {
+ if (thr->ignore_interceptors)
+ return;
+ Callback cb(thr);
+ {
+ MutexHashMap::Handle h(&ctx->mutex_map, m);
+ ctx->dd->MutexBeforeUnlock(&cb, &h->dd, writelock);
+ }
+ ReportDeadlock(thr, ctx->dd->GetReport(&cb));
+}
+
+void MutexDestroy(Thread *thr, uptr m) {
+ if (thr->ignore_interceptors)
+ return;
+ Callback cb(thr);
+ MutexHashMap::Handle h(&ctx->mutex_map, m, true);
+ if (!h.exists())
+ return;
+ ctx->dd->MutexDestroy(&cb, &h->dd);
+}
+
+} // namespace __dsan
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/dd/dd_rtl.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/dd/dd_rtl.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/dd/dd_rtl.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/dd/dd_rtl.h (revision 351984)
@@ -0,0 +1,66 @@
+//===-- dd_rtl.h ----------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+#ifndef DD_RTL_H
+#define DD_RTL_H
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_allocator_internal.h"
+#include "sanitizer_common/sanitizer_addrhashmap.h"
+#include "sanitizer_common/sanitizer_mutex.h"
+
+namespace __dsan {
+
+typedef DDFlags Flags;
+
+struct Mutex {
+ DDMutex dd;
+};
+
+struct Thread {
+ DDPhysicalThread *dd_pt;
+ DDLogicalThread *dd_lt;
+
+ bool ignore_interceptors;
+};
+
+struct Callback : DDCallback {
+ Thread *thr;
+
+ Callback(Thread *thr);
+ u32 Unwind() override;
+};
+
+typedef AddrHashMap<Mutex, 31051> MutexHashMap;
+
+struct Context {
+ DDetector *dd;
+
+ BlockingMutex report_mutex;
+ MutexHashMap mutex_map;
+};
+
+inline Flags* flags() {
+ static Flags flags;
+ return &flags;
+}
+
+void Initialize();
+void InitializeInterceptors();
+
+void ThreadInit(Thread *thr);
+void ThreadDestroy(Thread *thr);
+
+void MutexBeforeLock(Thread *thr, uptr m, bool writelock);
+void MutexAfterLock(Thread *thr, uptr m, bool writelock, bool trylock);
+void MutexBeforeUnlock(Thread *thr, uptr m, bool writelock);
+void MutexDestroy(Thread *thr, uptr m);
+
+} // namespace __dsan
+#endif // DD_RTL_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/dd/dd_rtl.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/go/test.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/go/test.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/go/test.c (revision 351984)
@@ -0,0 +1,105 @@
+//===-- test.c ------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Sanity test for Go runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#include <sys/mman.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+void __tsan_init(void **thr, void **proc, void (*cb)(long, void*));
+void __tsan_fini();
+void __tsan_map_shadow(void *addr, unsigned long size);
+void __tsan_go_start(void *thr, void **chthr, void *pc);
+void __tsan_go_end(void *thr);
+void __tsan_proc_create(void **pproc);
+void __tsan_proc_destroy(void *proc);
+void __tsan_proc_wire(void *proc, void *thr);
+void __tsan_proc_unwire(void *proc, void *thr);
+void __tsan_read(void *thr, void *addr, void *pc);
+void __tsan_write(void *thr, void *addr, void *pc);
+void __tsan_func_enter(void *thr, void *pc);
+void __tsan_func_exit(void *thr);
+void __tsan_malloc(void *thr, void *pc, void *p, unsigned long sz);
+void __tsan_free(void *p, unsigned long sz);
+void __tsan_acquire(void *thr, void *addr);
+void __tsan_release(void *thr, void *addr);
+void __tsan_release_merge(void *thr, void *addr);
+
+void *current_proc;
+
+void symbolize_cb(long cmd, void *ctx) {
+ switch (cmd) {
+ case 0:
+ if (current_proc == 0)
+ abort();
+ *(void**)ctx = current_proc;
+ }
+}
+
+/*
+ * See lib/tsan/rtl/tsan_platform.h for details of what the memory layout
+ * of Go programs looks like. To prevent running over existing mappings,
+ * we pick an address slightly inside the Go heap region.
+ */
+void *go_heap = (void *)0xC011110000;
+char *buf0;
+
+void foobar() {}
+void barfoo() {}
+
+int main(void) {
+ void *thr0 = 0;
+ void *proc0 = 0;
+ __tsan_init(&thr0, &proc0, symbolize_cb);
+ current_proc = proc0;
+
+ // Allocate something resembling a heap in Go.
+ buf0 = mmap(go_heap, 16384, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_FIXED | MAP_ANON, -1, 0);
+ if (buf0 == MAP_FAILED) {
+ fprintf(stderr, "failed to allocate Go-like heap at %p; errno %d\n",
+ go_heap, errno);
+ return 1;
+ }
+ char *buf = (char*)((unsigned long)buf0 + (64<<10) - 1 & ~((64<<10) - 1));
+ __tsan_map_shadow(buf, 4096);
+ __tsan_malloc(thr0, (char*)&barfoo + 1, buf, 10);
+ __tsan_free(buf, 10);
+ __tsan_func_enter(thr0, (char*)&main + 1);
+ __tsan_malloc(thr0, (char*)&barfoo + 1, buf, 10);
+ __tsan_release(thr0, buf);
+ __tsan_release_merge(thr0, buf);
+ void *thr1 = 0;
+ __tsan_go_start(thr0, &thr1, (char*)&barfoo + 1);
+ void *thr2 = 0;
+ __tsan_go_start(thr0, &thr2, (char*)&barfoo + 1);
+ __tsan_func_exit(thr0);
+ __tsan_func_enter(thr1, (char*)&foobar + 1);
+ __tsan_func_enter(thr1, (char*)&foobar + 1);
+ __tsan_write(thr1, buf, (char*)&barfoo + 1);
+ __tsan_acquire(thr1, buf);
+ __tsan_func_exit(thr1);
+ __tsan_func_exit(thr1);
+ __tsan_go_end(thr1);
+ void *proc1 = 0;
+ __tsan_proc_create(&proc1);
+ current_proc = proc1;
+ __tsan_func_enter(thr2, (char*)&foobar + 1);
+ __tsan_read(thr2, buf, (char*)&barfoo + 1);
+ __tsan_free(buf, 10);
+ __tsan_func_exit(thr2);
+ __tsan_go_end(thr2);
+ __tsan_proc_destroy(proc1);
+ current_proc = proc0;
+ __tsan_fini();
+ return 0;
+}
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/go/tsan_go.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/go/tsan_go.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/tsan/go/tsan_go.cc (revision 351984)
@@ -0,0 +1,283 @@
+//===-- tsan_go.cc --------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// ThreadSanitizer runtime for Go language.
+//
+//===----------------------------------------------------------------------===//
+
+#include "tsan_rtl.h"
+#include "tsan_symbolize.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include <stdlib.h>
+
+namespace __tsan {
+
+void InitializeInterceptors() {
+}
+
+void InitializeDynamicAnnotations() {
+}
+
+bool IsExpectedReport(uptr addr, uptr size) {
+ return false;
+}
+
+void *internal_alloc(MBlockType typ, uptr sz) {
+ return InternalAlloc(sz);
+}
+
+void internal_free(void *p) {
+ InternalFree(p);
+}
+
+// Callback into Go.
+static void (*go_runtime_cb)(uptr cmd, void *ctx);
+
+enum {
+ CallbackGetProc = 0,
+ CallbackSymbolizeCode = 1,
+ CallbackSymbolizeData = 2,
+};
+
+struct SymbolizeCodeContext {
+ uptr pc;
+ char *func;
+ char *file;
+ uptr line;
+ uptr off;
+ uptr res;
+};
+
+SymbolizedStack *SymbolizeCode(uptr addr) {
+ SymbolizedStack *s = SymbolizedStack::New(addr);
+ SymbolizeCodeContext cbctx;
+ internal_memset(&cbctx, 0, sizeof(cbctx));
+ cbctx.pc = addr;
+ go_runtime_cb(CallbackSymbolizeCode, &cbctx);
+ if (cbctx.res) {
+ AddressInfo &info = s->info;
+ info.module_offset = cbctx.off;
+ info.function = internal_strdup(cbctx.func ? cbctx.func : "??");
+ info.file = internal_strdup(cbctx.file ? cbctx.file : "-");
+ info.line = cbctx.line;
+ info.column = 0;
+ }
+ return s;
+}
+
+struct SymbolizeDataContext {
+ uptr addr;
+ uptr heap;
+ uptr start;
+ uptr size;
+ char *name;
+ char *file;
+ uptr line;
+ uptr res;
+};
+
+ReportLocation *SymbolizeData(uptr addr) {
+ SymbolizeDataContext cbctx;
+ internal_memset(&cbctx, 0, sizeof(cbctx));
+ cbctx.addr = addr;
+ go_runtime_cb(CallbackSymbolizeData, &cbctx);
+ if (!cbctx.res)
+ return 0;
+ if (cbctx.heap) {
+ MBlock *b = ctx->metamap.GetBlock(cbctx.start);
+ if (!b)
+ return 0;
+ ReportLocation *loc = ReportLocation::New(ReportLocationHeap);
+ loc->heap_chunk_start = cbctx.start;
+ loc->heap_chunk_size = b->siz;
+ loc->tid = b->tid;
+ loc->stack = SymbolizeStackId(b->stk);
+ return loc;
+ } else {
+ ReportLocation *loc = ReportLocation::New(ReportLocationGlobal);
+ loc->global.name = internal_strdup(cbctx.name ? cbctx.name : "??");
+ loc->global.file = internal_strdup(cbctx.file ? cbctx.file : "??");
+ loc->global.line = cbctx.line;
+ loc->global.start = cbctx.start;
+ loc->global.size = cbctx.size;
+ return loc;
+ }
+}
+
+static ThreadState *main_thr;
+static bool inited;
+
+static Processor* get_cur_proc() {
+ if (UNLIKELY(!inited)) {
+ // Running Initialize().
+ // We have not yet returned the Processor to Go, so we cannot ask it back.
+ // Currently, Initialize() does not use the Processor, so return nullptr.
+ return nullptr;
+ }
+ Processor *proc;
+ go_runtime_cb(CallbackGetProc, &proc);
+ return proc;
+}
+
+Processor *ThreadState::proc() {
+ return get_cur_proc();
+}
+
+extern "C" {
+
+static ThreadState *AllocGoroutine() {
+ ThreadState *thr = (ThreadState*)internal_alloc(MBlockThreadContex,
+ sizeof(ThreadState));
+ internal_memset(thr, 0, sizeof(*thr));
+ return thr;
+}
+
+void __tsan_init(ThreadState **thrp, Processor **procp,
+ void (*cb)(uptr cmd, void *ctx)) {
+ go_runtime_cb = cb;
+ ThreadState *thr = AllocGoroutine();
+ main_thr = *thrp = thr;
+ Initialize(thr);
+ *procp = thr->proc1;
+ inited = true;
+}
+
+void __tsan_fini() {
+ // FIXME: Not necessarily thread 0.
+ ThreadState *thr = main_thr;
+ int res = Finalize(thr);
+ exit(res);
+}
+
+void __tsan_map_shadow(uptr addr, uptr size) {
+ MapShadow(addr, size);
+}
+
+void __tsan_read(ThreadState *thr, void *addr, void *pc) {
+ MemoryRead(thr, (uptr)pc, (uptr)addr, kSizeLog1);
+}
+
+void __tsan_read_pc(ThreadState *thr, void *addr, uptr callpc, uptr pc) {
+ if (callpc != 0)
+ FuncEntry(thr, callpc);
+ MemoryRead(thr, (uptr)pc, (uptr)addr, kSizeLog1);
+ if (callpc != 0)
+ FuncExit(thr);
+}
+
+void __tsan_write(ThreadState *thr, void *addr, void *pc) {
+ MemoryWrite(thr, (uptr)pc, (uptr)addr, kSizeLog1);
+}
+
+void __tsan_write_pc(ThreadState *thr, void *addr, uptr callpc, uptr pc) {
+ if (callpc != 0)
+ FuncEntry(thr, callpc);
+ MemoryWrite(thr, (uptr)pc, (uptr)addr, kSizeLog1);
+ if (callpc != 0)
+ FuncExit(thr);
+}
+
+void __tsan_read_range(ThreadState *thr, void *addr, uptr size, uptr pc) {
+ MemoryAccessRange(thr, (uptr)pc, (uptr)addr, size, false);
+}
+
+void __tsan_write_range(ThreadState *thr, void *addr, uptr size, uptr pc) {
+ MemoryAccessRange(thr, (uptr)pc, (uptr)addr, size, true);
+}
+
+void __tsan_func_enter(ThreadState *thr, void *pc) {
+ FuncEntry(thr, (uptr)pc);
+}
+
+void __tsan_func_exit(ThreadState *thr) {
+ FuncExit(thr);
+}
+
+void __tsan_malloc(ThreadState *thr, uptr pc, uptr p, uptr sz) {
+ CHECK(inited);
+ if (thr && pc)
+ ctx->metamap.AllocBlock(thr, pc, p, sz);
+ MemoryResetRange(0, 0, (uptr)p, sz);
+}
+
+void __tsan_free(uptr p, uptr sz) {
+ ctx->metamap.FreeRange(get_cur_proc(), p, sz);
+}
+
+void __tsan_go_start(ThreadState *parent, ThreadState **pthr, void *pc) {
+ ThreadState *thr = AllocGoroutine();
+ *pthr = thr;
+ int goid = ThreadCreate(parent, (uptr)pc, 0, true);
+ ThreadStart(thr, goid, 0, ThreadType::Regular);
+}
+
+void __tsan_go_end(ThreadState *thr) {
+ ThreadFinish(thr);
+ internal_free(thr);
+}
+
+void __tsan_proc_create(Processor **pproc) {
+ *pproc = ProcCreate();
+}
+
+void __tsan_proc_destroy(Processor *proc) {
+ ProcDestroy(proc);
+}
+
+void __tsan_acquire(ThreadState *thr, void *addr) {
+ Acquire(thr, 0, (uptr)addr);
+}
+
+void __tsan_release(ThreadState *thr, void *addr) {
+ ReleaseStore(thr, 0, (uptr)addr);
+}
+
+void __tsan_release_merge(ThreadState *thr, void *addr) {
+ Release(thr, 0, (uptr)addr);
+}
+
+void __tsan_finalizer_goroutine(ThreadState *thr) {
+ AcquireGlobal(thr, 0);
+}
+
+void __tsan_mutex_before_lock(ThreadState *thr, uptr addr, uptr write) {
+ if (write)
+ MutexPreLock(thr, 0, addr);
+ else
+ MutexPreReadLock(thr, 0, addr);
+}
+
+void __tsan_mutex_after_lock(ThreadState *thr, uptr addr, uptr write) {
+ if (write)
+ MutexPostLock(thr, 0, addr);
+ else
+ MutexPostReadLock(thr, 0, addr);
+}
+
+void __tsan_mutex_before_unlock(ThreadState *thr, uptr addr, uptr write) {
+ if (write)
+ MutexUnlock(thr, 0, addr);
+ else
+ MutexReadUnlock(thr, 0, addr);
+}
+
+void __tsan_go_ignore_sync_begin(ThreadState *thr) {
+ ThreadIgnoreSyncBegin(thr, 0);
+}
+
+void __tsan_go_ignore_sync_end(ThreadState *thr) {
+ ThreadIgnoreSyncEnd(thr, 0);
+}
+
+void __tsan_report_count(u64 *pn) {
+ Lock lock(&ctx->report_mtx);
+ *pn = ctx->nreported;
+}
+
+} // extern "C"
+} // namespace __tsan
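+// The Go side is expected to service go_runtime_cb with the three commands
+// defined above. A minimal host-side sketch (illustrative only, mirroring the
+// C test harness elsewhere in this import):
+//
+//   static void *cur_proc;  // set from __tsan_init()/__tsan_proc_create()
+//
+//   static void runtime_cb(unsigned long cmd, void *ctx) {
+//     switch (cmd) {
+//     case 0:  // CallbackGetProc: hand back the current Processor.
+//       *(void **)ctx = cur_proc;
+//       break;
+//     case 1:  // CallbackSymbolizeCode: fill SymbolizeCodeContext, res != 0.
+//     case 2:  // CallbackSymbolizeData: fill SymbolizeDataContext, res != 0.
+//       break; // leaving res == 0 keeps the frame/location unsymbolized
+//     }
+//   }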
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/cfi/cfi.cpp
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/cfi/cfi.cpp (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/cfi/cfi.cpp (revision 351984)
@@ -0,0 +1,476 @@
+//===-------- cfi.cpp -----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the runtime support for the cross-DSO CFI.
+//
+//===----------------------------------------------------------------------===//
+
+#include <assert.h>
+#include <elf.h>
+
+#include "sanitizer_common/sanitizer_common.h"
+#if SANITIZER_FREEBSD
+#include <sys/link_elf.h>
+#endif
+#include <link.h>
+#include <string.h>
+#include <stdlib.h>
+#include <sys/mman.h>
+
+#if SANITIZER_LINUX
+typedef ElfW(Phdr) Elf_Phdr;
+typedef ElfW(Ehdr) Elf_Ehdr;
+typedef ElfW(Addr) Elf_Addr;
+typedef ElfW(Sym) Elf_Sym;
+typedef ElfW(Dyn) Elf_Dyn;
+#elif SANITIZER_FREEBSD
+#if SANITIZER_WORDSIZE == 64
+#define ElfW64_Dyn Elf_Dyn
+#define ElfW64_Sym Elf_Sym
+#else
+#define ElfW32_Dyn Elf_Dyn
+#define ElfW32_Sym Elf_Sym
+#endif
+#endif
+
+#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_flag_parser.h"
+#include "ubsan/ubsan_init.h"
+#include "ubsan/ubsan_flags.h"
+
+#ifdef CFI_ENABLE_DIAG
+#include "ubsan/ubsan_handlers.h"
+#endif
+
+using namespace __sanitizer;
+
+namespace __cfi {
+
+#define kCfiShadowLimitsStorageSize 4096 // 1 page
+// Let's hope that the data segment is mapped with 4K pages.
+// The pointer to the cfi shadow region is stored at the start of this page.
+// The rest of the page is unused and re-mapped read-only.
+static union {
+ char space[kCfiShadowLimitsStorageSize];
+ struct {
+ uptr start;
+ uptr size;
+ } limits;
+} cfi_shadow_limits_storage
+ __attribute__((aligned(kCfiShadowLimitsStorageSize)));
+static constexpr uptr kShadowGranularity = 12;
+static constexpr uptr kShadowAlign = 1UL << kShadowGranularity; // 4096
+
+static constexpr uint16_t kInvalidShadow = 0;
+static constexpr uint16_t kUncheckedShadow = 0xFFFFU;
+
+// Get the start address of the CFI shadow region.
+uptr GetShadow() {
+ return cfi_shadow_limits_storage.limits.start;
+}
+
+uptr GetShadowSize() {
+ return cfi_shadow_limits_storage.limits.size;
+}
+
+// This will only work while the shadow is not allocated.
+void SetShadowSize(uptr size) {
+ cfi_shadow_limits_storage.limits.size = size;
+}
+
+uptr MemToShadowOffset(uptr x) {
+ return (x >> kShadowGranularity) << 1;
+}
+
+uint16_t *MemToShadow(uptr x, uptr shadow_base) {
+ return (uint16_t *)(shadow_base + MemToShadowOffset(x));
+}
+
+typedef int (*CFICheckFn)(u64, void *, void *);
+
+// This class reads and decodes the shadow contents.
+class ShadowValue {
+ uptr addr;
+ uint16_t v;
+ explicit ShadowValue(uptr addr, uint16_t v) : addr(addr), v(v) {}
+
+public:
+ bool is_invalid() const { return v == kInvalidShadow; }
+
+ bool is_unchecked() const { return v == kUncheckedShadow; }
+
+ CFICheckFn get_cfi_check() const {
+ assert(!is_invalid() && !is_unchecked());
+ uptr aligned_addr = addr & ~(kShadowAlign - 1);
+ uptr p = aligned_addr - (((uptr)v - 1) << kShadowGranularity);
+ return reinterpret_cast<CFICheckFn>(p);
+ }
+
+ // Load a shadow value for the given application memory address.
+ static const ShadowValue load(uptr addr) {
+ uptr shadow_base = GetShadow();
+ uptr shadow_offset = MemToShadowOffset(addr);
+ if (shadow_offset > GetShadowSize())
+ return ShadowValue(addr, kInvalidShadow);
+ else
+ return ShadowValue(
+ addr, *reinterpret_cast<uint16_t *>(shadow_base + shadow_offset));
+ }
+};
+
+class ShadowBuilder {
+ uptr shadow_;
+
+public:
+ // Allocate a new empty shadow (for the entire address space) on the side.
+ void Start();
+ // Mark the given address range as unchecked.
+ // This is used for uninstrumented libraries like libc.
+ // Any CFI check with a target in that range will pass.
+ void AddUnchecked(uptr begin, uptr end);
+ // Mark the given address range as belonging to a library with the given
+ // cfi_check function.
+ void Add(uptr begin, uptr end, uptr cfi_check);
+ // Finish shadow construction. Atomically switch the current active shadow
+ // region with the newly constructed one and deallocate the former.
+ void Install();
+};
+
+void ShadowBuilder::Start() {
+ shadow_ = (uptr)MmapNoReserveOrDie(GetShadowSize(), "CFI shadow");
+ VReport(1, "CFI: shadow at %zx .. %zx\n", shadow_, shadow_ + GetShadowSize());
+}
+
+void ShadowBuilder::AddUnchecked(uptr begin, uptr end) {
+ uint16_t *shadow_begin = MemToShadow(begin, shadow_);
+ uint16_t *shadow_end = MemToShadow(end - 1, shadow_) + 1;
+ // memset takes a byte, so our unchecked shadow value requires both bytes to
+ // be the same. Make sure we're ok during compilation.
+ static_assert((kUncheckedShadow & 0xff) == ((kUncheckedShadow >> 8) & 0xff),
+ "Both bytes of the 16-bit value must be the same!");
+ memset(shadow_begin, kUncheckedShadow & 0xff,
+ (shadow_end - shadow_begin) * sizeof(*shadow_begin));
+}
+
+void ShadowBuilder::Add(uptr begin, uptr end, uptr cfi_check) {
+ assert((cfi_check & (kShadowAlign - 1)) == 0);
+
+ // Don't fill anything below cfi_check. We cannot represent those addresses
+ // in the shadow, and must make sure at codegen to place all valid call
+ // targets above cfi_check.
+ begin = Max(begin, cfi_check);
+ uint16_t *s = MemToShadow(begin, shadow_);
+ uint16_t *s_end = MemToShadow(end - 1, shadow_) + 1;
+ uint16_t sv = ((begin - cfi_check) >> kShadowGranularity) + 1;
+ for (; s < s_end; s++, sv++)
+ *s = sv;
+}
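+// Worked example of the encoding above (illustrative addresses): for a DSO
+// with __cfi_check at 0x7f0000000000 and a code page at 0x7f0000003000,
+//   encode (Add):           sv = ((0x7f0000003000 - 0x7f0000000000) >> 12) + 1
+//                              = 4
+//   decode (get_cfi_check): 0x7f0000003000 - ((4 - 1) << 12)
+//                              = 0x7f0000000000, i.e. back to __cfi_check.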
+
+#if SANITIZER_LINUX || SANITIZER_FREEBSD || SANITIZER_NETBSD
+void ShadowBuilder::Install() {
+ MprotectReadOnly(shadow_, GetShadowSize());
+ uptr main_shadow = GetShadow();
+ if (main_shadow) {
+ // Update.
+#if SANITIZER_LINUX
+ void *res = mremap((void *)shadow_, GetShadowSize(), GetShadowSize(),
+ MREMAP_MAYMOVE | MREMAP_FIXED, (void *)main_shadow);
+ CHECK(res != MAP_FAILED);
+#elif SANITIZER_NETBSD
+ void *res = mremap((void *)shadow_, GetShadowSize(), (void *)main_shadow,
+ GetShadowSize(), MAP_FIXED);
+ CHECK(res != MAP_FAILED);
+#else
+ void *res = MmapFixedOrDie(main_shadow, GetShadowSize(), "cfi shadow");
+ CHECK(res != MAP_FAILED);
+ ::memcpy((void *)main_shadow, (void *)shadow_, GetShadowSize());
+ MprotectReadOnly(main_shadow, GetShadowSize());
+ UnmapOrDie((void *)shadow_, GetShadowSize());
+#endif
+ } else {
+ // Initial setup.
+ CHECK_EQ(kCfiShadowLimitsStorageSize, GetPageSizeCached());
+ CHECK_EQ(0, GetShadow());
+ cfi_shadow_limits_storage.limits.start = shadow_;
+ MprotectReadOnly((uptr)&cfi_shadow_limits_storage,
+ sizeof(cfi_shadow_limits_storage));
+ CHECK_EQ(shadow_, GetShadow());
+ }
+}
+#else
+#error not implemented
+#endif
+
+// This is a workaround for a glibc bug:
+// https://sourceware.org/bugzilla/show_bug.cgi?id=15199
+// Other platforms can, hopefully, just do
+// dlopen(RTLD_NOLOAD | RTLD_LAZY)
+// dlsym("__cfi_check").
+uptr find_cfi_check_in_dso(dl_phdr_info *info) {
+ const Elf_Dyn *dynamic = nullptr;
+ for (int i = 0; i < info->dlpi_phnum; ++i) {
+ if (info->dlpi_phdr[i].p_type == PT_DYNAMIC) {
+ dynamic =
+ (const Elf_Dyn *)(info->dlpi_addr + info->dlpi_phdr[i].p_vaddr);
+ break;
+ }
+ }
+ if (!dynamic) return 0;
+ uptr strtab = 0, symtab = 0, strsz = 0;
+ for (const Elf_Dyn *p = dynamic; p->d_tag != DT_NULL; ++p) {
+ if (p->d_tag == DT_SYMTAB)
+ symtab = p->d_un.d_ptr;
+ else if (p->d_tag == DT_STRTAB)
+ strtab = p->d_un.d_ptr;
+ else if (p->d_tag == DT_STRSZ)
+ strsz = p->d_un.d_ptr;
+ }
+
+ if (symtab > strtab) {
+ VReport(1, "Can not handle: symtab > strtab (%p > %zx)\n", symtab, strtab);
+ return 0;
+ }
+
+ // Verify that strtab and symtab are inside of the same LOAD segment.
+ // This excludes VDSO, which has (very high) bogus strtab and symtab pointers.
+ int phdr_idx;
+ for (phdr_idx = 0; phdr_idx < info->dlpi_phnum; phdr_idx++) {
+ const Elf_Phdr *phdr = &info->dlpi_phdr[phdr_idx];
+ if (phdr->p_type == PT_LOAD) {
+ uptr beg = info->dlpi_addr + phdr->p_vaddr;
+ uptr end = beg + phdr->p_memsz;
+ if (strtab >= beg && strtab + strsz < end && symtab >= beg &&
+ symtab < end)
+ break;
+ }
+ }
+ if (phdr_idx == info->dlpi_phnum) {
+ // Nope, either different segments or just bogus pointers.
+ // Cannot handle this.
+ VReport(1, "Cannot handle: symtab %zx, strtab %zx\n", symtab, strtab);
+ return 0;
+ }
+
+ for (const Elf_Sym *p = (const Elf_Sym *)symtab; (Elf_Addr)p < strtab;
+ ++p) {
+ // There is no reliable way to find the end of the symbol table. In
+ // lld-produced files, there are other sections between symtab and strtab.
+ // Stop looking when the symbol name is not inside strtab.
+ if (p->st_name >= strsz) break;
+ char *name = (char*)(strtab + p->st_name);
+ if (strcmp(name, "__cfi_check") == 0) {
+ assert(p->st_info == ELF32_ST_INFO(STB_GLOBAL, STT_FUNC) ||
+ p->st_info == ELF32_ST_INFO(STB_WEAK, STT_FUNC));
+ uptr addr = info->dlpi_addr + p->st_value;
+ return addr;
+ }
+ }
+ return 0;
+}
+
+int dl_iterate_phdr_cb(dl_phdr_info *info, size_t size, void *data) {
+ uptr cfi_check = find_cfi_check_in_dso(info);
+ if (cfi_check)
+ VReport(1, "Module '%s' __cfi_check %zx\n", info->dlpi_name, cfi_check);
+
+ ShadowBuilder *b = reinterpret_cast<ShadowBuilder *>(data);
+
+ for (int i = 0; i < info->dlpi_phnum; i++) {
+ const Elf_Phdr *phdr = &info->dlpi_phdr[i];
+ if (phdr->p_type == PT_LOAD) {
+ // Jump tables are in the executable segment.
+ // VTables are in the non-executable one.
+ // Need to fill shadow for both.
+ // FIXME: reject writable if vtables are in the r/o segment. Depend on
+ // PT_RELRO?
+ uptr cur_beg = info->dlpi_addr + phdr->p_vaddr;
+ uptr cur_end = cur_beg + phdr->p_memsz;
+ if (cfi_check) {
+ VReport(1, " %zx .. %zx\n", cur_beg, cur_end);
+ b->Add(cur_beg, cur_end, cfi_check);
+ } else {
+ b->AddUnchecked(cur_beg, cur_end);
+ }
+ }
+ }
+ return 0;
+}
+
+// Init or update shadow for the current set of loaded libraries.
+void UpdateShadow() {
+ ShadowBuilder b;
+ b.Start();
+ dl_iterate_phdr(dl_iterate_phdr_cb, &b);
+ b.Install();
+}
+
+void InitShadow() {
+ CHECK_EQ(0, GetShadow());
+ CHECK_EQ(0, GetShadowSize());
+
+ uptr vma = GetMaxUserVirtualAddress();
+ // 2 bytes of shadow cover 2**kShadowGranularity bytes of application memory.
+ SetShadowSize((vma >> (kShadowGranularity - 1)) + 1);
+ VReport(1, "CFI: VMA size %zx, shadow size %zx\n", vma, GetShadowSize());
+
+ UpdateShadow();
+}
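+// The size above works out to 2 bytes of shadow (one uint16_t) per
+// kShadowAlign-sized page. For example (illustrative), with a 47-bit VMA:
+//   vma         = 1ULL << 47
+//   shadow size = (vma >> 11) + 1   // == (vma / 4096) pages * 2 bytes
+//               ~ 64 GiB of no-reserve virtual mappings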
+
+THREADLOCAL int in_loader;
+BlockingMutex shadow_update_lock(LINKER_INITIALIZED);
+
+void EnterLoader() {
+ if (in_loader == 0) {
+ shadow_update_lock.Lock();
+ }
+ ++in_loader;
+}
+
+void ExitLoader() {
+ CHECK(in_loader > 0);
+ --in_loader;
+ UpdateShadow();
+ if (in_loader == 0) {
+ shadow_update_lock.Unlock();
+ }
+}
+
+ALWAYS_INLINE void CfiSlowPathCommon(u64 CallSiteTypeId, void *Ptr,
+ void *DiagData) {
+ uptr Addr = (uptr)Ptr;
+ VReport(3, "__cfi_slowpath: %llx, %p\n", CallSiteTypeId, Ptr);
+ ShadowValue sv = ShadowValue::load(Addr);
+ if (sv.is_invalid()) {
+ VReport(1, "CFI: invalid memory region for a check target: %p\n", Ptr);
+#ifdef CFI_ENABLE_DIAG
+ if (DiagData) {
+ __ubsan_handle_cfi_check_fail(
+ reinterpret_cast<__ubsan::CFICheckFailData *>(DiagData), Addr, false);
+ return;
+ }
+#endif
+ Trap();
+ }
+ if (sv.is_unchecked()) {
+ VReport(2, "CFI: unchecked call (shadow=FFFF): %p\n", Ptr);
+ return;
+ }
+ CFICheckFn cfi_check = sv.get_cfi_check();
+ VReport(2, "__cfi_check at %p\n", cfi_check);
+ cfi_check(CallSiteTypeId, Ptr, DiagData);
+}
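+// What cfi_check points to is the target DSO's compiler-emitted __cfi_check.
+// A hedged sketch of its shape (illustrative stub only; the real body is
+// generated per-DSO by the compiler), matching the CFICheckFn typedef above:
+//
+//   extern "C" int __cfi_check(u64 CallSiteTypeId, void *Addr,
+//                              void *DiagData) {
+//     // Generated: test Addr against the jump table / vtable range
+//     // registered for CallSiteTypeId; trap or report via DiagData on
+//     // mismatch.
+//     return 0;  // reached only for valid targets
+//   }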
+
+void InitializeFlags() {
+ SetCommonFlagsDefaults();
+#ifdef CFI_ENABLE_DIAG
+ __ubsan::Flags *uf = __ubsan::flags();
+ uf->SetDefaults();
+#endif
+
+ FlagParser cfi_parser;
+ RegisterCommonFlags(&cfi_parser);
+ cfi_parser.ParseStringFromEnv("CFI_OPTIONS");
+
+#ifdef CFI_ENABLE_DIAG
+ FlagParser ubsan_parser;
+ __ubsan::RegisterUbsanFlags(&ubsan_parser, uf);
+ RegisterCommonFlags(&ubsan_parser);
+
+ const char *ubsan_default_options = __ubsan::MaybeCallUbsanDefaultOptions();
+ ubsan_parser.ParseString(ubsan_default_options);
+ ubsan_parser.ParseStringFromEnv("UBSAN_OPTIONS");
+#endif
+
+ InitializeCommonFlags();
+
+ if (Verbosity())
+ ReportUnrecognizedFlags();
+
+ if (common_flags()->help) {
+ cfi_parser.PrintFlagDescriptions();
+ }
+}
+
+} // namespace __cfi
+
+using namespace __cfi;
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE void
+__cfi_slowpath(u64 CallSiteTypeId, void *Ptr) {
+ CfiSlowPathCommon(CallSiteTypeId, Ptr, nullptr);
+}
+
+#ifdef CFI_ENABLE_DIAG
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE void
+__cfi_slowpath_diag(u64 CallSiteTypeId, void *Ptr, void *DiagData) {
+ CfiSlowPathCommon(CallSiteTypeId, Ptr, DiagData);
+}
+#endif
+
+static void EnsureInterceptorsInitialized();
+
+// Setup shadow for dlopen()ed libraries.
+// The actual shadow setup happens after dlopen() returns, which means that
+// a library cannot be a target of any CFI checks while its constructors are
+// running. It's unclear how to fix this without some extra help from libc.
+// In glibc, mmap inside dlopen is not interceptable.
+// Maybe a seccomp-bpf filter?
+// We could insert a high-priority constructor into the library, but that would
+// not help with the uninstrumented libraries.
+INTERCEPTOR(void*, dlopen, const char *filename, int flag) {
+ EnsureInterceptorsInitialized();
+ EnterLoader();
+ void *handle = REAL(dlopen)(filename, flag);
+ ExitLoader();
+ return handle;
+}
+
+INTERCEPTOR(int, dlclose, void *handle) {
+ EnsureInterceptorsInitialized();
+ EnterLoader();
+ int res = REAL(dlclose)(handle);
+ ExitLoader();
+ return res;
+}
+
+static BlockingMutex interceptor_init_lock(LINKER_INITIALIZED);
+static bool interceptors_inited = false;
+
+static void EnsureInterceptorsInitialized() {
+ BlockingMutexLock lock(&interceptor_init_lock);
+ if (interceptors_inited)
+ return;
+
+ INTERCEPT_FUNCTION(dlopen);
+ INTERCEPT_FUNCTION(dlclose);
+
+ interceptors_inited = true;
+}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+#if !SANITIZER_CAN_USE_PREINIT_ARRAY
+// On ELF platforms, the constructor is invoked using .preinit_array (see below)
+__attribute__((constructor(0)))
+#endif
+void __cfi_init() {
+ SanitizerToolName = "CFI";
+ InitializeFlags();
+ InitShadow();
+
+#ifdef CFI_ENABLE_DIAG
+ __ubsan::InitAsPlugin();
+#endif
+}
+
+#if SANITIZER_CAN_USE_PREINIT_ARRAY
+// On ELF platforms, run cfi initialization before any other constructors.
+// On other platforms we use the constructor attribute to arrange to run our
+// initialization early.
+extern "C" {
+__attribute__((section(".preinit_array"),
+ used)) void (*__cfi_preinit)(void) = __cfi_init;
+}
+#endif
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/cfi/cfi_blacklist.txt
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/cfi/cfi_blacklist.txt (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/cfi/cfi_blacklist.txt (revision 351984)
@@ -0,0 +1,17 @@
+[cfi-unrelated-cast]
+# The specification of std::get_temporary_buffer mandates a cast to
+# uninitialized T* (libstdc++, MSVC stdlib).
+fun:_ZSt20get_temporary_buffer*
+fun:*get_temporary_buffer@.*@std@@*
+
+# STL address-of magic (libstdc++).
+fun:*__addressof*
+
+# Windows C++ stdlib headers that contain bad unrelated casts.
+src:*xmemory0
+src:*xstddef
+
+# std::_Sp_counted_ptr_inplace::_Sp_counted_ptr_inplace() (libstdc++).
+# This ctor is used by std::make_shared and needs to cast to uninitialized T*
+# in order to call std::allocator_traits<T>::construct.
+fun:_ZNSt23_Sp_counted_ptr_inplace*
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/cfi/cfi_blacklist.txt
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/crt/crtbegin.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/crt/crtbegin.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/crt/crtbegin.c (revision 351984)
@@ -0,0 +1,97 @@
+//===-- crtbegin.c - Start of constructors and destructors ----------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include <stddef.h>
+
+__attribute__((visibility("hidden"))) void *__dso_handle = &__dso_handle;
+
+__extension__ static void *__EH_FRAME_LIST__[]
+ __attribute__((section(".eh_frame"), aligned(sizeof(void *)))) = {};
+
+extern void __register_frame_info(const void *, void *) __attribute__((weak));
+extern void *__deregister_frame_info(const void *) __attribute__((weak));
+
+#ifndef CRT_HAS_INITFINI_ARRAY
+typedef void (*fp)(void);
+
+static fp __CTOR_LIST__[]
+ __attribute__((section(".ctors"), aligned(sizeof(fp)))) = {(fp)-1};
+extern fp __CTOR_LIST_END__[];
+#endif
+
+extern void __cxa_finalize(void *) __attribute__((weak));
+
+static void __attribute__((used)) __do_init() {
+ static _Bool __initialized;
+ if (__builtin_expect(__initialized, 0))
+ return;
+ __initialized = 1;
+
+ static struct { void *p[8]; } __object;
+ if (__register_frame_info)
+ __register_frame_info(__EH_FRAME_LIST__, &__object);
+
+#ifndef CRT_HAS_INITFINI_ARRAY
+ const size_t n = __CTOR_LIST_END__ - __CTOR_LIST__ - 1;
+ for (size_t i = n; i >= 1; i--) __CTOR_LIST__[i]();
+#endif
+}
+
+#ifdef CRT_HAS_INITFINI_ARRAY
+__attribute__((section(".init_array"),
+ used)) static void (*__init)(void) = __do_init;
+#else // CRT_HAS_INITFINI_ARRAY
+#if defined(__i386__) || defined(__x86_64__)
+__asm__(".pushsection .init,\"ax\",@progbits\n\t"
+ "call " __USER_LABEL_PREFIX__ "__do_init\n\t"
+ ".popsection");
+#elif defined(__arm__)
+__asm__(".pushsection .init,\"ax\",%progbits\n\t"
+ "bl " __USER_LABEL_PREFIX__ "__do_init\n\t"
+ ".popsection");
+#endif
+#endif // CRT_HAS_INITFINI_ARRAY
+
+#ifndef CRT_HAS_INITFINI_ARRAY
+static fp __DTOR_LIST__[]
+ __attribute__((section(".dtors"), aligned(sizeof(fp)))) = {(fp)-1};
+extern fp __DTOR_LIST_END__[];
+#endif
+
+static void __attribute__((used)) __do_fini() {
+ static _Bool __finalized;
+ if (__builtin_expect(__finalized, 0))
+ return;
+ __finalized = 1;
+
+ if (__cxa_finalize)
+ __cxa_finalize(__dso_handle);
+
+#ifndef CRT_HAS_INITFINI_ARRAY
+ if (__deregister_frame_info)
+ __deregister_frame_info(__EH_FRAME_LIST__);
+
+ const size_t n = __DTOR_LIST_END__ - __DTOR_LIST__ - 1;
+ for (size_t i = 1; i <= n; i++) __DTOR_LIST__[i]();
+#endif
+}
+
+#ifdef CRT_HAS_INITFINI_ARRAY
+__attribute__((section(".fini_array"),
+ used)) static void (*__fini)(void) = __do_fini;
+#else // CRT_HAS_INITFINI_ARRAY
+#if defined(__i386__) || defined(__x86_64__)
+__asm__(".pushsection .fini,\"ax\",@progbits\n\t"
+ "call " __USER_LABEL_PREFIX__ "__do_fini\n\t"
+ ".popsection");
+#elif defined(__arm__)
+__asm__(".pushsection .fini,\"ax\",%progbits\n\t"
+ "bl " __USER_LABEL_PREFIX__ "__do_fini\n\t"
+ ".popsection");
+#endif
+#endif // CRT_HAS_INITFINI_ARRAY
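+// Conceptual .ctors layout after linking (illustrative), which is why
+// __do_init above walks entries from [n] down to [1]:
+//   __CTOR_LIST__[0]     == (fp)-1        // sentinel, from crtbegin.c
+//   __CTOR_LIST__[1..n]  == constructors  // from all other objects
+//   __CTOR_LIST_END__[0] == 0             // terminator, from crtend.c
+// giving n = __CTOR_LIST_END__ - __CTOR_LIST__ - 1.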
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/crt/crtend.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/crt/crtend.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/crt/crtend.c (revision 351984)
@@ -0,0 +1,22 @@
+//===-- crtend.c - End of constructors and destructors --------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include <stdint.h>
+
+// Put a 4-byte zero, which is the FDE length field, at the end as a terminator.
+const int32_t __EH_FRAME_LIST_END__[]
+ __attribute__((section(".eh_frame"), aligned(sizeof(int32_t)),
+ visibility("hidden"), used)) = {0};
+
+#ifndef CRT_HAS_INITFINI_ARRAY
+typedef void (*fp)(void);
+fp __CTOR_LIST_END__[]
+ __attribute__((section(".ctors"), visibility("hidden"), used)) = {0};
+fp __DTOR_LIST_END__[]
+ __attribute__((section(".dtors"), visibility("hidden"), used)) = {0};
+#endif
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/dfsan/dfsan.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/dfsan/dfsan.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/dfsan/dfsan.cc (revision 351984)
@@ -0,0 +1,460 @@
+//===-- dfsan.cc ----------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of DataFlowSanitizer.
+//
+// DataFlowSanitizer runtime. This file defines the public interface to
+// DataFlowSanitizer as well as the definition of certain runtime functions
+// called automatically by the compiler (specifically the instrumentation pass
+// in llvm/lib/Transforms/Instrumentation/DataFlowSanitizer.cpp).
+//
+// The public interface is defined in include/sanitizer/dfsan_interface.h whose
+// functions are prefixed dfsan_ while the compiler interface functions are
+// prefixed __dfsan_.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_atomic.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_file.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_flag_parser.h"
+#include "sanitizer_common/sanitizer_libc.h"
+
+#include "dfsan/dfsan.h"
+
+using namespace __dfsan;
+
+typedef atomic_uint16_t atomic_dfsan_label;
+static const dfsan_label kInitializingLabel = -1;
+
+static const uptr kNumLabels = 1 << (sizeof(dfsan_label) * 8);
+
+static atomic_dfsan_label __dfsan_last_label;
+static dfsan_label_info __dfsan_label_info[kNumLabels];
+
+Flags __dfsan::flags_data;
+
+SANITIZER_INTERFACE_ATTRIBUTE THREADLOCAL dfsan_label __dfsan_retval_tls;
+SANITIZER_INTERFACE_ATTRIBUTE THREADLOCAL dfsan_label __dfsan_arg_tls[64];
+
+SANITIZER_INTERFACE_ATTRIBUTE uptr __dfsan_shadow_ptr_mask;
+
+// On Linux/x86_64, memory is laid out as follows:
+//
+// +--------------------+ 0x800000000000 (top of memory)
+// | application memory |
+// +--------------------+ 0x700000008000 (kAppAddr)
+// | |
+// | unused |
+// | |
+// +--------------------+ 0x200200000000 (kUnusedAddr)
+// | union table |
+// +--------------------+ 0x200000000000 (kUnionTableAddr)
+// | shadow memory |
+// +--------------------+ 0x000000010000 (kShadowAddr)
+// | reserved by kernel |
+// +--------------------+ 0x000000000000
+//
+// To derive a shadow memory address from an application memory address,
+// bits 44-46 are cleared to bring the address into the range
+// [0x000000008000,0x100000000000). Then the address is shifted left by 1 to
+// account for the double byte representation of shadow labels and move the
+// address into the shadow memory range. See the function shadow_for below.
+
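+// Concrete example of that derivation (assuming the x86_64 shadow mask is
+// ~0x700000000000, i.e. clearing bits 44-46; see shadow_for() in dfsan.h):
+//   app addr 0x700000008000 (kAppAddr)
+//     masked : 0x700000008000 & ~0x700000000000 = 0x000000008000
+//     shifted: 0x000000008000 << 1              = 0x000000010000
+//   which is kShadowAddr, so the first app byte maps to the first label.
+//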
+// On Linux/MIPS64, memory is laid out as follows:
+//
+// +--------------------+ 0x10000000000 (top of memory)
+// | application memory |
+// +--------------------+ 0xF000008000 (kAppAddr)
+// | |
+// | unused |
+// | |
+// +--------------------+ 0x2200000000 (kUnusedAddr)
+// | union table |
+// +--------------------+ 0x2000000000 (kUnionTableAddr)
+// | shadow memory |
+// +--------------------+ 0x0000010000 (kShadowAddr)
+// | reserved by kernel |
+// +--------------------+ 0x0000000000
+
+// On Linux/AArch64 (39-bit VMA), memory is laid out as follows:
+//
+// +--------------------+ 0x8000000000 (top of memory)
+// | application memory |
+// +--------------------+ 0x7000008000 (kAppAddr)
+// | |
+// | unused |
+// | |
+// +--------------------+ 0x1200000000 (kUnusedAddr)
+// | union table |
+// +--------------------+ 0x1000000000 (kUnionTableAddr)
+// | shadow memory |
+// +--------------------+ 0x0000010000 (kShadowAddr)
+// | reserved by kernel |
+// +--------------------+ 0x0000000000
+
+// On Linux/AArch64 (42-bit VMA), memory is laid out as follows:
+//
+// +--------------------+ 0x40000000000 (top of memory)
+// | application memory |
+// +--------------------+ 0x3ff00008000 (kAppAddr)
+// | |
+// | unused |
+// | |
+// +--------------------+ 0x1200000000 (kUnusedAddr)
+// | union table |
+// +--------------------+ 0x8000000000 (kUnionTableAddr)
+// | shadow memory |
+// +--------------------+ 0x0000010000 (kShadowAddr)
+// | reserved by kernel |
+// +--------------------+ 0x0000000000
+
+// On Linux/AArch64 (48-bit VMA), memory is laid out as follows:
+//
+// +--------------------+ 0x1000000000000 (top of memory)
+// | application memory |
+// +--------------------+ 0xffff00008000 (kAppAddr)
+// | unused |
+// +--------------------+ 0xaaaab0000000 (top of PIE address)
+// | application PIE |
+// +--------------------+ 0xaaaaa0000000 (base of PIE address)
+// | |
+// | unused |
+// | |
+// +--------------------+ 0x1200000000 (kUnusedAddr)
+// | union table |
+// +--------------------+ 0x8000000000 (kUnionTableAddr)
+// | shadow memory |
+// +--------------------+ 0x0000010000 (kShadowAddr)
+// | reserved by kernel |
+// +--------------------+ 0x0000000000
+
+typedef atomic_dfsan_label dfsan_union_table_t[kNumLabels][kNumLabels];
+
+#ifdef DFSAN_RUNTIME_VMA
+// Runtime detected VMA size.
+int __dfsan::vmaSize;
+#endif
+
+static uptr UnusedAddr() {
+ return MappingArchImpl<MAPPING_UNION_TABLE_ADDR>()
+ + sizeof(dfsan_union_table_t);
+}
+
+static atomic_dfsan_label *union_table(dfsan_label l1, dfsan_label l2) {
+ return &(*(dfsan_union_table_t *) UnionTableAddr())[l1][l2];
+}
+
+// Checks we do not run out of labels.
+static void dfsan_check_label(dfsan_label label) {
+ if (label == kInitializingLabel) {
+ Report("FATAL: DataFlowSanitizer: out of labels\n");
+ Die();
+ }
+}
+
+// Resolves the union of two unequal labels. Nonequality is a precondition for
+// this function (the instrumentation pass inlines the equality test).
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+dfsan_label __dfsan_union(dfsan_label l1, dfsan_label l2) {
+ if (flags().fast16labels)
+ return l1 | l2;
+ DCHECK_NE(l1, l2);
+
+ if (l1 == 0)
+ return l2;
+ if (l2 == 0)
+ return l1;
+
+ if (l1 > l2)
+ Swap(l1, l2);
+
+ atomic_dfsan_label *table_ent = union_table(l1, l2);
+ // We need to deal with the case where two threads concurrently request
+ // a union of the same pair of labels. If the table entry is uninitialized
+ // (i.e. 0), use a compare-exchange to set the entry to kInitializingLabel
+ // (i.e. -1) to mark that we are initializing it.
+ dfsan_label label = 0;
+ if (atomic_compare_exchange_strong(table_ent, &label, kInitializingLabel,
+ memory_order_acquire)) {
+ // Check whether l2 subsumes l1. We don't need to check whether l1
+ // subsumes l2 because we are guaranteed here that l1 < l2, and (at least
+ // in the cases we are interested in) a label may only subsume labels
+ // created earlier (i.e. with a lower numerical value).
+ if (__dfsan_label_info[l2].l1 == l1 ||
+ __dfsan_label_info[l2].l2 == l1) {
+ label = l2;
+ } else {
+ label =
+ atomic_fetch_add(&__dfsan_last_label, 1, memory_order_relaxed) + 1;
+ dfsan_check_label(label);
+ __dfsan_label_info[label].l1 = l1;
+ __dfsan_label_info[label].l2 = l2;
+ }
+ atomic_store(table_ent, label, memory_order_release);
+ } else if (label == kInitializingLabel) {
+ // Another thread is initializing the entry. Wait until it is finished.
+ do {
+ internal_sched_yield();
+ label = atomic_load(table_ent, memory_order_acquire);
+ } while (label == kInitializingLabel);
+ }
+ return label;
+}
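+// Illustrative traces of the paths above:
+//   __dfsan_union(0, 5)            -> 5      (zero is the empty label)
+//   __dfsan_union(7, 3)            -> swapped to (3, 7) first
+//   __dfsan_union(3, 9), label 9's l1 == 3
+//                                  -> 9      (9 already subsumes 3)
+//   otherwise                      -> fresh label k with {l1, l2} recorded
+//   (with fast16labels set, the result is simply l1 | l2)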
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+dfsan_label __dfsan_union_load(const dfsan_label *ls, uptr n) {
+ dfsan_label label = ls[0];
+ for (uptr i = 1; i != n; ++i) {
+ dfsan_label next_label = ls[i];
+ if (label != next_label)
+ label = __dfsan_union(label, next_label);
+ }
+ return label;
+}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+void __dfsan_unimplemented(char *fname) {
+ if (flags().warn_unimplemented)
+ Report("WARNING: DataFlowSanitizer: call to uninstrumented function %s\n",
+ fname);
+}
+
+// Use '-mllvm -dfsan-debug-nonzero-labels' and break on this function
+// to try to figure out where labels are being introduced in a nominally
+// label-free program.
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __dfsan_nonzero_label() {
+ if (flags().warn_nonzero_labels)
+ Report("WARNING: DataFlowSanitizer: saw nonzero label\n");
+}
+
+// Indirect call to an uninstrumented vararg function. We don't have a way of
+// handling these at the moment.
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE void
+__dfsan_vararg_wrapper(const char *fname) {
+ Report("FATAL: DataFlowSanitizer: unsupported indirect call to vararg "
+ "function %s\n", fname);
+ Die();
+}
+
+// Like __dfsan_union, but for use from the client or custom functions. Hence
+// the equality comparison is done here before calling __dfsan_union.
+SANITIZER_INTERFACE_ATTRIBUTE dfsan_label
+dfsan_union(dfsan_label l1, dfsan_label l2) {
+ if (l1 == l2)
+ return l1;
+ return __dfsan_union(l1, l2);
+}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+dfsan_label dfsan_create_label(const char *desc, void *userdata) {
+ dfsan_label label =
+ atomic_fetch_add(&__dfsan_last_label, 1, memory_order_relaxed) + 1;
+ dfsan_check_label(label);
+ __dfsan_label_info[label].l1 = __dfsan_label_info[label].l2 = 0;
+ __dfsan_label_info[label].desc = desc;
+ __dfsan_label_info[label].userdata = userdata;
+ return label;
+}
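+// A minimal usage sketch of the public interface above (illustrative,
+// assuming a program built with -fsanitize=dataflow; see
+// include/sanitizer/dfsan_interface.h):
+//
+//   #include <sanitizer/dfsan_interface.h>
+//   #include <assert.h>
+//
+//   int main(void) {
+//     int i = 1;
+//     dfsan_label i_label = dfsan_create_label("i", 0);
+//     dfsan_set_label(i_label, &i, sizeof(i));
+//     int j = i + 2;  // instrumentation propagates i's label into j
+//     assert(dfsan_has_label(dfsan_read_label(&j, sizeof(j)), i_label));
+//     return 0;
+//   }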
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+void __dfsan_set_label(dfsan_label label, void *addr, uptr size) {
+ for (dfsan_label *labelp = shadow_for(addr); size != 0; --size, ++labelp) {
+ // Don't write the label if it is already the value we need it to be.
+ // In a program where most addresses are not labeled, it is common that
+ // a page of shadow memory is entirely zeroed. The Linux copy-on-write
+ // implementation will share all of the zeroed pages, making a copy of a
+ // page when any value is written. The un-sharing will happen even if
+ // the value written does not change the value in memory. Avoiding the
+ // write when both |label| and |*labelp| are zero dramatically reduces
+ // the amount of real memory used by large programs.
+ if (label == *labelp)
+ continue;
+
+ *labelp = label;
+ }
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void dfsan_set_label(dfsan_label label, void *addr, uptr size) {
+ __dfsan_set_label(label, addr, size);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void dfsan_add_label(dfsan_label label, void *addr, uptr size) {
+ for (dfsan_label *labelp = shadow_for(addr); size != 0; --size, ++labelp)
+ if (*labelp != label)
+ *labelp = __dfsan_union(*labelp, label);
+}
+
+// Unlike the other dfsan interface functions the behavior of this function
+// depends on the label of one of its arguments. Hence it is implemented as a
+// custom function.
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE dfsan_label
+__dfsw_dfsan_get_label(long data, dfsan_label data_label,
+ dfsan_label *ret_label) {
+ *ret_label = 0;
+ return data_label;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE dfsan_label
+dfsan_read_label(const void *addr, uptr size) {
+ if (size == 0)
+ return 0;
+ return __dfsan_union_load(shadow_for(addr), size);
+}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+const struct dfsan_label_info *dfsan_get_label_info(dfsan_label label) {
+ return &__dfsan_label_info[label];
+}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE int
+dfsan_has_label(dfsan_label label, dfsan_label elem) {
+ if (label == elem)
+ return true;
+ const dfsan_label_info *info = dfsan_get_label_info(label);
+ if (info->l1 != 0) {
+ return dfsan_has_label(info->l1, elem) || dfsan_has_label(info->l2, elem);
+ } else {
+ return false;
+ }
+}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE dfsan_label
+dfsan_has_label_with_desc(dfsan_label label, const char *desc) {
+ const dfsan_label_info *info = dfsan_get_label_info(label);
+ if (info->l1 != 0) {
+ return dfsan_has_label_with_desc(info->l1, desc) ||
+ dfsan_has_label_with_desc(info->l2, desc);
+ } else {
+ return internal_strcmp(desc, info->desc) == 0;
+ }
+}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE uptr
+dfsan_get_label_count(void) {
+ dfsan_label max_label_allocated =
+ atomic_load(&__dfsan_last_label, memory_order_relaxed);
+
+ return static_cast<uptr>(max_label_allocated);
+}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE void
+dfsan_dump_labels(int fd) {
+ dfsan_label last_label =
+ atomic_load(&__dfsan_last_label, memory_order_relaxed);
+
+ for (uptr l = 1; l <= last_label; ++l) {
+ char buf[64];
+ internal_snprintf(buf, sizeof(buf), "%u %u %u ", l,
+ __dfsan_label_info[l].l1, __dfsan_label_info[l].l2);
+ WriteToFile(fd, buf, internal_strlen(buf));
+ if (__dfsan_label_info[l].l1 == 0 && __dfsan_label_info[l].desc) {
+ WriteToFile(fd, __dfsan_label_info[l].desc,
+ internal_strlen(__dfsan_label_info[l].desc));
+ }
+ WriteToFile(fd, "\n", 1);
+ }
+}
+
+void Flags::SetDefaults() {
+#define DFSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
+#include "dfsan_flags.inc"
+#undef DFSAN_FLAG
+}
+
+static void RegisterDfsanFlags(FlagParser *parser, Flags *f) {
+#define DFSAN_FLAG(Type, Name, DefaultValue, Description) \
+ RegisterFlag(parser, #Name, Description, &f->Name);
+#include "dfsan_flags.inc"
+#undef DFSAN_FLAG
+}
+
+static void InitializeFlags() {
+ SetCommonFlagsDefaults();
+ flags().SetDefaults();
+
+ FlagParser parser;
+ RegisterCommonFlags(&parser);
+ RegisterDfsanFlags(&parser, &flags());
+ parser.ParseStringFromEnv("DFSAN_OPTIONS");
+ InitializeCommonFlags();
+ if (Verbosity()) ReportUnrecognizedFlags();
+ if (common_flags()->help) parser.PrintFlagDescriptions();
+}
+
+static void InitializePlatformEarly() {
+ AvoidCVE_2016_2143();
+#ifdef DFSAN_RUNTIME_VMA
+ __dfsan::vmaSize =
+ (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
+ if (__dfsan::vmaSize == 39 || __dfsan::vmaSize == 42 ||
+ __dfsan::vmaSize == 48) {
+ __dfsan_shadow_ptr_mask = ShadowMask();
+ } else {
+ Printf("FATAL: DataFlowSanitizer: unsupported VMA range\n");
+ Printf("FATAL: Found %d - Supported 39, 42, and 48\n", __dfsan::vmaSize);
+ Die();
+ }
+#endif
+}
+
+static void dfsan_fini() {
+ if (internal_strcmp(flags().dump_labels_at_exit, "") != 0) {
+ fd_t fd = OpenFile(flags().dump_labels_at_exit, WrOnly);
+ if (fd == kInvalidFd) {
+ Report("WARNING: DataFlowSanitizer: unable to open output file %s\n",
+ flags().dump_labels_at_exit);
+ return;
+ }
+
+ Report("INFO: DataFlowSanitizer: dumping labels to %s\n",
+ flags().dump_labels_at_exit);
+ dfsan_dump_labels(fd);
+ CloseFile(fd);
+ }
+}
+
+extern "C" void dfsan_flush() {
+ UnmapOrDie((void*)ShadowAddr(), UnusedAddr() - ShadowAddr());
+ if (!MmapFixedNoReserve(ShadowAddr(), UnusedAddr() - ShadowAddr()))
+ Die();
+}
+
+static void dfsan_init(int argc, char **argv, char **envp) {
+ InitializeFlags();
+
+ ::InitializePlatformEarly();
+
+ if (!MmapFixedNoReserve(ShadowAddr(), UnusedAddr() - ShadowAddr()))
+ Die();
+
+ // Protect the region of memory we don't use, to preserve the one-to-one
+ // mapping from application to shadow memory. But if ASLR is disabled, Linux
+ // will load our executable in the middle of our unused region. This mostly
+ // works so long as the program doesn't use too much memory. We support this
+ // case by disabling memory protection when ASLR is disabled.
+ uptr init_addr = (uptr)&dfsan_init;
+ if (!(init_addr >= UnusedAddr() && init_addr < AppAddr()))
+ MmapFixedNoAccess(UnusedAddr(), AppAddr() - UnusedAddr());
+
+ InitializeInterceptors();
+
+ // Register the fini callback to run when the program terminates successfully
+ // or it is killed by the runtime.
+ Atexit(dfsan_fini);
+ AddDieCallback(dfsan_fini);
+
+ __dfsan_label_info[kInitializingLabel].desc = "<init label>";
+}
+
+#if SANITIZER_CAN_USE_PREINIT_ARRAY
+__attribute__((section(".preinit_array"), used))
+static void (*dfsan_init_ptr)(int, char **, char **) = dfsan_init;
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/dfsan/dfsan.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/dfsan/dfsan.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/dfsan/dfsan.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/dfsan/dfsan.h (revision 351984)
@@ -0,0 +1,72 @@
+//===-- dfsan.h -------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of DataFlowSanitizer.
+//
+// Private DFSan header.
+//===----------------------------------------------------------------------===//
+
+#ifndef DFSAN_H
+#define DFSAN_H
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "dfsan_platform.h"
+
+using __sanitizer::uptr;
+using __sanitizer::u16;
+
+// Copy declarations from public sanitizer/dfsan_interface.h header here.
+typedef u16 dfsan_label;
+
+struct dfsan_label_info {
+ dfsan_label l1;
+ dfsan_label l2;
+ const char *desc;
+ void *userdata;
+};
+
+extern "C" {
+void dfsan_add_label(dfsan_label label, void *addr, uptr size);
+void dfsan_set_label(dfsan_label label, void *addr, uptr size);
+dfsan_label dfsan_read_label(const void *addr, uptr size);
+dfsan_label dfsan_union(dfsan_label l1, dfsan_label l2);
+} // extern "C"
+
+template <typename T>
+void dfsan_set_label(dfsan_label label, T &data) { // NOLINT
+ dfsan_set_label(label, (void *)&data, sizeof(T));
+}
+
+namespace __dfsan {
+
+void InitializeInterceptors();
+
+inline dfsan_label *shadow_for(void *ptr) {
+ return (dfsan_label *) ((((uptr) ptr) & ShadowMask()) << 1);
+}
+
+inline const dfsan_label *shadow_for(const void *ptr) {
+ return shadow_for(const_cast<void *>(ptr));
+}
+
+struct Flags {
+#define DFSAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
+#include "dfsan_flags.inc"
+#undef DFSAN_FLAG
+
+ void SetDefaults();
+};
+
+extern Flags flags_data;
+inline Flags &flags() {
+ return flags_data;
+}
+
+} // namespace __dfsan
+
+#endif // DFSAN_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/dfsan/dfsan.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/dfsan/dfsan_custom.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/dfsan/dfsan_custom.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/dfsan/dfsan_custom.cc (revision 351984)
@@ -0,0 +1,1156 @@
+//===-- dfsan_custom.cc ---------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of DataFlowSanitizer.
+//
+// This file defines the custom functions listed in done_abilist.txt.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_linux.h"
+
+#include "dfsan/dfsan.h"
+
+#include <arpa/inet.h>
+#include <assert.h>
+#include <ctype.h>
+#include <dlfcn.h>
+#include <link.h>
+#include <poll.h>
+#include <pthread.h>
+#include <pwd.h>
+#include <sched.h>
+#include <signal.h>
+#include <stdarg.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/resource.h>
+#include <sys/select.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <time.h>
+#include <unistd.h>
+
+using namespace __dfsan;
+
+#define CALL_WEAK_INTERCEPTOR_HOOK(f, ...) \
+ do { \
+ if (f) \
+ f(__VA_ARGS__); \
+ } while (false)
+#define DECLARE_WEAK_INTERCEPTOR_HOOK(f, ...) \
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE void f(__VA_ARGS__);
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE int
+__dfsw_stat(const char *path, struct stat *buf, dfsan_label path_label,
+ dfsan_label buf_label, dfsan_label *ret_label) {
+ int ret = stat(path, buf);
+ if (ret == 0)
+ dfsan_set_label(0, buf, sizeof(struct stat));
+ *ret_label = 0;
+ return ret;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_fstat(int fd, struct stat *buf,
+ dfsan_label fd_label,
+ dfsan_label buf_label,
+ dfsan_label *ret_label) {
+ int ret = fstat(fd, buf);
+ if (ret == 0)
+ dfsan_set_label(0, buf, sizeof(struct stat));
+ *ret_label = 0;
+ return ret;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE char *__dfsw_strchr(const char *s, int c,
+ dfsan_label s_label,
+ dfsan_label c_label,
+ dfsan_label *ret_label) {
+ for (size_t i = 0;; ++i) {
+ if (s[i] == c || s[i] == 0) {
+ if (flags().strict_data_dependencies) {
+ *ret_label = s_label;
+ } else {
+ *ret_label = dfsan_union(dfsan_read_label(s, i + 1),
+ dfsan_union(s_label, c_label));
+ }
+ return s[i] == 0 ? nullptr : const_cast<char *>(s+i);
+ }
+ }
+}
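+// Illustration of the flag above (hypothetical labels): with s = "abc" where
+// only s[1] carries label L, and c == 'b' (match at i == 1):
+//   strict_data_dependencies=1: *ret_label = s_label only; the data label L
+//     at the match position is not folded in.
+//   strict_data_dependencies=0: *ret_label also unions the labels of s[0..1]
+//     and of c, so L taints the returned pointer.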
+
+DECLARE_WEAK_INTERCEPTOR_HOOK(dfsan_weak_hook_memcmp, uptr caller_pc,
+ const void *s1, const void *s2, size_t n,
+ dfsan_label s1_label, dfsan_label s2_label,
+ dfsan_label n_label)
+
+SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_memcmp(const void *s1, const void *s2,
+ size_t n, dfsan_label s1_label,
+ dfsan_label s2_label,
+ dfsan_label n_label,
+ dfsan_label *ret_label) {
+ CALL_WEAK_INTERCEPTOR_HOOK(dfsan_weak_hook_memcmp, GET_CALLER_PC(), s1, s2, n,
+ s1_label, s2_label, n_label);
+ const char *cs1 = (const char *) s1, *cs2 = (const char *) s2;
+ for (size_t i = 0; i != n; ++i) {
+ if (cs1[i] != cs2[i]) {
+ if (flags().strict_data_dependencies) {
+ *ret_label = 0;
+ } else {
+ *ret_label = dfsan_union(dfsan_read_label(cs1, i + 1),
+ dfsan_read_label(cs2, i + 1));
+ }
+ return cs1[i] - cs2[i];
+ }
+ }
+
+ if (flags().strict_data_dependencies) {
+ *ret_label = 0;
+ } else {
+ *ret_label = dfsan_union(dfsan_read_label(cs1, n),
+ dfsan_read_label(cs2, n));
+ }
+ return 0;
+}
+
+DECLARE_WEAK_INTERCEPTOR_HOOK(dfsan_weak_hook_strcmp, uptr caller_pc,
+ const char *s1, const char *s2,
+ dfsan_label s1_label, dfsan_label s2_label)
+
+SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_strcmp(const char *s1, const char *s2,
+ dfsan_label s1_label,
+ dfsan_label s2_label,
+ dfsan_label *ret_label) {
+ CALL_WEAK_INTERCEPTOR_HOOK(dfsan_weak_hook_strcmp, GET_CALLER_PC(), s1, s2,
+ s1_label, s2_label);
+ for (size_t i = 0;; ++i) {
+ if (s1[i] != s2[i] || s1[i] == 0 || s2[i] == 0) {
+ if (flags().strict_data_dependencies) {
+ *ret_label = 0;
+ } else {
+ *ret_label = dfsan_union(dfsan_read_label(s1, i + 1),
+ dfsan_read_label(s2, i + 1));
+ }
+ return s1[i] - s2[i];
+ }
+ }
+ return 0;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE int
+__dfsw_strcasecmp(const char *s1, const char *s2, dfsan_label s1_label,
+ dfsan_label s2_label, dfsan_label *ret_label) {
+ for (size_t i = 0;; ++i) {
+ if (tolower(s1[i]) != tolower(s2[i]) || s1[i] == 0 || s2[i] == 0) {
+ if (flags().strict_data_dependencies) {
+ *ret_label = 0;
+ } else {
+ *ret_label = dfsan_union(dfsan_read_label(s1, i + 1),
+ dfsan_read_label(s2, i + 1));
+ }
+ return s1[i] - s2[i];
+ }
+ }
+ return 0;
+}
+
+DECLARE_WEAK_INTERCEPTOR_HOOK(dfsan_weak_hook_strncmp, uptr caller_pc,
+ const char *s1, const char *s2, size_t n,
+ dfsan_label s1_label, dfsan_label s2_label,
+ dfsan_label n_label)
+
+SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_strncmp(const char *s1, const char *s2,
+ size_t n, dfsan_label s1_label,
+ dfsan_label s2_label,
+ dfsan_label n_label,
+ dfsan_label *ret_label) {
+ if (n == 0) {
+ *ret_label = 0;
+ return 0;
+ }
+
+ CALL_WEAK_INTERCEPTOR_HOOK(dfsan_weak_hook_strncmp, GET_CALLER_PC(), s1, s2,
+ n, s1_label, s2_label, n_label);
+
+ for (size_t i = 0;; ++i) {
+ if (s1[i] != s2[i] || s1[i] == 0 || s2[i] == 0 || i == n - 1) {
+ if (flags().strict_data_dependencies) {
+ *ret_label = 0;
+ } else {
+ *ret_label = dfsan_union(dfsan_read_label(s1, i + 1),
+ dfsan_read_label(s2, i + 1));
+ }
+ return s1[i] - s2[i];
+ }
+ }
+ return 0;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE int
+__dfsw_strncasecmp(const char *s1, const char *s2, size_t n,
+ dfsan_label s1_label, dfsan_label s2_label,
+ dfsan_label n_label, dfsan_label *ret_label) {
+ if (n == 0) {
+ *ret_label = 0;
+ return 0;
+ }
+
+ for (size_t i = 0;; ++i) {
+ if (tolower(s1[i]) != tolower(s2[i]) || s1[i] == 0 || s2[i] == 0 ||
+ i == n - 1) {
+ if (flags().strict_data_dependencies) {
+ *ret_label = 0;
+ } else {
+ *ret_label = dfsan_union(dfsan_read_label(s1, i + 1),
+ dfsan_read_label(s2, i + 1));
+ }
+ return s1[i] - s2[i];
+ }
+ }
+ return 0;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE void *__dfsw_calloc(size_t nmemb, size_t size,
+ dfsan_label nmemb_label,
+ dfsan_label size_label,
+ dfsan_label *ret_label) {
+ void *p = calloc(nmemb, size);
+ dfsan_set_label(0, p, nmemb * size);
+ *ret_label = 0;
+ return p;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE size_t
+__dfsw_strlen(const char *s, dfsan_label s_label, dfsan_label *ret_label) {
+ size_t ret = strlen(s);
+ if (flags().strict_data_dependencies) {
+ *ret_label = 0;
+ } else {
+ *ret_label = dfsan_read_label(s, ret + 1);
+ }
+ return ret;
+}
+
+
+static void *dfsan_memcpy(void *dest, const void *src, size_t n) {
+ dfsan_label *sdest = shadow_for(dest);
+ const dfsan_label *ssrc = shadow_for(src);
+ internal_memcpy((void *)sdest, (const void *)ssrc, n * sizeof(dfsan_label));
+ return internal_memcpy(dest, src, n);
+}
+
+static void dfsan_memset(void *s, int c, dfsan_label c_label, size_t n) {
+ internal_memset(s, c, n);
+ dfsan_set_label(c_label, s, n);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__dfsw_memcpy(void *dest, const void *src, size_t n,
+ dfsan_label dest_label, dfsan_label src_label,
+ dfsan_label n_label, dfsan_label *ret_label) {
+ *ret_label = dest_label;
+ return dfsan_memcpy(dest, src, n);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__dfsw_memset(void *s, int c, size_t n,
+ dfsan_label s_label, dfsan_label c_label,
+ dfsan_label n_label, dfsan_label *ret_label) {
+ dfsan_memset(s, c, c_label, n);
+ *ret_label = s_label;
+ return s;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE char *
+__dfsw_strdup(const char *s, dfsan_label s_label, dfsan_label *ret_label) {
+ size_t len = strlen(s);
+ void *p = malloc(len+1);
+ dfsan_memcpy(p, s, len+1);
+ *ret_label = 0;
+ return static_cast<char *>(p);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE char *
+__dfsw_strncpy(char *s1, const char *s2, size_t n, dfsan_label s1_label,
+ dfsan_label s2_label, dfsan_label n_label,
+ dfsan_label *ret_label) {
+ size_t len = strlen(s2);
+ if (len < n) {
+ dfsan_memcpy(s1, s2, len+1);
+ dfsan_memset(s1+len+1, 0, 0, n-len-1);
+ } else {
+ dfsan_memcpy(s1, s2, n);
+ }
+
+ *ret_label = s1_label;
+ return s1;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE ssize_t
+__dfsw_pread(int fd, void *buf, size_t count, off_t offset,
+ dfsan_label fd_label, dfsan_label buf_label,
+ dfsan_label count_label, dfsan_label offset_label,
+ dfsan_label *ret_label) {
+ ssize_t ret = pread(fd, buf, count, offset);
+ if (ret > 0)
+ dfsan_set_label(0, buf, ret);
+ *ret_label = 0;
+ return ret;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE ssize_t
+__dfsw_read(int fd, void *buf, size_t count,
+ dfsan_label fd_label, dfsan_label buf_label,
+ dfsan_label count_label,
+ dfsan_label *ret_label) {
+ ssize_t ret = read(fd, buf, count);
+ if (ret > 0)
+ dfsan_set_label(0, buf, ret);
+ *ret_label = 0;
+ return ret;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_clock_gettime(clockid_t clk_id,
+ struct timespec *tp,
+ dfsan_label clk_id_label,
+ dfsan_label tp_label,
+ dfsan_label *ret_label) {
+ int ret = clock_gettime(clk_id, tp);
+ if (ret == 0)
+ dfsan_set_label(0, tp, sizeof(struct timespec));
+ *ret_label = 0;
+ return ret;
+}
+
+static void unpoison(const void *ptr, uptr size) {
+ dfsan_set_label(0, const_cast<void *>(ptr), size);
+}
+
+// dlopen() ultimately calls mmap() down inside the loader, which generally
+// doesn't participate in dynamic symbol resolution. Therefore we won't
+// intercept its calls to mmap, and we have to hook it here.
+SANITIZER_INTERFACE_ATTRIBUTE void *
+__dfsw_dlopen(const char *filename, int flag, dfsan_label filename_label,
+ dfsan_label flag_label, dfsan_label *ret_label) {
+ void *handle = dlopen(filename, flag);
+ link_map *map = GET_LINK_MAP_BY_DLOPEN_HANDLE(handle);
+ if (map)
+ ForEachMappedRegion(map, unpoison);
+ *ret_label = 0;
+ return handle;
+}
+
+struct pthread_create_info {
+ void *(*start_routine_trampoline)(void *, void *, dfsan_label, dfsan_label *);
+ void *start_routine;
+ void *arg;
+};
+
+static void *pthread_create_cb(void *p) {
+ pthread_create_info pci(*(pthread_create_info *)p);
+ free(p);
+ dfsan_label ret_label;
+ return pci.start_routine_trampoline(pci.start_routine, pci.arg, 0,
+ &ret_label);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_pthread_create(
+ pthread_t *thread, const pthread_attr_t *attr,
+ void *(*start_routine_trampoline)(void *, void *, dfsan_label,
+ dfsan_label *),
+ void *start_routine, void *arg, dfsan_label thread_label,
+ dfsan_label attr_label, dfsan_label start_routine_label,
+ dfsan_label arg_label, dfsan_label *ret_label) {
+ pthread_create_info *pci =
+ (pthread_create_info *)malloc(sizeof(pthread_create_info));
+ pci->start_routine_trampoline = start_routine_trampoline;
+ pci->start_routine = start_routine;
+ pci->arg = arg;
+ int rv = pthread_create(thread, attr, pthread_create_cb, (void *)pci);
+ if (rv != 0)
+ free(pci);
+ *ret_label = 0;
+ return rv;
+}
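+
+// A sketch of the control flow above (illustration only, with a hypothetical
+// worker/arg pair): instrumentation rewrites
+//
+//   pthread_create(&tid, nullptr, worker, arg);
+//
+// into a call to __dfsw_pthread_create with a compiler-generated trampoline
+// for `worker`. The real pthread_create() only ever sees pthread_create_cb,
+// which unpacks the heap-allocated pthread_create_info on the new thread and
+// invokes the trampoline with a zero label for `arg`.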
+
+struct dl_iterate_phdr_info {
+ int (*callback_trampoline)(void *callback, struct dl_phdr_info *info,
+ size_t size, void *data, dfsan_label info_label,
+ dfsan_label size_label, dfsan_label data_label,
+ dfsan_label *ret_label);
+ void *callback;
+ void *data;
+};
+
+int dl_iterate_phdr_cb(struct dl_phdr_info *info, size_t size, void *data) {
+ dl_iterate_phdr_info *dipi = (dl_iterate_phdr_info *)data;
+ dfsan_set_label(0, *info);
+ dfsan_set_label(0, const_cast<char *>(info->dlpi_name),
+ strlen(info->dlpi_name) + 1);
+ dfsan_set_label(
+ 0, const_cast<char *>(reinterpret_cast<const char *>(info->dlpi_phdr)),
+ sizeof(*info->dlpi_phdr) * info->dlpi_phnum);
+ dfsan_label ret_label;
+ return dipi->callback_trampoline(dipi->callback, info, size, dipi->data, 0, 0,
+ 0, &ret_label);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_dl_iterate_phdr(
+ int (*callback_trampoline)(void *callback, struct dl_phdr_info *info,
+ size_t size, void *data, dfsan_label info_label,
+ dfsan_label size_label, dfsan_label data_label,
+ dfsan_label *ret_label),
+ void *callback, void *data, dfsan_label callback_label,
+ dfsan_label data_label, dfsan_label *ret_label) {
+ dl_iterate_phdr_info dipi = { callback_trampoline, callback, data };
+ *ret_label = 0;
+ return dl_iterate_phdr(dl_iterate_phdr_cb, &dipi);
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+char *__dfsw_ctime_r(const time_t *timep, char *buf, dfsan_label timep_label,
+ dfsan_label buf_label, dfsan_label *ret_label) {
+ char *ret = ctime_r(timep, buf);
+ if (ret) {
+ dfsan_set_label(dfsan_read_label(timep, sizeof(time_t)), buf,
+ strlen(buf) + 1);
+ *ret_label = buf_label;
+ } else {
+ *ret_label = 0;
+ }
+ return ret;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+char *__dfsw_fgets(char *s, int size, FILE *stream, dfsan_label s_label,
+ dfsan_label size_label, dfsan_label stream_label,
+ dfsan_label *ret_label) {
+ char *ret = fgets(s, size, stream);
+ if (ret) {
+ dfsan_set_label(0, ret, strlen(ret) + 1);
+ *ret_label = s_label;
+ } else {
+ *ret_label = 0;
+ }
+ return ret;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+char *__dfsw_getcwd(char *buf, size_t size, dfsan_label buf_label,
+ dfsan_label size_label, dfsan_label *ret_label) {
+ char *ret = getcwd(buf, size);
+ if (ret) {
+ dfsan_set_label(0, ret, strlen(ret) + 1);
+ *ret_label = buf_label;
+ } else {
+ *ret_label = 0;
+ }
+ return ret;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+char *__dfsw_get_current_dir_name(dfsan_label *ret_label) {
+ char *ret = get_current_dir_name();
+ if (ret) {
+ dfsan_set_label(0, ret, strlen(ret) + 1);
+ }
+ *ret_label = 0;
+ return ret;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __dfsw_gethostname(char *name, size_t len, dfsan_label name_label,
+ dfsan_label len_label, dfsan_label *ret_label) {
+ int ret = gethostname(name, len);
+ if (ret == 0) {
+ dfsan_set_label(0, name, strlen(name) + 1);
+ }
+ *ret_label = 0;
+ return ret;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __dfsw_getrlimit(int resource, struct rlimit *rlim,
+ dfsan_label resource_label, dfsan_label rlim_label,
+ dfsan_label *ret_label) {
+ int ret = getrlimit(resource, rlim);
+ if (ret == 0) {
+ dfsan_set_label(0, rlim, sizeof(struct rlimit));
+ }
+ *ret_label = 0;
+ return ret;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __dfsw_getrusage(int who, struct rusage *usage, dfsan_label who_label,
+ dfsan_label usage_label, dfsan_label *ret_label) {
+ int ret = getrusage(who, usage);
+ if (ret == 0) {
+ dfsan_set_label(0, usage, sizeof(struct rusage));
+ }
+ *ret_label = 0;
+ return ret;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+char *__dfsw_strcpy(char *dest, const char *src, dfsan_label dst_label,
+ dfsan_label src_label, dfsan_label *ret_label) {
+ char *ret = strcpy(dest, src);
+ if (ret) {
+ internal_memcpy(shadow_for(dest), shadow_for(src),
+ sizeof(dfsan_label) * (strlen(src) + 1));
+ }
+ *ret_label = dst_label;
+ return ret;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+long int __dfsw_strtol(const char *nptr, char **endptr, int base,
+ dfsan_label nptr_label, dfsan_label endptr_label,
+ dfsan_label base_label, dfsan_label *ret_label) {
+ char *tmp_endptr;
+ long int ret = strtol(nptr, &tmp_endptr, base);
+ if (endptr) {
+ *endptr = tmp_endptr;
+ }
+ if (tmp_endptr > nptr) {
+ // If *tmp_endptr is '\0' include its label as well.
+ *ret_label = dfsan_union(
+ base_label,
+ dfsan_read_label(nptr, tmp_endptr - nptr + (*tmp_endptr ? 0 : 1)));
+ } else {
+ *ret_label = 0;
+ }
+ return ret;
+}
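+
+// Example of the rule above (a sketch; the label name "num" is hypothetical):
+// if any consumed digit carries a taint label, the parsed value does too.
+//
+//   char num[] = "42";
+//   dfsan_label l = dfsan_create_label("num", nullptr);
+//   dfsan_set_label(l, num, 2);
+//   long v = strtol(num, nullptr, 10);  // routed here via the abilist
+//   assert(dfsan_has_label(dfsan_get_label(v), l));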
+
+SANITIZER_INTERFACE_ATTRIBUTE
+double __dfsw_strtod(const char *nptr, char **endptr,
+ dfsan_label nptr_label, dfsan_label endptr_label,
+ dfsan_label *ret_label) {
+ char *tmp_endptr;
+ double ret = strtod(nptr, &tmp_endptr);
+ if (endptr) {
+ *endptr = tmp_endptr;
+ }
+ if (tmp_endptr > nptr) {
+ // If *tmp_endptr is '\0' include its label as well.
+ *ret_label = dfsan_read_label(
+ nptr,
+ tmp_endptr - nptr + (*tmp_endptr ? 0 : 1));
+ } else {
+ *ret_label = 0;
+ }
+ return ret;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+long long int __dfsw_strtoll(const char *nptr, char **endptr, int base,
+ dfsan_label nptr_label, dfsan_label endptr_label,
+ dfsan_label base_label, dfsan_label *ret_label) {
+ char *tmp_endptr;
+ long long int ret = strtoll(nptr, &tmp_endptr, base);
+ if (endptr) {
+ *endptr = tmp_endptr;
+ }
+ if (tmp_endptr > nptr) {
+ // If *tmp_endptr is '\0' include its label as well.
+ *ret_label = dfsan_union(
+ base_label,
+ dfsan_read_label(nptr, tmp_endptr - nptr + (*tmp_endptr ? 0 : 1)));
+ } else {
+ *ret_label = 0;
+ }
+ return ret;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+unsigned long int __dfsw_strtoul(const char *nptr, char **endptr, int base,
+ dfsan_label nptr_label, dfsan_label endptr_label,
+ dfsan_label base_label, dfsan_label *ret_label) {
+ char *tmp_endptr;
+ unsigned long int ret = strtoul(nptr, &tmp_endptr, base);
+ if (endptr) {
+ *endptr = tmp_endptr;
+ }
+ if (tmp_endptr > nptr) {
+ // If *tmp_endptr is '\0' include its label as well.
+ *ret_label = dfsan_union(
+ base_label,
+ dfsan_read_label(nptr, tmp_endptr - nptr + (*tmp_endptr ? 0 : 1)));
+ } else {
+ *ret_label = 0;
+ }
+ return ret;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+long long unsigned int __dfsw_strtoull(const char *nptr, char **endptr,
+ int base, dfsan_label nptr_label,
+ dfsan_label endptr_label,
+ dfsan_label base_label,
+ dfsan_label *ret_label) {
+ char *tmp_endptr;
+ long long unsigned int ret = strtoull(nptr, &tmp_endptr, base);
+ if (endptr) {
+ *endptr = tmp_endptr;
+ }
+ if (tmp_endptr > nptr) {
+ // If *tmp_endptr is '\0' include its label as well.
+ *ret_label = dfsan_union(
+ base_label,
+ dfsan_read_label(nptr, tmp_endptr - nptr + (*tmp_endptr ? 0 : 1)));
+ } else {
+ *ret_label = 0;
+ }
+ return ret;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+time_t __dfsw_time(time_t *t, dfsan_label t_label, dfsan_label *ret_label) {
+ time_t ret = time(t);
+ if (ret != (time_t) -1 && t) {
+ dfsan_set_label(0, t, sizeof(time_t));
+ }
+ *ret_label = 0;
+ return ret;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __dfsw_inet_pton(int af, const char *src, void *dst, dfsan_label af_label,
+ dfsan_label src_label, dfsan_label dst_label,
+ dfsan_label *ret_label) {
+ int ret = inet_pton(af, src, dst);
+ if (ret == 1) {
+ dfsan_set_label(dfsan_read_label(src, strlen(src) + 1), dst,
+ af == AF_INET ? sizeof(struct in_addr) : sizeof(in6_addr));
+ }
+ *ret_label = 0;
+ return ret;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+struct tm *__dfsw_localtime_r(const time_t *timep, struct tm *result,
+ dfsan_label timep_label, dfsan_label result_label,
+ dfsan_label *ret_label) {
+ struct tm *ret = localtime_r(timep, result);
+ if (ret) {
+ dfsan_set_label(dfsan_read_label(timep, sizeof(time_t)), result,
+ sizeof(struct tm));
+ *ret_label = result_label;
+ } else {
+ *ret_label = 0;
+ }
+ return ret;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __dfsw_getpwuid_r(id_t uid, struct passwd *pwd,
+ char *buf, size_t buflen, struct passwd **result,
+ dfsan_label uid_label, dfsan_label pwd_label,
+ dfsan_label buf_label, dfsan_label buflen_label,
+ dfsan_label result_label, dfsan_label *ret_label) {
+ // Store the data in pwd, the strings referenced from pwd in buf, and the
+ // address of pwd in *result. On failure, NULL is stored in *result.
+ int ret = getpwuid_r(uid, pwd, buf, buflen, result);
+ if (ret == 0) {
+ dfsan_set_label(0, pwd, sizeof(struct passwd));
+ dfsan_set_label(0, buf, strlen(buf) + 1);
+ }
+ *ret_label = 0;
+ dfsan_set_label(0, result, sizeof(struct passwd*));
+ return ret;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __dfsw_poll(struct pollfd *fds, nfds_t nfds, int timeout,
+ dfsan_label dfs_label, dfsan_label nfds_label,
+ dfsan_label timeout_label, dfsan_label *ret_label) {
+ int ret = poll(fds, nfds, timeout);
+ if (ret >= 0) {
+ for (; nfds > 0; --nfds) {
+ dfsan_set_label(0, &fds[nfds - 1].revents, sizeof(fds[nfds - 1].revents));
+ }
+ }
+ *ret_label = 0;
+ return ret;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __dfsw_select(int nfds, fd_set *readfds, fd_set *writefds,
+ fd_set *exceptfds, struct timeval *timeout,
+ dfsan_label nfds_label, dfsan_label readfds_label,
+ dfsan_label writefds_label, dfsan_label exceptfds_label,
+ dfsan_label timeout_label, dfsan_label *ret_label) {
+ int ret = select(nfds, readfds, writefds, exceptfds, timeout);
+ // Clear everything (also on error) since on return the contents of the
+ // passed sets are either freshly written or undefined.
+ if (readfds) {
+ dfsan_set_label(0, readfds, sizeof(fd_set));
+ }
+ if (writefds) {
+ dfsan_set_label(0, writefds, sizeof(fd_set));
+ }
+ if (exceptfds) {
+ dfsan_set_label(0, exceptfds, sizeof(fd_set));
+ }
+ dfsan_set_label(0, timeout, sizeof(struct timeval));
+ *ret_label = 0;
+ return ret;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __dfsw_sched_getaffinity(pid_t pid, size_t cpusetsize, cpu_set_t *mask,
+ dfsan_label pid_label,
+ dfsan_label cpusetsize_label,
+ dfsan_label mask_label, dfsan_label *ret_label) {
+ int ret = sched_getaffinity(pid, cpusetsize, mask);
+ if (ret == 0) {
+ dfsan_set_label(0, mask, cpusetsize);
+ }
+ *ret_label = 0;
+ return ret;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __dfsw_sigemptyset(sigset_t *set, dfsan_label set_label,
+ dfsan_label *ret_label) {
+ int ret = sigemptyset(set);
+ dfsan_set_label(0, set, sizeof(sigset_t));
+ return ret;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __dfsw_sigaction(int signum, const struct sigaction *act,
+ struct sigaction *oldact, dfsan_label signum_label,
+ dfsan_label act_label, dfsan_label oldact_label,
+ dfsan_label *ret_label) {
+ int ret = sigaction(signum, act, oldact);
+ if (oldact) {
+ dfsan_set_label(0, oldact, sizeof(struct sigaction));
+ }
+ *ret_label = 0;
+ return ret;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __dfsw_gettimeofday(struct timeval *tv, struct timezone *tz,
+ dfsan_label tv_label, dfsan_label tz_label,
+ dfsan_label *ret_label) {
+ int ret = gettimeofday(tv, tz);
+ if (tv) {
+ dfsan_set_label(0, tv, sizeof(struct timeval));
+ }
+ if (tz) {
+ dfsan_set_label(0, tz, sizeof(struct timezone));
+ }
+ *ret_label = 0;
+ return ret;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE void *__dfsw_memchr(void *s, int c, size_t n,
+ dfsan_label s_label,
+ dfsan_label c_label,
+ dfsan_label n_label,
+ dfsan_label *ret_label) {
+ void *ret = memchr(s, c, n);
+ if (flags().strict_data_dependencies) {
+ *ret_label = ret ? s_label : 0;
+ } else {
+ size_t len =
+ ret ? reinterpret_cast<char *>(ret) - reinterpret_cast<char *>(s) + 1
+ : n;
+ *ret_label =
+ dfsan_union(dfsan_read_label(s, len), dfsan_union(s_label, c_label));
+ }
+ return ret;
+}
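+
+// Note on the flag used above: with strict_data_dependencies=1 the return
+// label tracks only the pointer operand, so the result carries s_label on a
+// hit and 0 on a miss; with strict_data_dependencies=0 the labels of every
+// byte scanned (up to and including the match, or all n bytes on a miss) are
+// unioned into the result together with s_label and c_label.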
+
+SANITIZER_INTERFACE_ATTRIBUTE char *__dfsw_strrchr(char *s, int c,
+ dfsan_label s_label,
+ dfsan_label c_label,
+ dfsan_label *ret_label) {
+ char *ret = strrchr(s, c);
+ if (flags().strict_data_dependencies) {
+ *ret_label = ret ? s_label : 0;
+ } else {
+ *ret_label =
+ dfsan_union(dfsan_read_label(s, strlen(s) + 1),
+ dfsan_union(s_label, c_label));
+ }
+
+ return ret;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE char *__dfsw_strstr(char *haystack, char *needle,
+ dfsan_label haystack_label,
+ dfsan_label needle_label,
+ dfsan_label *ret_label) {
+ char *ret = strstr(haystack, needle);
+ if (flags().strict_data_dependencies) {
+ *ret_label = ret ? haystack_label : 0;
+ } else {
+ size_t len = ret ? ret + strlen(needle) - haystack : strlen(haystack) + 1;
+ *ret_label =
+ dfsan_union(dfsan_read_label(haystack, len),
+ dfsan_union(dfsan_read_label(needle, strlen(needle) + 1),
+ dfsan_union(haystack_label, needle_label)));
+ }
+
+ return ret;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE int __dfsw_nanosleep(const struct timespec *req,
+ struct timespec *rem,
+ dfsan_label req_label,
+ dfsan_label rem_label,
+ dfsan_label *ret_label) {
+ int ret = nanosleep(req, rem);
+ *ret_label = 0;
+ if (ret == -1) {
+ // Interrupted by a signal, rem is filled with the remaining time.
+ dfsan_set_label(0, rem, sizeof(struct timespec));
+ }
+ return ret;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE int
+__dfsw_socketpair(int domain, int type, int protocol, int sv[2],
+ dfsan_label domain_label, dfsan_label type_label,
+ dfsan_label protocol_label, dfsan_label sv_label,
+ dfsan_label *ret_label) {
+ int ret = socketpair(domain, type, protocol, sv);
+ *ret_label = 0;
+ if (ret == 0) {
+ dfsan_set_label(0, sv, sizeof(*sv) * 2);
+ }
+ return ret;
+}
+
+// Type of the trampoline function passed to the custom version of
+// dfsan_set_write_callback.
+typedef void (*write_trampoline_t)(
+ void *callback,
+ int fd, const void *buf, ssize_t count,
+ dfsan_label fd_label, dfsan_label buf_label, dfsan_label count_label);
+
+// Calls to dfsan_set_write_callback() set the values in this struct.
+// Calls to the custom version of write() read (and invoke) them.
+static struct {
+ write_trampoline_t write_callback_trampoline = nullptr;
+ void *write_callback = nullptr;
+} write_callback_info;
+
+SANITIZER_INTERFACE_ATTRIBUTE void
+__dfsw_dfsan_set_write_callback(
+ write_trampoline_t write_callback_trampoline,
+ void *write_callback,
+ dfsan_label write_callback_label,
+ dfsan_label *ret_label) {
+ write_callback_info.write_callback_trampoline = write_callback_trampoline;
+ write_callback_info.write_callback = write_callback;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE int
+__dfsw_write(int fd, const void *buf, size_t count,
+ dfsan_label fd_label, dfsan_label buf_label,
+ dfsan_label count_label, dfsan_label *ret_label) {
+ if (write_callback_info.write_callback) {
+ write_callback_info.write_callback_trampoline(
+ write_callback_info.write_callback,
+ fd, buf, count,
+ fd_label, buf_label, count_label);
+ }
+
+ *ret_label = 0;
+ return write(fd, buf, count);
+}
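+
+// Illustrative client of the machinery above (a sketch; audit() is a
+// hypothetical callback): a program can watch labeled data leaving the
+// process by registering a callback through the public interface, which
+// instrumented code routes to __dfsw_dfsan_set_write_callback.
+//
+//   void audit(int fd, const void *buf, size_t count) {
+//     if (dfsan_read_label(buf, count) != 0)
+//       fprintf(stderr, "labeled write to fd %d\n", fd);
+//   }
+//   ...
+//   dfsan_set_write_callback(audit);
+//   write(fd, data, len);  // audit() runs first, then the real write()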
+} // namespace __dfsan
+
+// Type used to extract a dfsan_label with va_arg()
+typedef int dfsan_label_va;
+
+// Formats a chunk, either a constant string or a single format directive
+// (e.g., '%.3f').
+struct Formatter {
+ Formatter(char *str_, const char *fmt_, size_t size_)
+ : str(str_), str_off(0), size(size_), fmt_start(fmt_), fmt_cur(fmt_),
+ width(-1) {}
+
+ int format() {
+ char *tmp_fmt = build_format_string();
+ int retval =
+ snprintf(str + str_off, str_off < size ? size - str_off : 0, tmp_fmt,
+ 0 /* used only to avoid warnings */);
+ free(tmp_fmt);
+ return retval;
+ }
+
+ template <typename T> int format(T arg) {
+ char *tmp_fmt = build_format_string();
+ int retval;
+ if (width >= 0) {
+ retval = snprintf(str + str_off, str_off < size ? size - str_off : 0,
+ tmp_fmt, width, arg);
+ } else {
+ retval = snprintf(str + str_off, str_off < size ? size - str_off : 0,
+ tmp_fmt, arg);
+ }
+ free(tmp_fmt);
+ return retval;
+ }
+
+ char *build_format_string() {
+ size_t fmt_size = fmt_cur - fmt_start + 1;
+ char *new_fmt = (char *)malloc(fmt_size + 1);
+ assert(new_fmt);
+ internal_memcpy(new_fmt, fmt_start, fmt_size);
+ new_fmt[fmt_size] = '\0';
+ return new_fmt;
+ }
+
+ char *str_cur() { return str + str_off; }
+
+ size_t num_written_bytes(int retval) {
+ if (retval < 0) {
+ return 0;
+ }
+
+ size_t num_avail = str_off < size ? size - str_off : 0;
+ if (num_avail == 0) {
+ return 0;
+ }
+
+ size_t num_written = retval;
+ // A return value of {v,}snprintf of size or more means that the output was
+ // truncated.
+ if (num_written >= num_avail) {
+ num_written -= num_avail;
+ }
+
+ return num_written;
+ }
+
+ char *str;
+ size_t str_off;
+ size_t size;
+ const char *fmt_start;
+ const char *fmt_cur;
+ int width;
+};
+
+// Formats the input and propagates the input labels to the output. The output
+// is stored in 'str'. 'size' bounds the number of output bytes. 'format' and
+// 'ap' are the format string and the list of arguments for formatting. Returns
+// the return value vsnprintf would return.
+//
+// The function tokenizes the format string into chunks, each representing
+// either a constant string or a single format directive (e.g., '%.3f'), and
+// formats each chunk independently into the output string. This approach
+// makes it possible to figure out which bytes of the output string depend on
+// which argument, and thus to propagate labels more precisely.
+//
+// WARNING: This implementation does not support conversion specifiers with
+// positional arguments.
+static int format_buffer(char *str, size_t size, const char *fmt,
+ dfsan_label *va_labels, dfsan_label *ret_label,
+ va_list ap) {
+ Formatter formatter(str, fmt, size);
+
+ while (*formatter.fmt_cur) {
+ formatter.fmt_start = formatter.fmt_cur;
+ formatter.width = -1;
+ int retval = 0;
+
+ if (*formatter.fmt_cur != '%') {
+ // Ordinary character. Consume all the characters until a '%' or the end
+ // of the string.
+ for (; *(formatter.fmt_cur + 1) && *(formatter.fmt_cur + 1) != '%';
+ ++formatter.fmt_cur) {}
+ retval = formatter.format();
+ dfsan_set_label(0, formatter.str_cur(),
+ formatter.num_written_bytes(retval));
+ } else {
+ // Conversion directive. Consume all the characters until a conversion
+ // specifier or the end of the string.
+ bool end_fmt = false;
+ for (; *formatter.fmt_cur && !end_fmt; ) {
+ switch (*++formatter.fmt_cur) {
+ case 'd':
+ case 'i':
+ case 'o':
+ case 'u':
+ case 'x':
+ case 'X':
+ switch (*(formatter.fmt_cur - 1)) {
+ case 'h':
+ // Also covers the 'hh' case (since the size of the arg is still
+ // an int).
+ retval = formatter.format(va_arg(ap, int));
+ break;
+ case 'l':
+ if (formatter.fmt_cur - formatter.fmt_start >= 2 &&
+ *(formatter.fmt_cur - 2) == 'l') {
+ retval = formatter.format(va_arg(ap, long long int));
+ } else {
+ retval = formatter.format(va_arg(ap, long int));
+ }
+ break;
+ case 'q':
+ retval = formatter.format(va_arg(ap, long long int));
+ break;
+ case 'j':
+ retval = formatter.format(va_arg(ap, intmax_t));
+ break;
+ case 'z':
+ case 't':
+ retval = formatter.format(va_arg(ap, size_t));
+ break;
+ default:
+ retval = formatter.format(va_arg(ap, int));
+ }
+ dfsan_set_label(*va_labels++, formatter.str_cur(),
+ formatter.num_written_bytes(retval));
+ end_fmt = true;
+ break;
+
+ case 'a':
+ case 'A':
+ case 'e':
+ case 'E':
+ case 'f':
+ case 'F':
+ case 'g':
+ case 'G':
+ if (*(formatter.fmt_cur - 1) == 'L') {
+ retval = formatter.format(va_arg(ap, long double));
+ } else {
+ retval = formatter.format(va_arg(ap, double));
+ }
+ dfsan_set_label(*va_labels++, formatter.str_cur(),
+ formatter.num_written_bytes(retval));
+ end_fmt = true;
+ break;
+
+ case 'c':
+ retval = formatter.format(va_arg(ap, int));
+ dfsan_set_label(*va_labels++, formatter.str_cur(),
+ formatter.num_written_bytes(retval));
+ end_fmt = true;
+ break;
+
+ case 's': {
+ char *arg = va_arg(ap, char *);
+ retval = formatter.format(arg);
+ va_labels++;
+ internal_memcpy(shadow_for(formatter.str_cur()), shadow_for(arg),
+ sizeof(dfsan_label) *
+ formatter.num_written_bytes(retval));
+ end_fmt = true;
+ break;
+ }
+
+ case 'p':
+ retval = formatter.format(va_arg(ap, void *));
+ dfsan_set_label(*va_labels++, formatter.str_cur(),
+ formatter.num_written_bytes(retval));
+ end_fmt = true;
+ break;
+
+ case 'n': {
+ int *ptr = va_arg(ap, int *);
+ *ptr = (int)formatter.str_off;
+ va_labels++;
+ dfsan_set_label(0, ptr, sizeof(ptr));
+ end_fmt = true;
+ break;
+ }
+
+ case '%':
+ retval = formatter.format();
+ dfsan_set_label(0, formatter.str_cur(),
+ formatter.num_written_bytes(retval));
+ end_fmt = true;
+ break;
+
+ case '*':
+ formatter.width = va_arg(ap, int);
+ va_labels++;
+ break;
+
+ default:
+ break;
+ }
+ }
+ }
+
+ if (retval < 0) {
+ return retval;
+ }
+
+ formatter.fmt_cur++;
+ formatter.str_off += retval;
+ }
+
+ *ret_label = 0;
+
+ // Number of bytes written in total.
+ return formatter.str_off;
+}
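+
+// Worked example of the chunking scheme (illustrative): for
+// sprintf(str, "x=%d s=%s", 7, name), format_buffer handles four chunks
+// independently:
+//   "x="  -> output bytes labeled 0 (constant text)
+//   "%d"  -> output bytes labeled with va_labels[0] (the label of 7)
+//   " s=" -> output bytes labeled 0
+//   "%s"  -> per-byte labels copied from name's shadow via internal_memcpy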
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+int __dfsw_sprintf(char *str, const char *format, dfsan_label str_label,
+ dfsan_label format_label, dfsan_label *va_labels,
+ dfsan_label *ret_label, ...) {
+ va_list ap;
+ va_start(ap, ret_label);
+ int ret = format_buffer(str, ~0ul, format, va_labels, ret_label, ap);
+ va_end(ap);
+ return ret;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __dfsw_snprintf(char *str, size_t size, const char *format,
+ dfsan_label str_label, dfsan_label size_label,
+ dfsan_label format_label, dfsan_label *va_labels,
+ dfsan_label *ret_label, ...) {
+ va_list ap;
+ va_start(ap, ret_label);
+ int ret = format_buffer(str, size, format, va_labels, ret_label, ap);
+ va_end(ap);
+ return ret;
+}
+
+// Default empty implementations (weak). Users should redefine them.
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_guard, u32 *) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_guard_init, u32 *,
+ u32 *) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_pcs_init, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_cov_trace_pc_indir, void) {}
+
+SANITIZER_INTERFACE_WEAK_DEF(void, __dfsw___sanitizer_cov_trace_cmp, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __dfsw___sanitizer_cov_trace_cmp1, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __dfsw___sanitizer_cov_trace_cmp2, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __dfsw___sanitizer_cov_trace_cmp4, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __dfsw___sanitizer_cov_trace_cmp8, void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __dfsw___sanitizer_cov_trace_const_cmp1,
+ void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __dfsw___sanitizer_cov_trace_const_cmp2,
+ void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __dfsw___sanitizer_cov_trace_const_cmp4,
+ void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __dfsw___sanitizer_cov_trace_const_cmp8,
+ void) {}
+SANITIZER_INTERFACE_WEAK_DEF(void, __dfsw___sanitizer_cov_trace_switch, void) {}
+} // extern "C"
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/dfsan/dfsan_custom.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/dfsan/dfsan_flags.inc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/dfsan/dfsan_flags.inc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/dfsan/dfsan_flags.inc (revision 351984)
@@ -0,0 +1,35 @@
+//===-- dfsan_flags.inc -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// DFSan runtime flags.
+//
+//===----------------------------------------------------------------------===//
+#ifndef DFSAN_FLAG
+# error "Define DFSAN_FLAG prior to including this file!"
+#endif
+
+// DFSAN_FLAG(Type, Name, DefaultValue, Description)
+// See COMMON_FLAG in sanitizer_flags.inc for more details.
+
+DFSAN_FLAG(bool, warn_unimplemented, true,
+ "Whether to warn on unimplemented functions.")
+DFSAN_FLAG(bool, warn_nonzero_labels, false,
+ "Whether to warn when a nonzero label is encountered.")
+DFSAN_FLAG(
+ bool, strict_data_dependencies, true,
+ "Whether to propagate labels only when there is an obvious data dependency "
+ "(e.g., when comparing strings, ignore the fact that the output of the "
+ "comparison might be data-dependent on the content of the strings). This "
+ "applies only to the custom functions defined in 'dfsan_custom.cc'.")
+DFSAN_FLAG(const char *, dump_labels_at_exit, "", "The path of the file where "
+ "to dump the labels when the "
+ "program terminates.")
+DFSAN_FLAG(bool, fast16labels, false,
+ "Enables experimental mode where DFSan supports only 16 power-of-2 labels "
+ "(1, 2, 4, 8, ... 32768) and the label union is computed as a bit-wise OR."
+)
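+
+// How this .inc is typically consumed (a sketch of the sanitizer X-macro
+// pattern; the exact consumer lives elsewhere in the runtime): a Flags
+// struct is generated by redefining DFSAN_FLAG before inclusion, e.g.
+//
+//   struct Flags {
+//   #define DFSAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
+//   #include "dfsan_flags.inc"
+//   #undef DFSAN_FLAG
+//   };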
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/dfsan/dfsan_flags.inc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/dfsan/dfsan_interceptors.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/dfsan/dfsan_interceptors.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/dfsan/dfsan_interceptors.cc (revision 351984)
@@ -0,0 +1,45 @@
+//===-- dfsan_interceptors.cc ---------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of DataFlowSanitizer.
+//
+// Interceptors for standard library functions.
+//===----------------------------------------------------------------------===//
+
+#include "dfsan/dfsan.h"
+#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_common.h"
+
+using namespace __sanitizer;
+
+INTERCEPTOR(void *, mmap, void *addr, SIZE_T length, int prot, int flags,
+ int fd, OFF_T offset) {
+ void *res = REAL(mmap)(addr, length, prot, flags, fd, offset);
+ if (res != (void*)-1)
+ dfsan_set_label(0, res, RoundUpTo(length, GetPageSize()));
+ return res;
+}
+
+INTERCEPTOR(void *, mmap64, void *addr, SIZE_T length, int prot, int flags,
+ int fd, OFF64_T offset) {
+ void *res = REAL(mmap64)(addr, length, prot, flags, fd, offset);
+ if (res != (void*)-1)
+ dfsan_set_label(0, res, RoundUpTo(length, GetPageSize()));
+ return res;
+}
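+
+// Observable effect of the two hooks above (a sketch, assuming an
+// instrumented build): freshly mapped pages always read back the zero label.
+//
+//   char *p = (char *)mmap(nullptr, 4096, PROT_READ | PROT_WRITE,
+//                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+//   assert(dfsan_read_label(p, 4096) == 0);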
+
+namespace __dfsan {
+void InitializeInterceptors() {
+ static int inited = 0;
+ CHECK_EQ(inited, 0);
+
+ INTERCEPT_FUNCTION(mmap);
+ INTERCEPT_FUNCTION(mmap64);
+ inited = 1;
+}
+} // namespace __dfsan
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/dfsan/dfsan_interceptors.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/dfsan/dfsan_platform.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/dfsan/dfsan_platform.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/dfsan/dfsan_platform.h (revision 351984)
@@ -0,0 +1,115 @@
+//===-- dfsan_platform.h ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of DataFlowSanitizer.
+//
+// Platform specific information for DFSan.
+//===----------------------------------------------------------------------===//
+
+#ifndef DFSAN_PLATFORM_H
+#define DFSAN_PLATFORM_H
+
+namespace __dfsan {
+
+#if defined(__x86_64__)
+struct Mapping {
+ static const uptr kShadowAddr = 0x10000;
+ static const uptr kUnionTableAddr = 0x200000000000;
+ static const uptr kAppAddr = 0x700000008000;
+ static const uptr kShadowMask = ~0x700000000000;
+};
+#elif defined(__mips64)
+struct Mapping {
+ static const uptr kShadowAddr = 0x10000;
+ static const uptr kUnionTableAddr = 0x2000000000;
+ static const uptr kAppAddr = 0xF000008000;
+ static const uptr kShadowMask = ~0xF000000000;
+};
+#elif defined(__aarch64__)
+struct Mapping39 {
+ static const uptr kShadowAddr = 0x10000;
+ static const uptr kUnionTableAddr = 0x1000000000;
+ static const uptr kAppAddr = 0x7000008000;
+ static const uptr kShadowMask = ~0x7800000000;
+};
+
+struct Mapping42 {
+ static const uptr kShadowAddr = 0x10000;
+ static const uptr kUnionTableAddr = 0x8000000000;
+ static const uptr kAppAddr = 0x3ff00008000;
+ static const uptr kShadowMask = ~0x3c000000000;
+};
+
+struct Mapping48 {
+ static const uptr kShadowAddr = 0x10000;
+ static const uptr kUnionTableAddr = 0x8000000000;
+ static const uptr kAppAddr = 0xffff00008000;
+ static const uptr kShadowMask = ~0xfffff0000000;
+};
+
+extern int vmaSize;
+# define DFSAN_RUNTIME_VMA 1
+#else
+# error "DFSan not supported for this platform!"
+#endif
+
+enum MappingType {
+ MAPPING_SHADOW_ADDR,
+ MAPPING_UNION_TABLE_ADDR,
+ MAPPING_APP_ADDR,
+ MAPPING_SHADOW_MASK
+};
+
+template<typename Mapping, int Type>
+uptr MappingImpl(void) {
+ switch (Type) {
+ case MAPPING_SHADOW_ADDR: return Mapping::kShadowAddr;
+ case MAPPING_UNION_TABLE_ADDR: return Mapping::kUnionTableAddr;
+ case MAPPING_APP_ADDR: return Mapping::kAppAddr;
+ case MAPPING_SHADOW_MASK: return Mapping::kShadowMask;
+ }
+}
+
+template<int Type>
+uptr MappingArchImpl(void) {
+#ifdef __aarch64__
+ switch (vmaSize) {
+ case 39: return MappingImpl<Mapping39, Type>();
+ case 42: return MappingImpl<Mapping42, Type>();
+ case 48: return MappingImpl<Mapping48, Type>();
+ }
+ DCHECK(0);
+ return 0;
+#else
+ return MappingImpl<Mapping, Type>();
+#endif
+}
+
+ALWAYS_INLINE
+uptr ShadowAddr() {
+ return MappingArchImpl<MAPPING_SHADOW_ADDR>();
+}
+
+ALWAYS_INLINE
+uptr UnionTableAddr() {
+ return MappingArchImpl<MAPPING_UNION_TABLE_ADDR>();
+}
+
+ALWAYS_INLINE
+uptr AppAddr() {
+ return MappingArchImpl<MAPPING_APP_ADDR>();
+}
+
+ALWAYS_INLINE
+uptr ShadowMask() {
+ return MappingArchImpl<MAPPING_SHADOW_MASK>();
+}
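+
+// How these constants are used (illustrative; the real helper lives in
+// dfsan.h): an application address is turned into its shadow address by
+// masking off the app bits and scaling by sizeof(dfsan_label) == 2:
+//
+//   inline dfsan_label *shadow_for(void *ptr) {
+//     return (dfsan_label *)((((uptr)ptr) & ShadowMask()) << 1);
+//   }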
+
+} // namespace __dfsan
+
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/dfsan/dfsan_platform.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/dfsan/done_abilist.txt
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/dfsan/done_abilist.txt (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/dfsan/done_abilist.txt (revision 351984)
@@ -0,0 +1,307 @@
+fun:main=uninstrumented
+fun:main=discard
+
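+# Category semantics, summarized for reference from the DataFlowSanitizer
+# design documentation:
+#   fun:NAME=uninstrumented - the function has the native (uninstrumented) ABI
+#   fun:NAME=discard        - the return value gets the zero label
+#   fun:NAME=functional     - the return label is the union of the arg labels
+#   fun:NAME=custom         - calls are rewritten to a __dfsw_NAME wrapper
+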
+###############################################################################
+# DFSan interface functions
+###############################################################################
+fun:dfsan_union=uninstrumented
+fun:dfsan_union=discard
+fun:dfsan_create_label=uninstrumented
+fun:dfsan_create_label=discard
+fun:dfsan_set_label=uninstrumented
+fun:dfsan_set_label=discard
+fun:dfsan_add_label=uninstrumented
+fun:dfsan_add_label=discard
+fun:dfsan_get_label=uninstrumented
+fun:dfsan_get_label=custom
+fun:dfsan_read_label=uninstrumented
+fun:dfsan_read_label=discard
+fun:dfsan_get_label_count=uninstrumented
+fun:dfsan_get_label_count=discard
+fun:dfsan_get_label_info=uninstrumented
+fun:dfsan_get_label_info=discard
+fun:dfsan_has_label=uninstrumented
+fun:dfsan_has_label=discard
+fun:dfsan_has_label_with_desc=uninstrumented
+fun:dfsan_has_label_with_desc=discard
+fun:dfsan_set_write_callback=uninstrumented
+fun:dfsan_set_write_callback=custom
+fun:dfsan_flush=uninstrumented
+fun:dfsan_flush=discard
+
+###############################################################################
+# glibc
+###############################################################################
+fun:malloc=discard
+fun:realloc=discard
+fun:free=discard
+
+# Functions that return a value that depends on the input, but whose output
+# is not necessarily data-dependent on the input.
+fun:isalpha=functional
+fun:isdigit=functional
+fun:isprint=functional
+fun:isxdigit=functional
+fun:isalnum=functional
+fun:ispunct=functional
+fun:isspace=functional
+fun:tolower=functional
+fun:toupper=functional
+
+# Functions that return a value that is data-dependent on the input.
+fun:btowc=functional
+fun:exp=functional
+fun:exp2=functional
+fun:fabs=functional
+fun:finite=functional
+fun:floor=functional
+fun:fmod=functional
+fun:isinf=functional
+fun:isnan=functional
+fun:log=functional
+fun:modf=functional
+fun:pow=functional
+fun:round=functional
+fun:sqrt=functional
+fun:wctob=functional
+
+# Functions that produce an output that does not depend on the input (shadow is
+# zeroed automatically).
+fun:__assert_fail=discard
+fun:__ctype_b_loc=discard
+fun:__cxa_atexit=discard
+fun:__errno_location=discard
+fun:__newlocale=discard
+fun:__sbrk=discard
+fun:__sigsetjmp=discard
+fun:__uselocale=discard
+fun:__wctype_l=discard
+fun:access=discard
+fun:alarm=discard
+fun:atexit=discard
+fun:bind=discard
+fun:chdir=discard
+fun:close=discard
+fun:closedir=discard
+fun:connect=discard
+fun:dladdr=discard
+fun:dlclose=discard
+fun:fclose=discard
+fun:feof=discard
+fun:ferror=discard
+fun:fflush=discard
+fun:fileno=discard
+fun:fopen=discard
+fun:fprintf=discard
+fun:fputc=discard
+fun:fputs=discard
+fun:fseek=discard
+fun:ftell=discard
+fun:fwrite=discard
+fun:getenv=discard
+fun:getuid=discard
+fun:geteuid=discard
+fun:getpagesize=discard
+fun:getpid=discard
+fun:kill=discard
+fun:listen=discard
+fun:lseek=discard
+fun:mkdir=discard
+fun:mmap=discard
+fun:munmap=discard
+fun:open=discard
+fun:pipe=discard
+fun:posix_fadvise=discard
+fun:posix_memalign=discard
+fun:prctl=discard
+fun:printf=discard
+fun:pthread_sigmask=discard
+fun:putc=discard
+fun:putchar=discard
+fun:puts=discard
+fun:rand=discard
+fun:random=discard
+fun:remove=discard
+fun:sched_getcpu=discard
+fun:sched_get_priority_max=discard
+fun:sched_setaffinity=discard
+fun:sched_yield=discard
+fun:sem_destroy=discard
+fun:sem_init=discard
+fun:sem_post=discard
+fun:sem_wait=discard
+fun:send=discard
+fun:sendmsg=discard
+fun:sendto=discard
+fun:setsockopt=discard
+fun:shutdown=discard
+fun:sleep=discard
+fun:socket=discard
+fun:strerror=discard
+fun:strspn=discard
+fun:strcspn=discard
+fun:symlink=discard
+fun:syscall=discard
+fun:unlink=discard
+fun:uselocale=discard
+
+# Functions that produce output that does not depend on the input (need to
+# zero the shadow manually).
+fun:calloc=custom
+fun:clock_gettime=custom
+fun:dlopen=custom
+fun:fgets=custom
+fun:fstat=custom
+fun:getcwd=custom
+fun:get_current_dir_name=custom
+fun:gethostname=custom
+fun:getrlimit=custom
+fun:getrusage=custom
+fun:nanosleep=custom
+fun:pread=custom
+fun:read=custom
+fun:socketpair=custom
+fun:stat=custom
+fun:time=custom
+
+# Functions that produce an output that depends on the input (propagate the
+# shadow manually).
+fun:ctime_r=custom
+fun:inet_pton=custom
+fun:localtime_r=custom
+fun:memcpy=custom
+fun:memset=custom
+fun:strcpy=custom
+fun:strdup=custom
+fun:strncpy=custom
+fun:strtod=custom
+fun:strtol=custom
+fun:strtoll=custom
+fun:strtoul=custom
+fun:strtoull=custom
+
+# Functions that produce an output that is computed from the input, but is
+# not necessarily data-dependent on it.
+fun:memchr=custom
+fun:memcmp=custom
+fun:strcasecmp=custom
+fun:strchr=custom
+fun:strcmp=custom
+fun:strlen=custom
+fun:strncasecmp=custom
+fun:strncmp=custom
+fun:strrchr=custom
+fun:strstr=custom
+
+# Functions which take action based on global state, such as running a
+# callback set by a separate function.
+fun:write=custom
+
+# Functions that take a callback (wrap the callback manually).
+fun:dl_iterate_phdr=custom
+
+fun:getpwuid_r=custom
+fun:poll=custom
+fun:sched_getaffinity=custom
+fun:select=custom
+fun:sigemptyset=custom
+fun:sigaction=custom
+fun:gettimeofday=custom
+
+# sprintf-like
+fun:sprintf=custom
+fun:snprintf=custom
+
+# TODO: custom
+fun:asprintf=discard
+fun:qsort=discard
+
+###############################################################################
+# pthread
+###############################################################################
+fun:pthread_equal=discard
+fun:pthread_getspecific=discard
+fun:pthread_key_create=discard
+fun:pthread_key_delete=discard
+fun:pthread_mutex_destroy=discard
+fun:pthread_mutex_init=discard
+fun:pthread_mutex_lock=discard
+fun:pthread_mutex_trylock=discard
+fun:pthread_mutex_unlock=discard
+fun:pthread_mutexattr_destroy=discard
+fun:pthread_mutexattr_init=discard
+fun:pthread_mutexattr_settype=discard
+fun:pthread_once=discard
+fun:pthread_self=discard
+fun:pthread_setspecific=discard
+
+# Functions that take a callback (wrap the callback manually).
+fun:pthread_create=custom
+
+###############################################################################
+# libffi/libgo
+###############################################################################
+# Functions that are written in asm or are called from asm.
+fun:ffi_call_unix64=uninstrumented
+fun:ffi_call_unix64=discard
+fun:ffi_closure_unix64_inner=uninstrumented
+fun:ffi_closure_unix64_inner=discard
+fun:ffi_closure_unix64=uninstrumented
+fun:ffi_closure_unix64=discard
+fun:__go_get_closure=uninstrumented
+fun:__go_get_closure=discard
+fun:__go_makefunc_can_recover=uninstrumented
+fun:__go_makefunc_can_recover=discard
+fun:__go_makefunc_returning=uninstrumented
+fun:__go_makefunc_returning=discard
+fun:reflect.MakeFuncStubGo=uninstrumented
+fun:reflect.MakeFuncStubGo=discard
+fun:reflect.makeFuncStub=uninstrumented
+fun:reflect.makeFuncStub=discard
+
+
+###############################################################################
+# lib/Fuzzer
+###############################################################################
+# Replaces __sanitizer_cov_trace_cmp with __dfsw___sanitizer_cov_trace_cmp
+fun:__sanitizer_cov_trace_cmp1=custom
+fun:__sanitizer_cov_trace_cmp1=uninstrumented
+fun:__sanitizer_cov_trace_cmp2=custom
+fun:__sanitizer_cov_trace_cmp2=uninstrumented
+fun:__sanitizer_cov_trace_cmp4=custom
+fun:__sanitizer_cov_trace_cmp4=uninstrumented
+fun:__sanitizer_cov_trace_cmp8=custom
+fun:__sanitizer_cov_trace_cmp8=uninstrumented
+fun:__sanitizer_cov_trace_const_cmp1=custom
+fun:__sanitizer_cov_trace_const_cmp1=uninstrumented
+fun:__sanitizer_cov_trace_const_cmp2=custom
+fun:__sanitizer_cov_trace_const_cmp2=uninstrumented
+fun:__sanitizer_cov_trace_const_cmp4=custom
+fun:__sanitizer_cov_trace_const_cmp4=uninstrumented
+fun:__sanitizer_cov_trace_const_cmp8=custom
+fun:__sanitizer_cov_trace_const_cmp8=uninstrumented
+# Similar for __sanitizer_cov_trace_switch
+fun:__sanitizer_cov_trace_switch=custom
+fun:__sanitizer_cov_trace_switch=uninstrumented
+
+# Ignores all other __sanitizer callbacks.
+fun:__sanitizer_cov=uninstrumented
+fun:__sanitizer_cov=discard
+fun:__sanitizer_cov_module_init=uninstrumented
+fun:__sanitizer_cov_module_init=discard
+fun:__sanitizer_cov_with_check=uninstrumented
+fun:__sanitizer_cov_with_check=discard
+fun:__sanitizer_set_death_callback=uninstrumented
+fun:__sanitizer_set_death_callback=discard
+fun:__sanitizer_update_counter_bitset_and_clear_counters=uninstrumented
+fun:__sanitizer_update_counter_bitset_and_clear_counters=discard
+fun:__sanitizer_cov_trace_pc*=uninstrumented
+fun:__sanitizer_cov_trace_pc*=discard
+fun:__sanitizer_cov_pcs_init=uninstrumented
+fun:__sanitizer_cov_pcs_init=discard
+
+# Ignores the dfsan wrappers.
+fun:__dfsw_*=uninstrumented
+fun:__dfsw_*=discard
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/dfsan/done_abilist.txt
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/dfsan/libc_ubuntu1404_abilist.txt
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/dfsan/libc_ubuntu1404_abilist.txt (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/dfsan/libc_ubuntu1404_abilist.txt (revision 351984)
@@ -0,0 +1,3433 @@
+fun:_Exit=uninstrumented
+fun:_IO_adjust_column=uninstrumented
+fun:_IO_adjust_wcolumn=uninstrumented
+fun:_IO_default_doallocate=uninstrumented
+fun:_IO_default_finish=uninstrumented
+fun:_IO_default_pbackfail=uninstrumented
+fun:_IO_default_uflow=uninstrumented
+fun:_IO_default_xsgetn=uninstrumented
+fun:_IO_default_xsputn=uninstrumented
+fun:_IO_do_write=uninstrumented
+fun:_IO_doallocbuf=uninstrumented
+fun:_IO_fclose=uninstrumented
+fun:_IO_fdopen=uninstrumented
+fun:_IO_feof=uninstrumented
+fun:_IO_ferror=uninstrumented
+fun:_IO_fflush=uninstrumented
+fun:_IO_fgetpos=uninstrumented
+fun:_IO_fgetpos64=uninstrumented
+fun:_IO_fgets=uninstrumented
+fun:_IO_file_attach=uninstrumented
+fun:_IO_file_close=uninstrumented
+fun:_IO_file_close_it=uninstrumented
+fun:_IO_file_doallocate=uninstrumented
+fun:_IO_file_finish=uninstrumented
+fun:_IO_file_fopen=uninstrumented
+fun:_IO_file_init=uninstrumented
+fun:_IO_file_open=uninstrumented
+fun:_IO_file_overflow=uninstrumented
+fun:_IO_file_read=uninstrumented
+fun:_IO_file_seek=uninstrumented
+fun:_IO_file_seekoff=uninstrumented
+fun:_IO_file_setbuf=uninstrumented
+fun:_IO_file_stat=uninstrumented
+fun:_IO_file_sync=uninstrumented
+fun:_IO_file_underflow=uninstrumented
+fun:_IO_file_write=uninstrumented
+fun:_IO_file_xsputn=uninstrumented
+fun:_IO_flockfile=uninstrumented
+fun:_IO_flush_all=uninstrumented
+fun:_IO_flush_all_linebuffered=uninstrumented
+fun:_IO_fopen=uninstrumented
+fun:_IO_fprintf=uninstrumented
+fun:_IO_fputs=uninstrumented
+fun:_IO_fread=uninstrumented
+fun:_IO_free_backup_area=uninstrumented
+fun:_IO_free_wbackup_area=uninstrumented
+fun:_IO_fsetpos=uninstrumented
+fun:_IO_fsetpos64=uninstrumented
+fun:_IO_ftell=uninstrumented
+fun:_IO_ftrylockfile=uninstrumented
+fun:_IO_funlockfile=uninstrumented
+fun:_IO_fwrite=uninstrumented
+fun:_IO_getc=uninstrumented
+fun:_IO_getline=uninstrumented
+fun:_IO_getline_info=uninstrumented
+fun:_IO_gets=uninstrumented
+fun:_IO_init=uninstrumented
+fun:_IO_init_marker=uninstrumented
+fun:_IO_init_wmarker=uninstrumented
+fun:_IO_iter_begin=uninstrumented
+fun:_IO_iter_end=uninstrumented
+fun:_IO_iter_file=uninstrumented
+fun:_IO_iter_next=uninstrumented
+fun:_IO_least_wmarker=uninstrumented
+fun:_IO_link_in=uninstrumented
+fun:_IO_list_lock=uninstrumented
+fun:_IO_list_resetlock=uninstrumented
+fun:_IO_list_unlock=uninstrumented
+fun:_IO_marker_delta=uninstrumented
+fun:_IO_marker_difference=uninstrumented
+fun:_IO_padn=uninstrumented
+fun:_IO_peekc_locked=uninstrumented
+fun:_IO_popen=uninstrumented
+fun:_IO_printf=uninstrumented
+fun:_IO_proc_close=uninstrumented
+fun:_IO_proc_open=uninstrumented
+fun:_IO_putc=uninstrumented
+fun:_IO_puts=uninstrumented
+fun:_IO_remove_marker=uninstrumented
+fun:_IO_seekmark=uninstrumented
+fun:_IO_seekoff=uninstrumented
+fun:_IO_seekpos=uninstrumented
+fun:_IO_seekwmark=uninstrumented
+fun:_IO_setb=uninstrumented
+fun:_IO_setbuffer=uninstrumented
+fun:_IO_setvbuf=uninstrumented
+fun:_IO_sgetn=uninstrumented
+fun:_IO_sprintf=uninstrumented
+fun:_IO_sputbackc=uninstrumented
+fun:_IO_sputbackwc=uninstrumented
+fun:_IO_sscanf=uninstrumented
+fun:_IO_str_init_readonly=uninstrumented
+fun:_IO_str_init_static=uninstrumented
+fun:_IO_str_overflow=uninstrumented
+fun:_IO_str_pbackfail=uninstrumented
+fun:_IO_str_seekoff=uninstrumented
+fun:_IO_str_underflow=uninstrumented
+fun:_IO_sungetc=uninstrumented
+fun:_IO_sungetwc=uninstrumented
+fun:_IO_switch_to_get_mode=uninstrumented
+fun:_IO_switch_to_main_wget_area=uninstrumented
+fun:_IO_switch_to_wbackup_area=uninstrumented
+fun:_IO_switch_to_wget_mode=uninstrumented
+fun:_IO_un_link=uninstrumented
+fun:_IO_ungetc=uninstrumented
+fun:_IO_unsave_markers=uninstrumented
+fun:_IO_unsave_wmarkers=uninstrumented
+fun:_IO_vfprintf=uninstrumented
+fun:_IO_vfscanf=uninstrumented
+fun:_IO_vsprintf=uninstrumented
+fun:_IO_wdefault_doallocate=uninstrumented
+fun:_IO_wdefault_finish=uninstrumented
+fun:_IO_wdefault_pbackfail=uninstrumented
+fun:_IO_wdefault_uflow=uninstrumented
+fun:_IO_wdefault_xsgetn=uninstrumented
+fun:_IO_wdefault_xsputn=uninstrumented
+fun:_IO_wdo_write=uninstrumented
+fun:_IO_wdoallocbuf=uninstrumented
+fun:_IO_wfile_overflow=uninstrumented
+fun:_IO_wfile_seekoff=uninstrumented
+fun:_IO_wfile_sync=uninstrumented
+fun:_IO_wfile_underflow=uninstrumented
+fun:_IO_wfile_xsputn=uninstrumented
+fun:_IO_wmarker_delta=uninstrumented
+fun:_IO_wsetb=uninstrumented
+fun:_Unwind_Backtrace=uninstrumented
+fun:_Unwind_DeleteException=uninstrumented
+fun:_Unwind_FindEnclosingFunction=uninstrumented
+fun:_Unwind_Find_FDE=uninstrumented
+fun:_Unwind_ForcedUnwind=uninstrumented
+fun:_Unwind_GetCFA=uninstrumented
+fun:_Unwind_GetDataRelBase=uninstrumented
+fun:_Unwind_GetGR=uninstrumented
+fun:_Unwind_GetIP=uninstrumented
+fun:_Unwind_GetIPInfo=uninstrumented
+fun:_Unwind_GetLanguageSpecificData=uninstrumented
+fun:_Unwind_GetRegionStart=uninstrumented
+fun:_Unwind_GetTextRelBase=uninstrumented
+fun:_Unwind_RaiseException=uninstrumented
+fun:_Unwind_Resume=uninstrumented
+fun:_Unwind_Resume_or_Rethrow=uninstrumented
+fun:_Unwind_SetGR=uninstrumented
+fun:_Unwind_SetIP=uninstrumented
+fun:__absvdi2=uninstrumented
+fun:__absvsi2=uninstrumented
+fun:__absvti2=uninstrumented
+fun:__acos_finite=uninstrumented
+fun:__acosf_finite=uninstrumented
+fun:__acosh_finite=uninstrumented
+fun:__acoshf_finite=uninstrumented
+fun:__acoshl_finite=uninstrumented
+fun:__acosl_finite=uninstrumented
+fun:__addtf3=uninstrumented
+fun:__addvdi3=uninstrumented
+fun:__addvsi3=uninstrumented
+fun:__addvti3=uninstrumented
+fun:__adjtimex=uninstrumented
+fun:__arch_prctl=uninstrumented
+fun:__argz_count=uninstrumented
+fun:__argz_next=uninstrumented
+fun:__argz_stringify=uninstrumented
+fun:__ashlti3=uninstrumented
+fun:__ashrti3=uninstrumented
+fun:__asin_finite=uninstrumented
+fun:__asinf_finite=uninstrumented
+fun:__asinl_finite=uninstrumented
+fun:__asprintf=uninstrumented
+fun:__asprintf_chk=uninstrumented
+fun:__assert=uninstrumented
+fun:__assert_fail=uninstrumented
+fun:__assert_perror_fail=uninstrumented
+fun:__atan2_finite=uninstrumented
+fun:__atan2f_finite=uninstrumented
+fun:__atan2l_finite=uninstrumented
+fun:__atanh_finite=uninstrumented
+fun:__atanhf_finite=uninstrumented
+fun:__atanhl_finite=uninstrumented
+fun:__b64_ntop=uninstrumented
+fun:__b64_pton=uninstrumented
+fun:__backtrace=uninstrumented
+fun:__backtrace_symbols=uninstrumented
+fun:__backtrace_symbols_fd=uninstrumented
+fun:__bid128_abs=uninstrumented
+fun:__bid128_add=uninstrumented
+fun:__bid128_class=uninstrumented
+fun:__bid128_copy=uninstrumented
+fun:__bid128_copySign=uninstrumented
+fun:__bid128_div=uninstrumented
+fun:__bid128_fma=uninstrumented
+fun:__bid128_from_int32=uninstrumented
+fun:__bid128_from_int64=uninstrumented
+fun:__bid128_from_uint32=uninstrumented
+fun:__bid128_from_uint64=uninstrumented
+fun:__bid128_isCanonical=uninstrumented
+fun:__bid128_isFinite=uninstrumented
+fun:__bid128_isInf=uninstrumented
+fun:__bid128_isNaN=uninstrumented
+fun:__bid128_isNormal=uninstrumented
+fun:__bid128_isSignaling=uninstrumented
+fun:__bid128_isSigned=uninstrumented
+fun:__bid128_isSubnormal=uninstrumented
+fun:__bid128_isZero=uninstrumented
+fun:__bid128_mul=uninstrumented
+fun:__bid128_negate=uninstrumented
+fun:__bid128_quiet_equal=uninstrumented
+fun:__bid128_quiet_greater=uninstrumented
+fun:__bid128_quiet_greater_equal=uninstrumented
+fun:__bid128_quiet_greater_unordered=uninstrumented
+fun:__bid128_quiet_less=uninstrumented
+fun:__bid128_quiet_less_equal=uninstrumented
+fun:__bid128_quiet_less_unordered=uninstrumented
+fun:__bid128_quiet_not_equal=uninstrumented
+fun:__bid128_quiet_not_greater=uninstrumented
+fun:__bid128_quiet_not_less=uninstrumented
+fun:__bid128_quiet_ordered=uninstrumented
+fun:__bid128_quiet_unordered=uninstrumented
+fun:__bid128_radix=uninstrumented
+fun:__bid128_sameQuantum=uninstrumented
+fun:__bid128_signaling_greater=uninstrumented
+fun:__bid128_signaling_greater_equal=uninstrumented
+fun:__bid128_signaling_greater_unordered=uninstrumented
+fun:__bid128_signaling_less=uninstrumented
+fun:__bid128_signaling_less_equal=uninstrumented
+fun:__bid128_signaling_less_unordered=uninstrumented
+fun:__bid128_signaling_not_greater=uninstrumented
+fun:__bid128_signaling_not_less=uninstrumented
+fun:__bid128_sub=uninstrumented
+fun:__bid128_to_bid32=uninstrumented
+fun:__bid128_to_bid64=uninstrumented
+fun:__bid128_to_binary128=uninstrumented
+fun:__bid128_to_binary32=uninstrumented
+fun:__bid128_to_binary64=uninstrumented
+fun:__bid128_to_binary80=uninstrumented
+fun:__bid128_to_int32_ceil=uninstrumented
+fun:__bid128_to_int32_floor=uninstrumented
+fun:__bid128_to_int32_int=uninstrumented
+fun:__bid128_to_int32_rnint=uninstrumented
+fun:__bid128_to_int32_rninta=uninstrumented
+fun:__bid128_to_int32_xceil=uninstrumented
+fun:__bid128_to_int32_xfloor=uninstrumented
+fun:__bid128_to_int32_xint=uninstrumented
+fun:__bid128_to_int32_xrnint=uninstrumented
+fun:__bid128_to_int32_xrninta=uninstrumented
+fun:__bid128_to_int64_ceil=uninstrumented
+fun:__bid128_to_int64_floor=uninstrumented
+fun:__bid128_to_int64_int=uninstrumented
+fun:__bid128_to_int64_rnint=uninstrumented
+fun:__bid128_to_int64_rninta=uninstrumented
+fun:__bid128_to_int64_xceil=uninstrumented
+fun:__bid128_to_int64_xfloor=uninstrumented
+fun:__bid128_to_int64_xint=uninstrumented
+fun:__bid128_to_int64_xrnint=uninstrumented
+fun:__bid128_to_int64_xrninta=uninstrumented
+fun:__bid128_to_uint32_ceil=uninstrumented
+fun:__bid128_to_uint32_floor=uninstrumented
+fun:__bid128_to_uint32_int=uninstrumented
+fun:__bid128_to_uint32_rnint=uninstrumented
+fun:__bid128_to_uint32_rninta=uninstrumented
+fun:__bid128_to_uint32_xceil=uninstrumented
+fun:__bid128_to_uint32_xfloor=uninstrumented
+fun:__bid128_to_uint32_xint=uninstrumented
+fun:__bid128_to_uint32_xrnint=uninstrumented
+fun:__bid128_to_uint32_xrninta=uninstrumented
+fun:__bid128_to_uint64_ceil=uninstrumented
+fun:__bid128_to_uint64_floor=uninstrumented
+fun:__bid128_to_uint64_int=uninstrumented
+fun:__bid128_to_uint64_rnint=uninstrumented
+fun:__bid128_to_uint64_rninta=uninstrumented
+fun:__bid128_to_uint64_xceil=uninstrumented
+fun:__bid128_to_uint64_xfloor=uninstrumented
+fun:__bid128_to_uint64_xint=uninstrumented
+fun:__bid128_to_uint64_xrnint=uninstrumented
+fun:__bid128_to_uint64_xrninta=uninstrumented
+fun:__bid128_totalOrder=uninstrumented
+fun:__bid128_totalOrderMag=uninstrumented
+fun:__bid128dd_add=uninstrumented
+fun:__bid128dd_div=uninstrumented
+fun:__bid128dd_mul=uninstrumented
+fun:__bid128dd_sub=uninstrumented
+fun:__bid128ddd_fma=uninstrumented
+fun:__bid128ddq_fma=uninstrumented
+fun:__bid128dq_add=uninstrumented
+fun:__bid128dq_div=uninstrumented
+fun:__bid128dq_mul=uninstrumented
+fun:__bid128dq_sub=uninstrumented
+fun:__bid128dqd_fma=uninstrumented
+fun:__bid128dqq_fma=uninstrumented
+fun:__bid128qd_add=uninstrumented
+fun:__bid128qd_div=uninstrumented
+fun:__bid128qd_mul=uninstrumented
+fun:__bid128qd_sub=uninstrumented
+fun:__bid128qdd_fma=uninstrumented
+fun:__bid128qdq_fma=uninstrumented
+fun:__bid128qqd_fma=uninstrumented
+fun:__bid32_to_bid128=uninstrumented
+fun:__bid32_to_bid64=uninstrumented
+fun:__bid32_to_binary128=uninstrumented
+fun:__bid32_to_binary32=uninstrumented
+fun:__bid32_to_binary64=uninstrumented
+fun:__bid32_to_binary80=uninstrumented
+fun:__bid64_abs=uninstrumented
+fun:__bid64_add=uninstrumented
+fun:__bid64_class=uninstrumented
+fun:__bid64_copy=uninstrumented
+fun:__bid64_copySign=uninstrumented
+fun:__bid64_div=uninstrumented
+fun:__bid64_from_int32=uninstrumented
+fun:__bid64_from_int64=uninstrumented
+fun:__bid64_from_uint32=uninstrumented
+fun:__bid64_from_uint64=uninstrumented
+fun:__bid64_isCanonical=uninstrumented
+fun:__bid64_isFinite=uninstrumented
+fun:__bid64_isInf=uninstrumented
+fun:__bid64_isNaN=uninstrumented
+fun:__bid64_isNormal=uninstrumented
+fun:__bid64_isSignaling=uninstrumented
+fun:__bid64_isSigned=uninstrumented
+fun:__bid64_isSubnormal=uninstrumented
+fun:__bid64_isZero=uninstrumented
+fun:__bid64_mul=uninstrumented
+fun:__bid64_negate=uninstrumented
+fun:__bid64_quiet_equal=uninstrumented
+fun:__bid64_quiet_greater=uninstrumented
+fun:__bid64_quiet_greater_equal=uninstrumented
+fun:__bid64_quiet_greater_unordered=uninstrumented
+fun:__bid64_quiet_less=uninstrumented
+fun:__bid64_quiet_less_equal=uninstrumented
+fun:__bid64_quiet_less_unordered=uninstrumented
+fun:__bid64_quiet_not_equal=uninstrumented
+fun:__bid64_quiet_not_greater=uninstrumented
+fun:__bid64_quiet_not_less=uninstrumented
+fun:__bid64_quiet_ordered=uninstrumented
+fun:__bid64_quiet_unordered=uninstrumented
+fun:__bid64_radix=uninstrumented
+fun:__bid64_sameQuantum=uninstrumented
+fun:__bid64_signaling_greater=uninstrumented
+fun:__bid64_signaling_greater_equal=uninstrumented
+fun:__bid64_signaling_greater_unordered=uninstrumented
+fun:__bid64_signaling_less=uninstrumented
+fun:__bid64_signaling_less_equal=uninstrumented
+fun:__bid64_signaling_less_unordered=uninstrumented
+fun:__bid64_signaling_not_greater=uninstrumented
+fun:__bid64_signaling_not_less=uninstrumented
+fun:__bid64_sub=uninstrumented
+fun:__bid64_to_bid128=uninstrumented
+fun:__bid64_to_bid32=uninstrumented
+fun:__bid64_to_binary128=uninstrumented
+fun:__bid64_to_binary32=uninstrumented
+fun:__bid64_to_binary64=uninstrumented
+fun:__bid64_to_binary80=uninstrumented
+fun:__bid64_to_int32_ceil=uninstrumented
+fun:__bid64_to_int32_floor=uninstrumented
+fun:__bid64_to_int32_int=uninstrumented
+fun:__bid64_to_int32_rnint=uninstrumented
+fun:__bid64_to_int32_rninta=uninstrumented
+fun:__bid64_to_int32_xceil=uninstrumented
+fun:__bid64_to_int32_xfloor=uninstrumented
+fun:__bid64_to_int32_xint=uninstrumented
+fun:__bid64_to_int32_xrnint=uninstrumented
+fun:__bid64_to_int32_xrninta=uninstrumented
+fun:__bid64_to_int64_ceil=uninstrumented
+fun:__bid64_to_int64_floor=uninstrumented
+fun:__bid64_to_int64_int=uninstrumented
+fun:__bid64_to_int64_rnint=uninstrumented
+fun:__bid64_to_int64_rninta=uninstrumented
+fun:__bid64_to_int64_xceil=uninstrumented
+fun:__bid64_to_int64_xfloor=uninstrumented
+fun:__bid64_to_int64_xint=uninstrumented
+fun:__bid64_to_int64_xrnint=uninstrumented
+fun:__bid64_to_int64_xrninta=uninstrumented
+fun:__bid64_to_uint32_ceil=uninstrumented
+fun:__bid64_to_uint32_floor=uninstrumented
+fun:__bid64_to_uint32_int=uninstrumented
+fun:__bid64_to_uint32_rnint=uninstrumented
+fun:__bid64_to_uint32_rninta=uninstrumented
+fun:__bid64_to_uint32_xceil=uninstrumented
+fun:__bid64_to_uint32_xfloor=uninstrumented
+fun:__bid64_to_uint32_xint=uninstrumented
+fun:__bid64_to_uint32_xrnint=uninstrumented
+fun:__bid64_to_uint32_xrninta=uninstrumented
+fun:__bid64_to_uint64_ceil=uninstrumented
+fun:__bid64_to_uint64_floor=uninstrumented
+fun:__bid64_to_uint64_int=uninstrumented
+fun:__bid64_to_uint64_rnint=uninstrumented
+fun:__bid64_to_uint64_rninta=uninstrumented
+fun:__bid64_to_uint64_xceil=uninstrumented
+fun:__bid64_to_uint64_xfloor=uninstrumented
+fun:__bid64_to_uint64_xint=uninstrumented
+fun:__bid64_to_uint64_xrnint=uninstrumented
+fun:__bid64_to_uint64_xrninta=uninstrumented
+fun:__bid64_totalOrder=uninstrumented
+fun:__bid64_totalOrderMag=uninstrumented
+fun:__bid64ddq_fma=uninstrumented
+fun:__bid64dq_add=uninstrumented
+fun:__bid64dq_div=uninstrumented
+fun:__bid64dq_mul=uninstrumented
+fun:__bid64dq_sub=uninstrumented
+fun:__bid64dqd_fma=uninstrumented
+fun:__bid64dqq_fma=uninstrumented
+fun:__bid64qd_add=uninstrumented
+fun:__bid64qd_div=uninstrumented
+fun:__bid64qd_mul=uninstrumented
+fun:__bid64qd_sub=uninstrumented
+fun:__bid64qdd_fma=uninstrumented
+fun:__bid64qdq_fma=uninstrumented
+fun:__bid64qq_add=uninstrumented
+fun:__bid64qq_div=uninstrumented
+fun:__bid64qq_mul=uninstrumented
+fun:__bid64qq_sub=uninstrumented
+fun:__bid64qqd_fma=uninstrumented
+fun:__bid64qqq_fma=uninstrumented
+fun:__bid_adddd3=uninstrumented
+fun:__bid_addsd3=uninstrumented
+fun:__bid_addtd3=uninstrumented
+fun:__bid_divdd3=uninstrumented
+fun:__bid_divsd3=uninstrumented
+fun:__bid_divtd3=uninstrumented
+fun:__bid_eqdd2=uninstrumented
+fun:__bid_eqsd2=uninstrumented
+fun:__bid_eqtd2=uninstrumented
+fun:__bid_extendddtd2=uninstrumented
+fun:__bid_extendddtf=uninstrumented
+fun:__bid_extendddxf=uninstrumented
+fun:__bid_extenddfdd=uninstrumented
+fun:__bid_extenddftd=uninstrumented
+fun:__bid_extendsddd2=uninstrumented
+fun:__bid_extendsddf=uninstrumented
+fun:__bid_extendsdtd2=uninstrumented
+fun:__bid_extendsdtf=uninstrumented
+fun:__bid_extendsdxf=uninstrumented
+fun:__bid_extendsfdd=uninstrumented
+fun:__bid_extendsfsd=uninstrumented
+fun:__bid_extendsftd=uninstrumented
+fun:__bid_extendtftd=uninstrumented
+fun:__bid_extendxftd=uninstrumented
+fun:__bid_fixdddi=uninstrumented
+fun:__bid_fixddsi=uninstrumented
+fun:__bid_fixsddi=uninstrumented
+fun:__bid_fixsdsi=uninstrumented
+fun:__bid_fixtddi=uninstrumented
+fun:__bid_fixtdsi=uninstrumented
+fun:__bid_fixunsdddi=uninstrumented
+fun:__bid_fixunsddsi=uninstrumented
+fun:__bid_fixunssddi=uninstrumented
+fun:__bid_fixunssdsi=uninstrumented
+fun:__bid_fixunstddi=uninstrumented
+fun:__bid_fixunstdsi=uninstrumented
+fun:__bid_floatdidd=uninstrumented
+fun:__bid_floatdisd=uninstrumented
+fun:__bid_floatditd=uninstrumented
+fun:__bid_floatsidd=uninstrumented
+fun:__bid_floatsisd=uninstrumented
+fun:__bid_floatsitd=uninstrumented
+fun:__bid_floatunsdidd=uninstrumented
+fun:__bid_floatunsdisd=uninstrumented
+fun:__bid_floatunsditd=uninstrumented
+fun:__bid_floatunssidd=uninstrumented
+fun:__bid_floatunssisd=uninstrumented
+fun:__bid_floatunssitd=uninstrumented
+fun:__bid_gedd2=uninstrumented
+fun:__bid_gesd2=uninstrumented
+fun:__bid_getd2=uninstrumented
+fun:__bid_gtdd2=uninstrumented
+fun:__bid_gtsd2=uninstrumented
+fun:__bid_gttd2=uninstrumented
+fun:__bid_ledd2=uninstrumented
+fun:__bid_lesd2=uninstrumented
+fun:__bid_letd2=uninstrumented
+fun:__bid_ltdd2=uninstrumented
+fun:__bid_ltsd2=uninstrumented
+fun:__bid_lttd2=uninstrumented
+fun:__bid_muldd3=uninstrumented
+fun:__bid_mulsd3=uninstrumented
+fun:__bid_multd3=uninstrumented
+fun:__bid_nedd2=uninstrumented
+fun:__bid_nesd2=uninstrumented
+fun:__bid_netd2=uninstrumented
+fun:__bid_round128_19_38=uninstrumented
+fun:__bid_round192_39_57=uninstrumented
+fun:__bid_round256_58_76=uninstrumented
+fun:__bid_round64_2_18=uninstrumented
+fun:__bid_subdd3=uninstrumented
+fun:__bid_subsd3=uninstrumented
+fun:__bid_subtd3=uninstrumented
+fun:__bid_truncdddf=uninstrumented
+fun:__bid_truncddsd2=uninstrumented
+fun:__bid_truncddsf=uninstrumented
+fun:__bid_truncdfsd=uninstrumented
+fun:__bid_truncsdsf=uninstrumented
+fun:__bid_trunctddd2=uninstrumented
+fun:__bid_trunctddf=uninstrumented
+fun:__bid_trunctdsd2=uninstrumented
+fun:__bid_trunctdsf=uninstrumented
+fun:__bid_trunctdtf=uninstrumented
+fun:__bid_trunctdxf=uninstrumented
+fun:__bid_trunctfdd=uninstrumented
+fun:__bid_trunctfsd=uninstrumented
+fun:__bid_truncxfdd=uninstrumented
+fun:__bid_truncxfsd=uninstrumented
+fun:__bid_unorddd2=uninstrumented
+fun:__bid_unordsd2=uninstrumented
+fun:__bid_unordtd2=uninstrumented
+fun:__binary128_to_bid128=uninstrumented
+fun:__binary128_to_bid32=uninstrumented
+fun:__binary128_to_bid64=uninstrumented
+fun:__binary32_to_bid128=uninstrumented
+fun:__binary32_to_bid32=uninstrumented
+fun:__binary32_to_bid64=uninstrumented
+fun:__binary64_to_bid128=uninstrumented
+fun:__binary64_to_bid32=uninstrumented
+fun:__binary64_to_bid64=uninstrumented
+fun:__binary80_to_bid128=uninstrumented
+fun:__binary80_to_bid32=uninstrumented
+fun:__binary80_to_bid64=uninstrumented
+fun:__bsd_getpgrp=uninstrumented
+fun:__bswapdi2=uninstrumented
+fun:__bswapsi2=uninstrumented
+fun:__bzero=uninstrumented
+fun:__call_tls_dtors=uninstrumented
+fun:__chk_fail=uninstrumented
+fun:__clear_cache=uninstrumented
+fun:__clock_getcpuclockid=uninstrumented
+fun:__clock_getres=uninstrumented
+fun:__clock_gettime=uninstrumented
+fun:__clock_nanosleep=uninstrumented
+fun:__clock_settime=uninstrumented
+fun:__clog10=uninstrumented
+fun:__clog10f=uninstrumented
+fun:__clog10l=uninstrumented
+fun:__clone=uninstrumented
+fun:__close=uninstrumented
+fun:__clrsbdi2=uninstrumented
+fun:__clrsbti2=uninstrumented
+fun:__clzdi2=uninstrumented
+fun:__clzti2=uninstrumented
+fun:__cmpti2=uninstrumented
+fun:__cmsg_nxthdr=uninstrumented
+fun:__confstr_chk=uninstrumented
+fun:__connect=uninstrumented
+fun:__cosh_finite=uninstrumented
+fun:__coshf_finite=uninstrumented
+fun:__coshl_finite=uninstrumented
+fun:__cpu_indicator_init=uninstrumented
+fun:__create_ib_request=uninstrumented
+fun:__ctype_b_loc=uninstrumented
+fun:__ctype_get_mb_cur_max=uninstrumented
+fun:__ctype_init=uninstrumented
+fun:__ctype_tolower_loc=uninstrumented
+fun:__ctype_toupper_loc=uninstrumented
+fun:__ctzdi2=uninstrumented
+fun:__ctzti2=uninstrumented
+fun:__cxa_at_quick_exit=uninstrumented
+fun:__cxa_atexit=uninstrumented
+fun:__cxa_finalize=uninstrumented
+fun:__cxa_thread_atexit_impl=uninstrumented
+fun:__cyg_profile_func_enter=uninstrumented
+fun:__cyg_profile_func_exit=uninstrumented
+fun:__dcgettext=uninstrumented
+fun:__default_morecore=uninstrumented
+fun:__deregister_frame=uninstrumented
+fun:__deregister_frame_info=uninstrumented
+fun:__deregister_frame_info_bases=uninstrumented
+fun:__dfp_clear_except=uninstrumented
+fun:__dfp_get_round=uninstrumented
+fun:__dfp_raise_except=uninstrumented
+fun:__dfp_set_round=uninstrumented
+fun:__dfp_test_except=uninstrumented
+fun:__dgettext=uninstrumented
+fun:__divdc3=uninstrumented
+fun:__divsc3=uninstrumented
+fun:__divtc3=uninstrumented
+fun:__divtf3=uninstrumented
+fun:__divti3=uninstrumented
+fun:__divxc3=uninstrumented
+fun:__dn_comp=uninstrumented
+fun:__dn_count_labels=uninstrumented
+fun:__dn_expand=uninstrumented
+fun:__dn_skipname=uninstrumented
+fun:__do_niscall3=uninstrumented
+fun:__dprintf_chk=uninstrumented
+fun:__dup2=uninstrumented
+fun:__duplocale=uninstrumented
+fun:__emutls_get_address=uninstrumented
+fun:__emutls_register_common=uninstrumented
+fun:__enable_execute_stack=uninstrumented
+fun:__endmntent=uninstrumented
+fun:__eprintf=uninstrumented
+fun:__eqtf2=uninstrumented
+fun:__errno_location=uninstrumented
+fun:__exp10_finite=uninstrumented
+fun:__exp10f_finite=uninstrumented
+fun:__exp10l_finite=uninstrumented
+fun:__exp2_finite=uninstrumented
+fun:__exp2f_finite=uninstrumented
+fun:__exp2l_finite=uninstrumented
+fun:__exp_finite=uninstrumented
+fun:__expf_finite=uninstrumented
+fun:__expl_finite=uninstrumented
+fun:__extenddftf2=uninstrumented
+fun:__extendsftf2=uninstrumented
+fun:__extendxftf2=uninstrumented
+fun:__fbufsize=uninstrumented
+fun:__fcntl=uninstrumented
+fun:__fdelt_chk=uninstrumented
+fun:__fdelt_warn=uninstrumented
+fun:__fentry__=uninstrumented
+fun:__ffs=uninstrumented
+fun:__ffsdi2=uninstrumented
+fun:__ffsti2=uninstrumented
+fun:__fgets_chk=uninstrumented
+fun:__fgets_unlocked_chk=uninstrumented
+fun:__fgetws_chk=uninstrumented
+fun:__fgetws_unlocked_chk=uninstrumented
+fun:__finite=uninstrumented
+fun:__finitef=uninstrumented
+fun:__finitel=uninstrumented
+fun:__fixdfti=uninstrumented
+fun:__fixsfti=uninstrumented
+fun:__fixtfdi=uninstrumented
+fun:__fixtfsi=uninstrumented
+fun:__fixtfti=uninstrumented
+fun:__fixunsdfdi=uninstrumented
+fun:__fixunsdfti=uninstrumented
+fun:__fixunssfdi=uninstrumented
+fun:__fixunssfti=uninstrumented
+fun:__fixunstfdi=uninstrumented
+fun:__fixunstfsi=uninstrumented
+fun:__fixunstfti=uninstrumented
+fun:__fixunsxfdi=uninstrumented
+fun:__fixunsxfti=uninstrumented
+fun:__fixxfti=uninstrumented
+fun:__flbf=uninstrumented
+fun:__floatditf=uninstrumented
+fun:__floatsitf=uninstrumented
+fun:__floattidf=uninstrumented
+fun:__floattisf=uninstrumented
+fun:__floattitf=uninstrumented
+fun:__floattixf=uninstrumented
+fun:__floatunditf=uninstrumented
+fun:__floatunsitf=uninstrumented
+fun:__floatuntidf=uninstrumented
+fun:__floatuntisf=uninstrumented
+fun:__floatuntitf=uninstrumented
+fun:__floatuntixf=uninstrumented
+fun:__fmod_finite=uninstrumented
+fun:__fmodf_finite=uninstrumented
+fun:__fmodl_finite=uninstrumented
+fun:__follow_path=uninstrumented
+fun:__fork=uninstrumented
+fun:__fortify_fail=uninstrumented
+fun:__fp_nquery=uninstrumented
+fun:__fp_query=uninstrumented
+fun:__fp_resstat=uninstrumented
+fun:__fpclassify=uninstrumented
+fun:__fpclassifyf=uninstrumented
+fun:__fpclassifyl=uninstrumented
+fun:__fpending=uninstrumented
+fun:__fprintf_chk=uninstrumented
+fun:__fpurge=uninstrumented
+fun:__fread_chk=uninstrumented
+fun:__fread_unlocked_chk=uninstrumented
+fun:__freadable=uninstrumented
+fun:__freading=uninstrumented
+fun:__free_fdresult=uninstrumented
+fun:__freelocale=uninstrumented
+fun:__fsetlocking=uninstrumented
+fun:__fstat=uninstrumented
+fun:__fwprintf_chk=uninstrumented
+fun:__fwritable=uninstrumented
+fun:__fwriting=uninstrumented
+fun:__fxstat=uninstrumented
+fun:__fxstat64=uninstrumented
+fun:__fxstatat=uninstrumented
+fun:__fxstatat64=uninstrumented
+fun:__gai_sigqueue=uninstrumented
+fun:__gamma_r_finite=uninstrumented
+fun:__gammaf_r_finite=uninstrumented
+fun:__gammal_r_finite=uninstrumented
+fun:__gcc_bcmp=uninstrumented
+fun:__gcc_personality_v0=uninstrumented
+fun:__gconv_get_alias_db=uninstrumented
+fun:__gconv_get_cache=uninstrumented
+fun:__gconv_get_modules_db=uninstrumented
+fun:__generic_findstack=uninstrumented
+fun:__generic_morestack=uninstrumented
+fun:__generic_morestack_set_initial_sp=uninstrumented
+fun:__generic_releasestack=uninstrumented
+fun:__get_cpu_features=uninstrumented
+fun:__getauxval=uninstrumented
+fun:__getcwd_chk=uninstrumented
+fun:__getdelim=uninstrumented
+fun:__getdomainname_chk=uninstrumented
+fun:__getf2=uninstrumented
+fun:__getgroups_chk=uninstrumented
+fun:__gethostname_chk=uninstrumented
+fun:__getlogin_r_chk=uninstrumented
+fun:__getmntent_r=uninstrumented
+fun:__getpagesize=uninstrumented
+fun:__getpgid=uninstrumented
+fun:__getpid=uninstrumented
+fun:__gets_chk=uninstrumented
+fun:__gettimeofday=uninstrumented
+fun:__getwd_chk=uninstrumented
+fun:__gmtime_r=uninstrumented
+fun:__gttf2=uninstrumented
+fun:__h_errno_location=uninstrumented
+fun:__hostalias=uninstrumented
+fun:__hypot_finite=uninstrumented
+fun:__hypotf_finite=uninstrumented
+fun:__hypotl_finite=uninstrumented
+fun:__internal_endnetgrent=uninstrumented
+fun:__internal_getnetgrent_r=uninstrumented
+fun:__internal_setnetgrent=uninstrumented
+fun:__isalnum_l=uninstrumented
+fun:__isalpha_l=uninstrumented
+fun:__isascii_l=uninstrumented
+fun:__isblank_l=uninstrumented
+fun:__iscntrl_l=uninstrumented
+fun:__isctype=uninstrumented
+fun:__isdigit_l=uninstrumented
+fun:__isgraph_l=uninstrumented
+fun:__isinf=uninstrumented
+fun:__isinff=uninstrumented
+fun:__isinfl=uninstrumented
+fun:__islower_l=uninstrumented
+fun:__isnan=uninstrumented
+fun:__isnanf=uninstrumented
+fun:__isnanl=uninstrumented
+fun:__isoc99_fscanf=uninstrumented
+fun:__isoc99_fwscanf=uninstrumented
+fun:__isoc99_scanf=uninstrumented
+fun:__isoc99_sscanf=uninstrumented
+fun:__isoc99_swscanf=uninstrumented
+fun:__isoc99_vfscanf=uninstrumented
+fun:__isoc99_vfwscanf=uninstrumented
+fun:__isoc99_vscanf=uninstrumented
+fun:__isoc99_vsscanf=uninstrumented
+fun:__isoc99_vswscanf=uninstrumented
+fun:__isoc99_vwscanf=uninstrumented
+fun:__isoc99_wscanf=uninstrumented
+fun:__isprint_l=uninstrumented
+fun:__ispunct_l=uninstrumented
+fun:__issignaling=uninstrumented
+fun:__issignalingf=uninstrumented
+fun:__issignalingl=uninstrumented
+fun:__isspace_l=uninstrumented
+fun:__isupper_l=uninstrumented
+fun:__iswalnum_l=uninstrumented
+fun:__iswalpha_l=uninstrumented
+fun:__iswblank_l=uninstrumented
+fun:__iswcntrl_l=uninstrumented
+fun:__iswctype=uninstrumented
+fun:__iswctype_l=uninstrumented
+fun:__iswdigit_l=uninstrumented
+fun:__iswgraph_l=uninstrumented
+fun:__iswlower_l=uninstrumented
+fun:__iswprint_l=uninstrumented
+fun:__iswpunct_l=uninstrumented
+fun:__iswspace_l=uninstrumented
+fun:__iswupper_l=uninstrumented
+fun:__iswxdigit_l=uninstrumented
+fun:__isxdigit_l=uninstrumented
+fun:__ivaliduser=uninstrumented
+fun:__j0_finite=uninstrumented
+fun:__j0f_finite=uninstrumented
+fun:__j0l_finite=uninstrumented
+fun:__j1_finite=uninstrumented
+fun:__j1f_finite=uninstrumented
+fun:__j1l_finite=uninstrumented
+fun:__jn_finite=uninstrumented
+fun:__jnf_finite=uninstrumented
+fun:__jnl_finite=uninstrumented
+fun:__letf2=uninstrumented
+fun:__lgamma_r_finite=uninstrumented
+fun:__lgammaf_r_finite=uninstrumented
+fun:__lgammal_r_finite=uninstrumented
+fun:__libc_alloca_cutoff=uninstrumented
+fun:__libc_allocate_rtsig=uninstrumented
+fun:__libc_allocate_rtsig_private=uninstrumented
+fun:__libc_calloc=uninstrumented
+fun:__libc_clntudp_bufcreate=uninstrumented
+fun:__libc_csu_fini=uninstrumented
+fun:__libc_csu_init=uninstrumented
+fun:__libc_current_sigrtmax=uninstrumented
+fun:__libc_current_sigrtmax_private=uninstrumented
+fun:__libc_current_sigrtmin=uninstrumented
+fun:__libc_current_sigrtmin_private=uninstrumented
+fun:__libc_dl_error_tsd=uninstrumented
+fun:__libc_dlclose=uninstrumented
+fun:__libc_dlopen_mode=uninstrumented
+fun:__libc_dlsym=uninstrumented
+fun:__libc_fatal=uninstrumented
+fun:__libc_fork=uninstrumented
+fun:__libc_free=uninstrumented
+fun:__libc_freeres=uninstrumented
+fun:__libc_ifunc_impl_list=uninstrumented
+fun:__libc_init_first=uninstrumented
+fun:__libc_longjmp=uninstrumented
+fun:__libc_mallinfo=uninstrumented
+fun:__libc_malloc=uninstrumented
+fun:__libc_mallopt=uninstrumented
+fun:__libc_memalign=uninstrumented
+fun:__libc_pthread_init=uninstrumented
+fun:__libc_pvalloc=uninstrumented
+fun:__libc_pwrite=uninstrumented
+fun:__libc_realloc=uninstrumented
+fun:__libc_res_nquery=uninstrumented
+fun:__libc_res_nsearch=uninstrumented
+fun:__libc_rpc_getport=uninstrumented
+fun:__libc_sa_len=uninstrumented
+fun:__libc_secure_getenv=uninstrumented
+fun:__libc_siglongjmp=uninstrumented
+fun:__libc_start_main=uninstrumented
+fun:__libc_system=uninstrumented
+fun:__libc_thread_freeres=uninstrumented
+fun:__libc_valloc=uninstrumented
+fun:__loc_aton=uninstrumented
+fun:__loc_ntoa=uninstrumented
+fun:__log10_finite=uninstrumented
+fun:__log10f_finite=uninstrumented
+fun:__log10l_finite=uninstrumented
+fun:__log2_finite=uninstrumented
+fun:__log2f_finite=uninstrumented
+fun:__log2l_finite=uninstrumented
+fun:__log_finite=uninstrumented
+fun:__logf_finite=uninstrumented
+fun:__logl_finite=uninstrumented
+fun:__longjmp_chk=uninstrumented
+fun:__lseek=uninstrumented
+fun:__lshrti3=uninstrumented
+fun:__lstat=uninstrumented
+fun:__lttf2=uninstrumented
+fun:__lxstat=uninstrumented
+fun:__lxstat64=uninstrumented
+fun:__madvise=uninstrumented
+fun:__mbrlen=uninstrumented
+fun:__mbrtowc=uninstrumented
+fun:__mbsnrtowcs_chk=uninstrumented
+fun:__mbsrtowcs_chk=uninstrumented
+fun:__mbstowcs_chk=uninstrumented
+fun:__memcpy_chk=uninstrumented
+fun:__memmove_chk=uninstrumented
+fun:__mempcpy=uninstrumented
+fun:__mempcpy_chk=uninstrumented
+fun:__mempcpy_small=uninstrumented
+fun:__memset_chk=uninstrumented
+fun:__mknod=uninstrumented
+fun:__mktemp=uninstrumented
+fun:__modti3=uninstrumented
+fun:__monstartup=uninstrumented
+fun:__morestack=uninstrumented
+fun:__morestack_allocate_stack_space=uninstrumented
+fun:__morestack_block_signals=uninstrumented
+fun:__morestack_fail=uninstrumented
+fun:__morestack_get_guard=uninstrumented
+fun:__morestack_large_model=uninstrumented
+fun:__morestack_load_mmap=uninstrumented
+fun:__morestack_make_guard=uninstrumented
+fun:__morestack_non_split=uninstrumented
+fun:__morestack_release_segments=uninstrumented
+fun:__morestack_set_guard=uninstrumented
+fun:__morestack_unblock_signals=uninstrumented
+fun:__mq_open_2=uninstrumented
+fun:__muldc3=uninstrumented
+fun:__mulsc3=uninstrumented
+fun:__multc3=uninstrumented
+fun:__multf3=uninstrumented
+fun:__multi3=uninstrumented
+fun:__mulvdi3=uninstrumented
+fun:__mulvsi3=uninstrumented
+fun:__mulvti3=uninstrumented
+fun:__mulxc3=uninstrumented
+fun:__nanosleep=uninstrumented
+fun:__negtf2=uninstrumented
+fun:__negti2=uninstrumented
+fun:__negvdi2=uninstrumented
+fun:__negvsi2=uninstrumented
+fun:__negvti2=uninstrumented
+fun:__netf2=uninstrumented
+fun:__newlocale=uninstrumented
+fun:__nis_default_access=uninstrumented
+fun:__nis_default_group=uninstrumented
+fun:__nis_default_owner=uninstrumented
+fun:__nis_default_ttl=uninstrumented
+fun:__nis_finddirectory=uninstrumented
+fun:__nis_hash=uninstrumented
+fun:__nisbind_connect=uninstrumented
+fun:__nisbind_create=uninstrumented
+fun:__nisbind_destroy=uninstrumented
+fun:__nisbind_next=uninstrumented
+fun:__nl_langinfo_l=uninstrumented
+fun:__ns_get16=uninstrumented
+fun:__ns_get32=uninstrumented
+fun:__ns_name_ntop=uninstrumented
+fun:__ns_name_unpack=uninstrumented
+fun:__nss_configure_lookup=uninstrumented
+fun:__nss_database_lookup=uninstrumented
+fun:__nss_disable_nscd=uninstrumented
+fun:__nss_group_lookup=uninstrumented
+fun:__nss_group_lookup2=uninstrumented
+fun:__nss_hostname_digits_dots=uninstrumented
+fun:__nss_hosts_lookup=uninstrumented
+fun:__nss_hosts_lookup2=uninstrumented
+fun:__nss_lookup=uninstrumented
+fun:__nss_lookup_function=uninstrumented
+fun:__nss_next=uninstrumented
+fun:__nss_next2=uninstrumented
+fun:__nss_passwd_lookup=uninstrumented
+fun:__nss_passwd_lookup2=uninstrumented
+fun:__nss_services_lookup2=uninstrumented
+fun:__obstack_printf_chk=uninstrumented
+fun:__obstack_vprintf_chk=uninstrumented
+fun:__open=uninstrumented
+fun:__open64=uninstrumented
+fun:__open64_2=uninstrumented
+fun:__open_2=uninstrumented
+fun:__open_catalog=uninstrumented
+fun:__openat64_2=uninstrumented
+fun:__openat_2=uninstrumented
+fun:__overflow=uninstrumented
+fun:__p_cdname=uninstrumented
+fun:__p_cdnname=uninstrumented
+fun:__p_class=uninstrumented
+fun:__p_fqname=uninstrumented
+fun:__p_fqnname=uninstrumented
+fun:__p_option=uninstrumented
+fun:__p_query=uninstrumented
+fun:__p_rcode=uninstrumented
+fun:__p_secstodate=uninstrumented
+fun:__p_time=uninstrumented
+fun:__p_type=uninstrumented
+fun:__paritydi2=uninstrumented
+fun:__parityti2=uninstrumented
+fun:__pipe=uninstrumented
+fun:__poll=uninstrumented
+fun:__poll_chk=uninstrumented
+fun:__popcountdi2=uninstrumented
+fun:__popcountti2=uninstrumented
+fun:__posix_getopt=uninstrumented
+fun:__pow_finite=uninstrumented
+fun:__powf_finite=uninstrumented
+fun:__powidf2=uninstrumented
+fun:__powisf2=uninstrumented
+fun:__powitf2=uninstrumented
+fun:__powixf2=uninstrumented
+fun:__powl_finite=uninstrumented
+fun:__ppoll_chk=uninstrumented
+fun:__pread64=uninstrumented
+fun:__pread64_chk=uninstrumented
+fun:__pread_chk=uninstrumented
+fun:__prepare_niscall=uninstrumented
+fun:__printf_chk=uninstrumented
+fun:__printf_fp=uninstrumented
+fun:__profile_frequency=uninstrumented
+fun:__pthread_atfork=uninstrumented
+fun:__pthread_cleanup_routine=uninstrumented
+fun:__pthread_clock_gettime=uninstrumented
+fun:__pthread_clock_settime=uninstrumented
+fun:__pthread_get_minstack=uninstrumented
+fun:__pthread_getspecific=uninstrumented
+fun:__pthread_initialize_minimal=uninstrumented
+fun:__pthread_key_create=uninstrumented
+fun:__pthread_mutex_destroy=uninstrumented
+fun:__pthread_mutex_init=uninstrumented
+fun:__pthread_mutex_lock=uninstrumented
+fun:__pthread_mutex_trylock=uninstrumented
+fun:__pthread_mutex_unlock=uninstrumented
+fun:__pthread_mutexattr_destroy=uninstrumented
+fun:__pthread_mutexattr_init=uninstrumented
+fun:__pthread_mutexattr_settype=uninstrumented
+fun:__pthread_once=uninstrumented
+fun:__pthread_register_cancel=uninstrumented
+fun:__pthread_register_cancel_defer=uninstrumented
+fun:__pthread_rwlock_destroy=uninstrumented
+fun:__pthread_rwlock_init=uninstrumented
+fun:__pthread_rwlock_rdlock=uninstrumented
+fun:__pthread_rwlock_tryrdlock=uninstrumented
+fun:__pthread_rwlock_trywrlock=uninstrumented
+fun:__pthread_rwlock_unlock=uninstrumented
+fun:__pthread_rwlock_wrlock=uninstrumented
+fun:__pthread_setspecific=uninstrumented
+fun:__pthread_unregister_cancel=uninstrumented
+fun:__pthread_unregister_cancel_restore=uninstrumented
+fun:__pthread_unwind=uninstrumented
+fun:__pthread_unwind_next=uninstrumented
+fun:__ptsname_r_chk=uninstrumented
+fun:__putlong=uninstrumented
+fun:__putshort=uninstrumented
+fun:__pwrite64=uninstrumented
+fun:__rawmemchr=uninstrumented
+fun:__read=uninstrumented
+fun:__read_chk=uninstrumented
+fun:__readlink_chk=uninstrumented
+fun:__readlinkat_chk=uninstrumented
+fun:__realpath_chk=uninstrumented
+fun:__recv_chk=uninstrumented
+fun:__recvfrom_chk=uninstrumented
+fun:__register_atfork=uninstrumented
+fun:__register_frame=uninstrumented
+fun:__register_frame_info=uninstrumented
+fun:__register_frame_info_bases=uninstrumented
+fun:__register_frame_info_table=uninstrumented
+fun:__register_frame_info_table_bases=uninstrumented
+fun:__register_frame_table=uninstrumented
+fun:__remainder_finite=uninstrumented
+fun:__remainderf_finite=uninstrumented
+fun:__remainderl_finite=uninstrumented
+fun:__res_close=uninstrumented
+fun:__res_dnok=uninstrumented
+fun:__res_hnok=uninstrumented
+fun:__res_hostalias=uninstrumented
+fun:__res_iclose=uninstrumented
+fun:__res_init=uninstrumented
+fun:__res_isourserver=uninstrumented
+fun:__res_mailok=uninstrumented
+fun:__res_maybe_init=uninstrumented
+fun:__res_mkquery=uninstrumented
+fun:__res_nameinquery=uninstrumented
+fun:__res_nclose=uninstrumented
+fun:__res_ninit=uninstrumented
+fun:__res_nmkquery=uninstrumented
+fun:__res_nquery=uninstrumented
+fun:__res_nquerydomain=uninstrumented
+fun:__res_nsearch=uninstrumented
+fun:__res_nsend=uninstrumented
+fun:__res_ownok=uninstrumented
+fun:__res_queriesmatch=uninstrumented
+fun:__res_query=uninstrumented
+fun:__res_querydomain=uninstrumented
+fun:__res_randomid=uninstrumented
+fun:__res_search=uninstrumented
+fun:__res_send=uninstrumented
+fun:__res_state=uninstrumented
+fun:__rpc_thread_createerr=uninstrumented
+fun:__rpc_thread_svc_fdset=uninstrumented
+fun:__rpc_thread_svc_max_pollfd=uninstrumented
+fun:__rpc_thread_svc_pollfd=uninstrumented
+fun:__sbrk=uninstrumented
+fun:__scalb_finite=uninstrumented
+fun:__scalbf_finite=uninstrumented
+fun:__scalbl_finite=uninstrumented
+fun:__sched_cpualloc=uninstrumented
+fun:__sched_cpucount=uninstrumented
+fun:__sched_cpufree=uninstrumented
+fun:__sched_get_priority_max=uninstrumented
+fun:__sched_get_priority_min=uninstrumented
+fun:__sched_getparam=uninstrumented
+fun:__sched_getscheduler=uninstrumented
+fun:__sched_setscheduler=uninstrumented
+fun:__sched_yield=uninstrumented
+fun:__secure_getenv=uninstrumented
+fun:__select=uninstrumented
+fun:__send=uninstrumented
+fun:__sendmmsg=uninstrumented
+fun:__setmntent=uninstrumented
+fun:__setpgid=uninstrumented
+fun:__sfp_handle_exceptions=uninstrumented
+fun:__sigaction=uninstrumented
+fun:__sigaddset=uninstrumented
+fun:__sigdelset=uninstrumented
+fun:__sigismember=uninstrumented
+fun:__signbit=uninstrumented
+fun:__signbitf=uninstrumented
+fun:__signbitl=uninstrumented
+fun:__sigpause=uninstrumented
+fun:__sigsetjmp=uninstrumented
+fun:__sigsuspend=uninstrumented
+fun:__sinh_finite=uninstrumented
+fun:__sinhf_finite=uninstrumented
+fun:__sinhl_finite=uninstrumented
+fun:__snprintf_chk=uninstrumented
+fun:__splitstack_block_signals=uninstrumented
+fun:__splitstack_block_signals_context=uninstrumented
+fun:__splitstack_find=uninstrumented
+fun:__splitstack_find_context=uninstrumented
+fun:__splitstack_getcontext=uninstrumented
+fun:__splitstack_makecontext=uninstrumented
+fun:__splitstack_releasecontext=uninstrumented
+fun:__splitstack_resetcontext=uninstrumented
+fun:__splitstack_setcontext=uninstrumented
+fun:__sprintf_chk=uninstrumented
+fun:__sqrt_finite=uninstrumented
+fun:__sqrtf_finite=uninstrumented
+fun:__sqrtl_finite=uninstrumented
+fun:__stack_chk_fail=uninstrumented
+fun:__stack_chk_fail_local=uninstrumented
+fun:__stack_split_initialize=uninstrumented
+fun:__stat=uninstrumented
+fun:__statfs=uninstrumented
+fun:__stpcpy=uninstrumented
+fun:__stpcpy_chk=uninstrumented
+fun:__stpcpy_small=uninstrumented
+fun:__stpncpy=uninstrumented
+fun:__stpncpy_chk=uninstrumented
+fun:__strcasecmp=uninstrumented
+fun:__strcasecmp_l=uninstrumented
+fun:__strcasestr=uninstrumented
+fun:__strcat_chk=uninstrumented
+fun:__strcoll_l=uninstrumented
+fun:__strcpy_chk=uninstrumented
+fun:__strcpy_small=uninstrumented
+fun:__strcspn_c1=uninstrumented
+fun:__strcspn_c2=uninstrumented
+fun:__strcspn_c3=uninstrumented
+fun:__strdup=uninstrumented
+fun:__strerror_r=uninstrumented
+fun:__strfmon_l=uninstrumented
+fun:__strftime_l=uninstrumented
+fun:__strncasecmp_l=uninstrumented
+fun:__strncat_chk=uninstrumented
+fun:__strncpy_chk=uninstrumented
+fun:__strndup=uninstrumented
+fun:__strpbrk_c2=uninstrumented
+fun:__strpbrk_c3=uninstrumented
+fun:__strsep_1c=uninstrumented
+fun:__strsep_2c=uninstrumented
+fun:__strsep_3c=uninstrumented
+fun:__strsep_g=uninstrumented
+fun:__strspn_c1=uninstrumented
+fun:__strspn_c2=uninstrumented
+fun:__strspn_c3=uninstrumented
+fun:__strtod_internal=uninstrumented
+fun:__strtod_l=uninstrumented
+fun:__strtof_internal=uninstrumented
+fun:__strtof_l=uninstrumented
+fun:__strtok_r=uninstrumented
+fun:__strtok_r_1c=uninstrumented
+fun:__strtol_internal=uninstrumented
+fun:__strtol_l=uninstrumented
+fun:__strtold_internal=uninstrumented
+fun:__strtold_l=uninstrumented
+fun:__strtoll_internal=uninstrumented
+fun:__strtoll_l=uninstrumented
+fun:__strtoul_internal=uninstrumented
+fun:__strtoul_l=uninstrumented
+fun:__strtoull_internal=uninstrumented
+fun:__strtoull_l=uninstrumented
+fun:__strverscmp=uninstrumented
+fun:__strxfrm_l=uninstrumented
+fun:__subtf3=uninstrumented
+fun:__subvdi3=uninstrumented
+fun:__subvsi3=uninstrumented
+fun:__subvti3=uninstrumented
+fun:__swprintf_chk=uninstrumented
+fun:__sym_ntop=uninstrumented
+fun:__sym_ntos=uninstrumented
+fun:__sym_ston=uninstrumented
+fun:__sysconf=uninstrumented
+fun:__sysctl=uninstrumented
+fun:__syslog_chk=uninstrumented
+fun:__sysv_signal=uninstrumented
+fun:__tls_get_addr=uninstrumented
+fun:__toascii_l=uninstrumented
+fun:__tolower_l=uninstrumented
+fun:__toupper_l=uninstrumented
+fun:__towctrans=uninstrumented
+fun:__towctrans_l=uninstrumented
+fun:__towlower_l=uninstrumented
+fun:__towupper_l=uninstrumented
+fun:__trunctfdf2=uninstrumented
+fun:__trunctfsf2=uninstrumented
+fun:__trunctfxf2=uninstrumented
+fun:__ttyname_r_chk=uninstrumented
+fun:__ucmpti2=uninstrumented
+fun:__udiv_w_sdiv=uninstrumented
+fun:__udivmodti4=uninstrumented
+fun:__udivti3=uninstrumented
+fun:__uflow=uninstrumented
+fun:__umodti3=uninstrumented
+fun:__underflow=uninstrumented
+fun:__unordtf2=uninstrumented
+fun:__uselocale=uninstrumented
+fun:__vasprintf_chk=uninstrumented
+fun:__vdprintf_chk=uninstrumented
+fun:__vfork=uninstrumented
+fun:__vfprintf_chk=uninstrumented
+fun:__vfscanf=uninstrumented
+fun:__vfwprintf_chk=uninstrumented
+fun:__vprintf_chk=uninstrumented
+fun:__vsnprintf=uninstrumented
+fun:__vsnprintf_chk=uninstrumented
+fun:__vsprintf_chk=uninstrumented
+fun:__vsscanf=uninstrumented
+fun:__vswprintf_chk=uninstrumented
+fun:__vsyslog_chk=uninstrumented
+fun:__vwprintf_chk=uninstrumented
+fun:__wait=uninstrumented
+fun:__waitpid=uninstrumented
+fun:__warn_memset_zero_len=uninstrumented
+fun:__wcpcpy_chk=uninstrumented
+fun:__wcpncpy_chk=uninstrumented
+fun:__wcrtomb_chk=uninstrumented
+fun:__wcscasecmp_l=uninstrumented
+fun:__wcscat_chk=uninstrumented
+fun:__wcscoll_l=uninstrumented
+fun:__wcscpy_chk=uninstrumented
+fun:__wcsftime_l=uninstrumented
+fun:__wcsncasecmp_l=uninstrumented
+fun:__wcsncat_chk=uninstrumented
+fun:__wcsncpy_chk=uninstrumented
+fun:__wcsnrtombs_chk=uninstrumented
+fun:__wcsrtombs_chk=uninstrumented
+fun:__wcstod_internal=uninstrumented
+fun:__wcstod_l=uninstrumented
+fun:__wcstof_internal=uninstrumented
+fun:__wcstof_l=uninstrumented
+fun:__wcstol_internal=uninstrumented
+fun:__wcstol_l=uninstrumented
+fun:__wcstold_internal=uninstrumented
+fun:__wcstold_l=uninstrumented
+fun:__wcstoll_internal=uninstrumented
+fun:__wcstoll_l=uninstrumented
+fun:__wcstombs_chk=uninstrumented
+fun:__wcstoul_internal=uninstrumented
+fun:__wcstoul_l=uninstrumented
+fun:__wcstoull_internal=uninstrumented
+fun:__wcstoull_l=uninstrumented
+fun:__wcsxfrm_l=uninstrumented
+fun:__wctomb_chk=uninstrumented
+fun:__wctrans_l=uninstrumented
+fun:__wctype_l=uninstrumented
+fun:__wmemcpy_chk=uninstrumented
+fun:__wmemmove_chk=uninstrumented
+fun:__wmempcpy_chk=uninstrumented
+fun:__wmemset_chk=uninstrumented
+fun:__woverflow=uninstrumented
+fun:__wprintf_chk=uninstrumented
+fun:__wrap_pthread_create=uninstrumented
+fun:__write=uninstrumented
+fun:__wuflow=uninstrumented
+fun:__wunderflow=uninstrumented
+fun:__xmknod=uninstrumented
+fun:__xmknodat=uninstrumented
+fun:__xpg_basename=uninstrumented
+fun:__xpg_sigpause=uninstrumented
+fun:__xpg_strerror_r=uninstrumented
+fun:__xstat=uninstrumented
+fun:__xstat64=uninstrumented
+fun:__y0_finite=uninstrumented
+fun:__y0f_finite=uninstrumented
+fun:__y0l_finite=uninstrumented
+fun:__y1_finite=uninstrumented
+fun:__y1f_finite=uninstrumented
+fun:__y1l_finite=uninstrumented
+fun:__yn_finite=uninstrumented
+fun:__ynf_finite=uninstrumented
+fun:__ynl_finite=uninstrumented
+fun:__yp_check=uninstrumented
+fun:_authenticate=uninstrumented
+fun:_dl_addr=uninstrumented
+fun:_dl_allocate_tls=uninstrumented
+fun:_dl_allocate_tls_init=uninstrumented
+fun:_dl_deallocate_tls=uninstrumented
+fun:_dl_debug_state=uninstrumented
+fun:_dl_find_dso_for_object=uninstrumented
+fun:_dl_get_tls_static_info=uninstrumented
+fun:_dl_make_stack_executable=uninstrumented
+fun:_dl_mcount=uninstrumented
+fun:_dl_mcount_wrapper=uninstrumented
+fun:_dl_mcount_wrapper_check=uninstrumented
+fun:_dl_rtld_di_serinfo=uninstrumented
+fun:_dl_sym=uninstrumented
+fun:_dl_tls_setup=uninstrumented
+fun:_dl_vsym=uninstrumented
+fun:_exit=uninstrumented
+fun:_flushlbf=uninstrumented
+fun:_gethtbyaddr=uninstrumented
+fun:_gethtbyname=uninstrumented
+fun:_gethtbyname2=uninstrumented
+fun:_gethtent=uninstrumented
+fun:_getlong=uninstrumented
+fun:_getshort=uninstrumented
+fun:_longjmp=uninstrumented
+fun:_mcleanup=uninstrumented
+fun:_mcount=uninstrumented
+fun:_nsl_default_nss=uninstrumented
+fun:_nss_files_parse_grent=uninstrumented
+fun:_nss_files_parse_pwent=uninstrumented
+fun:_nss_files_parse_sgent=uninstrumented
+fun:_nss_files_parse_spent=uninstrumented
+fun:_obstack_allocated_p=uninstrumented
+fun:_obstack_begin=uninstrumented
+fun:_obstack_begin_1=uninstrumented
+fun:_obstack_free=uninstrumented
+fun:_obstack_memory_used=uninstrumented
+fun:_obstack_newchunk=uninstrumented
+fun:_pthread_cleanup_pop=uninstrumented
+fun:_pthread_cleanup_pop_restore=uninstrumented
+fun:_pthread_cleanup_push=uninstrumented
+fun:_pthread_cleanup_push_defer=uninstrumented
+fun:_rpc_dtablesize=uninstrumented
+fun:_seterr_reply=uninstrumented
+fun:_sethtent=uninstrumented
+fun:_setjmp=uninstrumented
+fun:_tolower=uninstrumented
+fun:_toupper=uninstrumented
+fun:_xdr_ib_request=uninstrumented
+fun:_xdr_nis_result=uninstrumented
+fun:a64l=uninstrumented
+fun:abort=uninstrumented
+fun:abs=uninstrumented
+fun:accept=uninstrumented
+fun:accept4=uninstrumented
+fun:access=uninstrumented
+fun:acct=uninstrumented
+fun:acos=uninstrumented
+fun:acosf=uninstrumented
+fun:acosh=uninstrumented
+fun:acoshf=uninstrumented
+fun:acoshl=uninstrumented
+fun:acosl=uninstrumented
+fun:addmntent=uninstrumented
+fun:addseverity=uninstrumented
+fun:adjtime=uninstrumented
+fun:adjtimex=uninstrumented
+fun:advance=uninstrumented
+fun:aio_cancel=uninstrumented
+fun:aio_cancel64=uninstrumented
+fun:aio_error=uninstrumented
+fun:aio_error64=uninstrumented
+fun:aio_fsync=uninstrumented
+fun:aio_fsync64=uninstrumented
+fun:aio_init=uninstrumented
+fun:aio_read=uninstrumented
+fun:aio_read64=uninstrumented
+fun:aio_return=uninstrumented
+fun:aio_return64=uninstrumented
+fun:aio_suspend=uninstrumented
+fun:aio_suspend64=uninstrumented
+fun:aio_write=uninstrumented
+fun:aio_write64=uninstrumented
+fun:alarm=uninstrumented
+fun:aligned_alloc=uninstrumented
+fun:alphasort=uninstrumented
+fun:alphasort64=uninstrumented
+fun:arch_prctl=uninstrumented
+fun:argp_error=uninstrumented
+fun:argp_failure=uninstrumented
+fun:argp_help=uninstrumented
+fun:argp_parse=uninstrumented
+fun:argp_state_help=uninstrumented
+fun:argp_usage=uninstrumented
+fun:argz_add=uninstrumented
+fun:argz_add_sep=uninstrumented
+fun:argz_append=uninstrumented
+fun:argz_count=uninstrumented
+fun:argz_create=uninstrumented
+fun:argz_create_sep=uninstrumented
+fun:argz_delete=uninstrumented
+fun:argz_extract=uninstrumented
+fun:argz_insert=uninstrumented
+fun:argz_next=uninstrumented
+fun:argz_replace=uninstrumented
+fun:argz_stringify=uninstrumented
+fun:asctime=uninstrumented
+fun:asctime_r=uninstrumented
+fun:asin=uninstrumented
+fun:asinf=uninstrumented
+fun:asinh=uninstrumented
+fun:asinhf=uninstrumented
+fun:asinhl=uninstrumented
+fun:asinl=uninstrumented
+fun:asprintf=uninstrumented
+fun:at_quick_exit=uninstrumented
+fun:atan=uninstrumented
+fun:atan2=uninstrumented
+fun:atan2f=uninstrumented
+fun:atan2l=uninstrumented
+fun:atanf=uninstrumented
+fun:atanh=uninstrumented
+fun:atanhf=uninstrumented
+fun:atanhl=uninstrumented
+fun:atanl=uninstrumented
+fun:atexit=uninstrumented
+fun:atof=uninstrumented
+fun:atoi=uninstrumented
+fun:atol=uninstrumented
+fun:atoll=uninstrumented
+fun:authdes_create=uninstrumented
+fun:authdes_getucred=uninstrumented
+fun:authdes_pk_create=uninstrumented
+fun:authnone_create=uninstrumented
+fun:authunix_create=uninstrumented
+fun:authunix_create_default=uninstrumented
+fun:backtrace=uninstrumented
+fun:backtrace_symbols=uninstrumented
+fun:backtrace_symbols_fd=uninstrumented
+fun:basename=uninstrumented
+fun:bcmp=uninstrumented
+fun:bcopy=uninstrumented
+fun:bdflush=uninstrumented
+fun:bind=uninstrumented
+fun:bind_textdomain_codeset=uninstrumented
+fun:bindresvport=uninstrumented
+fun:bindtextdomain=uninstrumented
+fun:brk=uninstrumented
+fun:bsd_signal=uninstrumented
+fun:bsearch=uninstrumented
+fun:btowc=uninstrumented
+fun:bzero=uninstrumented
+fun:c16rtomb=uninstrumented
+fun:c32rtomb=uninstrumented
+fun:cabs=uninstrumented
+fun:cabsf=uninstrumented
+fun:cabsl=uninstrumented
+fun:cacos=uninstrumented
+fun:cacosf=uninstrumented
+fun:cacosh=uninstrumented
+fun:cacoshf=uninstrumented
+fun:cacoshl=uninstrumented
+fun:cacosl=uninstrumented
+fun:calloc=uninstrumented
+fun:callrpc=uninstrumented
+fun:canonicalize_file_name=uninstrumented
+fun:capget=uninstrumented
+fun:capset=uninstrumented
+fun:carg=uninstrumented
+fun:cargf=uninstrumented
+fun:cargl=uninstrumented
+fun:casin=uninstrumented
+fun:casinf=uninstrumented
+fun:casinh=uninstrumented
+fun:casinhf=uninstrumented
+fun:casinhl=uninstrumented
+fun:casinl=uninstrumented
+fun:catan=uninstrumented
+fun:catanf=uninstrumented
+fun:catanh=uninstrumented
+fun:catanhf=uninstrumented
+fun:catanhl=uninstrumented
+fun:catanl=uninstrumented
+fun:catclose=uninstrumented
+fun:catgets=uninstrumented
+fun:catopen=uninstrumented
+fun:cbc_crypt=uninstrumented
+fun:cbrt=uninstrumented
+fun:cbrtf=uninstrumented
+fun:cbrtl=uninstrumented
+fun:ccos=uninstrumented
+fun:ccosf=uninstrumented
+fun:ccosh=uninstrumented
+fun:ccoshf=uninstrumented
+fun:ccoshl=uninstrumented
+fun:ccosl=uninstrumented
+fun:ceil=uninstrumented
+fun:ceilf=uninstrumented
+fun:ceill=uninstrumented
+fun:cexp=uninstrumented
+fun:cexpf=uninstrumented
+fun:cexpl=uninstrumented
+fun:cfgetispeed=uninstrumented
+fun:cfgetospeed=uninstrumented
+fun:cfmakeraw=uninstrumented
+fun:cfree=uninstrumented
+fun:cfsetispeed=uninstrumented
+fun:cfsetospeed=uninstrumented
+fun:cfsetspeed=uninstrumented
+fun:chdir=uninstrumented
+fun:chflags=uninstrumented
+fun:chmod=uninstrumented
+fun:chown=uninstrumented
+fun:chroot=uninstrumented
+fun:cimag=uninstrumented
+fun:cimagf=uninstrumented
+fun:cimagl=uninstrumented
+fun:clearenv=uninstrumented
+fun:clearerr=uninstrumented
+fun:clearerr_unlocked=uninstrumented
+fun:clnt_broadcast=uninstrumented
+fun:clnt_create=uninstrumented
+fun:clnt_pcreateerror=uninstrumented
+fun:clnt_perrno=uninstrumented
+fun:clnt_perror=uninstrumented
+fun:clnt_spcreateerror=uninstrumented
+fun:clnt_sperrno=uninstrumented
+fun:clnt_sperror=uninstrumented
+fun:clntraw_create=uninstrumented
+fun:clnttcp_create=uninstrumented
+fun:clntudp_bufcreate=uninstrumented
+fun:clntudp_create=uninstrumented
+fun:clntunix_create=uninstrumented
+fun:clock=uninstrumented
+fun:clock_adjtime=uninstrumented
+fun:clock_getcpuclockid=uninstrumented
+fun:clock_getres=uninstrumented
+fun:clock_gettime=uninstrumented
+fun:clock_nanosleep=uninstrumented
+fun:clock_settime=uninstrumented
+fun:clog=uninstrumented
+fun:clog10=uninstrumented
+fun:clog10f=uninstrumented
+fun:clog10l=uninstrumented
+fun:clogf=uninstrumented
+fun:clogl=uninstrumented
+fun:clone=uninstrumented
+fun:close=uninstrumented
+fun:closedir=uninstrumented
+fun:closelog=uninstrumented
+fun:confstr=uninstrumented
+fun:conj=uninstrumented
+fun:conjf=uninstrumented
+fun:conjl=uninstrumented
+fun:connect=uninstrumented
+fun:copysign=uninstrumented
+fun:copysignf=uninstrumented
+fun:copysignl=uninstrumented
+fun:cos=uninstrumented
+fun:cosf=uninstrumented
+fun:cosh=uninstrumented
+fun:coshf=uninstrumented
+fun:coshl=uninstrumented
+fun:cosl=uninstrumented
+fun:cpow=uninstrumented
+fun:cpowf=uninstrumented
+fun:cpowl=uninstrumented
+fun:cproj=uninstrumented
+fun:cprojf=uninstrumented
+fun:cprojl=uninstrumented
+fun:creal=uninstrumented
+fun:crealf=uninstrumented
+fun:creall=uninstrumented
+fun:creat=uninstrumented
+fun:creat64=uninstrumented
+fun:create_module=uninstrumented
+fun:crypt=uninstrumented
+fun:crypt_r=uninstrumented
+fun:csin=uninstrumented
+fun:csinf=uninstrumented
+fun:csinh=uninstrumented
+fun:csinhf=uninstrumented
+fun:csinhl=uninstrumented
+fun:csinl=uninstrumented
+fun:csqrt=uninstrumented
+fun:csqrtf=uninstrumented
+fun:csqrtl=uninstrumented
+fun:ctan=uninstrumented
+fun:ctanf=uninstrumented
+fun:ctanh=uninstrumented
+fun:ctanhf=uninstrumented
+fun:ctanhl=uninstrumented
+fun:ctanl=uninstrumented
+fun:ctermid=uninstrumented
+fun:ctime=uninstrumented
+fun:ctime_r=uninstrumented
+fun:cuserid=uninstrumented
+fun:daemon=uninstrumented
+fun:dcgettext=uninstrumented
+fun:dcngettext=uninstrumented
+fun:delete_module=uninstrumented
+fun:des_setparity=uninstrumented
+fun:dgettext=uninstrumented
+fun:difftime=uninstrumented
+fun:dirfd=uninstrumented
+fun:dirname=uninstrumented
+fun:div=uninstrumented
+fun:dl_iterate_phdr=uninstrumented
+fun:dladdr=uninstrumented
+fun:dladdr1=uninstrumented
+fun:dlclose=uninstrumented
+fun:dlerror=uninstrumented
+fun:dlinfo=uninstrumented
+fun:dlmopen=uninstrumented
+fun:dlopen=uninstrumented
+fun:dlsym=uninstrumented
+fun:dlvsym=uninstrumented
+fun:dngettext=uninstrumented
+fun:dprintf=uninstrumented
+fun:drand48=uninstrumented
+fun:drand48_r=uninstrumented
+fun:drem=uninstrumented
+fun:dremf=uninstrumented
+fun:dreml=uninstrumented
+fun:dup=uninstrumented
+fun:dup2=uninstrumented
+fun:dup3=uninstrumented
+fun:duplocale=uninstrumented
+fun:dysize=uninstrumented
+fun:eaccess=uninstrumented
+fun:ecb_crypt=uninstrumented
+fun:ecvt=uninstrumented
+fun:ecvt_r=uninstrumented
+fun:encrypt=uninstrumented
+fun:encrypt_r=uninstrumented
+fun:endaliasent=uninstrumented
+fun:endfsent=uninstrumented
+fun:endgrent=uninstrumented
+fun:endhostent=uninstrumented
+fun:endmntent=uninstrumented
+fun:endnetent=uninstrumented
+fun:endnetgrent=uninstrumented
+fun:endprotoent=uninstrumented
+fun:endpwent=uninstrumented
+fun:endrpcent=uninstrumented
+fun:endservent=uninstrumented
+fun:endsgent=uninstrumented
+fun:endspent=uninstrumented
+fun:endttyent=uninstrumented
+fun:endusershell=uninstrumented
+fun:endutent=uninstrumented
+fun:endutxent=uninstrumented
+fun:envz_add=uninstrumented
+fun:envz_entry=uninstrumented
+fun:envz_get=uninstrumented
+fun:envz_merge=uninstrumented
+fun:envz_remove=uninstrumented
+fun:envz_strip=uninstrumented
+fun:epoll_create=uninstrumented
+fun:epoll_create1=uninstrumented
+fun:epoll_ctl=uninstrumented
+fun:epoll_pwait=uninstrumented
+fun:epoll_wait=uninstrumented
+fun:erand48=uninstrumented
+fun:erand48_r=uninstrumented
+fun:erf=uninstrumented
+fun:erfc=uninstrumented
+fun:erfcf=uninstrumented
+fun:erfcl=uninstrumented
+fun:erff=uninstrumented
+fun:erfl=uninstrumented
+fun:err=uninstrumented
+fun:error=uninstrumented
+fun:error_at_line=uninstrumented
+fun:errx=uninstrumented
+fun:ether_aton=uninstrumented
+fun:ether_aton_r=uninstrumented
+fun:ether_hostton=uninstrumented
+fun:ether_line=uninstrumented
+fun:ether_ntoa=uninstrumented
+fun:ether_ntoa_r=uninstrumented
+fun:ether_ntohost=uninstrumented
+fun:euidaccess=uninstrumented
+fun:eventfd=uninstrumented
+fun:eventfd_read=uninstrumented
+fun:eventfd_write=uninstrumented
+fun:execl=uninstrumented
+fun:execle=uninstrumented
+fun:execlp=uninstrumented
+fun:execv=uninstrumented
+fun:execve=uninstrumented
+fun:execvp=uninstrumented
+fun:execvpe=uninstrumented
+fun:exit=uninstrumented
+fun:exp=uninstrumented
+fun:exp10=uninstrumented
+fun:exp10f=uninstrumented
+fun:exp10l=uninstrumented
+fun:exp2=uninstrumented
+fun:exp2f=uninstrumented
+fun:exp2l=uninstrumented
+fun:expf=uninstrumented
+fun:expl=uninstrumented
+fun:expm1=uninstrumented
+fun:expm1f=uninstrumented
+fun:expm1l=uninstrumented
+fun:fabs=uninstrumented
+fun:fabsf=uninstrumented
+fun:fabsl=uninstrumented
+fun:faccessat=uninstrumented
+fun:fallocate=uninstrumented
+fun:fallocate64=uninstrumented
+fun:fanotify_init=uninstrumented
+fun:fanotify_mark=uninstrumented
+fun:fattach=uninstrumented
+fun:fchdir=uninstrumented
+fun:fchflags=uninstrumented
+fun:fchmod=uninstrumented
+fun:fchmodat=uninstrumented
+fun:fchown=uninstrumented
+fun:fchownat=uninstrumented
+fun:fclose=uninstrumented
+fun:fcloseall=uninstrumented
+fun:fcntl=uninstrumented
+fun:fcrypt=uninstrumented
+fun:fcvt=uninstrumented
+fun:fcvt_r=uninstrumented
+fun:fdatasync=uninstrumented
+fun:fdetach=uninstrumented
+fun:fdim=uninstrumented
+fun:fdimf=uninstrumented
+fun:fdiml=uninstrumented
+fun:fdopen=uninstrumented
+fun:fdopendir=uninstrumented
+fun:feclearexcept=uninstrumented
+fun:fedisableexcept=uninstrumented
+fun:feenableexcept=uninstrumented
+fun:fegetenv=uninstrumented
+fun:fegetexcept=uninstrumented
+fun:fegetexceptflag=uninstrumented
+fun:fegetround=uninstrumented
+fun:feholdexcept=uninstrumented
+fun:feof=uninstrumented
+fun:feof_unlocked=uninstrumented
+fun:feraiseexcept=uninstrumented
+fun:ferror=uninstrumented
+fun:ferror_unlocked=uninstrumented
+fun:fesetenv=uninstrumented
+fun:fesetexceptflag=uninstrumented
+fun:fesetround=uninstrumented
+fun:fetestexcept=uninstrumented
+fun:feupdateenv=uninstrumented
+fun:fexecve=uninstrumented
+fun:fflush=uninstrumented
+fun:fflush_unlocked=uninstrumented
+fun:ffs=uninstrumented
+fun:ffsl=uninstrumented
+fun:ffsll=uninstrumented
+fun:fgetc=uninstrumented
+fun:fgetc_unlocked=uninstrumented
+fun:fgetgrent=uninstrumented
+fun:fgetgrent_r=uninstrumented
+fun:fgetpos=uninstrumented
+fun:fgetpos64=uninstrumented
+fun:fgetpwent=uninstrumented
+fun:fgetpwent_r=uninstrumented
+fun:fgets=uninstrumented
+fun:fgets_unlocked=uninstrumented
+fun:fgetsgent=uninstrumented
+fun:fgetsgent_r=uninstrumented
+fun:fgetspent=uninstrumented
+fun:fgetspent_r=uninstrumented
+fun:fgetwc=uninstrumented
+fun:fgetwc_unlocked=uninstrumented
+fun:fgetws=uninstrumented
+fun:fgetws_unlocked=uninstrumented
+fun:fgetxattr=uninstrumented
+fun:fileno=uninstrumented
+fun:fileno_unlocked=uninstrumented
+fun:finite=uninstrumented
+fun:finitef=uninstrumented
+fun:finitel=uninstrumented
+fun:flistxattr=uninstrumented
+fun:flock=uninstrumented
+fun:flockfile=uninstrumented
+fun:floor=uninstrumented
+fun:floorf=uninstrumented
+fun:floorl=uninstrumented
+fun:fma=uninstrumented
+fun:fmaf=uninstrumented
+fun:fmal=uninstrumented
+fun:fmax=uninstrumented
+fun:fmaxf=uninstrumented
+fun:fmaxl=uninstrumented
+fun:fmemopen=uninstrumented
+fun:fmin=uninstrumented
+fun:fminf=uninstrumented
+fun:fminl=uninstrumented
+fun:fmod=uninstrumented
+fun:fmodf=uninstrumented
+fun:fmodl=uninstrumented
+fun:fmtmsg=uninstrumented
+fun:fnmatch=uninstrumented
+fun:fopen=uninstrumented
+fun:fopen64=uninstrumented
+fun:fopencookie=uninstrumented
+fun:fork=uninstrumented
+fun:forkpty=uninstrumented
+fun:fpathconf=uninstrumented
+fun:fprintf=uninstrumented
+fun:fputc=uninstrumented
+fun:fputc_unlocked=uninstrumented
+fun:fputs=uninstrumented
+fun:fputs_unlocked=uninstrumented
+fun:fputwc=uninstrumented
+fun:fputwc_unlocked=uninstrumented
+fun:fputws=uninstrumented
+fun:fputws_unlocked=uninstrumented
+fun:fread=uninstrumented
+fun:fread_unlocked=uninstrumented
+fun:free=uninstrumented
+fun:freeaddrinfo=uninstrumented
+fun:freeifaddrs=uninstrumented
+fun:freelocale=uninstrumented
+fun:fremovexattr=uninstrumented
+fun:freopen=uninstrumented
+fun:freopen64=uninstrumented
+fun:frexp=uninstrumented
+fun:frexpf=uninstrumented
+fun:frexpl=uninstrumented
+fun:fscanf=uninstrumented
+fun:fseek=uninstrumented
+fun:fseeko=uninstrumented
+fun:fseeko64=uninstrumented
+fun:fsetpos=uninstrumented
+fun:fsetpos64=uninstrumented
+fun:fsetxattr=uninstrumented
+fun:fstat=uninstrumented
+fun:fstat64=uninstrumented
+fun:fstatat=uninstrumented
+fun:fstatat64=uninstrumented
+fun:fstatfs=uninstrumented
+fun:fstatfs64=uninstrumented
+fun:fstatvfs=uninstrumented
+fun:fstatvfs64=uninstrumented
+fun:fsync=uninstrumented
+fun:ftell=uninstrumented
+fun:ftello=uninstrumented
+fun:ftello64=uninstrumented
+fun:ftime=uninstrumented
+fun:ftok=uninstrumented
+fun:ftruncate=uninstrumented
+fun:ftruncate64=uninstrumented
+fun:ftrylockfile=uninstrumented
+fun:fts_children=uninstrumented
+fun:fts_close=uninstrumented
+fun:fts_open=uninstrumented
+fun:fts_read=uninstrumented
+fun:fts_set=uninstrumented
+fun:ftw=uninstrumented
+fun:ftw64=uninstrumented
+fun:funlockfile=uninstrumented
+fun:futimens=uninstrumented
+fun:futimes=uninstrumented
+fun:futimesat=uninstrumented
+fun:fwide=uninstrumented
+fun:fwprintf=uninstrumented
+fun:fwrite=uninstrumented
+fun:fwrite_unlocked=uninstrumented
+fun:fwscanf=uninstrumented
+fun:gai_cancel=uninstrumented
+fun:gai_error=uninstrumented
+fun:gai_strerror=uninstrumented
+fun:gai_suspend=uninstrumented
+fun:gamma=uninstrumented
+fun:gammaf=uninstrumented
+fun:gammal=uninstrumented
+fun:gcvt=uninstrumented
+fun:get_avphys_pages=uninstrumented
+fun:get_current_dir_name=uninstrumented
+fun:get_kernel_syms=uninstrumented
+fun:get_myaddress=uninstrumented
+fun:get_nprocs=uninstrumented
+fun:get_nprocs_conf=uninstrumented
+fun:get_phys_pages=uninstrumented
+fun:getaddrinfo=uninstrumented
+fun:getaddrinfo_a=uninstrumented
+fun:getaliasbyname=uninstrumented
+fun:getaliasbyname_r=uninstrumented
+fun:getaliasent=uninstrumented
+fun:getaliasent_r=uninstrumented
+fun:getauxval=uninstrumented
+fun:getc=uninstrumented
+fun:getc_unlocked=uninstrumented
+fun:getchar=uninstrumented
+fun:getchar_unlocked=uninstrumented
+fun:getcontext=uninstrumented
+fun:getcwd=uninstrumented
+fun:getdate=uninstrumented
+fun:getdate_r=uninstrumented
+fun:getdelim=uninstrumented
+fun:getdirentries=uninstrumented
+fun:getdirentries64=uninstrumented
+fun:getdomainname=uninstrumented
+fun:getdtablesize=uninstrumented
+fun:getegid=uninstrumented
+fun:getenv=uninstrumented
+fun:geteuid=uninstrumented
+fun:getfsent=uninstrumented
+fun:getfsfile=uninstrumented
+fun:getfsspec=uninstrumented
+fun:getgid=uninstrumented
+fun:getgrent=uninstrumented
+fun:getgrent_r=uninstrumented
+fun:getgrgid=uninstrumented
+fun:getgrgid_r=uninstrumented
+fun:getgrnam=uninstrumented
+fun:getgrnam_r=uninstrumented
+fun:getgrouplist=uninstrumented
+fun:getgroups=uninstrumented
+fun:gethostbyaddr=uninstrumented
+fun:gethostbyaddr_r=uninstrumented
+fun:gethostbyname=uninstrumented
+fun:gethostbyname2=uninstrumented
+fun:gethostbyname2_r=uninstrumented
+fun:gethostbyname_r=uninstrumented
+fun:gethostent=uninstrumented
+fun:gethostent_r=uninstrumented
+fun:gethostid=uninstrumented
+fun:gethostname=uninstrumented
+fun:getifaddrs=uninstrumented
+fun:getipv4sourcefilter=uninstrumented
+fun:getitimer=uninstrumented
+fun:getline=uninstrumented
+fun:getloadavg=uninstrumented
+fun:getlogin=uninstrumented
+fun:getlogin_r=uninstrumented
+fun:getmntent=uninstrumented
+fun:getmntent_r=uninstrumented
+fun:getmsg=uninstrumented
+fun:getnameinfo=uninstrumented
+fun:getnetbyaddr=uninstrumented
+fun:getnetbyaddr_r=uninstrumented
+fun:getnetbyname=uninstrumented
+fun:getnetbyname_r=uninstrumented
+fun:getnetent=uninstrumented
+fun:getnetent_r=uninstrumented
+fun:getnetgrent=uninstrumented
+fun:getnetgrent_r=uninstrumented
+fun:getnetname=uninstrumented
+fun:getopt=uninstrumented
+fun:getopt_long=uninstrumented
+fun:getopt_long_only=uninstrumented
+fun:getpagesize=uninstrumented
+fun:getpass=uninstrumented
+fun:getpeername=uninstrumented
+fun:getpgid=uninstrumented
+fun:getpgrp=uninstrumented
+fun:getpid=uninstrumented
+fun:getpmsg=uninstrumented
+fun:getppid=uninstrumented
+fun:getpriority=uninstrumented
+fun:getprotobyname=uninstrumented
+fun:getprotobyname_r=uninstrumented
+fun:getprotobynumber=uninstrumented
+fun:getprotobynumber_r=uninstrumented
+fun:getprotoent=uninstrumented
+fun:getprotoent_r=uninstrumented
+fun:getpt=uninstrumented
+fun:getpublickey=uninstrumented
+fun:getpw=uninstrumented
+fun:getpwent=uninstrumented
+fun:getpwent_r=uninstrumented
+fun:getpwnam=uninstrumented
+fun:getpwnam_r=uninstrumented
+fun:getpwuid=uninstrumented
+fun:getpwuid_r=uninstrumented
+fun:getresgid=uninstrumented
+fun:getresuid=uninstrumented
+fun:getrlimit=uninstrumented
+fun:getrlimit64=uninstrumented
+fun:getrpcbyname=uninstrumented
+fun:getrpcbyname_r=uninstrumented
+fun:getrpcbynumber=uninstrumented
+fun:getrpcbynumber_r=uninstrumented
+fun:getrpcent=uninstrumented
+fun:getrpcent_r=uninstrumented
+fun:getrpcport=uninstrumented
+fun:getrusage=uninstrumented
+fun:gets=uninstrumented
+fun:getsecretkey=uninstrumented
+fun:getservbyname=uninstrumented
+fun:getservbyname_r=uninstrumented
+fun:getservbyport=uninstrumented
+fun:getservbyport_r=uninstrumented
+fun:getservent=uninstrumented
+fun:getservent_r=uninstrumented
+fun:getsgent=uninstrumented
+fun:getsgent_r=uninstrumented
+fun:getsgnam=uninstrumented
+fun:getsgnam_r=uninstrumented
+fun:getsid=uninstrumented
+fun:getsockname=uninstrumented
+fun:getsockopt=uninstrumented
+fun:getsourcefilter=uninstrumented
+fun:getspent=uninstrumented
+fun:getspent_r=uninstrumented
+fun:getspnam=uninstrumented
+fun:getspnam_r=uninstrumented
+fun:getsubopt=uninstrumented
+fun:gettext=uninstrumented
+fun:gettimeofday=uninstrumented
+fun:getttyent=uninstrumented
+fun:getttynam=uninstrumented
+fun:getuid=uninstrumented
+fun:getusershell=uninstrumented
+fun:getutent=uninstrumented
+fun:getutent_r=uninstrumented
+fun:getutid=uninstrumented
+fun:getutid_r=uninstrumented
+fun:getutline=uninstrumented
+fun:getutline_r=uninstrumented
+fun:getutmp=uninstrumented
+fun:getutmpx=uninstrumented
+fun:getutxent=uninstrumented
+fun:getutxid=uninstrumented
+fun:getutxline=uninstrumented
+fun:getw=uninstrumented
+fun:getwc=uninstrumented
+fun:getwc_unlocked=uninstrumented
+fun:getwchar=uninstrumented
+fun:getwchar_unlocked=uninstrumented
+fun:getwd=uninstrumented
+fun:getxattr=uninstrumented
+fun:glob=uninstrumented
+fun:glob64=uninstrumented
+fun:glob_pattern_p=uninstrumented
+fun:globfree=uninstrumented
+fun:globfree64=uninstrumented
+fun:gmtime=uninstrumented
+fun:gmtime_r=uninstrumented
+fun:gnu_dev_major=uninstrumented
+fun:gnu_dev_makedev=uninstrumented
+fun:gnu_dev_minor=uninstrumented
+fun:gnu_get_libc_release=uninstrumented
+fun:gnu_get_libc_version=uninstrumented
+fun:grantpt=uninstrumented
+fun:group_member=uninstrumented
+fun:gsignal=uninstrumented
+fun:gtty=uninstrumented
+fun:hasmntopt=uninstrumented
+fun:hcreate=uninstrumented
+fun:hcreate_r=uninstrumented
+fun:hdestroy=uninstrumented
+fun:hdestroy_r=uninstrumented
+fun:herror=uninstrumented
+fun:host2netname=uninstrumented
+fun:hsearch=uninstrumented
+fun:hsearch_r=uninstrumented
+fun:hstrerror=uninstrumented
+fun:htonl=uninstrumented
+fun:htons=uninstrumented
+fun:hypot=uninstrumented
+fun:hypotf=uninstrumented
+fun:hypotl=uninstrumented
+fun:iconv=uninstrumented
+fun:iconv_close=uninstrumented
+fun:iconv_open=uninstrumented
+fun:idna_to_ascii_lz=uninstrumented
+fun:idna_to_unicode_lzlz=uninstrumented
+fun:if_freenameindex=uninstrumented
+fun:if_indextoname=uninstrumented
+fun:if_nameindex=uninstrumented
+fun:if_nametoindex=uninstrumented
+fun:ilogb=uninstrumented
+fun:ilogbf=uninstrumented
+fun:ilogbl=uninstrumented
+fun:imaxabs=uninstrumented
+fun:imaxdiv=uninstrumented
+fun:index=uninstrumented
+fun:inet6_opt_append=uninstrumented
+fun:inet6_opt_find=uninstrumented
+fun:inet6_opt_finish=uninstrumented
+fun:inet6_opt_get_val=uninstrumented
+fun:inet6_opt_init=uninstrumented
+fun:inet6_opt_next=uninstrumented
+fun:inet6_opt_set_val=uninstrumented
+fun:inet6_option_alloc=uninstrumented
+fun:inet6_option_append=uninstrumented
+fun:inet6_option_find=uninstrumented
+fun:inet6_option_init=uninstrumented
+fun:inet6_option_next=uninstrumented
+fun:inet6_option_space=uninstrumented
+fun:inet6_rth_add=uninstrumented
+fun:inet6_rth_getaddr=uninstrumented
+fun:inet6_rth_init=uninstrumented
+fun:inet6_rth_reverse=uninstrumented
+fun:inet6_rth_segments=uninstrumented
+fun:inet6_rth_space=uninstrumented
+fun:inet_addr=uninstrumented
+fun:inet_aton=uninstrumented
+fun:inet_lnaof=uninstrumented
+fun:inet_makeaddr=uninstrumented
+fun:inet_net_ntop=uninstrumented
+fun:inet_net_pton=uninstrumented
+fun:inet_neta=uninstrumented
+fun:inet_netof=uninstrumented
+fun:inet_network=uninstrumented
+fun:inet_nsap_addr=uninstrumented
+fun:inet_nsap_ntoa=uninstrumented
+fun:inet_ntoa=uninstrumented
+fun:inet_ntop=uninstrumented
+fun:inet_pton=uninstrumented
+fun:init_module=uninstrumented
+fun:initgroups=uninstrumented
+fun:initstate=uninstrumented
+fun:initstate_r=uninstrumented
+fun:innetgr=uninstrumented
+fun:inotify_add_watch=uninstrumented
+fun:inotify_init=uninstrumented
+fun:inotify_init1=uninstrumented
+fun:inotify_rm_watch=uninstrumented
+fun:insque=uninstrumented
+fun:ioctl=uninstrumented
+fun:ioperm=uninstrumented
+fun:iopl=uninstrumented
+fun:iruserok=uninstrumented
+fun:iruserok_af=uninstrumented
+fun:isalnum=uninstrumented
+fun:isalnum_l=uninstrumented
+fun:isalpha=uninstrumented
+fun:isalpha_l=uninstrumented
+fun:isascii=uninstrumented
+fun:isastream=uninstrumented
+fun:isatty=uninstrumented
+fun:isblank=uninstrumented
+fun:isblank_l=uninstrumented
+fun:iscntrl=uninstrumented
+fun:iscntrl_l=uninstrumented
+fun:isctype=uninstrumented
+fun:isdigit=uninstrumented
+fun:isdigit_l=uninstrumented
+fun:isfdtype=uninstrumented
+fun:isgraph=uninstrumented
+fun:isgraph_l=uninstrumented
+fun:isinf=uninstrumented
+fun:isinfd128=uninstrumented
+fun:isinfd32=uninstrumented
+fun:isinfd64=uninstrumented
+fun:isinff=uninstrumented
+fun:isinfl=uninstrumented
+fun:islower=uninstrumented
+fun:islower_l=uninstrumented
+fun:isnan=uninstrumented
+fun:isnanf=uninstrumented
+fun:isnanl=uninstrumented
+fun:isprint=uninstrumented
+fun:isprint_l=uninstrumented
+fun:ispunct=uninstrumented
+fun:ispunct_l=uninstrumented
+fun:isspace=uninstrumented
+fun:isspace_l=uninstrumented
+fun:isupper=uninstrumented
+fun:isupper_l=uninstrumented
+fun:iswalnum=uninstrumented
+fun:iswalnum_l=uninstrumented
+fun:iswalpha=uninstrumented
+fun:iswalpha_l=uninstrumented
+fun:iswblank=uninstrumented
+fun:iswblank_l=uninstrumented
+fun:iswcntrl=uninstrumented
+fun:iswcntrl_l=uninstrumented
+fun:iswctype=uninstrumented
+fun:iswctype_l=uninstrumented
+fun:iswdigit=uninstrumented
+fun:iswdigit_l=uninstrumented
+fun:iswgraph=uninstrumented
+fun:iswgraph_l=uninstrumented
+fun:iswlower=uninstrumented
+fun:iswlower_l=uninstrumented
+fun:iswprint=uninstrumented
+fun:iswprint_l=uninstrumented
+fun:iswpunct=uninstrumented
+fun:iswpunct_l=uninstrumented
+fun:iswspace=uninstrumented
+fun:iswspace_l=uninstrumented
+fun:iswupper=uninstrumented
+fun:iswupper_l=uninstrumented
+fun:iswxdigit=uninstrumented
+fun:iswxdigit_l=uninstrumented
+fun:isxdigit=uninstrumented
+fun:isxdigit_l=uninstrumented
+fun:j0=uninstrumented
+fun:j0f=uninstrumented
+fun:j0l=uninstrumented
+fun:j1=uninstrumented
+fun:j1f=uninstrumented
+fun:j1l=uninstrumented
+fun:jn=uninstrumented
+fun:jnf=uninstrumented
+fun:jnl=uninstrumented
+fun:jrand48=uninstrumented
+fun:jrand48_r=uninstrumented
+fun:key_decryptsession=uninstrumented
+fun:key_decryptsession_pk=uninstrumented
+fun:key_encryptsession=uninstrumented
+fun:key_encryptsession_pk=uninstrumented
+fun:key_gendes=uninstrumented
+fun:key_get_conv=uninstrumented
+fun:key_secretkey_is_set=uninstrumented
+fun:key_setnet=uninstrumented
+fun:key_setsecret=uninstrumented
+fun:kill=uninstrumented
+fun:killpg=uninstrumented
+fun:klogctl=uninstrumented
+fun:l64a=uninstrumented
+fun:labs=uninstrumented
+fun:lchmod=uninstrumented
+fun:lchown=uninstrumented
+fun:lckpwdf=uninstrumented
+fun:lcong48=uninstrumented
+fun:lcong48_r=uninstrumented
+fun:ldexp=uninstrumented
+fun:ldexpf=uninstrumented
+fun:ldexpl=uninstrumented
+fun:ldiv=uninstrumented
+fun:lfind=uninstrumented
+fun:lgamma=uninstrumented
+fun:lgamma_r=uninstrumented
+fun:lgammaf=uninstrumented
+fun:lgammaf_r=uninstrumented
+fun:lgammal=uninstrumented
+fun:lgammal_r=uninstrumented
+fun:lgetxattr=uninstrumented
+fun:link=uninstrumented
+fun:linkat=uninstrumented
+fun:lio_listio=uninstrumented
+fun:lio_listio64=uninstrumented
+fun:listen=uninstrumented
+fun:listxattr=uninstrumented
+fun:llabs=uninstrumented
+fun:lldiv=uninstrumented
+fun:llistxattr=uninstrumented
+fun:llrint=uninstrumented
+fun:llrintf=uninstrumented
+fun:llrintl=uninstrumented
+fun:llround=uninstrumented
+fun:llroundf=uninstrumented
+fun:llroundl=uninstrumented
+fun:llseek=uninstrumented
+fun:localeconv=uninstrumented
+fun:localtime=uninstrumented
+fun:localtime_r=uninstrumented
+fun:lockf=uninstrumented
+fun:lockf64=uninstrumented
+fun:log=uninstrumented
+fun:log10=uninstrumented
+fun:log10f=uninstrumented
+fun:log10l=uninstrumented
+fun:log1p=uninstrumented
+fun:log1pf=uninstrumented
+fun:log1pl=uninstrumented
+fun:log2=uninstrumented
+fun:log2f=uninstrumented
+fun:log2l=uninstrumented
+fun:logb=uninstrumented
+fun:logbf=uninstrumented
+fun:logbl=uninstrumented
+fun:logf=uninstrumented
+fun:login=uninstrumented
+fun:login_tty=uninstrumented
+fun:logl=uninstrumented
+fun:logout=uninstrumented
+fun:logwtmp=uninstrumented
+fun:longjmp=uninstrumented
+fun:lrand48=uninstrumented
+fun:lrand48_r=uninstrumented
+fun:lremovexattr=uninstrumented
+fun:lrint=uninstrumented
+fun:lrintf=uninstrumented
+fun:lrintl=uninstrumented
+fun:lround=uninstrumented
+fun:lroundf=uninstrumented
+fun:lroundl=uninstrumented
+fun:lsearch=uninstrumented
+fun:lseek=uninstrumented
+fun:lseek64=uninstrumented
+fun:lsetxattr=uninstrumented
+fun:lstat=uninstrumented
+fun:lstat64=uninstrumented
+fun:lutimes=uninstrumented
+fun:madvise=uninstrumented
+fun:makecontext=uninstrumented
+fun:mallinfo=uninstrumented
+fun:malloc=uninstrumented
+fun:malloc_get_state=uninstrumented
+fun:malloc_info=uninstrumented
+fun:malloc_set_state=uninstrumented
+fun:malloc_stats=uninstrumented
+fun:malloc_trim=uninstrumented
+fun:malloc_usable_size=uninstrumented
+fun:mallopt=uninstrumented
+fun:matherr=uninstrumented
+fun:mblen=uninstrumented
+fun:mbrlen=uninstrumented
+fun:mbrtoc16=uninstrumented
+fun:mbrtoc32=uninstrumented
+fun:mbrtowc=uninstrumented
+fun:mbsinit=uninstrumented
+fun:mbsnrtowcs=uninstrumented
+fun:mbsrtowcs=uninstrumented
+fun:mbstowcs=uninstrumented
+fun:mbtowc=uninstrumented
+fun:mcheck=uninstrumented
+fun:mcheck_check_all=uninstrumented
+fun:mcheck_pedantic=uninstrumented
+fun:mcount=uninstrumented
+fun:memalign=uninstrumented
+fun:memccpy=uninstrumented
+fun:memchr=uninstrumented
+fun:memcmp=uninstrumented
+fun:memcpy=uninstrumented
+fun:memfrob=uninstrumented
+fun:memmem=uninstrumented
+fun:memmove=uninstrumented
+fun:mempcpy=uninstrumented
+fun:memrchr=uninstrumented
+fun:memset=uninstrumented
+fun:mincore=uninstrumented
+fun:mkdir=uninstrumented
+fun:mkdirat=uninstrumented
+fun:mkdtemp=uninstrumented
+fun:mkfifo=uninstrumented
+fun:mkfifoat=uninstrumented
+fun:mknod=uninstrumented
+fun:mknodat=uninstrumented
+fun:mkostemp=uninstrumented
+fun:mkostemp64=uninstrumented
+fun:mkostemps=uninstrumented
+fun:mkostemps64=uninstrumented
+fun:mkstemp=uninstrumented
+fun:mkstemp64=uninstrumented
+fun:mkstemps=uninstrumented
+fun:mkstemps64=uninstrumented
+fun:mktemp=uninstrumented
+fun:mktime=uninstrumented
+fun:mlock=uninstrumented
+fun:mlockall=uninstrumented
+fun:mmap=uninstrumented
+fun:mmap64=uninstrumented
+fun:modf=uninstrumented
+fun:modff=uninstrumented
+fun:modfl=uninstrumented
+fun:modify_ldt=uninstrumented
+fun:moncontrol=uninstrumented
+fun:monstartup=uninstrumented
+fun:mount=uninstrumented
+fun:mprobe=uninstrumented
+fun:mprotect=uninstrumented
+fun:mq_close=uninstrumented
+fun:mq_getattr=uninstrumented
+fun:mq_notify=uninstrumented
+fun:mq_open=uninstrumented
+fun:mq_receive=uninstrumented
+fun:mq_send=uninstrumented
+fun:mq_setattr=uninstrumented
+fun:mq_timedreceive=uninstrumented
+fun:mq_timedsend=uninstrumented
+fun:mq_unlink=uninstrumented
+fun:mrand48=uninstrumented
+fun:mrand48_r=uninstrumented
+fun:mremap=uninstrumented
+fun:msgctl=uninstrumented
+fun:msgget=uninstrumented
+fun:msgrcv=uninstrumented
+fun:msgsnd=uninstrumented
+fun:msync=uninstrumented
+fun:mtrace=uninstrumented
+fun:munlock=uninstrumented
+fun:munlockall=uninstrumented
+fun:munmap=uninstrumented
+fun:muntrace=uninstrumented
+fun:name_to_handle_at=uninstrumented
+fun:nan=uninstrumented
+fun:nanf=uninstrumented
+fun:nanl=uninstrumented
+fun:nanosleep=uninstrumented
+fun:nearbyint=uninstrumented
+fun:nearbyintf=uninstrumented
+fun:nearbyintl=uninstrumented
+fun:netname2host=uninstrumented
+fun:netname2user=uninstrumented
+fun:newlocale=uninstrumented
+fun:nextafter=uninstrumented
+fun:nextafterf=uninstrumented
+fun:nextafterl=uninstrumented
+fun:nexttoward=uninstrumented
+fun:nexttowardf=uninstrumented
+fun:nexttowardl=uninstrumented
+fun:nfsservctl=uninstrumented
+fun:nftw=uninstrumented
+fun:nftw64=uninstrumented
+fun:ngettext=uninstrumented
+fun:nice=uninstrumented
+fun:nis_add=uninstrumented
+fun:nis_add_entry=uninstrumented
+fun:nis_addmember=uninstrumented
+fun:nis_checkpoint=uninstrumented
+fun:nis_clone_directory=uninstrumented
+fun:nis_clone_object=uninstrumented
+fun:nis_clone_result=uninstrumented
+fun:nis_creategroup=uninstrumented
+fun:nis_destroy_object=uninstrumented
+fun:nis_destroygroup=uninstrumented
+fun:nis_dir_cmp=uninstrumented
+fun:nis_domain_of=uninstrumented
+fun:nis_domain_of_r=uninstrumented
+fun:nis_first_entry=uninstrumented
+fun:nis_free_directory=uninstrumented
+fun:nis_free_object=uninstrumented
+fun:nis_free_request=uninstrumented
+fun:nis_freenames=uninstrumented
+fun:nis_freeresult=uninstrumented
+fun:nis_freeservlist=uninstrumented
+fun:nis_freetags=uninstrumented
+fun:nis_getnames=uninstrumented
+fun:nis_getservlist=uninstrumented
+fun:nis_ismember=uninstrumented
+fun:nis_leaf_of=uninstrumented
+fun:nis_leaf_of_r=uninstrumented
+fun:nis_lerror=uninstrumented
+fun:nis_list=uninstrumented
+fun:nis_local_directory=uninstrumented
+fun:nis_local_group=uninstrumented
+fun:nis_local_host=uninstrumented
+fun:nis_local_principal=uninstrumented
+fun:nis_lookup=uninstrumented
+fun:nis_mkdir=uninstrumented
+fun:nis_modify=uninstrumented
+fun:nis_modify_entry=uninstrumented
+fun:nis_name_of=uninstrumented
+fun:nis_name_of_r=uninstrumented
+fun:nis_next_entry=uninstrumented
+fun:nis_perror=uninstrumented
+fun:nis_ping=uninstrumented
+fun:nis_print_directory=uninstrumented
+fun:nis_print_entry=uninstrumented
+fun:nis_print_group=uninstrumented
+fun:nis_print_group_entry=uninstrumented
+fun:nis_print_link=uninstrumented
+fun:nis_print_object=uninstrumented
+fun:nis_print_result=uninstrumented
+fun:nis_print_rights=uninstrumented
+fun:nis_print_table=uninstrumented
+fun:nis_read_obj=uninstrumented
+fun:nis_remove=uninstrumented
+fun:nis_remove_entry=uninstrumented
+fun:nis_removemember=uninstrumented
+fun:nis_rmdir=uninstrumented
+fun:nis_servstate=uninstrumented
+fun:nis_sperrno=uninstrumented
+fun:nis_sperror=uninstrumented
+fun:nis_sperror_r=uninstrumented
+fun:nis_stats=uninstrumented
+fun:nis_verifygroup=uninstrumented
+fun:nis_write_obj=uninstrumented
+fun:nl_langinfo=uninstrumented
+fun:nl_langinfo_l=uninstrumented
+fun:nrand48=uninstrumented
+fun:nrand48_r=uninstrumented
+fun:ns_datetosecs=uninstrumented
+fun:ns_format_ttl=uninstrumented
+fun:ns_get16=uninstrumented
+fun:ns_get32=uninstrumented
+fun:ns_initparse=uninstrumented
+fun:ns_makecanon=uninstrumented
+fun:ns_msg_getflag=uninstrumented
+fun:ns_name_compress=uninstrumented
+fun:ns_name_ntol=uninstrumented
+fun:ns_name_ntop=uninstrumented
+fun:ns_name_pack=uninstrumented
+fun:ns_name_pton=uninstrumented
+fun:ns_name_rollback=uninstrumented
+fun:ns_name_skip=uninstrumented
+fun:ns_name_uncompress=uninstrumented
+fun:ns_name_unpack=uninstrumented
+fun:ns_parse_ttl=uninstrumented
+fun:ns_parserr=uninstrumented
+fun:ns_put16=uninstrumented
+fun:ns_put32=uninstrumented
+fun:ns_samedomain=uninstrumented
+fun:ns_samename=uninstrumented
+fun:ns_skiprr=uninstrumented
+fun:ns_sprintrr=uninstrumented
+fun:ns_sprintrrf=uninstrumented
+fun:ns_subdomain=uninstrumented
+fun:ntohl=uninstrumented
+fun:ntohs=uninstrumented
+fun:ntp_adjtime=uninstrumented
+fun:ntp_gettime=uninstrumented
+fun:ntp_gettimex=uninstrumented
+fun:obstack_free=uninstrumented
+fun:obstack_printf=uninstrumented
+fun:obstack_vprintf=uninstrumented
+fun:on_exit=uninstrumented
+fun:open=uninstrumented
+fun:open64=uninstrumented
+fun:open_by_handle_at=uninstrumented
+fun:open_memstream=uninstrumented
+fun:open_wmemstream=uninstrumented
+fun:openat=uninstrumented
+fun:openat64=uninstrumented
+fun:opendir=uninstrumented
+fun:openlog=uninstrumented
+fun:openpty=uninstrumented
+fun:parse_printf_format=uninstrumented
+fun:passwd2des=uninstrumented
+fun:pathconf=uninstrumented
+fun:pause=uninstrumented
+fun:pclose=uninstrumented
+fun:perror=uninstrumented
+fun:personality=uninstrumented
+fun:pipe=uninstrumented
+fun:pipe2=uninstrumented
+fun:pivot_root=uninstrumented
+fun:pmap_getmaps=uninstrumented
+fun:pmap_getport=uninstrumented
+fun:pmap_rmtcall=uninstrumented
+fun:pmap_set=uninstrumented
+fun:pmap_unset=uninstrumented
+fun:poll=uninstrumented
+fun:popen=uninstrumented
+fun:posix_fadvise=uninstrumented
+fun:posix_fadvise64=uninstrumented
+fun:posix_fallocate=uninstrumented
+fun:posix_fallocate64=uninstrumented
+fun:posix_madvise=uninstrumented
+fun:posix_memalign=uninstrumented
+fun:posix_openpt=uninstrumented
+fun:posix_spawn=uninstrumented
+fun:posix_spawn_file_actions_addclose=uninstrumented
+fun:posix_spawn_file_actions_adddup2=uninstrumented
+fun:posix_spawn_file_actions_addopen=uninstrumented
+fun:posix_spawn_file_actions_destroy=uninstrumented
+fun:posix_spawn_file_actions_init=uninstrumented
+fun:posix_spawnattr_destroy=uninstrumented
+fun:posix_spawnattr_getflags=uninstrumented
+fun:posix_spawnattr_getpgroup=uninstrumented
+fun:posix_spawnattr_getschedparam=uninstrumented
+fun:posix_spawnattr_getschedpolicy=uninstrumented
+fun:posix_spawnattr_getsigdefault=uninstrumented
+fun:posix_spawnattr_getsigmask=uninstrumented
+fun:posix_spawnattr_init=uninstrumented
+fun:posix_spawnattr_setflags=uninstrumented
+fun:posix_spawnattr_setpgroup=uninstrumented
+fun:posix_spawnattr_setschedparam=uninstrumented
+fun:posix_spawnattr_setschedpolicy=uninstrumented
+fun:posix_spawnattr_setsigdefault=uninstrumented
+fun:posix_spawnattr_setsigmask=uninstrumented
+fun:posix_spawnp=uninstrumented
+fun:pow=uninstrumented
+fun:pow10=uninstrumented
+fun:pow10f=uninstrumented
+fun:pow10l=uninstrumented
+fun:powf=uninstrumented
+fun:powl=uninstrumented
+fun:ppoll=uninstrumented
+fun:prctl=uninstrumented
+fun:pread=uninstrumented
+fun:pread64=uninstrumented
+fun:preadv=uninstrumented
+fun:preadv64=uninstrumented
+fun:printf=uninstrumented
+fun:printf_size=uninstrumented
+fun:printf_size_info=uninstrumented
+fun:prlimit=uninstrumented
+fun:prlimit64=uninstrumented
+fun:process_vm_readv=uninstrumented
+fun:process_vm_writev=uninstrumented
+fun:profil=uninstrumented
+fun:pselect=uninstrumented
+fun:psiginfo=uninstrumented
+fun:psignal=uninstrumented
+fun:pthread_atfork=uninstrumented
+fun:pthread_attr_destroy=uninstrumented
+fun:pthread_attr_getaffinity_np=uninstrumented
+fun:pthread_attr_getdetachstate=uninstrumented
+fun:pthread_attr_getguardsize=uninstrumented
+fun:pthread_attr_getinheritsched=uninstrumented
+fun:pthread_attr_getschedparam=uninstrumented
+fun:pthread_attr_getschedpolicy=uninstrumented
+fun:pthread_attr_getscope=uninstrumented
+fun:pthread_attr_getstack=uninstrumented
+fun:pthread_attr_getstackaddr=uninstrumented
+fun:pthread_attr_getstacksize=uninstrumented
+fun:pthread_attr_init=uninstrumented
+fun:pthread_attr_setaffinity_np=uninstrumented
+fun:pthread_attr_setdetachstate=uninstrumented
+fun:pthread_attr_setguardsize=uninstrumented
+fun:pthread_attr_setinheritsched=uninstrumented
+fun:pthread_attr_setschedparam=uninstrumented
+fun:pthread_attr_setschedpolicy=uninstrumented
+fun:pthread_attr_setscope=uninstrumented
+fun:pthread_attr_setstack=uninstrumented
+fun:pthread_attr_setstackaddr=uninstrumented
+fun:pthread_attr_setstacksize=uninstrumented
+fun:pthread_barrier_destroy=uninstrumented
+fun:pthread_barrier_init=uninstrumented
+fun:pthread_barrier_wait=uninstrumented
+fun:pthread_barrierattr_destroy=uninstrumented
+fun:pthread_barrierattr_getpshared=uninstrumented
+fun:pthread_barrierattr_init=uninstrumented
+fun:pthread_barrierattr_setpshared=uninstrumented
+fun:pthread_cancel=uninstrumented
+fun:pthread_cond_broadcast=uninstrumented
+fun:pthread_cond_destroy=uninstrumented
+fun:pthread_cond_init=uninstrumented
+fun:pthread_cond_signal=uninstrumented
+fun:pthread_cond_timedwait=uninstrumented
+fun:pthread_cond_wait=uninstrumented
+fun:pthread_condattr_destroy=uninstrumented
+fun:pthread_condattr_getclock=uninstrumented
+fun:pthread_condattr_getpshared=uninstrumented
+fun:pthread_condattr_init=uninstrumented
+fun:pthread_condattr_setclock=uninstrumented
+fun:pthread_condattr_setpshared=uninstrumented
+fun:pthread_create=uninstrumented
+fun:pthread_detach=uninstrumented
+fun:pthread_equal=uninstrumented
+fun:pthread_exit=uninstrumented
+fun:pthread_getaffinity_np=uninstrumented
+fun:pthread_getattr_default_np=uninstrumented
+fun:pthread_getattr_np=uninstrumented
+fun:pthread_getconcurrency=uninstrumented
+fun:pthread_getcpuclockid=uninstrumented
+fun:pthread_getname_np=uninstrumented
+fun:pthread_getschedparam=uninstrumented
+fun:pthread_getspecific=uninstrumented
+fun:pthread_join=uninstrumented
+fun:pthread_key_create=uninstrumented
+fun:pthread_key_delete=uninstrumented
+fun:pthread_kill=uninstrumented
+fun:pthread_kill_other_threads_np=uninstrumented
+fun:pthread_mutex_consistent=uninstrumented
+fun:pthread_mutex_consistent_np=uninstrumented
+fun:pthread_mutex_destroy=uninstrumented
+fun:pthread_mutex_getprioceiling=uninstrumented
+fun:pthread_mutex_init=uninstrumented
+fun:pthread_mutex_lock=uninstrumented
+fun:pthread_mutex_setprioceiling=uninstrumented
+fun:pthread_mutex_timedlock=uninstrumented
+fun:pthread_mutex_trylock=uninstrumented
+fun:pthread_mutex_unlock=uninstrumented
+fun:pthread_mutexattr_destroy=uninstrumented
+fun:pthread_mutexattr_getkind_np=uninstrumented
+fun:pthread_mutexattr_getprioceiling=uninstrumented
+fun:pthread_mutexattr_getprotocol=uninstrumented
+fun:pthread_mutexattr_getpshared=uninstrumented
+fun:pthread_mutexattr_getrobust=uninstrumented
+fun:pthread_mutexattr_getrobust_np=uninstrumented
+fun:pthread_mutexattr_gettype=uninstrumented
+fun:pthread_mutexattr_init=uninstrumented
+fun:pthread_mutexattr_setkind_np=uninstrumented
+fun:pthread_mutexattr_setprioceiling=uninstrumented
+fun:pthread_mutexattr_setprotocol=uninstrumented
+fun:pthread_mutexattr_setpshared=uninstrumented
+fun:pthread_mutexattr_setrobust=uninstrumented
+fun:pthread_mutexattr_setrobust_np=uninstrumented
+fun:pthread_mutexattr_settype=uninstrumented
+fun:pthread_once=uninstrumented
+fun:pthread_rwlock_destroy=uninstrumented
+fun:pthread_rwlock_init=uninstrumented
+fun:pthread_rwlock_rdlock=uninstrumented
+fun:pthread_rwlock_timedrdlock=uninstrumented
+fun:pthread_rwlock_timedwrlock=uninstrumented
+fun:pthread_rwlock_tryrdlock=uninstrumented
+fun:pthread_rwlock_trywrlock=uninstrumented
+fun:pthread_rwlock_unlock=uninstrumented
+fun:pthread_rwlock_wrlock=uninstrumented
+fun:pthread_rwlockattr_destroy=uninstrumented
+fun:pthread_rwlockattr_getkind_np=uninstrumented
+fun:pthread_rwlockattr_getpshared=uninstrumented
+fun:pthread_rwlockattr_init=uninstrumented
+fun:pthread_rwlockattr_setkind_np=uninstrumented
+fun:pthread_rwlockattr_setpshared=uninstrumented
+fun:pthread_self=uninstrumented
+fun:pthread_setaffinity_np=uninstrumented
+fun:pthread_setattr_default_np=uninstrumented
+fun:pthread_setcancelstate=uninstrumented
+fun:pthread_setcanceltype=uninstrumented
+fun:pthread_setconcurrency=uninstrumented
+fun:pthread_setname_np=uninstrumented
+fun:pthread_setschedparam=uninstrumented
+fun:pthread_setschedprio=uninstrumented
+fun:pthread_setspecific=uninstrumented
+fun:pthread_sigmask=uninstrumented
+fun:pthread_sigqueue=uninstrumented
+fun:pthread_spin_destroy=uninstrumented
+fun:pthread_spin_init=uninstrumented
+fun:pthread_spin_lock=uninstrumented
+fun:pthread_spin_trylock=uninstrumented
+fun:pthread_spin_unlock=uninstrumented
+fun:pthread_testcancel=uninstrumented
+fun:pthread_timedjoin_np=uninstrumented
+fun:pthread_tryjoin_np=uninstrumented
+fun:pthread_yield=uninstrumented
+fun:ptrace=uninstrumented
+fun:ptsname=uninstrumented
+fun:ptsname_r=uninstrumented
+fun:putc=uninstrumented
+fun:putc_unlocked=uninstrumented
+fun:putchar=uninstrumented
+fun:putchar_unlocked=uninstrumented
+fun:putenv=uninstrumented
+fun:putgrent=uninstrumented
+fun:putmsg=uninstrumented
+fun:putpmsg=uninstrumented
+fun:putpwent=uninstrumented
+fun:puts=uninstrumented
+fun:putsgent=uninstrumented
+fun:putspent=uninstrumented
+fun:pututline=uninstrumented
+fun:pututxline=uninstrumented
+fun:putw=uninstrumented
+fun:putwc=uninstrumented
+fun:putwc_unlocked=uninstrumented
+fun:putwchar=uninstrumented
+fun:putwchar_unlocked=uninstrumented
+fun:pvalloc=uninstrumented
+fun:pwrite=uninstrumented
+fun:pwrite64=uninstrumented
+fun:pwritev=uninstrumented
+fun:pwritev64=uninstrumented
+fun:qecvt=uninstrumented
+fun:qecvt_r=uninstrumented
+fun:qfcvt=uninstrumented
+fun:qfcvt_r=uninstrumented
+fun:qgcvt=uninstrumented
+fun:qsort=uninstrumented
+fun:qsort_r=uninstrumented
+fun:query_module=uninstrumented
+fun:quick_exit=uninstrumented
+fun:quotactl=uninstrumented
+fun:raise=uninstrumented
+fun:rand=uninstrumented
+fun:rand_r=uninstrumented
+fun:random=uninstrumented
+fun:random_r=uninstrumented
+fun:rawmemchr=uninstrumented
+fun:rcmd=uninstrumented
+fun:rcmd_af=uninstrumented
+fun:re_comp=uninstrumented
+fun:re_compile_fastmap=uninstrumented
+fun:re_compile_pattern=uninstrumented
+fun:re_exec=uninstrumented
+fun:re_match=uninstrumented
+fun:re_match_2=uninstrumented
+fun:re_search=uninstrumented
+fun:re_search_2=uninstrumented
+fun:re_set_registers=uninstrumented
+fun:re_set_syntax=uninstrumented
+fun:read=uninstrumented
+fun:readColdStartFile=uninstrumented
+fun:readahead=uninstrumented
+fun:readdir=uninstrumented
+fun:readdir64=uninstrumented
+fun:readdir64_r=uninstrumented
+fun:readdir_r=uninstrumented
+fun:readlink=uninstrumented
+fun:readlinkat=uninstrumented
+fun:readv=uninstrumented
+fun:realloc=uninstrumented
+fun:realpath=uninstrumented
+fun:reboot=uninstrumented
+fun:recv=uninstrumented
+fun:recvfrom=uninstrumented
+fun:recvmmsg=uninstrumented
+fun:recvmsg=uninstrumented
+fun:regcomp=uninstrumented
+fun:regerror=uninstrumented
+fun:regexec=uninstrumented
+fun:regfree=uninstrumented
+fun:register_printf_function=uninstrumented
+fun:register_printf_modifier=uninstrumented
+fun:register_printf_specifier=uninstrumented
+fun:register_printf_type=uninstrumented
+fun:registerrpc=uninstrumented
+fun:remainder=uninstrumented
+fun:remainderf=uninstrumented
+fun:remainderl=uninstrumented
+fun:remap_file_pages=uninstrumented
+fun:remove=uninstrumented
+fun:removexattr=uninstrumented
+fun:remque=uninstrumented
+fun:remquo=uninstrumented
+fun:remquof=uninstrumented
+fun:remquol=uninstrumented
+fun:rename=uninstrumented
+fun:renameat=uninstrumented
+fun:res_gethostbyaddr=uninstrumented
+fun:res_gethostbyname=uninstrumented
+fun:res_gethostbyname2=uninstrumented
+fun:res_send_setqhook=uninstrumented
+fun:res_send_setrhook=uninstrumented
+fun:revoke=uninstrumented
+fun:rewind=uninstrumented
+fun:rewinddir=uninstrumented
+fun:rexec=uninstrumented
+fun:rexec_af=uninstrumented
+fun:rindex=uninstrumented
+fun:rint=uninstrumented
+fun:rintf=uninstrumented
+fun:rintl=uninstrumented
+fun:rmdir=uninstrumented
+fun:round=uninstrumented
+fun:roundf=uninstrumented
+fun:roundl=uninstrumented
+fun:rpmatch=uninstrumented
+fun:rresvport=uninstrumented
+fun:rresvport_af=uninstrumented
+fun:rtime=uninstrumented
+fun:ruserok=uninstrumented
+fun:ruserok_af=uninstrumented
+fun:ruserpass=uninstrumented
+fun:sbrk=uninstrumented
+fun:scalb=uninstrumented
+fun:scalbf=uninstrumented
+fun:scalbl=uninstrumented
+fun:scalbln=uninstrumented
+fun:scalblnf=uninstrumented
+fun:scalblnl=uninstrumented
+fun:scalbn=uninstrumented
+fun:scalbnf=uninstrumented
+fun:scalbnl=uninstrumented
+fun:scandir=uninstrumented
+fun:scandir64=uninstrumented
+fun:scandirat=uninstrumented
+fun:scandirat64=uninstrumented
+fun:scanf=uninstrumented
+fun:sched_get_priority_max=uninstrumented
+fun:sched_get_priority_min=uninstrumented
+fun:sched_getaffinity=uninstrumented
+fun:sched_getcpu=uninstrumented
+fun:sched_getparam=uninstrumented
+fun:sched_getscheduler=uninstrumented
+fun:sched_rr_get_interval=uninstrumented
+fun:sched_setaffinity=uninstrumented
+fun:sched_setparam=uninstrumented
+fun:sched_setscheduler=uninstrumented
+fun:sched_yield=uninstrumented
+fun:secure_getenv=uninstrumented
+fun:seed48=uninstrumented
+fun:seed48_r=uninstrumented
+fun:seekdir=uninstrumented
+fun:select=uninstrumented
+fun:sem_close=uninstrumented
+fun:sem_destroy=uninstrumented
+fun:sem_getvalue=uninstrumented
+fun:sem_init=uninstrumented
+fun:sem_open=uninstrumented
+fun:sem_post=uninstrumented
+fun:sem_timedwait=uninstrumented
+fun:sem_trywait=uninstrumented
+fun:sem_unlink=uninstrumented
+fun:sem_wait=uninstrumented
+fun:semctl=uninstrumented
+fun:semget=uninstrumented
+fun:semop=uninstrumented
+fun:semtimedop=uninstrumented
+fun:send=uninstrumented
+fun:sendfile=uninstrumented
+fun:sendfile64=uninstrumented
+fun:sendmmsg=uninstrumented
+fun:sendmsg=uninstrumented
+fun:sendto=uninstrumented
+fun:setaliasent=uninstrumented
+fun:setbuf=uninstrumented
+fun:setbuffer=uninstrumented
+fun:setcontext=uninstrumented
+fun:setdomainname=uninstrumented
+fun:setegid=uninstrumented
+fun:setenv=uninstrumented
+fun:seteuid=uninstrumented
+fun:setfsent=uninstrumented
+fun:setfsgid=uninstrumented
+fun:setfsuid=uninstrumented
+fun:setgid=uninstrumented
+fun:setgrent=uninstrumented
+fun:setgroups=uninstrumented
+fun:sethostent=uninstrumented
+fun:sethostid=uninstrumented
+fun:sethostname=uninstrumented
+fun:setipv4sourcefilter=uninstrumented
+fun:setitimer=uninstrumented
+fun:setjmp=uninstrumented
+fun:setkey=uninstrumented
+fun:setkey_r=uninstrumented
+fun:setlinebuf=uninstrumented
+fun:setlocale=uninstrumented
+fun:setlogin=uninstrumented
+fun:setlogmask=uninstrumented
+fun:setmntent=uninstrumented
+fun:setnetent=uninstrumented
+fun:setnetgrent=uninstrumented
+fun:setns=uninstrumented
+fun:setpgid=uninstrumented
+fun:setpgrp=uninstrumented
+fun:setpriority=uninstrumented
+fun:setprotoent=uninstrumented
+fun:setpwent=uninstrumented
+fun:setregid=uninstrumented
+fun:setresgid=uninstrumented
+fun:setresuid=uninstrumented
+fun:setreuid=uninstrumented
+fun:setrlimit=uninstrumented
+fun:setrlimit64=uninstrumented
+fun:setrpcent=uninstrumented
+fun:setservent=uninstrumented
+fun:setsgent=uninstrumented
+fun:setsid=uninstrumented
+fun:setsockopt=uninstrumented
+fun:setsourcefilter=uninstrumented
+fun:setspent=uninstrumented
+fun:setstate=uninstrumented
+fun:setstate_r=uninstrumented
+fun:settimeofday=uninstrumented
+fun:setttyent=uninstrumented
+fun:setuid=uninstrumented
+fun:setusershell=uninstrumented
+fun:setutent=uninstrumented
+fun:setutxent=uninstrumented
+fun:setvbuf=uninstrumented
+fun:setxattr=uninstrumented
+fun:sgetsgent=uninstrumented
+fun:sgetsgent_r=uninstrumented
+fun:sgetspent=uninstrumented
+fun:sgetspent_r=uninstrumented
+fun:shm_open=uninstrumented
+fun:shm_unlink=uninstrumented
+fun:shmat=uninstrumented
+fun:shmctl=uninstrumented
+fun:shmdt=uninstrumented
+fun:shmget=uninstrumented
+fun:shutdown=uninstrumented
+fun:sigaction=uninstrumented
+fun:sigaddset=uninstrumented
+fun:sigaltstack=uninstrumented
+fun:sigandset=uninstrumented
+fun:sigblock=uninstrumented
+fun:sigdelset=uninstrumented
+fun:sigemptyset=uninstrumented
+fun:sigfillset=uninstrumented
+fun:siggetmask=uninstrumented
+fun:sighold=uninstrumented
+fun:sigignore=uninstrumented
+fun:siginterrupt=uninstrumented
+fun:sigisemptyset=uninstrumented
+fun:sigismember=uninstrumented
+fun:siglongjmp=uninstrumented
+fun:signal=uninstrumented
+fun:signalfd=uninstrumented
+fun:significand=uninstrumented
+fun:significandf=uninstrumented
+fun:significandl=uninstrumented
+fun:sigorset=uninstrumented
+fun:sigpause=uninstrumented
+fun:sigpending=uninstrumented
+fun:sigprocmask=uninstrumented
+fun:sigqueue=uninstrumented
+fun:sigrelse=uninstrumented
+fun:sigreturn=uninstrumented
+fun:sigset=uninstrumented
+fun:sigsetmask=uninstrumented
+fun:sigstack=uninstrumented
+fun:sigsuspend=uninstrumented
+fun:sigtimedwait=uninstrumented
+fun:sigvec=uninstrumented
+fun:sigwait=uninstrumented
+fun:sigwaitinfo=uninstrumented
+fun:sin=uninstrumented
+fun:sincos=uninstrumented
+fun:sincosf=uninstrumented
+fun:sincosl=uninstrumented
+fun:sinf=uninstrumented
+fun:sinh=uninstrumented
+fun:sinhf=uninstrumented
+fun:sinhl=uninstrumented
+fun:sinl=uninstrumented
+fun:sleep=uninstrumented
+fun:snprintf=uninstrumented
+fun:sockatmark=uninstrumented
+fun:socket=uninstrumented
+fun:socketpair=uninstrumented
+fun:splice=uninstrumented
+fun:sprintf=uninstrumented
+fun:sprofil=uninstrumented
+fun:sqrt=uninstrumented
+fun:sqrtf=uninstrumented
+fun:sqrtl=uninstrumented
+fun:srand=uninstrumented
+fun:srand48=uninstrumented
+fun:srand48_r=uninstrumented
+fun:srandom=uninstrumented
+fun:srandom_r=uninstrumented
+fun:sscanf=uninstrumented
+fun:ssignal=uninstrumented
+fun:sstk=uninstrumented
+fun:stat=uninstrumented
+fun:stat64=uninstrumented
+fun:statfs=uninstrumented
+fun:statfs64=uninstrumented
+fun:statvfs=uninstrumented
+fun:statvfs64=uninstrumented
+fun:step=uninstrumented
+fun:stime=uninstrumented
+fun:stpcpy=uninstrumented
+fun:stpncpy=uninstrumented
+fun:strcasecmp=uninstrumented
+fun:strcasecmp_l=uninstrumented
+fun:strcasestr=uninstrumented
+fun:strcat=uninstrumented
+fun:strchr=uninstrumented
+fun:strchrnul=uninstrumented
+fun:strcmp=uninstrumented
+fun:strcoll=uninstrumented
+fun:strcoll_l=uninstrumented
+fun:strcpy=uninstrumented
+fun:strcspn=uninstrumented
+fun:strdup=uninstrumented
+fun:strerror=uninstrumented
+fun:strerror_l=uninstrumented
+fun:strerror_r=uninstrumented
+fun:strfmon=uninstrumented
+fun:strfmon_l=uninstrumented
+fun:strfry=uninstrumented
+fun:strftime=uninstrumented
+fun:strftime_l=uninstrumented
+fun:strlen=uninstrumented
+fun:strncasecmp=uninstrumented
+fun:strncasecmp_l=uninstrumented
+fun:strncat=uninstrumented
+fun:strncmp=uninstrumented
+fun:strncpy=uninstrumented
+fun:strndup=uninstrumented
+fun:strnlen=uninstrumented
+fun:strpbrk=uninstrumented
+fun:strptime=uninstrumented
+fun:strptime_l=uninstrumented
+fun:strrchr=uninstrumented
+fun:strsep=uninstrumented
+fun:strsignal=uninstrumented
+fun:strspn=uninstrumented
+fun:strstr=uninstrumented
+fun:strtod=uninstrumented
+fun:strtod_l=uninstrumented
+fun:strtof=uninstrumented
+fun:strtof_l=uninstrumented
+fun:strtoimax=uninstrumented
+fun:strtok=uninstrumented
+fun:strtok_r=uninstrumented
+fun:strtol=uninstrumented
+fun:strtol_l=uninstrumented
+fun:strtold=uninstrumented
+fun:strtold_l=uninstrumented
+fun:strtoll=uninstrumented
+fun:strtoll_l=uninstrumented
+fun:strtoq=uninstrumented
+fun:strtoul=uninstrumented
+fun:strtoul_l=uninstrumented
+fun:strtoull=uninstrumented
+fun:strtoull_l=uninstrumented
+fun:strtoumax=uninstrumented
+fun:strtouq=uninstrumented
+fun:strverscmp=uninstrumented
+fun:strxfrm=uninstrumented
+fun:strxfrm_l=uninstrumented
+fun:stty=uninstrumented
+fun:svc_exit=uninstrumented
+fun:svc_getreq=uninstrumented
+fun:svc_getreq_common=uninstrumented
+fun:svc_getreq_poll=uninstrumented
+fun:svc_getreqset=uninstrumented
+fun:svc_register=uninstrumented
+fun:svc_run=uninstrumented
+fun:svc_sendreply=uninstrumented
+fun:svc_unregister=uninstrumented
+fun:svcerr_auth=uninstrumented
+fun:svcerr_decode=uninstrumented
+fun:svcerr_noproc=uninstrumented
+fun:svcerr_noprog=uninstrumented
+fun:svcerr_progvers=uninstrumented
+fun:svcerr_systemerr=uninstrumented
+fun:svcerr_weakauth=uninstrumented
+fun:svcfd_create=uninstrumented
+fun:svcraw_create=uninstrumented
+fun:svctcp_create=uninstrumented
+fun:svcudp_bufcreate=uninstrumented
+fun:svcudp_create=uninstrumented
+fun:svcudp_enablecache=uninstrumented
+fun:svcunix_create=uninstrumented
+fun:svcunixfd_create=uninstrumented
+fun:swab=uninstrumented
+fun:swapcontext=uninstrumented
+fun:swapoff=uninstrumented
+fun:swapon=uninstrumented
+fun:swprintf=uninstrumented
+fun:swscanf=uninstrumented
+fun:symlink=uninstrumented
+fun:symlinkat=uninstrumented
+fun:sync=uninstrumented
+fun:sync_file_range=uninstrumented
+fun:syncfs=uninstrumented
+fun:syscall=uninstrumented
+fun:sysconf=uninstrumented
+fun:sysctl=uninstrumented
+fun:sysinfo=uninstrumented
+fun:syslog=uninstrumented
+fun:system=uninstrumented
+fun:sysv_signal=uninstrumented
+fun:tan=uninstrumented
+fun:tanf=uninstrumented
+fun:tanh=uninstrumented
+fun:tanhf=uninstrumented
+fun:tanhl=uninstrumented
+fun:tanl=uninstrumented
+fun:tcdrain=uninstrumented
+fun:tcflow=uninstrumented
+fun:tcflush=uninstrumented
+fun:tcgetattr=uninstrumented
+fun:tcgetpgrp=uninstrumented
+fun:tcgetsid=uninstrumented
+fun:tcsendbreak=uninstrumented
+fun:tcsetattr=uninstrumented
+fun:tcsetpgrp=uninstrumented
+fun:td_init=uninstrumented
+fun:td_log=uninstrumented
+fun:td_symbol_list=uninstrumented
+fun:td_ta_clear_event=uninstrumented
+fun:td_ta_delete=uninstrumented
+fun:td_ta_enable_stats=uninstrumented
+fun:td_ta_event_addr=uninstrumented
+fun:td_ta_event_getmsg=uninstrumented
+fun:td_ta_get_nthreads=uninstrumented
+fun:td_ta_get_ph=uninstrumented
+fun:td_ta_get_stats=uninstrumented
+fun:td_ta_map_id2thr=uninstrumented
+fun:td_ta_map_lwp2thr=uninstrumented
+fun:td_ta_new=uninstrumented
+fun:td_ta_reset_stats=uninstrumented
+fun:td_ta_set_event=uninstrumented
+fun:td_ta_setconcurrency=uninstrumented
+fun:td_ta_thr_iter=uninstrumented
+fun:td_ta_tsd_iter=uninstrumented
+fun:td_thr_clear_event=uninstrumented
+fun:td_thr_dbresume=uninstrumented
+fun:td_thr_dbsuspend=uninstrumented
+fun:td_thr_event_enable=uninstrumented
+fun:td_thr_event_getmsg=uninstrumented
+fun:td_thr_get_info=uninstrumented
+fun:td_thr_getfpregs=uninstrumented
+fun:td_thr_getgregs=uninstrumented
+fun:td_thr_getxregs=uninstrumented
+fun:td_thr_getxregsize=uninstrumented
+fun:td_thr_set_event=uninstrumented
+fun:td_thr_setfpregs=uninstrumented
+fun:td_thr_setgregs=uninstrumented
+fun:td_thr_setprio=uninstrumented
+fun:td_thr_setsigpending=uninstrumented
+fun:td_thr_setxregs=uninstrumented
+fun:td_thr_sigsetmask=uninstrumented
+fun:td_thr_tls_get_addr=uninstrumented
+fun:td_thr_tlsbase=uninstrumented
+fun:td_thr_tsd=uninstrumented
+fun:td_thr_validate=uninstrumented
+fun:tdelete=uninstrumented
+fun:tdestroy=uninstrumented
+fun:tee=uninstrumented
+fun:telldir=uninstrumented
+fun:tempnam=uninstrumented
+fun:textdomain=uninstrumented
+fun:tfind=uninstrumented
+fun:tgamma=uninstrumented
+fun:tgammaf=uninstrumented
+fun:tgammal=uninstrumented
+fun:time=uninstrumented
+fun:timegm=uninstrumented
+fun:timelocal=uninstrumented
+fun:timer_create=uninstrumented
+fun:timer_delete=uninstrumented
+fun:timer_getoverrun=uninstrumented
+fun:timer_gettime=uninstrumented
+fun:timer_settime=uninstrumented
+fun:timerfd_create=uninstrumented
+fun:timerfd_gettime=uninstrumented
+fun:timerfd_settime=uninstrumented
+fun:times=uninstrumented
+fun:timespec_get=uninstrumented
+fun:tmpfile=uninstrumented
+fun:tmpfile64=uninstrumented
+fun:tmpnam=uninstrumented
+fun:tmpnam_r=uninstrumented
+fun:toascii=uninstrumented
+fun:tolower=uninstrumented
+fun:tolower_l=uninstrumented
+fun:toupper=uninstrumented
+fun:toupper_l=uninstrumented
+fun:towctrans=uninstrumented
+fun:towctrans_l=uninstrumented
+fun:towlower=uninstrumented
+fun:towlower_l=uninstrumented
+fun:towupper=uninstrumented
+fun:towupper_l=uninstrumented
+fun:tr_break=uninstrumented
+fun:trunc=uninstrumented
+fun:truncate=uninstrumented
+fun:truncate64=uninstrumented
+fun:truncf=uninstrumented
+fun:truncl=uninstrumented
+fun:tsearch=uninstrumented
+fun:ttyname=uninstrumented
+fun:ttyname_r=uninstrumented
+fun:ttyslot=uninstrumented
+fun:twalk=uninstrumented
+fun:tzset=uninstrumented
+fun:ualarm=uninstrumented
+fun:ulckpwdf=uninstrumented
+fun:ulimit=uninstrumented
+fun:umask=uninstrumented
+fun:umount=uninstrumented
+fun:umount2=uninstrumented
+fun:uname=uninstrumented
+fun:ungetc=uninstrumented
+fun:ungetwc=uninstrumented
+fun:unlink=uninstrumented
+fun:unlinkat=uninstrumented
+fun:unlockpt=uninstrumented
+fun:unsetenv=uninstrumented
+fun:unshare=uninstrumented
+fun:updwtmp=uninstrumented
+fun:updwtmpx=uninstrumented
+fun:uselib=uninstrumented
+fun:uselocale=uninstrumented
+fun:user2netname=uninstrumented
+fun:usleep=uninstrumented
+fun:ustat=uninstrumented
+fun:utime=uninstrumented
+fun:utimensat=uninstrumented
+fun:utimes=uninstrumented
+fun:utmpname=uninstrumented
+fun:utmpxname=uninstrumented
+fun:valloc=uninstrumented
+fun:vasprintf=uninstrumented
+fun:vdprintf=uninstrumented
+fun:verr=uninstrumented
+fun:verrx=uninstrumented
+fun:versionsort=uninstrumented
+fun:versionsort64=uninstrumented
+fun:vfork=uninstrumented
+fun:vfprintf=uninstrumented
+fun:vfscanf=uninstrumented
+fun:vfwprintf=uninstrumented
+fun:vfwscanf=uninstrumented
+fun:vhangup=uninstrumented
+fun:vlimit=uninstrumented
+fun:vmsplice=uninstrumented
+fun:vprintf=uninstrumented
+fun:vscanf=uninstrumented
+fun:vsnprintf=uninstrumented
+fun:vsprintf=uninstrumented
+fun:vsscanf=uninstrumented
+fun:vswprintf=uninstrumented
+fun:vswscanf=uninstrumented
+fun:vsyslog=uninstrumented
+fun:vtimes=uninstrumented
+fun:vwarn=uninstrumented
+fun:vwarnx=uninstrumented
+fun:vwprintf=uninstrumented
+fun:vwscanf=uninstrumented
+fun:wait=uninstrumented
+fun:wait3=uninstrumented
+fun:wait4=uninstrumented
+fun:waitid=uninstrumented
+fun:waitpid=uninstrumented
+fun:warn=uninstrumented
+fun:warnx=uninstrumented
+fun:wcpcpy=uninstrumented
+fun:wcpncpy=uninstrumented
+fun:wcrtomb=uninstrumented
+fun:wcscasecmp=uninstrumented
+fun:wcscasecmp_l=uninstrumented
+fun:wcscat=uninstrumented
+fun:wcschr=uninstrumented
+fun:wcschrnul=uninstrumented
+fun:wcscmp=uninstrumented
+fun:wcscoll=uninstrumented
+fun:wcscoll_l=uninstrumented
+fun:wcscpy=uninstrumented
+fun:wcscspn=uninstrumented
+fun:wcsdup=uninstrumented
+fun:wcsftime=uninstrumented
+fun:wcsftime_l=uninstrumented
+fun:wcslen=uninstrumented
+fun:wcsncasecmp=uninstrumented
+fun:wcsncasecmp_l=uninstrumented
+fun:wcsncat=uninstrumented
+fun:wcsncmp=uninstrumented
+fun:wcsncpy=uninstrumented
+fun:wcsnlen=uninstrumented
+fun:wcsnrtombs=uninstrumented
+fun:wcspbrk=uninstrumented
+fun:wcsrchr=uninstrumented
+fun:wcsrtombs=uninstrumented
+fun:wcsspn=uninstrumented
+fun:wcsstr=uninstrumented
+fun:wcstod=uninstrumented
+fun:wcstod_l=uninstrumented
+fun:wcstof=uninstrumented
+fun:wcstof_l=uninstrumented
+fun:wcstoimax=uninstrumented
+fun:wcstok=uninstrumented
+fun:wcstol=uninstrumented
+fun:wcstol_l=uninstrumented
+fun:wcstold=uninstrumented
+fun:wcstold_l=uninstrumented
+fun:wcstoll=uninstrumented
+fun:wcstoll_l=uninstrumented
+fun:wcstombs=uninstrumented
+fun:wcstoq=uninstrumented
+fun:wcstoul=uninstrumented
+fun:wcstoul_l=uninstrumented
+fun:wcstoull=uninstrumented
+fun:wcstoull_l=uninstrumented
+fun:wcstoumax=uninstrumented
+fun:wcstouq=uninstrumented
+fun:wcswcs=uninstrumented
+fun:wcswidth=uninstrumented
+fun:wcsxfrm=uninstrumented
+fun:wcsxfrm_l=uninstrumented
+fun:wctob=uninstrumented
+fun:wctomb=uninstrumented
+fun:wctrans=uninstrumented
+fun:wctrans_l=uninstrumented
+fun:wctype=uninstrumented
+fun:wctype_l=uninstrumented
+fun:wcwidth=uninstrumented
+fun:wmemchr=uninstrumented
+fun:wmemcmp=uninstrumented
+fun:wmemcpy=uninstrumented
+fun:wmemmove=uninstrumented
+fun:wmempcpy=uninstrumented
+fun:wmemset=uninstrumented
+fun:wordexp=uninstrumented
+fun:wordfree=uninstrumented
+fun:wprintf=uninstrumented
+fun:write=uninstrumented
+fun:writeColdStartFile=uninstrumented
+fun:writev=uninstrumented
+fun:wscanf=uninstrumented
+fun:xdecrypt=uninstrumented
+fun:xdr_accepted_reply=uninstrumented
+fun:xdr_array=uninstrumented
+fun:xdr_authdes_cred=uninstrumented
+fun:xdr_authdes_verf=uninstrumented
+fun:xdr_authunix_parms=uninstrumented
+fun:xdr_bool=uninstrumented
+fun:xdr_bytes=uninstrumented
+fun:xdr_callhdr=uninstrumented
+fun:xdr_callmsg=uninstrumented
+fun:xdr_cback_data=uninstrumented
+fun:xdr_char=uninstrumented
+fun:xdr_cryptkeyarg=uninstrumented
+fun:xdr_cryptkeyarg2=uninstrumented
+fun:xdr_cryptkeyres=uninstrumented
+fun:xdr_des_block=uninstrumented
+fun:xdr_domainname=uninstrumented
+fun:xdr_double=uninstrumented
+fun:xdr_enum=uninstrumented
+fun:xdr_float=uninstrumented
+fun:xdr_free=uninstrumented
+fun:xdr_getcredres=uninstrumented
+fun:xdr_hyper=uninstrumented
+fun:xdr_int=uninstrumented
+fun:xdr_int16_t=uninstrumented
+fun:xdr_int32_t=uninstrumented
+fun:xdr_int64_t=uninstrumented
+fun:xdr_int8_t=uninstrumented
+fun:xdr_key_netstarg=uninstrumented
+fun:xdr_key_netstres=uninstrumented
+fun:xdr_keybuf=uninstrumented
+fun:xdr_keydat=uninstrumented
+fun:xdr_keystatus=uninstrumented
+fun:xdr_long=uninstrumented
+fun:xdr_longlong_t=uninstrumented
+fun:xdr_mapname=uninstrumented
+fun:xdr_netnamestr=uninstrumented
+fun:xdr_netobj=uninstrumented
+fun:xdr_obj_p=uninstrumented
+fun:xdr_opaque=uninstrumented
+fun:xdr_opaque_auth=uninstrumented
+fun:xdr_peername=uninstrumented
+fun:xdr_pmap=uninstrumented
+fun:xdr_pmaplist=uninstrumented
+fun:xdr_pointer=uninstrumented
+fun:xdr_quad_t=uninstrumented
+fun:xdr_reference=uninstrumented
+fun:xdr_rejected_reply=uninstrumented
+fun:xdr_replymsg=uninstrumented
+fun:xdr_rmtcall_args=uninstrumented
+fun:xdr_rmtcallres=uninstrumented
+fun:xdr_short=uninstrumented
+fun:xdr_sizeof=uninstrumented
+fun:xdr_string=uninstrumented
+fun:xdr_u_char=uninstrumented
+fun:xdr_u_hyper=uninstrumented
+fun:xdr_u_int=uninstrumented
+fun:xdr_u_long=uninstrumented
+fun:xdr_u_longlong_t=uninstrumented
+fun:xdr_u_quad_t=uninstrumented
+fun:xdr_u_short=uninstrumented
+fun:xdr_uint16_t=uninstrumented
+fun:xdr_uint32_t=uninstrumented
+fun:xdr_uint64_t=uninstrumented
+fun:xdr_uint8_t=uninstrumented
+fun:xdr_union=uninstrumented
+fun:xdr_unixcred=uninstrumented
+fun:xdr_valdat=uninstrumented
+fun:xdr_vector=uninstrumented
+fun:xdr_void=uninstrumented
+fun:xdr_wrapstring=uninstrumented
+fun:xdr_yp_buf=uninstrumented
+fun:xdr_ypall=uninstrumented
+fun:xdr_ypbind_binding=uninstrumented
+fun:xdr_ypbind_resp=uninstrumented
+fun:xdr_ypbind_resptype=uninstrumented
+fun:xdr_ypbind_setdom=uninstrumented
+fun:xdr_ypdelete_args=uninstrumented
+fun:xdr_ypmap_parms=uninstrumented
+fun:xdr_ypmaplist=uninstrumented
+fun:xdr_yppush_status=uninstrumented
+fun:xdr_yppushresp_xfr=uninstrumented
+fun:xdr_ypreq_key=uninstrumented
+fun:xdr_ypreq_nokey=uninstrumented
+fun:xdr_ypreq_xfr=uninstrumented
+fun:xdr_ypresp_all=uninstrumented
+fun:xdr_ypresp_key_val=uninstrumented
+fun:xdr_ypresp_maplist=uninstrumented
+fun:xdr_ypresp_master=uninstrumented
+fun:xdr_ypresp_order=uninstrumented
+fun:xdr_ypresp_val=uninstrumented
+fun:xdr_ypresp_xfr=uninstrumented
+fun:xdr_ypstat=uninstrumented
+fun:xdr_ypupdate_args=uninstrumented
+fun:xdr_ypxfrstat=uninstrumented
+fun:xdrmem_create=uninstrumented
+fun:xdrrec_create=uninstrumented
+fun:xdrrec_endofrecord=uninstrumented
+fun:xdrrec_eof=uninstrumented
+fun:xdrrec_skiprecord=uninstrumented
+fun:xdrstdio_create=uninstrumented
+fun:xencrypt=uninstrumented
+fun:xprt_register=uninstrumented
+fun:xprt_unregister=uninstrumented
+fun:y0=uninstrumented
+fun:y0f=uninstrumented
+fun:y0l=uninstrumented
+fun:y1=uninstrumented
+fun:y1f=uninstrumented
+fun:y1l=uninstrumented
+fun:yn=uninstrumented
+fun:ynf=uninstrumented
+fun:ynl=uninstrumented
+fun:yp_all=uninstrumented
+fun:yp_bind=uninstrumented
+fun:yp_first=uninstrumented
+fun:yp_get_default_domain=uninstrumented
+fun:yp_maplist=uninstrumented
+fun:yp_master=uninstrumented
+fun:yp_match=uninstrumented
+fun:yp_next=uninstrumented
+fun:yp_order=uninstrumented
+fun:yp_unbind=uninstrumented
+fun:yp_update=uninstrumented
+fun:ypbinderr_string=uninstrumented
+fun:yperr_string=uninstrumented
+fun:ypprot_err=uninstrumented
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/dfsan/libc_ubuntu1404_abilist.txt
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
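Each entry in the ABI list above has the form fun:<name>=<category>. "uninstrumented" tells DFSan that the named libc function carries no instrumentation of its own; in practice it is paired with a second entry ("discard", "functional", or "custom") that says how labels flow through the call. As a hedged illustration only (not part of the vendored list): a hypothetical custom entry for strlen and the __dfsw_-prefixed wrapper it implies, following the documented DFSan convention. The wrapper name matches the __dfsw_* export pattern in dfsan.syms.extra below.

    # Hypothetical user-supplied ABI list entries:
    #   fun:strlen=uninstrumented
    #   fun:strlen=custom

    // Sketch of the matching custom wrapper (__dfsw_<original name>).
    #include <sanitizer/dfsan_interface.h>
    #include <string.h>

    size_t __dfsw_strlen(const char *s, dfsan_label s_label,
                         dfsan_label *ret_label) {
      size_t ret = strlen(s);
      // The result depends on every byte of s up to and including the
      // terminating NUL, so its label is the union of their labels.
      *ret_label = dfsan_read_label(s, ret + 1);
      return ret;
    }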
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/dfsan/dfsan.syms.extra
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/dfsan/dfsan.syms.extra (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/dfsan/dfsan.syms.extra (revision 351984)
@@ -0,0 +1,3 @@
+dfsan_*
+__dfsan_*
+__dfsw_*
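The three wildcard patterns above keep the dfsan_*, __dfsan_*, and __dfsw_* symbols exported from the runtime. A minimal sketch of a program exercising that public interface, assuming clang's -fsanitize=dataflow mode and the documented <sanitizer/dfsan_interface.h> API:

    // Build sketch (assumed): clang -g -fsanitize=dataflow demo.c
    #include <sanitizer/dfsan_interface.h>
    #include <assert.h>

    int main(void) {
      int i = 1;
      dfsan_label i_label = dfsan_create_label("i", 0);
      dfsan_set_label(i_label, &i, sizeof(i));
      int j = i + 2;  // instrumented arithmetic propagates i's label to j
      assert(dfsan_has_label(dfsan_get_label(j), i_label));
      return 0;
    }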
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerBuiltins.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerBuiltins.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerBuiltins.h (revision 351984)
@@ -0,0 +1,35 @@
+//===- FuzzerBuiltins.h - Internal header for builtins ----------*- C++ -* ===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// Wrapper functions and macros around builtin functions.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_FUZZER_BUILTINS_H
+#define LLVM_FUZZER_BUILTINS_H
+
+#include "FuzzerDefs.h"
+
+#if !LIBFUZZER_MSVC
+#include <cstdint>
+
+#define GET_CALLER_PC() __builtin_return_address(0)
+
+namespace fuzzer {
+
+inline uint8_t Bswap(uint8_t x) { return x; }
+inline uint16_t Bswap(uint16_t x) { return __builtin_bswap16(x); }
+inline uint32_t Bswap(uint32_t x) { return __builtin_bswap32(x); }
+inline uint64_t Bswap(uint64_t x) { return __builtin_bswap64(x); }
+
+inline uint32_t Clzll(unsigned long long X) { return __builtin_clzll(X); }
+inline uint32_t Clz(uint32_t X) { return __builtin_clz(X); }
+inline int Popcountll(unsigned long long X) { return __builtin_popcountll(X); }
+
+} // namespace fuzzer
+
+#endif // !LIBFUZZER_MSVC
+#endif // LLVM_FUZZER_BUILTINS_H
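These wrappers exist so the rest of libFuzzer never calls compiler builtins directly; FuzzerBuiltinsMsvc.h below provides the same names via MSVC intrinsics. A standalone sketch (an illustration, not vendor code) of the semantics the two headers must agree on; note that __builtin_clzll(0) is undefined, whereas the MSVC implementation deliberately returns 64 for a zero input:

    // Standalone check of the Bswap/Clzll contract shared by both headers.
    #include <cassert>
    #include <cstdint>

    int main() {
      // Bswap reverses byte order; the 8-bit overload is the identity.
      assert(__builtin_bswap16(0x1234) == 0x3412);
      assert(__builtin_bswap32(0x11223344u) == 0x44332211u);
      // Clzll counts leading zero bits of a nonzero 64-bit value.
      assert(__builtin_clzll(1ULL) == 63);
      assert(__builtin_clzll(1ULL << 63) == 0);
      // __builtin_clzll(0) is undefined behavior; never pass zero.
      return 0;
    }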
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerBuiltinsMsvc.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerBuiltinsMsvc.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerBuiltinsMsvc.h (revision 351984)
@@ -0,0 +1,58 @@
+//===- FuzzerBuiltinsMSVC.h - Internal header for builtins ------*- C++ -* ===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// Wrapper functions and macros that use intrinsics instead of builtin functions
+// which cannot be compiled by MSVC.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_FUZZER_BUILTINS_MSVC_H
+#define LLVM_FUZZER_BUILTINS_MSVC_H
+
+#include "FuzzerDefs.h"
+
+#if LIBFUZZER_MSVC
+#if !defined(_M_ARM) && !defined(_M_X64)
+#error "_BitScanReverse64 unavailable on this platform so MSVC is unsupported."
+#endif
+#include <intrin.h>
+#include <cstdint>
+#include <cstdlib>
+
+// __builtin_return_address() cannot be compiled with MSVC. Use the equivalent
+// from <intrin.h>.
+#define GET_CALLER_PC() _ReturnAddress()
+
+namespace fuzzer {
+
+inline uint8_t Bswap(uint8_t x) { return x; }
+// Use alternatives to __builtin functions from <stdlib.h> and <intrin.h> on
+// Windows since the builtins are not supported by MSVC.
+inline uint16_t Bswap(uint16_t x) { return _byteswap_ushort(x); }
+inline uint32_t Bswap(uint32_t x) { return _byteswap_ulong(x); }
+inline uint64_t Bswap(uint64_t x) { return _byteswap_uint64(x); }
+
+// The functions below were mostly copied from
+// compiler-rt/lib/builtins/int_lib.h which defines the __builtin functions used
+// outside of Windows.
+inline uint32_t Clzll(uint64_t X) {
+ unsigned long LeadZeroIdx = 0;
+ if (_BitScanReverse64(&LeadZeroIdx, X)) return 63 - LeadZeroIdx;
+ return 64;
+}
+
+inline uint32_t Clz(uint32_t X) {
+ unsigned long LeadZeroIdx = 0;
+ if (_BitScanReverse(&LeadZeroIdx, X)) return 31 - LeadZeroIdx;
+ return 32;
+}
+
+inline int Popcountll(unsigned long long X) { return __popcnt64(X); }
+
+} // namespace fuzzer
+
+#endif // LIBFUZZER_MSVC
+#endif // LLVM_FUZZER_BUILTINS_MSVC_H
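_BitScanReverse64 writes the bit index of the highest set bit and returns nonzero on success, so 63 - LeadZeroIdx is the leading-zero count; for a zero input it reports failure and Clzll falls through to 64. A short worked instance of that mapping (hypothetical standalone test, MSVC only):

    // MSVC-only worked example of the Clzll path defined above.
    #include <cassert>
    #include <intrin.h>

    int main() {
      unsigned long Idx = 0;
      // 0x10 has its only set bit at index 4 ...
      assert(_BitScanReverse64(&Idx, 0x10) != 0 && Idx == 4);
      // ... so Clzll(0x10) == 63 - 4 == 59 leading zeros.
      // A zero input fails the scan, and Clzll returns 64 instead.
      assert(_BitScanReverse64(&Idx, 0) == 0);
      return 0;
    }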
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerCommand.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerCommand.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerCommand.h (revision 351984)
@@ -0,0 +1,178 @@
+//===- FuzzerCommand.h - Interface representing a process -------*- C++ -* ===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// FuzzerCommand represents a command to run in a subprocess. It allows callers
+// to manage command line arguments and output and error streams.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_FUZZER_COMMAND_H
+#define LLVM_FUZZER_COMMAND_H
+
+#include "FuzzerDefs.h"
+#include "FuzzerIO.h"
+
+#include <algorithm>
+#include <sstream>
+#include <string>
+#include <vector>
+
+namespace fuzzer {
+
+class Command final {
+public:
+ // This command line flag is used to indicate that the remaining command line
+ // is immutable, meaning this flag effectively marks the end of the mutable
+ // argument list.
+ static inline const char *ignoreRemainingArgs() {
+ return "-ignore_remaining_args=1";
+ }
+
+ Command() : CombinedOutAndErr(false) {}
+
+ explicit Command(const Vector<std::string> &ArgsToAdd)
+ : Args(ArgsToAdd), CombinedOutAndErr(false) {}
+
+ explicit Command(const Command &Other)
+ : Args(Other.Args), CombinedOutAndErr(Other.CombinedOutAndErr),
+ OutputFile(Other.OutputFile) {}
+
+ Command &operator=(const Command &Other) {
+ Args = Other.Args;
+ CombinedOutAndErr = Other.CombinedOutAndErr;
+ OutputFile = Other.OutputFile;
+ return *this;
+ }
+
+ ~Command() {}
+
+ // Returns true if the given Arg is present in Args. Only checks up to
+ // "-ignore_remaining_args=1".
+ bool hasArgument(const std::string &Arg) const {
+ auto i = endMutableArgs();
+ return std::find(Args.begin(), i, Arg) != i;
+ }
+
+ // Gets all of the current command line arguments, **including** those after
+ // "-ignore-remaining-args=1".
+ const Vector<std::string> &getArguments() const { return Args; }
+
+ // Adds the given argument before "-ignore_remaining_args=1", or at the end
+ // if that flag isn't present.
+ void addArgument(const std::string &Arg) {
+ Args.insert(endMutableArgs(), Arg);
+ }
+
+ // Adds all given arguments before "-ignore_remaining_args=1", or at the end
+ // if that flag isn't present.
+ void addArguments(const Vector<std::string> &ArgsToAdd) {
+ Args.insert(endMutableArgs(), ArgsToAdd.begin(), ArgsToAdd.end());
+ }
+
+ // Removes the given argument from the command argument list. Ignores any
+ // occurrences after "-ignore_remaining_args=1", if present.
+ void removeArgument(const std::string &Arg) {
+ auto i = endMutableArgs();
+ Args.erase(std::remove(Args.begin(), i, Arg), i);
+ }
+
+ // Like hasArgument, but checks for "-[Flag]=...".
+ bool hasFlag(const std::string &Flag) const {
+ std::string Arg("-" + Flag + "=");
+ auto IsMatch = [&](const std::string &Other) {
+ return Arg.compare(0, std::string::npos, Other, 0, Arg.length()) == 0;
+ };
+ return std::any_of(Args.begin(), endMutableArgs(), IsMatch);
+ }
+
+ // Returns the value of the first instance of a given flag, or an empty string
+ // if the flag isn't present. Ignores any occurrences after
+ // "-ignore_remaining_args=1", if present.
+ std::string getFlagValue(const std::string &Flag) const {
+ std::string Arg("-" + Flag + "=");
+ auto IsMatch = [&](const std::string &Other) {
+ return Arg.compare(0, std::string::npos, Other, 0, Arg.length()) == 0;
+ };
+ auto i = endMutableArgs();
+ auto j = std::find_if(Args.begin(), i, IsMatch);
+ std::string result;
+ if (j != i) {
+ result = j->substr(Arg.length());
+ }
+ return result;
+ }
+
+ // Like AddArgument, but adds "-[Flag]=[Value]".
+ void addFlag(const std::string &Flag, const std::string &Value) {
+ addArgument("-" + Flag + "=" + Value);
+ }
+
+ // Like RemoveArgument, but removes "-[Flag]=...".
+ void removeFlag(const std::string &Flag) {
+ std::string Arg("-" + Flag + "=");
+ auto IsMatch = [&](const std::string &Other) {
+ return Arg.compare(0, std::string::npos, Other, 0, Arg.length()) == 0;
+ };
+ auto i = endMutableArgs();
+ Args.erase(std::remove_if(Args.begin(), i, IsMatch), i);
+ }
+
+ // Returns whether the command's stdout is being written to an output file.
+ bool hasOutputFile() const { return !OutputFile.empty(); }
+
+ // Returns the currently set output file.
+ const std::string &getOutputFile() const { return OutputFile; }
+
+ // Configures the command to redirect its stdout to the named file.
+ void setOutputFile(const std::string &FileName) { OutputFile = FileName; }
+
+ // Returns whether the command's stderr is redirected to stdout.
+ bool isOutAndErrCombined() const { return CombinedOutAndErr; }
+
+ // Sets whether to redirect the command's stderr to its stdout.
+ void combineOutAndErr(bool combine = true) { CombinedOutAndErr = combine; }
+
+ // Returns a string representation of the command. On many systems this will
+ // be the equivalent command line.
+ std::string toString() const {
+ std::stringstream SS;
+ for (const auto &arg : getArguments())
+ SS << arg << " ";
+ if (hasOutputFile())
+ SS << ">" << getOutputFile() << " ";
+ if (isOutAndErrCombined())
+ SS << "2>&1 ";
+ std::string result = SS.str();
+ if (!result.empty())
+ result = result.substr(0, result.length() - 1);
+ return result;
+ }
+
+private:
+ Command(Command &&Other) = delete;
+ Command &operator=(Command &&Other) = delete;
+
+ Vector<std::string>::iterator endMutableArgs() {
+ return std::find(Args.begin(), Args.end(), ignoreRemainingArgs());
+ }
+
+ Vector<std::string>::const_iterator endMutableArgs() const {
+ return std::find(Args.begin(), Args.end(), ignoreRemainingArgs());
+ }
+
+ // The command arguments. Args[0] is the command name.
+ Vector<std::string> Args;
+
+ // True indicates stderr is redirected to stdout.
+ bool CombinedOutAndErr;
+
+ // If not empty, stdout is redirected to the named file.
+ std::string OutputFile;
+};
+
+} // namespace fuzzer
+
+#endif // LLVM_FUZZER_COMMAND_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerCommand.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
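A hedged usage sketch of the Command interface above. It assumes compilation inside the libFuzzer tree (FuzzerCommand.h already pulls in FuzzerDefs.h and FuzzerIO.h, which supply the Vector alias and Printf); the fuzzer binary name and log file name are hypothetical:

    #include "FuzzerCommand.h"

    void LaunchChildSketch() {
      fuzzer::Command Cmd;
      Cmd.addArgument("./my_fuzzer");  // Args[0] is the command name
      Cmd.addFlag("runs", "1000");     // appended as "-runs=1000"
      Cmd.setOutputFile("child.log");  // stdout goes to child.log
      Cmd.combineOutAndErr();          // stderr is folded into stdout
      if (Cmd.hasFlag("runs"))
        fuzzer::Printf("runs = %s\n", Cmd.getFlagValue("runs").c_str());
      // toString() renders roughly: ./my_fuzzer -runs=1000 >child.log 2>&1
      fuzzer::Printf("%s\n", Cmd.toString().c_str());
    }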
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerCorpus.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerCorpus.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerCorpus.h (revision 351984)
@@ -0,0 +1,310 @@
+//===- FuzzerCorpus.h - Internal header for the Fuzzer ----------*- C++ -* ===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// fuzzer::InputCorpus
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_FUZZER_CORPUS
+#define LLVM_FUZZER_CORPUS
+
+#include "FuzzerDataFlowTrace.h"
+#include "FuzzerDefs.h"
+#include "FuzzerIO.h"
+#include "FuzzerRandom.h"
+#include "FuzzerSHA1.h"
+#include "FuzzerTracePC.h"
+#include <algorithm>
+#include <numeric>
+#include <random>
+#include <unordered_set>
+
+namespace fuzzer {
+
+struct InputInfo {
+ Unit U; // The actual input data.
+ uint8_t Sha1[kSHA1NumBytes]; // Checksum.
+ // Number of features that this input has and no smaller input has.
+ size_t NumFeatures = 0;
+ size_t Tmp = 0; // Used by ValidateFeatureSet.
+ // Stats.
+ size_t NumExecutedMutations = 0;
+ size_t NumSuccessfullMutations = 0;
+ bool MayDeleteFile = false;
+ bool Reduced = false;
+ bool HasFocusFunction = false;
+ Vector<uint32_t> UniqFeatureSet;
+ Vector<uint8_t> DataFlowTraceForFocusFunction;
+};
+
+class InputCorpus {
+ static const size_t kFeatureSetSize = 1 << 21;
+ public:
+ InputCorpus(const std::string &OutputCorpus) : OutputCorpus(OutputCorpus) {
+ memset(InputSizesPerFeature, 0, sizeof(InputSizesPerFeature));
+ memset(SmallestElementPerFeature, 0, sizeof(SmallestElementPerFeature));
+ }
+ ~InputCorpus() {
+ for (auto II : Inputs)
+ delete II;
+ }
+ size_t size() const { return Inputs.size(); }
+ size_t SizeInBytes() const {
+ size_t Res = 0;
+ for (auto II : Inputs)
+ Res += II->U.size();
+ return Res;
+ }
+ size_t NumActiveUnits() const {
+ size_t Res = 0;
+ for (auto II : Inputs)
+ Res += !II->U.empty();
+ return Res;
+ }
+ size_t MaxInputSize() const {
+ size_t Res = 0;
+ for (auto II : Inputs)
+ Res = std::max(Res, II->U.size());
+ return Res;
+ }
+
+ size_t NumInputsThatTouchFocusFunction() {
+ return std::count_if(Inputs.begin(), Inputs.end(), [](const InputInfo *II) {
+ return II->HasFocusFunction;
+ });
+ }
+
+ size_t NumInputsWithDataFlowTrace() {
+ return std::count_if(Inputs.begin(), Inputs.end(), [](const InputInfo *II) {
+ return !II->DataFlowTraceForFocusFunction.empty();
+ });
+ }
+
+ bool empty() const { return Inputs.empty(); }
+ const Unit &operator[] (size_t Idx) const { return Inputs[Idx]->U; }
+ InputInfo *AddToCorpus(const Unit &U, size_t NumFeatures, bool MayDeleteFile,
+ bool HasFocusFunction,
+ const Vector<uint32_t> &FeatureSet,
+ const DataFlowTrace &DFT, const InputInfo *BaseII) {
+ assert(!U.empty());
+ if (FeatureDebug)
+ Printf("ADD_TO_CORPUS %zd NF %zd\n", Inputs.size(), NumFeatures);
+ Inputs.push_back(new InputInfo());
+ InputInfo &II = *Inputs.back();
+ II.U = U;
+ II.NumFeatures = NumFeatures;
+ II.MayDeleteFile = MayDeleteFile;
+ II.UniqFeatureSet = FeatureSet;
+ II.HasFocusFunction = HasFocusFunction;
+ std::sort(II.UniqFeatureSet.begin(), II.UniqFeatureSet.end());
+ ComputeSHA1(U.data(), U.size(), II.Sha1);
+ auto Sha1Str = Sha1ToString(II.Sha1);
+ Hashes.insert(Sha1Str);
+ if (HasFocusFunction)
+ if (auto V = DFT.Get(Sha1Str))
+ II.DataFlowTraceForFocusFunction = *V;
+ // This is a gross heuristic.
+ // Ideally, when we add an element to a corpus we need to know its DFT.
+ // But if we don't, we'll use the DFT of its base input.
+ if (II.DataFlowTraceForFocusFunction.empty() && BaseII)
+ II.DataFlowTraceForFocusFunction = BaseII->DataFlowTraceForFocusFunction;
+ UpdateCorpusDistribution();
+ PrintCorpus();
+ // ValidateFeatureSet();
+ return &II;
+ }
+
+ // Debug-only
+ void PrintUnit(const Unit &U) {
+ if (!FeatureDebug) return;
+ for (uint8_t C : U) {
+ if (C != 'F' && C != 'U' && C != 'Z')
+ C = '.';
+ Printf("%c", C);
+ }
+ }
+
+ // Debug-only
+ void PrintFeatureSet(const Vector<uint32_t> &FeatureSet) {
+ if (!FeatureDebug) return;
+ Printf("{");
+ for (uint32_t Feature: FeatureSet)
+ Printf("%u,", Feature);
+ Printf("}");
+ }
+
+ // Debug-only
+ void PrintCorpus() {
+ if (!FeatureDebug) return;
+ Printf("======= CORPUS:\n");
+ int i = 0;
+ for (auto II : Inputs) {
+ if (std::find(II->U.begin(), II->U.end(), 'F') != II->U.end()) {
+ Printf("[%2d] ", i);
+ Printf("%s sz=%zd ", Sha1ToString(II->Sha1).c_str(), II->U.size());
+ PrintUnit(II->U);
+ Printf(" ");
+ PrintFeatureSet(II->UniqFeatureSet);
+ Printf("\n");
+ }
+ i++;
+ }
+ }
+
+ void Replace(InputInfo *II, const Unit &U) {
+ assert(II->U.size() > U.size());
+ Hashes.erase(Sha1ToString(II->Sha1));
+ DeleteFile(*II);
+ ComputeSHA1(U.data(), U.size(), II->Sha1);
+ Hashes.insert(Sha1ToString(II->Sha1));
+ II->U = U;
+ II->Reduced = true;
+ UpdateCorpusDistribution();
+ }
+
+ bool HasUnit(const Unit &U) { return Hashes.count(Hash(U)); }
+ bool HasUnit(const std::string &H) { return Hashes.count(H); }
+ InputInfo &ChooseUnitToMutate(Random &Rand) {
+ InputInfo &II = *Inputs[ChooseUnitIdxToMutate(Rand)];
+ assert(!II.U.empty());
+ return II;
+ }
+
+ // Returns the index of a random unit from the corpus to mutate.
+ size_t ChooseUnitIdxToMutate(Random &Rand) {
+ size_t Idx = static_cast<size_t>(CorpusDistribution(Rand));
+ assert(Idx < Inputs.size());
+ return Idx;
+ }
+
+ void PrintStats() {
+ for (size_t i = 0; i < Inputs.size(); i++) {
+ const auto &II = *Inputs[i];
+ Printf(" [% 3zd %s] sz: % 5zd runs: % 5zd succ: % 5zd focus: %d\n", i,
+ Sha1ToString(II.Sha1).c_str(), II.U.size(),
+ II.NumExecutedMutations, II.NumSuccessfullMutations, II.HasFocusFunction);
+ }
+ }
+
+ void PrintFeatureSet() {
+ for (size_t i = 0; i < kFeatureSetSize; i++) {
+ if (size_t Sz = GetFeature(i))
+ Printf("[%zd: id %u sz %zd] ", i, SmallestElementPerFeature[i], Sz);
+ }
+ Printf("\n\t");
+ for (size_t i = 0; i < Inputs.size(); i++)
+ if (size_t N = Inputs[i]->NumFeatures)
+ Printf(" %zd=>%zd ", i, N);
+ Printf("\n");
+ }
+
+ void DeleteFile(const InputInfo &II) {
+ if (!OutputCorpus.empty() && II.MayDeleteFile)
+ RemoveFile(DirPlusFile(OutputCorpus, Sha1ToString(II.Sha1)));
+ }
+
+ void DeleteInput(size_t Idx) {
+ InputInfo &II = *Inputs[Idx];
+ DeleteFile(II);
+ Unit().swap(II.U);
+ if (FeatureDebug)
+ Printf("EVICTED %zd\n", Idx);
+ }
+
+ bool AddFeature(size_t Idx, uint32_t NewSize, bool Shrink) {
+ assert(NewSize);
+ Idx = Idx % kFeatureSetSize;
+ uint32_t OldSize = GetFeature(Idx);
+ if (OldSize == 0 || (Shrink && OldSize > NewSize)) {
+ if (OldSize > 0) {
+ size_t OldIdx = SmallestElementPerFeature[Idx];
+ InputInfo &II = *Inputs[OldIdx];
+ assert(II.NumFeatures > 0);
+ II.NumFeatures--;
+ if (II.NumFeatures == 0)
+ DeleteInput(OldIdx);
+ } else {
+ NumAddedFeatures++;
+ }
+ NumUpdatedFeatures++;
+ if (FeatureDebug)
+ Printf("ADD FEATURE %zd sz %d\n", Idx, NewSize);
+ SmallestElementPerFeature[Idx] = Inputs.size();
+ InputSizesPerFeature[Idx] = NewSize;
+ return true;
+ }
+ return false;
+ }
+
+ size_t NumFeatures() const { return NumAddedFeatures; }
+ size_t NumFeatureUpdates() const { return NumUpdatedFeatures; }
+
+private:
+
+ static const bool FeatureDebug = false;
+
+ size_t GetFeature(size_t Idx) const { return InputSizesPerFeature[Idx]; }
+
+ void ValidateFeatureSet() {
+ if (FeatureDebug)
+ PrintFeatureSet();
+ for (size_t Idx = 0; Idx < kFeatureSetSize; Idx++)
+ if (GetFeature(Idx))
+ Inputs[SmallestElementPerFeature[Idx]]->Tmp++;
+ for (auto II: Inputs) {
+ if (II->Tmp != II->NumFeatures)
+ Printf("ZZZ %zd %zd\n", II->Tmp, II->NumFeatures);
+ assert(II->Tmp == II->NumFeatures);
+ II->Tmp = 0;
+ }
+ }
+
+ // Updates the probability distribution for the units in the corpus.
+ // Must be called whenever the corpus or unit weights are changed.
+ //
+ // Hypothesis: units added to the corpus last are more interesting.
+ //
+ // Hypothesis: inputs with infrequent features are more interesting.
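+ //
+ // For example (following the weight computation below): with three
+ // inputs that all have features and none touching the focus function,
+ // the weights are {1, 2, 3}, so the newest input is three times more
+ // likely to be mutated than the oldest; touching the focus function
+ // multiplies a weight by 1000, and an input with no features gets
+ // weight 0 and is never chosen.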
+ void UpdateCorpusDistribution() {
+ size_t N = Inputs.size();
+ assert(N);
+ Intervals.resize(N + 1);
+ Weights.resize(N);
+ std::iota(Intervals.begin(), Intervals.end(), 0);
+ for (size_t i = 0; i < N; i++)
+ Weights[i] = Inputs[i]->NumFeatures
+ ? (i + 1) * (Inputs[i]->HasFocusFunction ? 1000 : 1)
+ : 0.;
+ if (FeatureDebug) {
+ for (size_t i = 0; i < N; i++)
+ Printf("%zd ", Inputs[i]->NumFeatures);
+ Printf("SCORE\n");
+ for (size_t i = 0; i < N; i++)
+ Printf("%f ", Weights[i]);
+ Printf("Weights\n");
+ }
+ CorpusDistribution = std::piecewise_constant_distribution<double>(
+ Intervals.begin(), Intervals.end(), Weights.begin());
+ }
+ std::piecewise_constant_distribution<double> CorpusDistribution;
+
+ Vector<double> Intervals;
+ Vector<double> Weights;
+
+ std::unordered_set<std::string> Hashes;
+ Vector<InputInfo*> Inputs;
+
+ size_t NumAddedFeatures = 0;
+ size_t NumUpdatedFeatures = 0;
+ uint32_t InputSizesPerFeature[kFeatureSetSize];
+ uint32_t SmallestElementPerFeature[kFeatureSetSize];
+
+ std::string OutputCorpus;
+};
+
+} // namespace fuzzer
+
+#endif // LLVM_FUZZER_CORPUS
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerCorpus.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerCrossOver.cpp
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerCrossOver.cpp (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerCrossOver.cpp (revision 351984)
@@ -0,0 +1,51 @@
+//===- FuzzerCrossOver.cpp - Cross over two test inputs -------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// Cross over test inputs.
+//===----------------------------------------------------------------------===//
+
+#include "FuzzerDefs.h"
+#include "FuzzerMutate.h"
+#include "FuzzerRandom.h"
+#include <cstring>
+
+namespace fuzzer {
+
+// Cross Data1 and Data2, store the result (up to MaxOutSize bytes) in Out.
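+// For example, crossing Data1 = "AAAA" with Data2 = "BBBB" may produce
+// "AABBBAB": randomly sized chunks are copied from the two inputs in
+// alternating order until the output is full or both inputs are exhausted
+// (the exact result depends on Rand).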
+size_t MutationDispatcher::CrossOver(const uint8_t *Data1, size_t Size1,
+ const uint8_t *Data2, size_t Size2,
+ uint8_t *Out, size_t MaxOutSize) {
+ assert(Size1 || Size2);
+ MaxOutSize = Rand(MaxOutSize) + 1;
+ size_t OutPos = 0;
+ size_t Pos1 = 0;
+ size_t Pos2 = 0;
+ size_t *InPos = &Pos1;
+ size_t InSize = Size1;
+ const uint8_t *Data = Data1;
+ bool CurrentlyUsingFirstData = true;
+ while (OutPos < MaxOutSize && (Pos1 < Size1 || Pos2 < Size2)) {
+ // Merge a part of Data into Out.
+ size_t OutSizeLeft = MaxOutSize - OutPos;
+ if (*InPos < InSize) {
+ size_t InSizeLeft = InSize - *InPos;
+ size_t MaxExtraSize = std::min(OutSizeLeft, InSizeLeft);
+ size_t ExtraSize = Rand(MaxExtraSize) + 1;
+ memcpy(Out + OutPos, Data + *InPos, ExtraSize);
+ OutPos += ExtraSize;
+ (*InPos) += ExtraSize;
+ }
+ // Use the other input data on the next iteration.
+ InPos = CurrentlyUsingFirstData ? &Pos2 : &Pos1;
+ InSize = CurrentlyUsingFirstData ? Size2 : Size1;
+ Data = CurrentlyUsingFirstData ? Data2 : Data1;
+ CurrentlyUsingFirstData = !CurrentlyUsingFirstData;
+ }
+ return OutPos;
+}
+
+} // namespace fuzzer
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerCrossOver.cpp
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerDataFlowTrace.cpp
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerDataFlowTrace.cpp (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerDataFlowTrace.cpp (revision 351984)
@@ -0,0 +1,281 @@
+//===- FuzzerDataFlowTrace.cpp - DataFlowTrace ---*- C++ -* ===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// fuzzer::DataFlowTrace
+//===----------------------------------------------------------------------===//
+
+#include "FuzzerDataFlowTrace.h"
+
+#include "FuzzerCommand.h"
+#include "FuzzerIO.h"
+#include "FuzzerRandom.h"
+#include "FuzzerSHA1.h"
+#include "FuzzerUtil.h"
+
+#include <cstdlib>
+#include <fstream>
+#include <numeric>
+#include <queue>
+#include <sstream>
+#include <string>
+#include <unordered_map>
+#include <unordered_set>
+#include <vector>
+
+namespace fuzzer {
+static const char *kFunctionsTxt = "functions.txt";
+
+bool BlockCoverage::AppendCoverage(const std::string &S) {
+ std::stringstream SS(S);
+ return AppendCoverage(SS);
+}
+
+// Coverage lines have this form:
+// CN X Y Z T
+// where N is the number of the function, T is the total number of instrumented
+// BBs, and X,Y,Z, if present, are the indices of covered BBs.
+// BB #0, which is the entry block, is not explicitly listed.
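+// For example, the line
+//   C5 1 3 10
+// says that function #5 has 10 instrumented BBs, of which BBs #1 and #3
+// (plus the implicit entry BB #0) were covered by this input.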
+bool BlockCoverage::AppendCoverage(std::istream &IN) {
+ std::string L;
+ while (std::getline(IN, L, '\n')) {
+ if (L.empty())
+ continue;
+ std::stringstream SS(L.c_str() + 1);
+ size_t FunctionId = 0;
+ SS >> FunctionId;
+ if (L[0] == 'F') {
+ FunctionsWithDFT.insert(FunctionId);
+ continue;
+ }
+ if (L[0] != 'C') continue;
+ Vector<uint32_t> CoveredBlocks;
+ while (true) {
+ uint32_t BB = 0;
+ SS >> BB;
+ if (!SS) break;
+ CoveredBlocks.push_back(BB);
+ }
+ if (CoveredBlocks.empty()) return false;
+ uint32_t NumBlocks = CoveredBlocks.back();
+ CoveredBlocks.pop_back();
+ for (auto BB : CoveredBlocks)
+ if (BB >= NumBlocks) return false;
+ auto It = Functions.find(FunctionId);
+ auto &Counters =
+ It == Functions.end()
+ ? Functions.insert({FunctionId, Vector<uint32_t>(NumBlocks)})
+ .first->second
+ : It->second;
+
+ if (Counters.size() != NumBlocks) return false; // wrong number of blocks.
+
+ Counters[0]++;
+ for (auto BB : CoveredBlocks)
+ Counters[BB]++;
+ }
+ return true;
+}
+
+// Assign weights to each function.
+// General principles:
+// * any uncovered function gets weight 0.
+// * a function with lots of uncovered blocks gets bigger weight.
+// * a function whose code is executed less frequently gets a bigger weight.
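+// Combined, as implemented below:
+//   Weight = (HasDFT ? 1000 : 1) * (NumberOfUncoveredBlocks + 1)
+//            / SmallestNonZeroCounter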
+Vector<double> BlockCoverage::FunctionWeights(size_t NumFunctions) const {
+ Vector<double> Res(NumFunctions);
+ for (auto It : Functions) {
+ auto FunctionID = It.first;
+ auto Counters = It.second;
+ assert(FunctionID < NumFunctions);
+ auto &Weight = Res[FunctionID];
+ // Give higher weight if the function has a DFT.
+ Weight = FunctionsWithDFT.count(FunctionID) ? 1000. : 1;
+ // Give higher weight to functions with less frequently seen basic blocks.
+ Weight /= SmallestNonZeroCounter(Counters);
+ // Give higher weight to functions with the most uncovered basic blocks.
+ Weight *= NumberOfUncoveredBlocks(Counters) + 1;
+ }
+ return Res;
+}
+
+void DataFlowTrace::ReadCoverage(const std::string &DirPath) {
+ Vector<SizedFile> Files;
+ GetSizedFilesFromDir(DirPath, &Files);
+ for (auto &SF : Files) {
+ auto Name = Basename(SF.File);
+ if (Name == kFunctionsTxt) continue;
+ if (!CorporaHashes.count(Name)) continue;
+ std::ifstream IF(SF.File);
+ Coverage.AppendCoverage(IF);
+ }
+}
+
+static void DFTStringAppendToVector(Vector<uint8_t> *DFT,
+ const std::string &DFTString) {
+ assert(DFT->size() == DFTString.size());
+ for (size_t I = 0, Len = DFT->size(); I < Len; I++)
+ (*DFT)[I] = DFTString[I] == '1';
+}
+
+// Converts a string of '0' and '1' characters into a Vector<uint8_t>.
+static Vector<uint8_t> DFTStringToVector(const std::string &DFTString) {
+ Vector<uint8_t> DFT(DFTString.size());
+ DFTStringAppendToVector(&DFT, DFTString);
+ return DFT;
+}
+
+static bool ParseError(const char *Err, const std::string &Line) {
+ Printf("DataFlowTrace: parse error: %s: Line: %s\n", Err, Line.c_str());
+ return false;
+}
+
+// TODO(metzman): replace std::string with std::string_view for
+// better performance. Need to figure out how to use string_view on Windows.
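+// For example, ParseDFTLine("F2 0110", &N, &S) sets N = 2 and S = "0110"
+// (a string of '0'/'1' flags) and returns true; lines that don't start
+// with 'F' (i.e. coverage lines) are ignored.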
+static bool ParseDFTLine(const std::string &Line, size_t *FunctionNum,
+ std::string *DFTString) {
+ if (!Line.empty() && Line[0] != 'F')
+ return false; // Ignore coverage.
+ size_t SpacePos = Line.find(' ');
+ if (SpacePos == std::string::npos)
+ return ParseError("no space in the trace line", Line);
+ if (Line.empty() || Line[0] != 'F')
+ return ParseError("the trace line doesn't start with 'F'", Line);
+ *FunctionNum = std::atol(Line.c_str() + 1);
+ const char *Beg = Line.c_str() + SpacePos + 1;
+ const char *End = Line.c_str() + Line.size();
+ assert(Beg < End);
+ size_t Len = End - Beg;
+ for (size_t I = 0; I < Len; I++) {
+ if (Beg[I] != '0' && Beg[I] != '1')
+ return ParseError("the trace should contain only 0 or 1", Line);
+ }
+ *DFTString = Beg;
+ return true;
+}
+
+bool DataFlowTrace::Init(const std::string &DirPath, std::string *FocusFunction,
+ Vector<SizedFile> &CorporaFiles, Random &Rand) {
+ if (DirPath.empty()) return false;
+ Printf("INFO: DataFlowTrace: reading from '%s'\n", DirPath.c_str());
+ Vector<SizedFile> Files;
+ GetSizedFilesFromDir(DirPath, &Files);
+ std::string L;
+ size_t FocusFuncIdx = SIZE_MAX;
+ Vector<std::string> FunctionNames;
+
+ // Collect the hashes of the corpus files.
+ for (auto &SF : CorporaFiles)
+ CorporaHashes.insert(Hash(FileToVector(SF.File)));
+
+ // Read functions.txt
+ std::ifstream IF(DirPlusFile(DirPath, kFunctionsTxt));
+ size_t NumFunctions = 0;
+ while (std::getline(IF, L, '\n')) {
+ FunctionNames.push_back(L);
+ NumFunctions++;
+ if (*FocusFunction == L)
+ FocusFuncIdx = NumFunctions - 1;
+ }
+ if (!NumFunctions)
+ return false;
+
+ if (*FocusFunction == "auto") {
+ // AUTOFOCUS works like this:
+ // * reads the coverage data from the DFT files.
+ // * assigns weights to functions based on coverage.
+ // * chooses a random function according to the weights.
+ ReadCoverage(DirPath);
+ auto Weights = Coverage.FunctionWeights(NumFunctions);
+ Vector<double> Intervals(NumFunctions + 1);
+ std::iota(Intervals.begin(), Intervals.end(), 0);
+ auto Distribution = std::piecewise_constant_distribution<double>(
+ Intervals.begin(), Intervals.end(), Weights.begin());
+ FocusFuncIdx = static_cast<size_t>(Distribution(Rand));
+ assert(FocusFuncIdx < NumFunctions);
+ *FocusFunction = FunctionNames[FocusFuncIdx];
+ Printf("INFO: AUTOFOCUS: %zd %s\n", FocusFuncIdx,
+ FunctionNames[FocusFuncIdx].c_str());
+ for (size_t i = 0; i < NumFunctions; i++) {
+ if (!Weights[i]) continue;
+ Printf(" [%zd] W %g\tBB-tot %u\tBB-cov %u\tEntryFreq %u:\t%s\n", i,
+ Weights[i], Coverage.GetNumberOfBlocks(i),
+ Coverage.GetNumberOfCoveredBlocks(i), Coverage.GetCounter(i, 0),
+ FunctionNames[i].c_str());
+ }
+ }
+
+ if (!NumFunctions || FocusFuncIdx == SIZE_MAX || Files.size() <= 1)
+ return false;
+
+ // Read traces.
+ size_t NumTraceFiles = 0;
+ size_t NumTracesWithFocusFunction = 0;
+ for (auto &SF : Files) {
+ auto Name = Basename(SF.File);
+ if (Name == kFunctionsTxt) continue;
+ if (!CorporaHashes.count(Name)) continue; // not in the corpus.
+ NumTraceFiles++;
+ // Printf("=== %s\n", Name.c_str());
+ std::ifstream IF(SF.File);
+ while (std::getline(IF, L, '\n')) {
+ size_t FunctionNum = 0;
+ std::string DFTString;
+ if (ParseDFTLine(L, &FunctionNum, &DFTString) &&
+ FunctionNum == FocusFuncIdx) {
+ NumTracesWithFocusFunction++;
+
+ if (FunctionNum >= NumFunctions)
+ return ParseError("N is greater than the number of functions", L);
+ Traces[Name] = DFTStringToVector(DFTString);
+ // Print just a few small traces.
+ if (NumTracesWithFocusFunction <= 3 && DFTString.size() <= 16)
+ Printf("%s => |%s|\n", Name.c_str(), DFTString.c_str());
+ break; // No need to parse the following lines.
+ }
+ }
+ }
+ Printf("INFO: DataFlowTrace: %zd trace files, %zd functions, "
+ "%zd traces with focus function\n",
+ NumTraceFiles, NumFunctions, NumTracesWithFocusFunction);
+ return NumTraceFiles > 0;
+}
+
+int CollectDataFlow(const std::string &DFTBinary, const std::string &DirPath,
+ const Vector<SizedFile> &CorporaFiles) {
+ Printf("INFO: collecting data flow: bin: %s dir: %s files: %zd\n",
+ DFTBinary.c_str(), DirPath.c_str(), CorporaFiles.size());
+ static char DFSanEnv[] = "DFSAN_OPTIONS=fast16labels=1:warn_unimplemented=0";
+ putenv(DFSanEnv);
+ MkDir(DirPath);
+ for (auto &F : CorporaFiles) {
+ // For every input F we need to collect the data flow and the coverage.
+ // Data flow collection may fail if we request too many DFSan tags at once.
+ // So, we start from requesting all tags in range [0,Size) and if that fails
+ // we then request tags in [0,Size/2) and [Size/2, Size), and so on.
+ auto OutPath = DirPlusFile(DirPath, Hash(FileToVector(F.File)));
+ // Function number => DFT (declared here but not yet filled in;
+ // see the splitting strategy described above).
+ std::unordered_map<size_t, Vector<uint8_t>> DFTMap;
+ std::unordered_set<std::string> Cov;
+ Command Cmd;
+ Cmd.addArgument(DFTBinary);
+ Cmd.addArgument(F.File);
+ Cmd.addArgument(OutPath);
+ Printf("CMD: %s\n", Cmd.toString().c_str());
+ ExecuteCommand(Cmd);
+ }
+ // Write functions.txt if it's currently empty or doesn't exist.
+ auto FunctionsTxtPath = DirPlusFile(DirPath, kFunctionsTxt);
+ if (FileToString(FunctionsTxtPath).empty()) {
+ Command Cmd;
+ Cmd.addArgument(DFTBinary);
+ Cmd.setOutputFile(FunctionsTxtPath);
+ ExecuteCommand(Cmd);
+ }
+ return 0;
+}
+
+} // namespace fuzzer
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerDataFlowTrace.cpp
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerDataFlowTrace.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerDataFlowTrace.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerDataFlowTrace.h (revision 351984)
@@ -0,0 +1,135 @@
+//===- FuzzerDataFlowTrace.h - Internal header for the Fuzzer ---*- C++ -* ===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// fuzzer::DataFlowTrace; reads and handles a data-flow trace.
+//
+// A data flow trace is generated by e.g. dataflow/DataFlow.cpp
+// and is stored on disk in a separate directory.
+//
+// The trace dir contains a file 'functions.txt' which lists function names,
+// one per line, e.g.
+// ==> functions.txt <==
+// Func2
+// LLVMFuzzerTestOneInput
+// Func1
+//
+// All other files in the dir are the traces, see dataflow/DataFlow.cpp.
+// The name of each trace file is the sha1 of the input used to generate it.
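+// For example, a trace file
+// ==> <sha1-of-input> <==
+// F2 0110
+// C2 1 2 4
+// holds an 'F' line with the data-flow trace of function #2 and 'C' lines
+// with per-function coverage (see BlockCoverage::AppendCoverage).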
+//
+// Current status:
+// the data is parsed and the summary is printed, but the data is not yet
+// used in any other way.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_FUZZER_DATA_FLOW_TRACE
+#define LLVM_FUZZER_DATA_FLOW_TRACE
+
+#include "FuzzerDefs.h"
+#include "FuzzerIO.h"
+
+#include <unordered_map>
+#include <unordered_set>
+#include <vector>
+#include <string>
+
+namespace fuzzer {
+
+int CollectDataFlow(const std::string &DFTBinary, const std::string &DirPath,
+ const Vector<SizedFile> &CorporaFiles);
+
+class BlockCoverage {
+ public:
+ bool AppendCoverage(std::istream &IN);
+ bool AppendCoverage(const std::string &S);
+
+ size_t NumCoveredFunctions() const { return Functions.size(); }
+
+ uint32_t GetCounter(size_t FunctionId, size_t BasicBlockId) {
+ auto It = Functions.find(FunctionId);
+ if (It == Functions.end()) return 0;
+ const auto &Counters = It->second;
+ if (BasicBlockId < Counters.size())
+ return Counters[BasicBlockId];
+ return 0;
+ }
+
+ uint32_t GetNumberOfBlocks(size_t FunctionId) {
+ auto It = Functions.find(FunctionId);
+ if (It == Functions.end()) return 0;
+ const auto &Counters = It->second;
+ return Counters.size();
+ }
+
+ uint32_t GetNumberOfCoveredBlocks(size_t FunctionId) {
+ auto It = Functions.find(FunctionId);
+ if (It == Functions.end()) return 0;
+ const auto &Counters = It->second;
+ uint32_t Result = 0;
+ for (auto Cnt: Counters)
+ if (Cnt)
+ Result++;
+ return Result;
+ }
+
+ Vector<double> FunctionWeights(size_t NumFunctions) const;
+ void clear() { Functions.clear(); }
+
+ private:
+
+ typedef Vector<uint32_t> CoverageVector;
+
+ uint32_t NumberOfCoveredBlocks(const CoverageVector &Counters) const {
+ uint32_t Res = 0;
+ for (auto Cnt : Counters)
+ if (Cnt)
+ Res++;
+ return Res;
+ }
+
+ uint32_t NumberOfUncoveredBlocks(const CoverageVector &Counters) const {
+ return Counters.size() - NumberOfCoveredBlocks(Counters);
+ }
+
+ uint32_t SmallestNonZeroCounter(const CoverageVector &Counters) const {
+ assert(!Counters.empty());
+ uint32_t Res = Counters[0];
+ for (auto Cnt : Counters)
+ if (Cnt)
+ Res = Min(Res, Cnt);
+ assert(Res);
+ return Res;
+ }
+
+ // Function ID => vector of counters.
+ // Each counter represents how many input files trigger the given basic block.
+ std::unordered_map<size_t, CoverageVector> Functions;
+ // Functions that have DFT entry.
+ std::unordered_set<size_t> FunctionsWithDFT;
+};
+
+class DataFlowTrace {
+ public:
+ void ReadCoverage(const std::string &DirPath);
+ bool Init(const std::string &DirPath, std::string *FocusFunction,
+ Vector<SizedFile> &CorporaFiles, Random &Rand);
+ void Clear() { Traces.clear(); }
+ const Vector<uint8_t> *Get(const std::string &InputSha1) const {
+ auto It = Traces.find(InputSha1);
+ if (It != Traces.end())
+ return &It->second;
+ return nullptr;
+ }
+
+ private:
+ // Input's sha1 => DFT for the FocusFunction.
+ std::unordered_map<std::string, Vector<uint8_t> > Traces;
+ BlockCoverage Coverage;
+ std::unordered_set<std::string> CorporaHashes;
+};
+} // namespace fuzzer
+
+#endif // LLVM_FUZZER_DATA_FLOW_TRACE
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerDataFlowTrace.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerDefs.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerDefs.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerDefs.h (revision 351984)
@@ -0,0 +1,208 @@
+//===- FuzzerDefs.h - Internal header for the Fuzzer ------------*- C++ -* ===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// Basic definitions.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_FUZZER_DEFS_H
+#define LLVM_FUZZER_DEFS_H
+
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+#include <string>
+#include <vector>
+#include <set>
+#include <memory>
+
+// Platform detection.
+#ifdef __linux__
+#define LIBFUZZER_APPLE 0
+#define LIBFUZZER_FUCHSIA 0
+#define LIBFUZZER_LINUX 1
+#define LIBFUZZER_NETBSD 0
+#define LIBFUZZER_FREEBSD 0
+#define LIBFUZZER_OPENBSD 0
+#define LIBFUZZER_WINDOWS 0
+#elif __APPLE__
+#define LIBFUZZER_APPLE 1
+#define LIBFUZZER_FUCHSIA 0
+#define LIBFUZZER_LINUX 0
+#define LIBFUZZER_NETBSD 0
+#define LIBFUZZER_FREEBSD 0
+#define LIBFUZZER_OPENBSD 0
+#define LIBFUZZER_WINDOWS 0
+#elif __NetBSD__
+#define LIBFUZZER_APPLE 0
+#define LIBFUZZER_FUCHSIA 0
+#define LIBFUZZER_LINUX 0
+#define LIBFUZZER_NETBSD 1
+#define LIBFUZZER_FREEBSD 0
+#define LIBFUZZER_OPENBSD 0
+#define LIBFUZZER_WINDOWS 0
+#elif __FreeBSD__
+#define LIBFUZZER_APPLE 0
+#define LIBFUZZER_FUCHSIA 0
+#define LIBFUZZER_LINUX 0
+#define LIBFUZZER_NETBSD 0
+#define LIBFUZZER_FREEBSD 1
+#define LIBFUZZER_OPENBSD 0
+#define LIBFUZZER_WINDOWS 0
+#elif __OpenBSD__
+#define LIBFUZZER_APPLE 0
+#define LIBFUZZER_FUCHSIA 0
+#define LIBFUZZER_LINUX 0
+#define LIBFUZZER_NETBSD 0
+#define LIBFUZZER_FREEBSD 0
+#define LIBFUZZER_OPENBSD 1
+#define LIBFUZZER_WINDOWS 0
+#elif _WIN32
+#define LIBFUZZER_APPLE 0
+#define LIBFUZZER_FUCHSIA 0
+#define LIBFUZZER_LINUX 0
+#define LIBFUZZER_NETBSD 0
+#define LIBFUZZER_FREEBSD 0
+#define LIBFUZZER_OPENBSD 0
+#define LIBFUZZER_WINDOWS 1
+#elif __Fuchsia__
+#define LIBFUZZER_APPLE 0
+#define LIBFUZZER_FUCHSIA 1
+#define LIBFUZZER_LINUX 0
+#define LIBFUZZER_NETBSD 0
+#define LIBFUZZER_FREEBSD 0
+#define LIBFUZZER_OPENBSD 0
+#define LIBFUZZER_WINDOWS 0
+#else
+#error "Support for your platform has not been implemented"
+#endif
+
+#if defined(_MSC_VER) && !defined(__clang__)
+// MSVC compiler is being used.
+#define LIBFUZZER_MSVC 1
+#else
+#define LIBFUZZER_MSVC 0
+#endif
+
+#ifndef __has_attribute
+# define __has_attribute(x) 0
+#endif
+
+#define LIBFUZZER_POSIX \
+ (LIBFUZZER_APPLE || LIBFUZZER_LINUX || LIBFUZZER_NETBSD || \
+ LIBFUZZER_FREEBSD || LIBFUZZER_OPENBSD)
+
+#ifdef __x86_64
+# if __has_attribute(target)
+# define ATTRIBUTE_TARGET_POPCNT __attribute__((target("popcnt")))
+# else
+# define ATTRIBUTE_TARGET_POPCNT
+# endif
+#else
+# define ATTRIBUTE_TARGET_POPCNT
+#endif
+
+
+#ifdef __clang__ // avoid gcc warning.
+# if __has_attribute(no_sanitize)
+# define ATTRIBUTE_NO_SANITIZE_MEMORY __attribute__((no_sanitize("memory")))
+# else
+# define ATTRIBUTE_NO_SANITIZE_MEMORY
+# endif
+# define ALWAYS_INLINE __attribute__((always_inline))
+#else
+# define ATTRIBUTE_NO_SANITIZE_MEMORY
+# define ALWAYS_INLINE
+#endif // __clang__
+
+#if LIBFUZZER_WINDOWS
+#define ATTRIBUTE_NO_SANITIZE_ADDRESS
+#else
+#define ATTRIBUTE_NO_SANITIZE_ADDRESS __attribute__((no_sanitize_address))
+#endif
+
+#if LIBFUZZER_WINDOWS
+#define ATTRIBUTE_ALIGNED(X) __declspec(align(X))
+#define ATTRIBUTE_INTERFACE __declspec(dllexport)
+// This is used for __sancov_lowest_stack which is needed for
+// -fsanitize-coverage=stack-depth. That feature is not yet available on
+// Windows, so make the symbol static to avoid linking errors.
+#define ATTRIBUTES_INTERFACE_TLS_INITIAL_EXEC static
+#define ATTRIBUTE_NOINLINE __declspec(noinline)
+#else
+#define ATTRIBUTE_ALIGNED(X) __attribute__((aligned(X)))
+#define ATTRIBUTE_INTERFACE __attribute__((visibility("default")))
+#define ATTRIBUTES_INTERFACE_TLS_INITIAL_EXEC \
+ ATTRIBUTE_INTERFACE __attribute__((tls_model("initial-exec"))) thread_local
+
+#define ATTRIBUTE_NOINLINE __attribute__((noinline))
+#endif
+
+#if defined(__has_feature)
+# if __has_feature(address_sanitizer)
+# define ATTRIBUTE_NO_SANITIZE_ALL ATTRIBUTE_NO_SANITIZE_ADDRESS
+# elif __has_feature(memory_sanitizer)
+# define ATTRIBUTE_NO_SANITIZE_ALL ATTRIBUTE_NO_SANITIZE_MEMORY
+# else
+# define ATTRIBUTE_NO_SANITIZE_ALL
+# endif
+#else
+# define ATTRIBUTE_NO_SANITIZE_ALL
+#endif
+
+namespace fuzzer {
+
+template <class T> T Min(T a, T b) { return a < b ? a : b; }
+template <class T> T Max(T a, T b) { return a > b ? a : b; }
+
+class Random;
+class Dictionary;
+class DictionaryEntry;
+class MutationDispatcher;
+struct FuzzingOptions;
+class InputCorpus;
+struct InputInfo;
+struct ExternalFunctions;
+
+// Global interface to functions that may or may not be available.
+extern ExternalFunctions *EF;
+
+// We are using a custom allocator to give a different symbol name to STL
+// containers in order to avoid ODR violations.
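+// For example, Vector<uint8_t> below is a distinct type from
+// std::vector<uint8_t> and mangles differently, so the fuzzer's own
+// containers cannot collide with those of instrumented user code.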
+template<typename T>
+ class fuzzer_allocator: public std::allocator<T> {
+ public:
+ fuzzer_allocator() = default;
+
+ template<class U>
+ fuzzer_allocator(const fuzzer_allocator<U>&) {}
+
+ template<class Other>
+ struct rebind { typedef fuzzer_allocator<Other> other; };
+ };
+
+template<typename T>
+using Vector = std::vector<T, fuzzer_allocator<T>>;
+
+template<typename T>
+using Set = std::set<T, std::less<T>, fuzzer_allocator<T>>;
+
+typedef Vector<uint8_t> Unit;
+typedef Vector<Unit> UnitVector;
+typedef int (*UserCallback)(const uint8_t *Data, size_t Size);
+
+int FuzzerDriver(int *argc, char ***argv, UserCallback Callback);
+
+uint8_t *ExtraCountersBegin();
+uint8_t *ExtraCountersEnd();
+void ClearExtraCounters();
+
+extern bool RunningUserCallback;
+
+} // namespace fuzzer
+
+#endif // LLVM_FUZZER_DEFS_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerDefs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerDictionary.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerDictionary.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerDictionary.h (revision 351984)
@@ -0,0 +1,118 @@
+//===- FuzzerDictionary.h - Internal header for the Fuzzer ------*- C++ -* ===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// fuzzer::Dictionary
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_FUZZER_DICTIONARY_H
+#define LLVM_FUZZER_DICTIONARY_H
+
+#include "FuzzerDefs.h"
+#include "FuzzerIO.h"
+#include "FuzzerUtil.h"
+#include <algorithm>
+#include <limits>
+
+namespace fuzzer {
+// A simple POD sized array of bytes.
+template <size_t kMaxSizeT> class FixedWord {
+public:
+ static const size_t kMaxSize = kMaxSizeT;
+ FixedWord() {}
+ FixedWord(const uint8_t *B, uint8_t S) { Set(B, S); }
+
+ void Set(const uint8_t *B, uint8_t S) {
+ assert(S <= kMaxSize);
+ memcpy(Data, B, S);
+ Size = S;
+ }
+
+ bool operator==(const FixedWord<kMaxSize> &w) const {
+ return Size == w.Size && 0 == memcmp(Data, w.Data, Size);
+ }
+
+ static size_t GetMaxSize() { return kMaxSize; }
+ const uint8_t *data() const { return Data; }
+ uint8_t size() const { return Size; }
+
+private:
+ uint8_t Size = 0;
+ uint8_t Data[kMaxSize];
+};
+
+typedef FixedWord<64> Word;
+
+class DictionaryEntry {
+ public:
+ DictionaryEntry() {}
+ DictionaryEntry(Word W) : W(W) {}
+ DictionaryEntry(Word W, size_t PositionHint) : W(W), PositionHint(PositionHint) {}
+ const Word &GetW() const { return W; }
+
+ bool HasPositionHint() const { return PositionHint != std::numeric_limits<size_t>::max(); }
+ size_t GetPositionHint() const {
+ assert(HasPositionHint());
+ return PositionHint;
+ }
+ void IncUseCount() { UseCount++; }
+ void IncSuccessCount() { SuccessCount++; }
+ size_t GetUseCount() const { return UseCount; }
+ size_t GetSuccessCount() const {return SuccessCount; }
+
+ void Print(const char *PrintAfter = "\n") {
+ PrintASCII(W.data(), W.size());
+ if (HasPositionHint())
+ Printf("@%zd", GetPositionHint());
+ Printf("%s", PrintAfter);
+ }
+
+private:
+ Word W;
+ size_t PositionHint = std::numeric_limits<size_t>::max();
+ size_t UseCount = 0;
+ size_t SuccessCount = 0;
+};
+
+class Dictionary {
+ public:
+ static const size_t kMaxDictSize = 1 << 14;
+
+ bool ContainsWord(const Word &W) const {
+ return std::any_of(begin(), end(), [&](const DictionaryEntry &DE) {
+ return DE.GetW() == W;
+ });
+ }
+ const DictionaryEntry *begin() const { return &DE[0]; }
+ const DictionaryEntry *end() const { return begin() + Size; }
+ DictionaryEntry & operator[] (size_t Idx) {
+ assert(Idx < Size);
+ return DE[Idx];
+ }
+ void push_back(DictionaryEntry DE) {
+ if (Size < kMaxDictSize)
+ this->DE[Size++] = DE;
+ }
+ void clear() { Size = 0; }
+ bool empty() const { return Size == 0; }
+ size_t size() const { return Size; }
+
+private:
+ DictionaryEntry DE[kMaxDictSize];
+ size_t Size = 0;
+};
+
+// Parses one dictionary entry.
+// If successful, writes the entry to U and returns true,
+// otherwise returns false.
+bool ParseOneDictionaryEntry(const std::string &Str, Unit *U);
+// Parses the dictionary file, fills Units, returns true iff all lines
+// were parsed successfully.
+bool ParseDictionaryFile(const std::string &Text, Vector<Unit> *Units);
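+// Dictionary files use the AFL-compatible format, one entry per line, e.g.:
+//   # lines starting with '#' are comments
+//   kw1="GET"
+//   kw2="\x0d\x0a"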
+
+} // namespace fuzzer
+
+#endif // LLVM_FUZZER_DICTIONARY_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerDictionary.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerDriver.cpp
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerDriver.cpp (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerDriver.cpp (revision 351984)
@@ -0,0 +1,838 @@
+//===- FuzzerDriver.cpp - FuzzerDriver function and flags -----------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// FuzzerDriver and flag parsing.
+//===----------------------------------------------------------------------===//
+
+#include "FuzzerCommand.h"
+#include "FuzzerCorpus.h"
+#include "FuzzerFork.h"
+#include "FuzzerIO.h"
+#include "FuzzerInterface.h"
+#include "FuzzerInternal.h"
+#include "FuzzerMerge.h"
+#include "FuzzerMutate.h"
+#include "FuzzerRandom.h"
+#include "FuzzerTracePC.h"
+#include <algorithm>
+#include <atomic>
+#include <chrono>
+#include <cstdlib>
+#include <cstring>
+#include <mutex>
+#include <string>
+#include <thread>
+#include <fstream>
+
+// This function should be present in libFuzzer so that the client
+// binary can test for its existence.
+#if LIBFUZZER_MSVC
+extern "C" void __libfuzzer_is_present() {}
+#pragma comment(linker, "/include:__libfuzzer_is_present")
+#else
+extern "C" __attribute__((used)) void __libfuzzer_is_present() {}
+#endif // LIBFUZZER_MSVC
+
+namespace fuzzer {
+
+// Program arguments.
+struct FlagDescription {
+ const char *Name;
+ const char *Description;
+ int Default;
+ int *IntFlag;
+ const char **StrFlag;
+ unsigned int *UIntFlag;
+};
+
+struct {
+#define FUZZER_DEPRECATED_FLAG(Name)
+#define FUZZER_FLAG_INT(Name, Default, Description) int Name;
+#define FUZZER_FLAG_UNSIGNED(Name, Default, Description) unsigned int Name;
+#define FUZZER_FLAG_STRING(Name, Description) const char *Name;
+#include "FuzzerFlags.def"
+#undef FUZZER_DEPRECATED_FLAG
+#undef FUZZER_FLAG_INT
+#undef FUZZER_FLAG_UNSIGNED
+#undef FUZZER_FLAG_STRING
+} Flags;
+
+static const FlagDescription FlagDescriptions [] {
+#define FUZZER_DEPRECATED_FLAG(Name) \
+ {#Name, "Deprecated; don't use", 0, nullptr, nullptr, nullptr},
+#define FUZZER_FLAG_INT(Name, Default, Description) \
+ {#Name, Description, Default, &Flags.Name, nullptr, nullptr},
+#define FUZZER_FLAG_UNSIGNED(Name, Default, Description) \
+ {#Name, Description, static_cast<int>(Default), \
+ nullptr, nullptr, &Flags.Name},
+#define FUZZER_FLAG_STRING(Name, Description) \
+ {#Name, Description, 0, nullptr, &Flags.Name, nullptr},
+#include "FuzzerFlags.def"
+#undef FUZZER_DEPRECATED_FLAG
+#undef FUZZER_FLAG_INT
+#undef FUZZER_FLAG_UNSIGNED
+#undef FUZZER_FLAG_STRING
+};
+
+static const size_t kNumFlags =
+ sizeof(FlagDescriptions) / sizeof(FlagDescriptions[0]);
+
+static Vector<std::string> *Inputs;
+static std::string *ProgName;
+
+static void PrintHelp() {
+ Printf("Usage:\n");
+ auto Prog = ProgName->c_str();
+ Printf("\nTo run fuzzing pass 0 or more directories.\n");
+ Printf("%s [-flag1=val1 [-flag2=val2 ...] ] [dir1 [dir2 ...] ]\n", Prog);
+
+ Printf("\nTo run individual tests without fuzzing pass 1 or more files:\n");
+ Printf("%s [-flag1=val1 [-flag2=val2 ...] ] file1 [file2 ...]\n", Prog);
+
+ Printf("\nFlags: (strictly in form -flag=value)\n");
+ size_t MaxFlagLen = 0;
+ for (size_t F = 0; F < kNumFlags; F++)
+ MaxFlagLen = std::max(strlen(FlagDescriptions[F].Name), MaxFlagLen);
+
+ for (size_t F = 0; F < kNumFlags; F++) {
+ const auto &D = FlagDescriptions[F];
+ if (strstr(D.Description, "internal flag") == D.Description) continue;
+ Printf(" %s", D.Name);
+ for (size_t i = 0, n = MaxFlagLen - strlen(D.Name); i < n; i++)
+ Printf(" ");
+ Printf("\t");
+ Printf("%d\t%s\n", D.Default, D.Description);
+ }
+ Printf("\nFlags starting with '--' will be ignored and "
+ "will be passed verbatim to subprocesses.\n");
+}
+
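+// Returns a pointer to the value part of a parameter of the form
+// "-Name=Value" (e.g. FlagValue("-runs=100", "runs") yields "100"),
+// or nullptr if Param does not match Name.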
+static const char *FlagValue(const char *Param, const char *Name) {
+ size_t Len = strlen(Name);
+ if (Param[0] == '-' && strstr(Param + 1, Name) == Param + 1 &&
+ Param[Len + 1] == '=')
+ return &Param[Len + 2];
+ return nullptr;
+}
+
+// Avoid calling stol as it triggers a bug in clang/glibc build.
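+// Parses an optional leading '-' followed by decimal digits, stopping at
+// the first non-digit: e.g. MyStol("-42abc") == -42, MyStol("100") == 100.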
+static long MyStol(const char *Str) {
+ long Res = 0;
+ long Sign = 1;
+ if (*Str == '-') {
+ Str++;
+ Sign = -1;
+ }
+ for (size_t i = 0; Str[i]; i++) {
+ char Ch = Str[i];
+ if (Ch < '0' || Ch > '9')
+ return Res * Sign;
+ Res = Res * 10 + (Ch - '0');
+ }
+ return Res * Sign;
+}
+
+static bool ParseOneFlag(const char *Param) {
+ if (Param[0] != '-') return false;
+ if (Param[1] == '-') {
+ static bool PrintedWarning = false;
+ if (!PrintedWarning) {
+ PrintedWarning = true;
+ Printf("INFO: libFuzzer ignores flags that start with '--'\n");
+ }
+ for (size_t F = 0; F < kNumFlags; F++)
+ if (FlagValue(Param + 1, FlagDescriptions[F].Name))
+ Printf("WARNING: did you mean '%s' (single dash)?\n", Param + 1);
+ return true;
+ }
+ for (size_t F = 0; F < kNumFlags; F++) {
+ const char *Name = FlagDescriptions[F].Name;
+ const char *Str = FlagValue(Param, Name);
+ if (Str) {
+ if (FlagDescriptions[F].IntFlag) {
+ int Val = MyStol(Str);
+ *FlagDescriptions[F].IntFlag = Val;
+ if (Flags.verbosity >= 2)
+ Printf("Flag: %s %d\n", Name, Val);
+ return true;
+ } else if (FlagDescriptions[F].UIntFlag) {
+ unsigned int Val = std::stoul(Str);
+ *FlagDescriptions[F].UIntFlag = Val;
+ if (Flags.verbosity >= 2)
+ Printf("Flag: %s %u\n", Name, Val);
+ return true;
+ } else if (FlagDescriptions[F].StrFlag) {
+ *FlagDescriptions[F].StrFlag = Str;
+ if (Flags.verbosity >= 2)
+ Printf("Flag: %s %s\n", Name, Str);
+ return true;
+ } else { // Deprecated flag.
+ Printf("Flag: %s: deprecated, don't use\n", Name);
+ return true;
+ }
+ }
+ }
+ Printf("\n\nWARNING: unrecognized flag '%s'; "
+ "use -help=1 to list all flags\n\n", Param);
+ return true;
+}
+
+// We don't use any flag-parsing library, to minimize dependencies.
+static void ParseFlags(const Vector<std::string> &Args,
+ const ExternalFunctions *EF) {
+ for (size_t F = 0; F < kNumFlags; F++) {
+ if (FlagDescriptions[F].IntFlag)
+ *FlagDescriptions[F].IntFlag = FlagDescriptions[F].Default;
+ if (FlagDescriptions[F].UIntFlag)
+ *FlagDescriptions[F].UIntFlag =
+ static_cast<unsigned int>(FlagDescriptions[F].Default);
+ if (FlagDescriptions[F].StrFlag)
+ *FlagDescriptions[F].StrFlag = nullptr;
+ }
+
+ // Disable len_control by default if LLVMFuzzerCustomMutator is used.
+ if (EF->LLVMFuzzerCustomMutator)
+ Flags.len_control = 0;
+
+ Inputs = new Vector<std::string>;
+ for (size_t A = 1; A < Args.size(); A++) {
+ if (ParseOneFlag(Args[A].c_str())) {
+ if (Flags.ignore_remaining_args)
+ break;
+ continue;
+ }
+ Inputs->push_back(Args[A]);
+ }
+}
+
+static std::mutex Mu;
+
+static void PulseThread() {
+ while (true) {
+ SleepSeconds(600);
+ std::lock_guard<std::mutex> Lock(Mu);
+ Printf("pulse...\n");
+ }
+}
+
+static void WorkerThread(const Command &BaseCmd, std::atomic<unsigned> *Counter,
+ unsigned NumJobs, std::atomic<bool> *HasErrors) {
+ while (true) {
+ unsigned C = (*Counter)++;
+ if (C >= NumJobs) break;
+ std::string Log = "fuzz-" + std::to_string(C) + ".log";
+ Command Cmd(BaseCmd);
+ Cmd.setOutputFile(Log);
+ Cmd.combineOutAndErr();
+ if (Flags.verbosity) {
+ std::string CommandLine = Cmd.toString();
+ Printf("%s\n", CommandLine.c_str());
+ }
+ int ExitCode = ExecuteCommand(Cmd);
+ if (ExitCode != 0)
+ *HasErrors = true;
+ std::lock_guard<std::mutex> Lock(Mu);
+ Printf("================== Job %u exited with exit code %d ============\n",
+ C, ExitCode);
+ fuzzer::CopyFileToErr(Log);
+ }
+}
+
+std::string CloneArgsWithoutX(const Vector<std::string> &Args,
+ const char *X1, const char *X2) {
+ std::string Cmd;
+ for (auto &S : Args) {
+ if (FlagValue(S.c_str(), X1) || FlagValue(S.c_str(), X2))
+ continue;
+ Cmd += S + " ";
+ }
+ return Cmd;
+}
+
+static int RunInMultipleProcesses(const Vector<std::string> &Args,
+ unsigned NumWorkers, unsigned NumJobs) {
+ std::atomic<unsigned> Counter(0);
+ std::atomic<bool> HasErrors(false);
+ Command Cmd(Args);
+ Cmd.removeFlag("jobs");
+ Cmd.removeFlag("workers");
+ Vector<std::thread> V;
+ std::thread Pulse(PulseThread);
+ Pulse.detach();
+ for (unsigned i = 0; i < NumWorkers; i++)
+ V.push_back(std::thread(WorkerThread, std::ref(Cmd), &Counter, NumJobs, &HasErrors));
+ for (auto &T : V)
+ T.join();
+ return HasErrors ? 1 : 0;
+}
+
+static void RssThread(Fuzzer *F, size_t RssLimitMb) {
+ while (true) {
+ SleepSeconds(1);
+ size_t Peak = GetPeakRSSMb();
+ if (Peak > RssLimitMb)
+ F->RssLimitCallback();
+ }
+}
+
+static void StartRssThread(Fuzzer *F, size_t RssLimitMb) {
+ if (!RssLimitMb) return;
+ std::thread T(RssThread, F, RssLimitMb);
+ T.detach();
+}
+
+int RunOneTest(Fuzzer *F, const char *InputFilePath, size_t MaxLen) {
+ Unit U = FileToVector(InputFilePath);
+ if (MaxLen && MaxLen < U.size())
+ U.resize(MaxLen);
+ F->ExecuteCallback(U.data(), U.size());
+ F->TryDetectingAMemoryLeak(U.data(), U.size(), true);
+ return 0;
+}
+
+static bool AllInputsAreFiles() {
+ if (Inputs->empty()) return false;
+ for (auto &Path : *Inputs)
+ if (!IsFile(Path))
+ return false;
+ return true;
+}
+
+static std::string GetDedupTokenFromFile(const std::string &Path) {
+ auto S = FileToString(Path);
+ auto Beg = S.find("DEDUP_TOKEN:");
+ if (Beg == std::string::npos)
+ return "";
+ auto End = S.find('\n', Beg);
+ if (End == std::string::npos)
+ return "";
+ return S.substr(Beg, End - Beg);
+}
+
+int CleanseCrashInput(const Vector<std::string> &Args,
+ const FuzzingOptions &Options) {
+ if (Inputs->size() != 1 || !Flags.exact_artifact_path) {
+ Printf("ERROR: -cleanse_crash should be given one input file and"
+ " -exact_artifact_path\n");
+ exit(1);
+ }
+ std::string InputFilePath = Inputs->at(0);
+ std::string OutputFilePath = Flags.exact_artifact_path;
+ Command Cmd(Args);
+ Cmd.removeFlag("cleanse_crash");
+
+ assert(Cmd.hasArgument(InputFilePath));
+ Cmd.removeArgument(InputFilePath);
+
+ auto LogFilePath = TempPath(".txt");
+ auto TmpFilePath = TempPath(".repro");
+ Cmd.addArgument(TmpFilePath);
+ Cmd.setOutputFile(LogFilePath);
+ Cmd.combineOutAndErr();
+
+ std::string CurrentFilePath = InputFilePath;
+ auto U = FileToVector(CurrentFilePath);
+ size_t Size = U.size();
+
+ const Vector<uint8_t> ReplacementBytes = {' ', 0xff};
+ for (int NumAttempts = 0; NumAttempts < 5; NumAttempts++) {
+ bool Changed = false;
+ for (size_t Idx = 0; Idx < Size; Idx++) {
+ Printf("CLEANSE[%d]: Trying to replace byte %zd of %zd\n", NumAttempts,
+ Idx, Size);
+ uint8_t OriginalByte = U[Idx];
+ if (ReplacementBytes.end() != std::find(ReplacementBytes.begin(),
+ ReplacementBytes.end(),
+ OriginalByte))
+ continue;
+ for (auto NewByte : ReplacementBytes) {
+ U[Idx] = NewByte;
+ WriteToFile(U, TmpFilePath);
+ auto ExitCode = ExecuteCommand(Cmd);
+ RemoveFile(TmpFilePath);
+ if (!ExitCode) {
+ U[Idx] = OriginalByte;
+ } else {
+ Changed = true;
+ Printf("CLEANSE: Replaced byte %zd with 0x%x\n", Idx, NewByte);
+ WriteToFile(U, OutputFilePath);
+ break;
+ }
+ }
+ }
+ if (!Changed) break;
+ }
+ RemoveFile(LogFilePath);
+ return 0;
+}
+
+int MinimizeCrashInput(const Vector<std::string> &Args,
+ const FuzzingOptions &Options) {
+ if (Inputs->size() != 1) {
+ Printf("ERROR: -minimize_crash should be given one input file\n");
+ exit(1);
+ }
+ std::string InputFilePath = Inputs->at(0);
+ Command BaseCmd(Args);
+ BaseCmd.removeFlag("minimize_crash");
+ BaseCmd.removeFlag("exact_artifact_path");
+ assert(BaseCmd.hasArgument(InputFilePath));
+ BaseCmd.removeArgument(InputFilePath);
+ if (Flags.runs <= 0 && Flags.max_total_time == 0) {
+ Printf("INFO: you need to specify -runs=N or "
+ "-max_total_time=N with -minimize_crash=1\n"
+ "INFO: defaulting to -max_total_time=600\n");
+ BaseCmd.addFlag("max_total_time", "600");
+ }
+
+ auto LogFilePath = TempPath(".txt");
+ BaseCmd.setOutputFile(LogFilePath);
+ BaseCmd.combineOutAndErr();
+
+ std::string CurrentFilePath = InputFilePath;
+ while (true) {
+ Unit U = FileToVector(CurrentFilePath);
+ Printf("CRASH_MIN: minimizing crash input: '%s' (%zd bytes)\n",
+ CurrentFilePath.c_str(), U.size());
+
+ Command Cmd(BaseCmd);
+ Cmd.addArgument(CurrentFilePath);
+
+ std::string CommandLine = Cmd.toString();
+ Printf("CRASH_MIN: executing: %s\n", CommandLine.c_str());
+ int ExitCode = ExecuteCommand(Cmd);
+ if (ExitCode == 0) {
+ Printf("ERROR: the input %s did not crash\n", CurrentFilePath.c_str());
+ exit(1);
+ }
+ Printf("CRASH_MIN: '%s' (%zd bytes) caused a crash. Will try to minimize "
+ "it further\n",
+ CurrentFilePath.c_str(), U.size());
+ auto DedupToken1 = GetDedupTokenFromFile(LogFilePath);
+ if (!DedupToken1.empty())
+ Printf("CRASH_MIN: DedupToken1: %s\n", DedupToken1.c_str());
+
+ std::string ArtifactPath =
+ Flags.exact_artifact_path
+ ? Flags.exact_artifact_path
+ : Options.ArtifactPrefix + "minimized-from-" + Hash(U);
+ Cmd.addFlag("minimize_crash_internal_step", "1");
+ Cmd.addFlag("exact_artifact_path", ArtifactPath);
+ CommandLine = Cmd.toString();
+ Printf("CRASH_MIN: executing: %s\n", CommandLine.c_str());
+ ExitCode = ExecuteCommand(Cmd);
+ CopyFileToErr(LogFilePath);
+ if (ExitCode == 0) {
+ if (Flags.exact_artifact_path) {
+ CurrentFilePath = Flags.exact_artifact_path;
+ WriteToFile(U, CurrentFilePath);
+ }
+ Printf("CRASH_MIN: failed to minimize beyond %s (%d bytes), exiting\n",
+ CurrentFilePath.c_str(), U.size());
+ break;
+ }
+ auto DedupToken2 = GetDedupTokenFromFile(LogFilePath);
+ if (!DedupToken2.empty())
+ Printf("CRASH_MIN: DedupToken2: %s\n", DedupToken2.c_str());
+
+ if (DedupToken1 != DedupToken2) {
+ if (Flags.exact_artifact_path) {
+ CurrentFilePath = Flags.exact_artifact_path;
+ WriteToFile(U, CurrentFilePath);
+ }
+ Printf("CRASH_MIN: mismatch in dedup tokens"
+ " (looks like a different bug). Won't minimize further\n");
+ break;
+ }
+
+ CurrentFilePath = ArtifactPath;
+ Printf("*********************************\n");
+ }
+ RemoveFile(LogFilePath);
+ return 0;
+}
+
+int MinimizeCrashInputInternalStep(Fuzzer *F, InputCorpus *Corpus) {
+ assert(Inputs->size() == 1);
+ std::string InputFilePath = Inputs->at(0);
+ Unit U = FileToVector(InputFilePath);
+ Printf("INFO: Starting MinimizeCrashInputInternalStep: %zd\n", U.size());
+ if (U.size() < 2) {
+ Printf("INFO: The input is small enough, exiting\n");
+ exit(0);
+ }
+ F->SetMaxInputLen(U.size());
+ F->SetMaxMutationLen(U.size() - 1);
+ F->MinimizeCrashLoop(U);
+ Printf("INFO: Done MinimizeCrashInputInternalStep, no crashes found\n");
+ exit(0);
+ return 0;
+}
+
+void Merge(Fuzzer *F, FuzzingOptions &Options, const Vector<std::string> &Args,
+ const Vector<std::string> &Corpora, const char *CFPathOrNull) {
+ if (Corpora.size() < 2) {
+ Printf("INFO: Merge requires two or more corpus dirs\n");
+ exit(0);
+ }
+
+ Vector<SizedFile> OldCorpus, NewCorpus;
+ GetSizedFilesFromDir(Corpora[0], &OldCorpus);
+ for (size_t i = 1; i < Corpora.size(); i++)
+ GetSizedFilesFromDir(Corpora[i], &NewCorpus);
+ std::sort(OldCorpus.begin(), OldCorpus.end());
+ std::sort(NewCorpus.begin(), NewCorpus.end());
+
+ std::string CFPath = CFPathOrNull ? CFPathOrNull : TempPath(".txt");
+ Vector<std::string> NewFiles;
+ Set<uint32_t> NewFeatures, NewCov;
+ CrashResistantMerge(Args, OldCorpus, NewCorpus, &NewFiles, {}, &NewFeatures,
+ {}, &NewCov, CFPath, true);
+ for (auto &Path : NewFiles)
+ F->WriteToOutputCorpus(FileToVector(Path, Options.MaxLen));
+ // We are done, delete the control file if it was a temporary one.
+ if (!Flags.merge_control_file)
+ RemoveFile(CFPath);
+
+ exit(0);
+}
+
+int AnalyzeDictionary(Fuzzer *F, const Vector<Unit>& Dict,
+ UnitVector& Corpus) {
+ Printf("Started dictionary minimization (up to %d tests)\n",
+ Dict.size() * Corpus.size() * 2);
+
+ // Scores and usage count for each dictionary unit.
+ Vector<int> Scores(Dict.size());
+ Vector<int> Usages(Dict.size());
+
+ Vector<size_t> InitialFeatures;
+ Vector<size_t> ModifiedFeatures;
+ for (auto &C : Corpus) {
+ // Get coverage for the testcase without modifications.
+ F->ExecuteCallback(C.data(), C.size());
+ InitialFeatures.clear();
+ TPC.CollectFeatures([&](size_t Feature) {
+ InitialFeatures.push_back(Feature);
+ });
+
+ for (size_t i = 0; i < Dict.size(); ++i) {
+ Vector<uint8_t> Data = C;
+ auto StartPos = std::search(Data.begin(), Data.end(),
+ Dict[i].begin(), Dict[i].end());
+ // Skip the dictionary unit if the testcase does not contain it.
+ if (StartPos == Data.end())
+ continue;
+
+ ++Usages[i];
+ while (StartPos != Data.end()) {
+ // Replace all occurrences of dictionary unit in the testcase.
+ auto EndPos = StartPos + Dict[i].size();
+ for (auto It = StartPos; It != EndPos; ++It)
+ *It ^= 0xFF;
+
+ StartPos = std::search(EndPos, Data.end(),
+ Dict[i].begin(), Dict[i].end());
+ }
+
+ // Get coverage for testcase with masked occurrences of dictionary unit.
+ F->ExecuteCallback(Data.data(), Data.size());
+ ModifiedFeatures.clear();
+ TPC.CollectFeatures([&](size_t Feature) {
+ ModifiedFeatures.push_back(Feature);
+ });
+
+ if (InitialFeatures == ModifiedFeatures)
+ --Scores[i];
+ else
+ Scores[i] += 2;
+ }
+ }
+
+ Printf("###### Useless dictionary elements. ######\n");
+ for (size_t i = 0; i < Dict.size(); ++i) {
+ // Dictionary units with positive score are treated as useful ones.
+ if (Scores[i] > 0)
+ continue;
+
+ Printf("\"");
+ PrintASCII(Dict[i].data(), Dict[i].size(), "\"");
+ Printf(" # Score: %d, Used: %d\n", Scores[i], Usages[i]);
+ }
+ Printf("###### End of useless dictionary elements. ######\n");
+ return 0;
+}
+
+Vector<std::string> ParseSeedInputs(const char *seed_inputs) {
+ // Parse -seed_inputs=file1,file2,... or -seed_inputs=@seed_inputs_file
+ Vector<std::string> Files;
+ if (!seed_inputs) return Files;
+ std::string SeedInputs;
+ if (seed_inputs[0] == '@')
+ SeedInputs = FileToString(seed_inputs + 1); // File contains list.
+ else
+ SeedInputs = seed_inputs; // seed_inputs contains the list.
+ if (SeedInputs.empty()) {
+ Printf("seed_inputs is empty or @file does not exist.\n");
+ exit(1);
+ }
+ // Parse SeedInputs.
+ size_t comma_pos = 0;
+ while ((comma_pos = SeedInputs.find_last_of(',')) != std::string::npos) {
+ Files.push_back(SeedInputs.substr(comma_pos + 1));
+ SeedInputs = SeedInputs.substr(0, comma_pos);
+ }
+ Files.push_back(SeedInputs);
+ return Files;
+}
+
+static Vector<SizedFile> ReadCorpora(const Vector<std::string> &CorpusDirs,
+ const Vector<std::string> &ExtraSeedFiles) {
+ Vector<SizedFile> SizedFiles;
+ size_t LastNumFiles = 0;
+ for (auto &Dir : CorpusDirs) {
+ GetSizedFilesFromDir(Dir, &SizedFiles);
+ Printf("INFO: % 8zd files found in %s\n", SizedFiles.size() - LastNumFiles,
+ Dir.c_str());
+ LastNumFiles = SizedFiles.size();
+ }
+ for (auto &File : ExtraSeedFiles)
+ if (auto Size = FileSize(File))
+ SizedFiles.push_back({File, Size});
+ return SizedFiles;
+}
+
+int FuzzerDriver(int *argc, char ***argv, UserCallback Callback) {
+ using namespace fuzzer;
+ assert(argc && argv && "Argument pointers cannot be nullptr");
+ std::string Argv0((*argv)[0]);
+ EF = new ExternalFunctions();
+ if (EF->LLVMFuzzerInitialize)
+ EF->LLVMFuzzerInitialize(argc, argv);
+ if (EF->__msan_scoped_disable_interceptor_checks)
+ EF->__msan_scoped_disable_interceptor_checks();
+ const Vector<std::string> Args(*argv, *argv + *argc);
+ assert(!Args.empty());
+ ProgName = new std::string(Args[0]);
+ if (Argv0 != *ProgName) {
+ Printf("ERROR: argv[0] has been modified in LLVMFuzzerInitialize\n");
+ exit(1);
+ }
+ ParseFlags(Args, EF);
+ if (Flags.help) {
+ PrintHelp();
+ return 0;
+ }
+
+ if (Flags.close_fd_mask & 2)
+ DupAndCloseStderr();
+ if (Flags.close_fd_mask & 1)
+ CloseStdout();
+
+ if (Flags.jobs > 0 && Flags.workers == 0) {
+ Flags.workers = std::min(NumberOfCpuCores() / 2, Flags.jobs);
+ if (Flags.workers > 1)
+ Printf("Running %u workers\n", Flags.workers);
+ }
+
+ if (Flags.workers > 0 && Flags.jobs > 0)
+ return RunInMultipleProcesses(Args, Flags.workers, Flags.jobs);
+
+ FuzzingOptions Options;
+ Options.Verbosity = Flags.verbosity;
+ Options.MaxLen = Flags.max_len;
+ Options.LenControl = Flags.len_control;
+ Options.UnitTimeoutSec = Flags.timeout;
+ Options.ErrorExitCode = Flags.error_exitcode;
+ Options.TimeoutExitCode = Flags.timeout_exitcode;
+ Options.IgnoreTimeouts = Flags.ignore_timeouts;
+ Options.IgnoreOOMs = Flags.ignore_ooms;
+ Options.IgnoreCrashes = Flags.ignore_crashes;
+ Options.MaxTotalTimeSec = Flags.max_total_time;
+ Options.DoCrossOver = Flags.cross_over;
+ Options.MutateDepth = Flags.mutate_depth;
+ Options.ReduceDepth = Flags.reduce_depth;
+ Options.UseCounters = Flags.use_counters;
+ Options.UseMemmem = Flags.use_memmem;
+ Options.UseCmp = Flags.use_cmp;
+ Options.UseValueProfile = Flags.use_value_profile;
+ Options.Shrink = Flags.shrink;
+ Options.ReduceInputs = Flags.reduce_inputs;
+ Options.ShuffleAtStartUp = Flags.shuffle;
+ Options.PreferSmall = Flags.prefer_small;
+ Options.ReloadIntervalSec = Flags.reload;
+ Options.OnlyASCII = Flags.only_ascii;
+ Options.DetectLeaks = Flags.detect_leaks;
+ Options.PurgeAllocatorIntervalSec = Flags.purge_allocator_interval;
+ Options.TraceMalloc = Flags.trace_malloc;
+ Options.RssLimitMb = Flags.rss_limit_mb;
+ Options.MallocLimitMb = Flags.malloc_limit_mb;
+ if (!Options.MallocLimitMb)
+ Options.MallocLimitMb = Options.RssLimitMb;
+ if (Flags.runs >= 0)
+ Options.MaxNumberOfRuns = Flags.runs;
+ if (!Inputs->empty() && !Flags.minimize_crash_internal_step)
+ Options.OutputCorpus = (*Inputs)[0];
+ Options.ReportSlowUnits = Flags.report_slow_units;
+ if (Flags.artifact_prefix)
+ Options.ArtifactPrefix = Flags.artifact_prefix;
+ if (Flags.exact_artifact_path)
+ Options.ExactArtifactPath = Flags.exact_artifact_path;
+ Vector<Unit> Dictionary;
+ if (Flags.dict)
+ if (!ParseDictionaryFile(FileToString(Flags.dict), &Dictionary))
+ return 1;
+ if (Flags.verbosity > 0 && !Dictionary.empty())
+ Printf("Dictionary: %zd entries\n", Dictionary.size());
+ bool RunIndividualFiles = AllInputsAreFiles();
+ Options.SaveArtifacts =
+ !RunIndividualFiles || Flags.minimize_crash_internal_step;
+ Options.PrintNewCovPcs = Flags.print_pcs;
+ Options.PrintNewCovFuncs = Flags.print_funcs;
+ Options.PrintFinalStats = Flags.print_final_stats;
+ Options.PrintCorpusStats = Flags.print_corpus_stats;
+ Options.PrintCoverage = Flags.print_coverage;
+ if (Flags.exit_on_src_pos)
+ Options.ExitOnSrcPos = Flags.exit_on_src_pos;
+ if (Flags.exit_on_item)
+ Options.ExitOnItem = Flags.exit_on_item;
+ if (Flags.focus_function)
+ Options.FocusFunction = Flags.focus_function;
+ if (Flags.data_flow_trace)
+ Options.DataFlowTrace = Flags.data_flow_trace;
+ if (Flags.features_dir)
+ Options.FeaturesDir = Flags.features_dir;
+ if (Flags.collect_data_flow)
+ Options.CollectDataFlow = Flags.collect_data_flow;
+ Options.LazyCounters = Flags.lazy_counters;
+ if (Flags.stop_file)
+ Options.StopFile = Flags.stop_file;
+
+ unsigned Seed = Flags.seed;
+ // If no seed was given, derive one from the current time and the pid.
+ if (Seed == 0)
+ Seed =
+ std::chrono::system_clock::now().time_since_epoch().count() + GetPid();
+ if (Flags.verbosity)
+ Printf("INFO: Seed: %u\n", Seed);
+
+ if (Flags.collect_data_flow && !Flags.fork && !Flags.merge) {
+ if (RunIndividualFiles)
+ return CollectDataFlow(Flags.collect_data_flow, Flags.data_flow_trace,
+ ReadCorpora({}, *Inputs));
+ else
+ return CollectDataFlow(Flags.collect_data_flow, Flags.data_flow_trace,
+ ReadCorpora(*Inputs, {}));
+ }
+
+ Random Rand(Seed);
+ auto *MD = new MutationDispatcher(Rand, Options);
+ auto *Corpus = new InputCorpus(Options.OutputCorpus);
+ auto *F = new Fuzzer(Callback, *Corpus, *MD, Options);
+
+ for (auto &U: Dictionary)
+ if (U.size() <= Word::GetMaxSize())
+ MD->AddWordToManualDictionary(Word(U.data(), U.size()));
+
+ StartRssThread(F, Flags.rss_limit_mb);
+
+ Options.HandleAbrt = Flags.handle_abrt;
+ Options.HandleBus = Flags.handle_bus;
+ Options.HandleFpe = Flags.handle_fpe;
+ Options.HandleIll = Flags.handle_ill;
+ Options.HandleInt = Flags.handle_int;
+ Options.HandleSegv = Flags.handle_segv;
+ Options.HandleTerm = Flags.handle_term;
+ Options.HandleXfsz = Flags.handle_xfsz;
+ Options.HandleUsr1 = Flags.handle_usr1;
+ Options.HandleUsr2 = Flags.handle_usr2;
+ SetSignalHandler(Options);
+
+ std::atexit(Fuzzer::StaticExitCallback);
+
+ if (Flags.minimize_crash)
+ return MinimizeCrashInput(Args, Options);
+
+ if (Flags.minimize_crash_internal_step)
+ return MinimizeCrashInputInternalStep(F, Corpus);
+
+ if (Flags.cleanse_crash)
+ return CleanseCrashInput(Args, Options);
+
+ if (RunIndividualFiles) {
+ Options.SaveArtifacts = false;
+ int Runs = std::max(1, Flags.runs);
+ Printf("%s: Running %zd inputs %d time(s) each.\n", ProgName->c_str(),
+ Inputs->size(), Runs);
+ for (auto &Path : *Inputs) {
+ auto StartTime = system_clock::now();
+ Printf("Running: %s\n", Path.c_str());
+ for (int Iter = 0; Iter < Runs; Iter++)
+ RunOneTest(F, Path.c_str(), Options.MaxLen);
+ auto StopTime = system_clock::now();
+ auto MS = duration_cast<milliseconds>(StopTime - StartTime).count();
+ Printf("Executed %s in %zd ms\n", Path.c_str(), (long)MS);
+ }
+ Printf("***\n"
+ "*** NOTE: fuzzing was not performed, you have only\n"
+ "*** executed the target code on a fixed set of inputs.\n"
+ "***\n");
+ F->PrintFinalStats();
+ exit(0);
+ }
+
+ if (Flags.fork)
+ FuzzWithFork(F->GetMD().GetRand(), Options, Args, *Inputs, Flags.fork);
+
+ if (Flags.merge)
+ Merge(F, Options, Args, *Inputs, Flags.merge_control_file);
+
+ if (Flags.merge_inner) {
+ const size_t kDefaultMaxMergeLen = 1 << 20;
+ if (Options.MaxLen == 0)
+ F->SetMaxInputLen(kDefaultMaxMergeLen);
+ assert(Flags.merge_control_file);
+ F->CrashResistantMergeInternalStep(Flags.merge_control_file);
+ exit(0);
+ }
+
+ if (Flags.analyze_dict) {
+ size_t MaxLen = INT_MAX; // Large max length.
+ UnitVector InitialCorpus;
+ for (auto &Inp : *Inputs) {
+ Printf("Loading corpus dir: %s\n", Inp.c_str());
+ ReadDirToVectorOfUnits(Inp.c_str(), &InitialCorpus, nullptr,
+ MaxLen, /*ExitOnError=*/false);
+ }
+
+ if (Dictionary.empty() || Inputs->empty()) {
+ Printf("ERROR: can't analyze dict without dict and corpus provided\n");
+ return 1;
+ }
+ if (AnalyzeDictionary(F, Dictionary, InitialCorpus)) {
+ Printf("Dictionary analysis failed\n");
+ exit(1);
+ }
+ Printf("Dictionary analysis succeeded\n");
+ exit(0);
+ }
+
+ auto CorporaFiles = ReadCorpora(*Inputs, ParseSeedInputs(Flags.seed_inputs));
+ F->Loop(CorporaFiles);
+
+ if (Flags.verbosity)
+ Printf("Done %zd runs in %zd second(s)\n", F->getTotalNumberOfRuns(),
+ F->secondsSinceProcessStartUp());
+ F->PrintFinalStats();
+
+ exit(0); // Don't let F destroy itself.
+}
+
+// Storage for global ExternalFunctions object.
+ExternalFunctions *EF = nullptr;
+
+} // namespace fuzzer
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerDriver.cpp
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerExtFunctions.def
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerExtFunctions.def (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerExtFunctions.def (revision 351984)
@@ -0,0 +1,49 @@
+//===- FuzzerExtFunctions.def - External functions --------------*- C++ -* ===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// This defines the external function pointers that
+// ``fuzzer::ExternalFunctions`` should contain and try to initialize. The
+// EXT_FUNC macro must be defined at the point of inclusion. The signature of
+// the macro is:
+//
+// EXT_FUNC(<name>, <return_type>, <function_signature>, <warn_if_missing>)
+//===----------------------------------------------------------------------===//
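+//
+// For illustration, a typical definition at the point of inclusion (a sketch
+// mirroring what FuzzerExtFunctions.h does; the trailing semicolon comes from
+// each EXT_FUNC(...) line in this file):
+//
+//   #define EXT_FUNC(NAME, RETURN_TYPE, FUNC_SIG, WARN) \
+//     RETURN_TYPE (*NAME) FUNC_SIG = nullptr
+//   #include "FuzzerExtFunctions.def"
+//   #undef EXT_FUNC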
+
+// Optional user functions
+EXT_FUNC(LLVMFuzzerInitialize, int, (int *argc, char ***argv), false);
+EXT_FUNC(LLVMFuzzerCustomMutator, size_t,
+ (uint8_t * Data, size_t Size, size_t MaxSize, unsigned int Seed),
+ false);
+EXT_FUNC(LLVMFuzzerCustomCrossOver, size_t,
+ (const uint8_t * Data1, size_t Size1,
+ const uint8_t * Data2, size_t Size2,
+ uint8_t * Out, size_t MaxOutSize, unsigned int Seed),
+ false);
+
+// Sanitizer functions
+EXT_FUNC(__lsan_enable, void, (), false);
+EXT_FUNC(__lsan_disable, void, (), false);
+EXT_FUNC(__lsan_do_recoverable_leak_check, int, (), false);
+EXT_FUNC(__sanitizer_acquire_crash_state, int, (), true);
+EXT_FUNC(__sanitizer_install_malloc_and_free_hooks, int,
+ (void (*malloc_hook)(const volatile void *, size_t),
+ void (*free_hook)(const volatile void *)),
+ false);
+EXT_FUNC(__sanitizer_purge_allocator, void, (), false);
+EXT_FUNC(__sanitizer_print_memory_profile, int, (size_t, size_t), false);
+EXT_FUNC(__sanitizer_print_stack_trace, void, (), true);
+EXT_FUNC(__sanitizer_symbolize_pc, void,
+ (void *, const char *fmt, char *out_buf, size_t out_buf_size), false);
+EXT_FUNC(__sanitizer_get_module_and_offset_for_pc, int,
+ (void *pc, char *module_path,
+ size_t module_path_len,void **pc_offset), false);
+EXT_FUNC(__sanitizer_set_death_callback, void, (void (*)(void)), true);
+EXT_FUNC(__sanitizer_set_report_fd, void, (void*), false);
+EXT_FUNC(__msan_scoped_disable_interceptor_checks, void, (), false);
+EXT_FUNC(__msan_scoped_enable_interceptor_checks, void, (), false);
+EXT_FUNC(__msan_unpoison, void, (const volatile void *, size_t size), false);
+EXT_FUNC(__msan_unpoison_param, void, (size_t n), false);
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerExtFunctions.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerExtFunctions.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerExtFunctions.h (revision 351984)
@@ -0,0 +1,34 @@
+//===- FuzzerExtFunctions.h - Interface to external functions ---*- C++ -* ===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// Defines an interface to (possibly optional) functions.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_FUZZER_EXT_FUNCTIONS_H
+#define LLVM_FUZZER_EXT_FUNCTIONS_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+namespace fuzzer {
+
+struct ExternalFunctions {
+ // Initialize function pointers. Functions that are not available will be set
+ // to nullptr. Do not call this constructor before ``main()`` has been
+ // entered.
+ ExternalFunctions();
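+
+ // Illustrative use (a sketch, not a requirement of this header): because
+ // missing functions stay nullptr, callers null-check before calling, e.g.
+ //   if (EF->__sanitizer_print_stack_trace)
+ //     EF->__sanitizer_print_stack_trace();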
+
+#define EXT_FUNC(NAME, RETURN_TYPE, FUNC_SIG, WARN) \
+ RETURN_TYPE(*NAME) FUNC_SIG = nullptr
+
+#include "FuzzerExtFunctions.def"
+
+#undef EXT_FUNC
+};
+} // namespace fuzzer
+
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerExtFunctions.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerExtFunctionsDlsym.cpp
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerExtFunctionsDlsym.cpp (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerExtFunctionsDlsym.cpp (revision 351984)
@@ -0,0 +1,51 @@
+//===- FuzzerExtFunctionsDlsym.cpp - Interface to external functions ------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// Implementation for operating systems that support dlsym(). We only use it on
+// Apple platforms for now. We don't use this approach on Linux because it
+// requires that clients of LibFuzzer pass ``--export-dynamic`` to the linker.
+// That is a complication we don't wish to expose to clients right now.
+//===----------------------------------------------------------------------===//
+#include "FuzzerDefs.h"
+#if LIBFUZZER_APPLE
+
+#include "FuzzerExtFunctions.h"
+#include "FuzzerIO.h"
+#include <dlfcn.h>
+
+using namespace fuzzer;
+
+template <typename T>
+static T GetFnPtr(const char *FnName, bool WarnIfMissing) {
+ dlerror(); // Clear any previous errors.
+ void *Fn = dlsym(RTLD_DEFAULT, FnName);
+ if (Fn == nullptr) {
+ if (WarnIfMissing) {
+ const char *ErrorMsg = dlerror();
+ Printf("WARNING: Failed to find function \"%s\".", FnName);
+ if (ErrorMsg)
+ Printf(" Reason %s.", ErrorMsg);
+ Printf("\n");
+ }
+ }
+ return reinterpret_cast<T>(Fn);
+}
+
+namespace fuzzer {
+
+ExternalFunctions::ExternalFunctions() {
+#define EXT_FUNC(NAME, RETURN_TYPE, FUNC_SIG, WARN) \
+ this->NAME = GetFnPtr<decltype(ExternalFunctions::NAME)>(#NAME, WARN)
+
+#include "FuzzerExtFunctions.def"
+
+#undef EXT_FUNC
+}
+
+} // namespace fuzzer
+
+#endif // LIBFUZZER_APPLE
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerExtFunctionsDlsym.cpp
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerExtFunctionsWeak.cpp
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerExtFunctionsWeak.cpp (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerExtFunctionsWeak.cpp (revision 351984)
@@ -0,0 +1,54 @@
+//===- FuzzerExtFunctionsWeak.cpp - Interface to external functions -------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// Implementation for Linux. This relies on the linker's support for weak
+// symbols. We don't use this approach on Apple platforms because it requires
+// clients of LibFuzzer to pass ``-U _<symbol_name>`` to the linker to allow
+// weak symbols to be undefined. That is a complication we don't want to expose
+// to clients right now.
+//===----------------------------------------------------------------------===//
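+//
+// For illustration, the EXT_FUNC definition below expands, e.g., the
+// LLVMFuzzerInitialize entry of FuzzerExtFunctions.def into roughly:
+//
+//   __attribute__((weak, visibility("default")))
+//   int LLVMFuzzerInitialize(int *argc, char ***argv);
+//
+// leaving the symbol's address null at run time unless the target defines it.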
+#include "FuzzerDefs.h"
+#if LIBFUZZER_LINUX || LIBFUZZER_NETBSD || LIBFUZZER_FUCHSIA || \
+ LIBFUZZER_FREEBSD || LIBFUZZER_OPENBSD
+
+#include "FuzzerExtFunctions.h"
+#include "FuzzerIO.h"
+
+extern "C" {
+// Declare these symbols as weak to allow them to be optionally defined.
+#define EXT_FUNC(NAME, RETURN_TYPE, FUNC_SIG, WARN) \
+ __attribute__((weak, visibility("default"))) RETURN_TYPE NAME FUNC_SIG
+
+#include "FuzzerExtFunctions.def"
+
+#undef EXT_FUNC
+}
+
+using namespace fuzzer;
+
+static void CheckFnPtr(void *FnPtr, const char *FnName, bool WarnIfMissing) {
+ if (FnPtr == nullptr && WarnIfMissing) {
+ Printf("WARNING: Failed to find function \"%s\".\n", FnName);
+ }
+}
+
+namespace fuzzer {
+
+ExternalFunctions::ExternalFunctions() {
+#define EXT_FUNC(NAME, RETURN_TYPE, FUNC_SIG, WARN) \
+ this->NAME = ::NAME; \
+ CheckFnPtr(reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(::NAME)), \
+ #NAME, WARN);
+
+#include "FuzzerExtFunctions.def"
+
+#undef EXT_FUNC
+}
+
+} // namespace fuzzer
+
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerExtFunctionsWeak.cpp
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerExtFunctionsWindows.cpp
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerExtFunctionsWindows.cpp (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerExtFunctionsWindows.cpp (revision 351984)
@@ -0,0 +1,82 @@
+//=== FuzzerExtFunctionsWindows.cpp - Interface to external functions -----===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// Implementation of FuzzerExtFunctions for Windows. Uses alternatename when
+// compiled with MSVC. Uses weak aliases when compiled with clang. Unfortunately
+// the method each compiler supports is not supported by the other.
+//===----------------------------------------------------------------------===//
+#include "FuzzerDefs.h"
+#if LIBFUZZER_WINDOWS
+
+#include "FuzzerExtFunctions.h"
+#include "FuzzerIO.h"
+
+using namespace fuzzer;
+
+// Intermediate macro to ensure the parameter is expanded before stringified.
+#define STRINGIFY_(A) #A
+#define STRINGIFY(A) STRINGIFY_(A)
+
+#if LIBFUZZER_MSVC
+// Copied from compiler-rt/lib/sanitizer_common/sanitizer_win_defs.h
+#if defined(_M_IX86) || defined(__i386__)
+#define WIN_SYM_PREFIX "_"
+#else
+#define WIN_SYM_PREFIX
+#endif
+
+// Declare external functions as having alternate names, so that we can
+// detect when they are not defined.
+#define EXTERNAL_FUNC(Name, Default) \
+ __pragma(comment(linker, "/alternatename:" WIN_SYM_PREFIX STRINGIFY( \
+ Name) "=" WIN_SYM_PREFIX STRINGIFY(Default)))
+#else
+// Declare external functions as weak to allow them to default to a specified
+// function if not defined explicitly. We must use weak symbols because clang's
+// support for alternatename is not 100%, see
+// https://bugs.llvm.org/show_bug.cgi?id=40218 for more details.
+#define EXTERNAL_FUNC(Name, Default) \
+ __attribute__((weak, alias(STRINGIFY(Default))))
+#endif // LIBFUZZER_MSVC
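+
+// For illustration, under MSVC the EXT_FUNC definition below emits roughly
+//   #pragma comment(linker, \
+//       "/alternatename:LLVMFuzzerInitialize=LLVMFuzzerInitializeDef")
+// so an undefined LLVMFuzzerInitialize resolves to its *Def stub, which
+// GetFnPtr() below then recognizes as "missing".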
+
+extern "C" {
+#define EXT_FUNC(NAME, RETURN_TYPE, FUNC_SIG, WARN) \
+ RETURN_TYPE NAME##Def FUNC_SIG { \
+ Printf("ERROR: Function \"%s\" not defined.\n", #NAME); \
+ exit(1); \
+ } \
+ EXTERNAL_FUNC(NAME, NAME##Def) RETURN_TYPE NAME FUNC_SIG
+
+#include "FuzzerExtFunctions.def"
+
+#undef EXT_FUNC
+}
+
+template <typename T>
+static T *GetFnPtr(T *Fun, T *FunDef, const char *FnName, bool WarnIfMissing) {
+ if (Fun == FunDef) {
+ if (WarnIfMissing)
+ Printf("WARNING: Failed to find function \"%s\".\n", FnName);
+ return nullptr;
+ }
+ return Fun;
+}
+
+namespace fuzzer {
+
+ExternalFunctions::ExternalFunctions() {
+#define EXT_FUNC(NAME, RETURN_TYPE, FUNC_SIG, WARN) \
+ this->NAME = GetFnPtr<decltype(::NAME)>(::NAME, ::NAME##Def, #NAME, WARN);
+
+#include "FuzzerExtFunctions.def"
+
+#undef EXT_FUNC
+}
+
+} // namespace fuzzer
+
+#endif // LIBFUZZER_WINDOWS
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerExtraCounters.cpp
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerExtraCounters.cpp (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerExtraCounters.cpp (revision 351984)
@@ -0,0 +1,41 @@
+//===- FuzzerExtraCounters.cpp - Extra coverage counters ------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// Extra coverage counters defined by user code.
+//===----------------------------------------------------------------------===//
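+//
+// For illustration (a sketch following the libFuzzer docs): user code places
+// its counters in the dedicated section so that the __start/__stop symbols
+// below bracket them, e.g.
+//
+//   __attribute__((section("__libfuzzer_extra_counters")))
+//   static uint8_t MyExtraCounters[1024]; // name is illustrative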
+
+#include "FuzzerDefs.h"
+
+#if LIBFUZZER_LINUX || LIBFUZZER_NETBSD || LIBFUZZER_FREEBSD || \
+ LIBFUZZER_OPENBSD
+__attribute__((weak)) extern uint8_t __start___libfuzzer_extra_counters;
+__attribute__((weak)) extern uint8_t __stop___libfuzzer_extra_counters;
+
+namespace fuzzer {
+uint8_t *ExtraCountersBegin() { return &__start___libfuzzer_extra_counters; }
+uint8_t *ExtraCountersEnd() { return &__stop___libfuzzer_extra_counters; }
+ATTRIBUTE_NO_SANITIZE_ALL
+void ClearExtraCounters() { // hand-written memset, don't asan-ify.
+ uintptr_t *Beg = reinterpret_cast<uintptr_t*>(ExtraCountersBegin());
+ uintptr_t *End = reinterpret_cast<uintptr_t*>(ExtraCountersEnd());
+ for (; Beg < End; Beg++) {
+ *Beg = 0;
+ __asm__ __volatile__("" : : : "memory");
+ }
+}
+
+} // namespace fuzzer
+
+#else
+// TODO: implement for other platforms.
+namespace fuzzer {
+uint8_t *ExtraCountersBegin() { return nullptr; }
+uint8_t *ExtraCountersEnd() { return nullptr; }
+void ClearExtraCounters() {}
+} // namespace fuzzer
+
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerExtraCounters.cpp
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerFlags.def
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerFlags.def (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerFlags.def (revision 351984)
@@ -0,0 +1,163 @@
+//===- FuzzerFlags.def - Run-time flags -------------------------*- C++ -* ===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// Flags. FUZZER_FLAG_INT/FUZZER_FLAG_STRING macros should be defined at the
+// point of inclusion. We are not using any flag parsing library for better
+// portability and independence.
+//===----------------------------------------------------------------------===//
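+//
+// For illustration, the including file (FuzzerDriver.cpp) defines the macros
+// roughly like this to build a struct of flag values:
+//
+//   #define FUZZER_FLAG_INT(Name, Default, Description) int Name;
+//   #define FUZZER_FLAG_UNSIGNED(Name, Default, Description) unsigned int Name;
+//   #define FUZZER_FLAG_STRING(Name, Description) const char *Name;
+//   #define FUZZER_DEPRECATED_FLAG(Name)
+//   #include "FuzzerFlags.def"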
+FUZZER_FLAG_INT(verbosity, 1, "Verbosity level.")
+FUZZER_FLAG_UNSIGNED(seed, 0, "Random seed. If 0, seed is generated.")
+FUZZER_FLAG_INT(runs, -1,
+ "Number of individual test runs (-1 for infinite runs).")
+FUZZER_FLAG_INT(max_len, 0, "Maximum length of the test input. "
+ "If 0, libFuzzer tries to guess a good value based on the corpus "
+ "and reports it. ")
+FUZZER_FLAG_INT(len_control, 100, "Try generating small inputs first, "
+ "then try larger inputs over time. Specifies the rate at which the length "
+ "limit is increased (smaller == faster). If 0, immediately try inputs with "
+ "size up to max_len. Default value is 0, if LLVMFuzzerCustomMutator is used.")
+FUZZER_FLAG_STRING(seed_inputs, "A comma-separated list of input files "
+ "to use as an additional seed corpus. Alternatively, an \"@\" followed by "
+ "the name of a file containing the comma-seperated list.")
+FUZZER_FLAG_INT(cross_over, 1, "If 1, cross over inputs.")
+FUZZER_FLAG_INT(mutate_depth, 5,
+ "Apply this number of consecutive mutations to each input.")
+FUZZER_FLAG_INT(reduce_depth, 0, "Experimental/internal. "
+ "Reduce depth if mutations lose unique features")
+FUZZER_FLAG_INT(shuffle, 1, "Shuffle inputs at startup")
+FUZZER_FLAG_INT(prefer_small, 1,
+ "If 1, always prefer smaller inputs during the corpus shuffle.")
+FUZZER_FLAG_INT(
+ timeout, 1200,
+ "Timeout in seconds (if positive). "
+ "If one unit runs more than this number of seconds the process will abort.")
+FUZZER_FLAG_INT(error_exitcode, 77, "When libFuzzer itself reports a bug "
+ "this exit code will be used.")
+FUZZER_FLAG_INT(timeout_exitcode, 70, "When libFuzzer reports a timeout "
+ "this exit code will be used.")
+FUZZER_FLAG_INT(max_total_time, 0, "If positive, indicates the maximal total "
+ "time in seconds to run the fuzzer.")
+FUZZER_FLAG_INT(help, 0, "Print help.")
+FUZZER_FLAG_INT(fork, 0, "Experimental mode where fuzzing happens "
+ "in a subprocess")
+FUZZER_FLAG_INT(ignore_timeouts, 1, "Ignore timeouts in fork mode")
+FUZZER_FLAG_INT(ignore_ooms, 1, "Ignore OOMs in fork mode")
+FUZZER_FLAG_INT(ignore_crashes, 0, "Ignore crashes in fork mode")
+FUZZER_FLAG_INT(merge, 0, "If 1, the 2nd, 3rd, etc. corpora will be "
+ "merged into the 1st corpus. Only interesting units will be taken. "
+ "This flag can be used to minimize a corpus.")
+FUZZER_FLAG_STRING(stop_file, "Stop fuzzing ASAP if this file exists")
+FUZZER_FLAG_STRING(merge_inner, "internal flag")
+FUZZER_FLAG_STRING(merge_control_file,
+ "Specify a control file used for the merge process. "
+ "If a merge process gets killed it tries to leave this file "
+ "in a state suitable for resuming the merge. "
+ "By default a temporary file will be used.")
+FUZZER_FLAG_INT(minimize_crash, 0, "If 1, minimizes the provided"
+ " crash input. Use with -runs=N or -max_total_time=N to limit "
+ "the number attempts."
+ " Use with -exact_artifact_path to specify the output."
+ " Combine with ASAN_OPTIONS=dedup_token_length=3 (or similar) to ensure that"
+ " the minimized input triggers the same crash."
+ )
+FUZZER_FLAG_INT(cleanse_crash, 0, "If 1, tries to cleanse the provided"
+ " crash input to make it contain fewer original bytes."
+ " Use with -exact_artifact_path to specify the output."
+ )
+FUZZER_FLAG_INT(minimize_crash_internal_step, 0, "internal flag")
+FUZZER_FLAG_STRING(features_dir, "internal flag. Used to dump feature sets on disk. "
+ "Every time a new input is added to the corpus, a corresponding file in the features_dir"
+ " is created containing the unique features of that input."
+ " Features are stored in binary format.")
+FUZZER_FLAG_INT(use_counters, 1, "Use coverage counters")
+FUZZER_FLAG_INT(use_memmem, 1,
+ "Use hints from intercepting memmem, strstr, etc")
+FUZZER_FLAG_INT(use_value_profile, 0,
+ "Experimental. Use value profile to guide fuzzing.")
+FUZZER_FLAG_INT(use_cmp, 1, "Use CMP traces to guide mutations")
+FUZZER_FLAG_INT(shrink, 0, "Experimental. Try to shrink corpus inputs.")
+FUZZER_FLAG_INT(reduce_inputs, 1,
+ "Try to reduce the size of inputs while preserving their full feature sets")
+FUZZER_FLAG_UNSIGNED(jobs, 0, "Number of jobs to run. If jobs >= 1 we spawn"
+ " this number of jobs in separate worker processes"
+ " with stdout/stderr redirected to fuzz-JOB.log.")
+FUZZER_FLAG_UNSIGNED(workers, 0,
+ "Number of simultaneous worker processes to run the jobs."
+ " If zero, \"min(jobs,NumberOfCpuCores()/2)\" is used.")
+FUZZER_FLAG_INT(reload, 1,
+ "Reload the main corpus every <N> seconds to get new units"
+ " discovered by other processes. If 0, disabled")
+FUZZER_FLAG_INT(report_slow_units, 10,
+ "Report slowest units if they run for more than this number of seconds.")
+FUZZER_FLAG_INT(only_ascii, 0,
+ "If 1, generate only ASCII (isprint+isspace) inputs.")
+FUZZER_FLAG_STRING(dict, "Experimental. Use the dictionary file.")
+FUZZER_FLAG_STRING(artifact_prefix, "Write fuzzing artifacts (crash, "
+ "timeout, or slow inputs) as "
+ "$(artifact_prefix)file")
+FUZZER_FLAG_STRING(exact_artifact_path,
+ "Write the single artifact on failure (crash, timeout) "
+ "as $(exact_artifact_path). This overrides -artifact_prefix "
+ "and will not use checksum in the file name. Do not "
+ "use the same path for several parallel processes.")
+FUZZER_FLAG_INT(print_pcs, 0, "If 1, print out newly covered PCs.")
+FUZZER_FLAG_INT(print_funcs, 2, "If >=1, print out at most this number of "
+ "newly covered functions.")
+FUZZER_FLAG_INT(print_final_stats, 0, "If 1, print statistics at exit.")
+FUZZER_FLAG_INT(print_corpus_stats, 0,
+ "If 1, print statistics on corpus elements at exit.")
+FUZZER_FLAG_INT(print_coverage, 0, "If 1, print coverage information as text"
+ " at exit.")
+FUZZER_FLAG_INT(dump_coverage, 0, "Deprecated.")
+FUZZER_FLAG_INT(handle_segv, 1, "If 1, try to intercept SIGSEGV.")
+FUZZER_FLAG_INT(handle_bus, 1, "If 1, try to intercept SIGBUS.")
+FUZZER_FLAG_INT(handle_abrt, 1, "If 1, try to intercept SIGABRT.")
+FUZZER_FLAG_INT(handle_ill, 1, "If 1, try to intercept SIGILL.")
+FUZZER_FLAG_INT(handle_fpe, 1, "If 1, try to intercept SIGFPE.")
+FUZZER_FLAG_INT(handle_int, 1, "If 1, try to intercept SIGINT.")
+FUZZER_FLAG_INT(handle_term, 1, "If 1, try to intercept SIGTERM.")
+FUZZER_FLAG_INT(handle_xfsz, 1, "If 1, try to intercept SIGXFSZ.")
+FUZZER_FLAG_INT(handle_usr1, 1, "If 1, try to intercept SIGUSR1.")
+FUZZER_FLAG_INT(handle_usr2, 1, "If 1, try to intercept SIGUSR2.")
+FUZZER_FLAG_INT(lazy_counters, 0, "If 1, a performance optimization is "
+ "enabled for the 8bit inline counters. "
+ "Requires that libFuzzer successfully installs its SEGV handler.")
+FUZZER_FLAG_INT(close_fd_mask, 0, "If 1, close stdout at startup; "
+ "if 2, close stderr; if 3, close both. "
+ "Be careful, this will also close e.g. stderr of asan.")
+FUZZER_FLAG_INT(detect_leaks, 1, "If 1, and if LeakSanitizer is enabled "
+ "try to detect memory leaks during fuzzing (i.e. not only at shut down).")
+FUZZER_FLAG_INT(purge_allocator_interval, 1, "Purge allocator caches and "
+ "quarantines every <N> seconds. When rss_limit_mb is specified (>0), "
+ "purging starts when RSS exceeds 50% of rss_limit_mb. Pass "
+ "purge_allocator_interval=-1 to disable this functionality.")
+FUZZER_FLAG_INT(trace_malloc, 0, "If >= 1 will print all mallocs/frees. "
+ "If >= 2 will also print stack traces.")
+FUZZER_FLAG_INT(rss_limit_mb, 2048, "If non-zero, the fuzzer will exit upon "
+ "reaching this limit of RSS memory usage.")
+FUZZER_FLAG_INT(malloc_limit_mb, 0, "If non-zero, the fuzzer will exit "
+ "if the target tries to allocate this number of Mb with one malloc call. "
+ "If zero (default) same limit as rss_limit_mb is applied.")
+FUZZER_FLAG_STRING(exit_on_src_pos, "Exit if a newly found PC originates"
+ " from the given source location. Example: -exit_on_src_pos=foo.cc:123. "
+ "Used primarily for testing libFuzzer itself.")
+FUZZER_FLAG_STRING(exit_on_item, "Exit if an item with a given sha1 sum"
+ " was added to the corpus. "
+ "Used primarily for testing libFuzzer itself.")
+FUZZER_FLAG_INT(ignore_remaining_args, 0, "If 1, ignore all arguments passed "
+ "after this one. Useful for fuzzers that need to do their own "
+ "argument parsing.")
+FUZZER_FLAG_STRING(focus_function, "Experimental. "
+ "Fuzzing will focus on inputs that trigger calls to this function. "
+ "If -focus_function=auto and -data_flow_trace is used, libFuzzer "
+ "will choose the focus functions automatically.")
+
+FUZZER_FLAG_INT(analyze_dict, 0, "Experimental")
+FUZZER_DEPRECATED_FLAG(use_clang_coverage)
+FUZZER_FLAG_STRING(data_flow_trace, "Experimental: use the data flow trace")
+FUZZER_FLAG_STRING(collect_data_flow,
+ "Experimental: collect the data flow trace")
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerFork.cpp
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerFork.cpp (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerFork.cpp (revision 351984)
@@ -0,0 +1,409 @@
+//===- FuzzerFork.cpp - run fuzzing in separate subprocesses --------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// Spawn and orchestrate separate fuzzing processes.
+//===----------------------------------------------------------------------===//
+
+#include "FuzzerCommand.h"
+#include "FuzzerFork.h"
+#include "FuzzerIO.h"
+#include "FuzzerInternal.h"
+#include "FuzzerMerge.h"
+#include "FuzzerSHA1.h"
+#include "FuzzerTracePC.h"
+#include "FuzzerUtil.h"
+
+#include <atomic>
+#include <chrono>
+#include <condition_variable>
+#include <fstream>
+#include <memory>
+#include <mutex>
+#include <queue>
+#include <sstream>
+#include <thread>
+
+namespace fuzzer {
+
+struct Stats {
+ size_t number_of_executed_units = 0;
+ size_t peak_rss_mb = 0;
+ size_t average_exec_per_sec = 0;
+};
+
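+// A child's log (the jobs run with -print_final_stats=1) ends with lines of
+// the form (values illustrative):
+//   stat::number_of_executed_units: 12345
+//   stat::average_exec_per_sec:     123
+//   stat::peak_rss_mb:              456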
+static Stats ParseFinalStatsFromLog(const std::string &LogPath) {
+ std::ifstream In(LogPath);
+ std::string Line;
+ Stats Res;
+ struct {
+ const char *Name;
+ size_t *Var;
+ } NameVarPairs[] = {
+ {"stat::number_of_executed_units:", &Res.number_of_executed_units},
+ {"stat::peak_rss_mb:", &Res.peak_rss_mb},
+ {"stat::average_exec_per_sec:", &Res.average_exec_per_sec},
+ {nullptr, nullptr},
+ };
+ while (std::getline(In, Line, '\n')) {
+ if (Line.find("stat::") != 0) continue;
+ std::istringstream ISS(Line);
+ std::string Name;
+ size_t Val;
+ ISS >> Name >> Val;
+ for (size_t i = 0; NameVarPairs[i].Name; i++)
+ if (Name == NameVarPairs[i].Name)
+ *NameVarPairs[i].Var = Val;
+ }
+ return Res;
+}
+
+struct FuzzJob {
+ // Inputs.
+ Command Cmd;
+ std::string CorpusDir;
+ std::string FeaturesDir;
+ std::string LogPath;
+ std::string SeedListPath;
+ std::string CFPath;
+ size_t JobId;
+
+ int DftTimeInSeconds = 0;
+
+ // Fuzzing Outputs.
+ int ExitCode;
+
+ ~FuzzJob() {
+ RemoveFile(CFPath);
+ RemoveFile(LogPath);
+ RemoveFile(SeedListPath);
+ RmDirRecursive(CorpusDir);
+ RmDirRecursive(FeaturesDir);
+ }
+};
+
+struct GlobalEnv {
+ Vector<std::string> Args;
+ Vector<std::string> CorpusDirs;
+ std::string MainCorpusDir;
+ std::string TempDir;
+ std::string DFTDir;
+ std::string DataFlowBinary;
+ Set<uint32_t> Features, Cov;
+ Set<std::string> FilesWithDFT;
+ Vector<std::string> Files;
+ Random *Rand;
+ std::chrono::system_clock::time_point ProcessStartTime;
+ int Verbosity = 0;
+
+ size_t NumTimeouts = 0;
+ size_t NumOOMs = 0;
+ size_t NumCrashes = 0;
+
+ size_t NumRuns = 0;
+
+ std::string StopFile() { return DirPlusFile(TempDir, "STOP"); }
+
+ size_t secondsSinceProcessStartUp() const {
+ return std::chrono::duration_cast<std::chrono::seconds>(
+ std::chrono::system_clock::now() - ProcessStartTime)
+ .count();
+ }
+
+ FuzzJob *CreateNewJob(size_t JobId) {
+ Command Cmd(Args);
+ Cmd.removeFlag("fork");
+ Cmd.removeFlag("runs");
+ Cmd.removeFlag("collect_data_flow");
+ for (auto &C : CorpusDirs) // Remove all corpora from the args.
+ Cmd.removeArgument(C);
+ Cmd.addFlag("reload", "0"); // working in an isolated dir, no reload.
+ Cmd.addFlag("print_final_stats", "1");
+ Cmd.addFlag("print_funcs", "0"); // no need to spend time symbolizing.
+ Cmd.addFlag("max_total_time", std::to_string(std::min((size_t)300, JobId)));
+ Cmd.addFlag("stop_file", StopFile());
+ if (!DataFlowBinary.empty()) {
+ Cmd.addFlag("data_flow_trace", DFTDir);
+ if (!Cmd.hasFlag("focus_function"))
+ Cmd.addFlag("focus_function", "auto");
+ }
+ auto Job = new FuzzJob;
+ std::string Seeds;
+ if (size_t CorpusSubsetSize =
+ std::min(Files.size(), (size_t)sqrt(Files.size() + 2))) {
+ auto Time1 = std::chrono::system_clock::now();
+ for (size_t i = 0; i < CorpusSubsetSize; i++) {
+ auto &SF = Files[Rand->SkewTowardsLast(Files.size())];
+ Seeds += (Seeds.empty() ? "" : ",") + SF;
+ CollectDFT(SF);
+ }
+ auto Time2 = std::chrono::system_clock::now();
+ Job->DftTimeInSeconds = duration_cast<seconds>(Time2 - Time1).count();
+ }
+ if (!Seeds.empty()) {
+ Job->SeedListPath =
+ DirPlusFile(TempDir, std::to_string(JobId) + ".seeds");
+ WriteToFile(Seeds, Job->SeedListPath);
+ Cmd.addFlag("seed_inputs", "@" + Job->SeedListPath);
+ }
+ Job->LogPath = DirPlusFile(TempDir, std::to_string(JobId) + ".log");
+ Job->CorpusDir = DirPlusFile(TempDir, "C" + std::to_string(JobId));
+ Job->FeaturesDir = DirPlusFile(TempDir, "F" + std::to_string(JobId));
+ Job->CFPath = DirPlusFile(TempDir, std::to_string(JobId) + ".merge");
+ Job->JobId = JobId;
+
+ Cmd.addArgument(Job->CorpusDir);
+ Cmd.addFlag("features_dir", Job->FeaturesDir);
+
+ for (auto &D : {Job->CorpusDir, Job->FeaturesDir}) {
+ RmDirRecursive(D);
+ MkDir(D);
+ }
+
+ Cmd.setOutputFile(Job->LogPath);
+ Cmd.combineOutAndErr();
+
+ Job->Cmd = Cmd;
+
+ if (Verbosity >= 2)
+ Printf("Job %zd/%p Created: %s\n", JobId, Job,
+ Job->Cmd.toString().c_str());
+ // Start from very short runs and gradually increase them.
+ return Job;
+ }
+
+ void RunOneMergeJob(FuzzJob *Job) {
+ auto Stats = ParseFinalStatsFromLog(Job->LogPath);
+ NumRuns += Stats.number_of_executed_units;
+
+ Vector<SizedFile> TempFiles, MergeCandidates;
+ // Read all newly created inputs and their feature sets.
+ // Choose only those inputs that have new features.
+ GetSizedFilesFromDir(Job->CorpusDir, &TempFiles);
+ std::sort(TempFiles.begin(), TempFiles.end());
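+ // Each input in CorpusDir has a same-named sibling file in FeaturesDir
+ // holding that input's features as a packed array of uint32_t
+ // (see the features_dir flag).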
+ for (auto &F : TempFiles) {
+ auto FeatureFile = F.File;
+ FeatureFile.replace(0, Job->CorpusDir.size(), Job->FeaturesDir);
+ auto FeatureBytes = FileToVector(FeatureFile, 0, false);
+ assert((FeatureBytes.size() % sizeof(uint32_t)) == 0);
+ Vector<uint32_t> NewFeatures(FeatureBytes.size() / sizeof(uint32_t));
+ memcpy(NewFeatures.data(), FeatureBytes.data(), FeatureBytes.size());
+ for (auto Ft : NewFeatures) {
+ if (!Features.count(Ft)) {
+ MergeCandidates.push_back(F);
+ break;
+ }
+ }
+ }
+ // if (!FilesToAdd.empty() || Job->ExitCode != 0)
+ Printf("#%zd: cov: %zd ft: %zd corp: %zd exec/s %zd "
+ "oom/timeout/crash: %zd/%zd/%zd time: %zds job: %zd dft_time: %d\n",
+ NumRuns, Cov.size(), Features.size(), Files.size(),
+ Stats.average_exec_per_sec, NumOOMs, NumTimeouts, NumCrashes,
+ secondsSinceProcessStartUp(), Job->JobId, Job->DftTimeInSeconds);
+
+ if (MergeCandidates.empty()) return;
+
+ Vector<std::string> FilesToAdd;
+ Set<uint32_t> NewFeatures, NewCov;
+ CrashResistantMerge(Args, {}, MergeCandidates, &FilesToAdd, Features,
+ &NewFeatures, Cov, &NewCov, Job->CFPath, false);
+ for (auto &Path : FilesToAdd) {
+ auto U = FileToVector(Path);
+ auto NewPath = DirPlusFile(MainCorpusDir, Hash(U));
+ WriteToFile(U, NewPath);
+ Files.push_back(NewPath);
+ }
+ Features.insert(NewFeatures.begin(), NewFeatures.end());
+ Cov.insert(NewCov.begin(), NewCov.end());
+ for (auto Idx : NewCov)
+ if (auto *TE = TPC.PCTableEntryByIdx(Idx))
+ if (TPC.PcIsFuncEntry(TE))
+ PrintPC(" NEW_FUNC: %p %F %L\n", "",
+ TPC.GetNextInstructionPc(TE->PC));
+ }
+
+ void CollectDFT(const std::string &InputPath) {
+ if (DataFlowBinary.empty()) return;
+ if (!FilesWithDFT.insert(InputPath).second) return;
+ Command Cmd(Args);
+ Cmd.removeFlag("fork");
+ Cmd.removeFlag("runs");
+ Cmd.addFlag("data_flow_trace", DFTDir);
+ Cmd.addArgument(InputPath);
+ for (auto &C : CorpusDirs) // Remove all corpora from the args.
+ Cmd.removeArgument(C);
+ Cmd.setOutputFile(DirPlusFile(TempDir, "dft.log"));
+ Cmd.combineOutAndErr();
+ // Printf("CollectDFT: %s\n", Cmd.toString().c_str());
+ ExecuteCommand(Cmd);
+ }
+
+};
+
+struct JobQueue {
+ std::queue<FuzzJob *> Qu;
+ std::mutex Mu;
+ std::condition_variable Cv;
+
+ void Push(FuzzJob *Job) {
+ {
+ std::lock_guard<std::mutex> Lock(Mu);
+ Qu.push(Job);
+ }
+ Cv.notify_one();
+ }
+ FuzzJob *Pop() {
+ std::unique_lock<std::mutex> Lk(Mu);
+ // std::lock_guard<std::mutex> Lock(Mu);
+ Cv.wait(Lk, [&]{return !Qu.empty();});
+ assert(!Qu.empty());
+ auto Job = Qu.front();
+ Qu.pop();
+ return Job;
+ }
+};
+
+void WorkerThread(JobQueue *FuzzQ, JobQueue *MergeQ) {
+ while (auto Job = FuzzQ->Pop()) {
+ // Printf("WorkerThread: job %p\n", Job);
+ Job->ExitCode = ExecuteCommand(Job->Cmd);
+ MergeQ->Push(Job);
+ }
+}
+
+// This is just a skeleton of an experimental -fork=1 feature.
+void FuzzWithFork(Random &Rand, const FuzzingOptions &Options,
+ const Vector<std::string> &Args,
+ const Vector<std::string> &CorpusDirs, int NumJobs) {
+ Printf("INFO: -fork=%d: fuzzing in separate process(s)\n", NumJobs);
+
+ GlobalEnv Env;
+ Env.Args = Args;
+ Env.CorpusDirs = CorpusDirs;
+ Env.Rand = &Rand;
+ Env.Verbosity = Options.Verbosity;
+ Env.ProcessStartTime = std::chrono::system_clock::now();
+ Env.DataFlowBinary = Options.CollectDataFlow;
+
+ Vector<SizedFile> SeedFiles;
+ for (auto &Dir : CorpusDirs)
+ GetSizedFilesFromDir(Dir, &SeedFiles);
+ std::sort(SeedFiles.begin(), SeedFiles.end());
+ Env.TempDir = TempPath(".dir");
+ Env.DFTDir = DirPlusFile(Env.TempDir, "DFT");
+ RmDirRecursive(Env.TempDir); // in case there is a leftover from old runs.
+ MkDir(Env.TempDir);
+ MkDir(Env.DFTDir);
+
+ if (CorpusDirs.empty())
+ MkDir(Env.MainCorpusDir = DirPlusFile(Env.TempDir, "C"));
+ else
+ Env.MainCorpusDir = CorpusDirs[0];
+
+ auto CFPath = DirPlusFile(Env.TempDir, "merge.txt");
+ CrashResistantMerge(Env.Args, {}, SeedFiles, &Env.Files, {}, &Env.Features,
+ {}, &Env.Cov,
+ CFPath, false);
+ RemoveFile(CFPath);
+ Printf("INFO: -fork=%d: %zd seed inputs, starting to fuzz in %s\n", NumJobs,
+ Env.Files.size(), Env.TempDir.c_str());
+
+ int ExitCode = 0;
+
+ JobQueue FuzzQ, MergeQ;
+
+ auto StopJobs = [&]() {
+ for (int i = 0; i < NumJobs; i++)
+ FuzzQ.Push(nullptr);
+ MergeQ.Push(nullptr);
+ WriteToFile(Unit({1}), Env.StopFile());
+ };
+
+ size_t JobId = 1;
+ Vector<std::thread> Threads;
+ for (int t = 0; t < NumJobs; t++) {
+ Threads.push_back(std::thread(WorkerThread, &FuzzQ, &MergeQ));
+ FuzzQ.Push(Env.CreateNewJob(JobId++));
+ }
+
+ while (true) {
+ std::unique_ptr<FuzzJob> Job(MergeQ.Pop());
+ if (!Job)
+ break;
+ ExitCode = Job->ExitCode;
+ if (ExitCode == Options.InterruptExitCode) {
+ Printf("==%lu== libFuzzer: a child was interrupted; exiting\n", GetPid());
+ StopJobs();
+ break;
+ }
+ Fuzzer::MaybeExitGracefully();
+
+ Env.RunOneMergeJob(Job.get());
+
+ // Continue if the crash is one of the ignored ones.
+ if (Options.IgnoreTimeouts && ExitCode == Options.TimeoutExitCode)
+ Env.NumTimeouts++;
+ else if (Options.IgnoreOOMs && ExitCode == Options.OOMExitCode)
+ Env.NumOOMs++;
+ else if (ExitCode != 0) {
+ Env.NumCrashes++;
+ if (Options.IgnoreCrashes) {
+ std::ifstream In(Job->LogPath);
+ std::string Line;
+ while (std::getline(In, Line, '\n'))
+ if (Line.find("ERROR:") != Line.npos ||
+ Line.find("runtime error:") != Line.npos)
+ Printf("%s\n", Line.c_str());
+ } else {
+ // And exit if we don't ignore this crash.
+ Printf("INFO: log from the inner process:\n%s",
+ FileToString(Job->LogPath).c_str());
+ StopJobs();
+ break;
+ }
+ }
+
+ // Stop if we are over the time budget.
+ // This is not precise, since other threads are still running
+ // and we will wait while joining them.
+ // We also don't stop instantly: other jobs need to finish.
+ if (Options.MaxTotalTimeSec > 0 &&
+ Env.secondsSinceProcessStartUp() >= (size_t)Options.MaxTotalTimeSec) {
+ Printf("INFO: fuzzed for %zd seconds, wrapping up soon\n",
+ Env.secondsSinceProcessStartUp());
+ StopJobs();
+ break;
+ }
+ if (Env.NumRuns >= Options.MaxNumberOfRuns) {
+ Printf("INFO: fuzzed for %zd iterations, wrapping up soon\n",
+ Env.NumRuns);
+ StopJobs();
+ break;
+ }
+
+ FuzzQ.Push(Env.CreateNewJob(JobId++));
+ }
+
+ for (auto &T : Threads)
+ T.join();
+
+ // The workers have terminated. Don't try to remove the directory before they
+ // terminate to avoid a race condition preventing cleanup on Windows.
+ RmDirRecursive(Env.TempDir);
+
+ // Use the exit code from the last child process.
+ Printf("INFO: exiting: %d time: %zds\n", ExitCode,
+ Env.secondsSinceProcessStartUp());
+ exit(ExitCode);
+}
+
+} // namespace fuzzer
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerFork.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerFork.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerFork.h (revision 351984)
@@ -0,0 +1,24 @@
+//===- FuzzerFork.h - run fuzzing in sub-processes --------------*- C++ -* ===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_FUZZER_FORK_H
+#define LLVM_FUZZER_FORK_H
+
+#include "FuzzerDefs.h"
+#include "FuzzerOptions.h"
+#include "FuzzerRandom.h"
+
+#include <string>
+
+namespace fuzzer {
+void FuzzWithFork(Random &Rand, const FuzzingOptions &Options,
+ const Vector<std::string> &Args,
+ const Vector<std::string> &CorpusDirs, int NumJobs);
+} // namespace fuzzer
+
+#endif // LLVM_FUZZER_FORK_H
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerIO.cpp
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerIO.cpp (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerIO.cpp (revision 351984)
@@ -0,0 +1,159 @@
+//===- FuzzerIO.cpp - IO utils. -------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// IO functions.
+//===----------------------------------------------------------------------===//
+
+#include "FuzzerDefs.h"
+#include "FuzzerExtFunctions.h"
+#include "FuzzerIO.h"
+#include "FuzzerUtil.h"
+#include <algorithm>
+#include <cstdarg>
+#include <fstream>
+#include <iterator>
+#include <sys/stat.h>
+#include <sys/types.h>
+
+namespace fuzzer {
+
+static FILE *OutputFile = stderr;
+
+long GetEpoch(const std::string &Path) {
+ struct stat St;
+ if (stat(Path.c_str(), &St))
+ return 0; // Can't stat, be conservative.
+ return St.st_mtime;
+}
+
+Unit FileToVector(const std::string &Path, size_t MaxSize, bool ExitOnError) {
+ std::ifstream T(Path, std::ios::binary);
+ if (ExitOnError && !T) {
+ Printf("No such directory: %s; exiting\n", Path.c_str());
+ exit(1);
+ }
+
+ T.seekg(0, T.end);
+ auto EndPos = T.tellg();
+ if (EndPos < 0) return {};
+ size_t FileLen = EndPos;
+ if (MaxSize)
+ FileLen = std::min(FileLen, MaxSize);
+
+ T.seekg(0, T.beg);
+ Unit Res(FileLen);
+ T.read(reinterpret_cast<char *>(Res.data()), FileLen);
+ return Res;
+}
+
+std::string FileToString(const std::string &Path) {
+ std::ifstream T(Path, std::ios::binary);
+ return std::string((std::istreambuf_iterator<char>(T)),
+ std::istreambuf_iterator<char>());
+}
+
+void CopyFileToErr(const std::string &Path) {
+ Printf("%s", FileToString(Path).c_str());
+}
+
+void WriteToFile(const Unit &U, const std::string &Path) {
+ WriteToFile(U.data(), U.size(), Path);
+}
+
+void WriteToFile(const std::string &Data, const std::string &Path) {
+ WriteToFile(reinterpret_cast<const uint8_t *>(Data.c_str()), Data.size(),
+ Path);
+}
+
+void WriteToFile(const uint8_t *Data, size_t Size, const std::string &Path) {
+ // Use raw C interface because this function may be called from a sig handler.
+ FILE *Out = fopen(Path.c_str(), "wb");
+ if (!Out) return;
+ fwrite(Data, sizeof(Data[0]), Size, Out);
+ fclose(Out);
+}
+
+void ReadDirToVectorOfUnits(const char *Path, Vector<Unit> *V,
+ long *Epoch, size_t MaxSize, bool ExitOnError) {
+ long E = Epoch ? *Epoch : 0;
+ Vector<std::string> Files;
+ ListFilesInDirRecursive(Path, Epoch, &Files, /*TopDir*/true);
+ size_t NumLoaded = 0;
+ for (size_t i = 0; i < Files.size(); i++) {
+ auto &X = Files[i];
+ if (Epoch && GetEpoch(X) < E) continue;
+ NumLoaded++;
+ if ((NumLoaded & (NumLoaded - 1)) == 0 && NumLoaded >= 1024)
+ Printf("Loaded %zd/%zd files from %s\n", NumLoaded, Files.size(), Path);
+ auto S = FileToVector(X, MaxSize, ExitOnError);
+ if (!S.empty())
+ V->push_back(S);
+ }
+}
+
+
+void GetSizedFilesFromDir(const std::string &Dir, Vector<SizedFile> *V) {
+ Vector<std::string> Files;
+ ListFilesInDirRecursive(Dir, 0, &Files, /*TopDir*/true);
+ for (auto &File : Files)
+ if (size_t Size = FileSize(File))
+ V->push_back({File, Size});
+}
+
+std::string DirPlusFile(const std::string &DirPath,
+ const std::string &FileName) {
+ return DirPath + GetSeparator() + FileName;
+}
+
+void DupAndCloseStderr() {
+ int OutputFd = DuplicateFile(2);
+ if (OutputFd > 0) {
+ FILE *NewOutputFile = OpenFile(OutputFd, "w");
+ if (NewOutputFile) {
+ OutputFile = NewOutputFile;
+ if (EF->__sanitizer_set_report_fd)
+ EF->__sanitizer_set_report_fd(
+ reinterpret_cast<void *>(GetHandleFromFd(OutputFd)));
+ DiscardOutput(2);
+ }
+ }
+}
+
+void CloseStdout() {
+ DiscardOutput(1);
+}
+
+void Printf(const char *Fmt, ...) {
+ va_list ap;
+ va_start(ap, Fmt);
+ vfprintf(OutputFile, Fmt, ap);
+ va_end(ap);
+ fflush(OutputFile);
+}
+
+void VPrintf(bool Verbose, const char *Fmt, ...) {
+ if (!Verbose) return;
+ va_list ap;
+ va_start(ap, Fmt);
+ vfprintf(OutputFile, Fmt, ap);
+ va_end(ap);
+ fflush(OutputFile);
+}
+
+void RmDirRecursive(const std::string &Dir) {
+ IterateDirRecursive(
+ Dir, [](const std::string &Path) {},
+ [](const std::string &Path) { RmDir(Path); },
+ [](const std::string &Path) { RemoveFile(Path); });
+}
+
+std::string TempPath(const char *Extension) {
+ return DirPlusFile(TmpDir(),
+ "libFuzzerTemp." + std::to_string(GetPid()) + Extension);
+}
+
+} // namespace fuzzer
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerIO.cpp
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerIO.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerIO.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerIO.h (revision 351984)
@@ -0,0 +1,108 @@
+//===- FuzzerIO.h - Internal header for IO utils ----------------*- C++ -* ===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// IO interface.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_FUZZER_IO_H
+#define LLVM_FUZZER_IO_H
+
+#include "FuzzerDefs.h"
+
+namespace fuzzer {
+
+long GetEpoch(const std::string &Path);
+
+Unit FileToVector(const std::string &Path, size_t MaxSize = 0,
+ bool ExitOnError = true);
+
+std::string FileToString(const std::string &Path);
+
+void CopyFileToErr(const std::string &Path);
+
+void WriteToFile(const uint8_t *Data, size_t Size, const std::string &Path);
+// Write Data.c_str() to the file without terminating null character.
+void WriteToFile(const std::string &Data, const std::string &Path);
+void WriteToFile(const Unit &U, const std::string &Path);
+
+void ReadDirToVectorOfUnits(const char *Path, Vector<Unit> *V,
+ long *Epoch, size_t MaxSize, bool ExitOnError);
+
+// Returns "Dir/FileName" or equivalent for the current OS.
+std::string DirPlusFile(const std::string &DirPath,
+ const std::string &FileName);
+
+// Returns the name of the dir, similar to the 'dirname' utility.
+std::string DirName(const std::string &FileName);
+
+// Returns path to a TmpDir.
+std::string TmpDir();
+
+std::string TempPath(const char *Extension);
+
+bool IsInterestingCoverageFile(const std::string &FileName);
+
+void DupAndCloseStderr();
+
+void CloseStdout();
+
+void Printf(const char *Fmt, ...);
+void VPrintf(bool Verbose, const char *Fmt, ...);
+
+// Print using raw syscalls, useful when printing at early init stages.
+void RawPrint(const char *Str);
+
+// Platform specific functions:
+bool IsFile(const std::string &Path);
+size_t FileSize(const std::string &Path);
+
+void ListFilesInDirRecursive(const std::string &Dir, long *Epoch,
+ Vector<std::string> *V, bool TopDir);
+
+void RmDirRecursive(const std::string &Dir);
+
+// Iterate files and dirs inside Dir, recursively.
+// Call DirPreCallback/DirPostCallback on dirs before/after
+// calling FileCallback on files.
+void IterateDirRecursive(const std::string &Dir,
+ void (*DirPreCallback)(const std::string &Dir),
+ void (*DirPostCallback)(const std::string &Dir),
+ void (*FileCallback)(const std::string &Dir));
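+
+// For illustration, RmDirRecursive() in FuzzerIO.cpp drives this with a no-op
+// dir pre-callback, RmDir() as the dir post-callback, and RemoveFile() on
+// files.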
+
+struct SizedFile {
+ std::string File;
+ size_t Size;
+ bool operator<(const SizedFile &B) const { return Size < B.Size; }
+};
+
+void GetSizedFilesFromDir(const std::string &Dir, Vector<SizedFile> *V);
+
+char GetSeparator();
+// Similar to the basename utility: returns the file name w/o the dir prefix.
+std::string Basename(const std::string &Path);
+
+FILE* OpenFile(int Fd, const char *Mode);
+
+int CloseFile(int Fd);
+
+int DuplicateFile(int Fd);
+
+void RemoveFile(const std::string &Path);
+void RenameFile(const std::string &OldPath, const std::string &NewPath);
+
+void DiscardOutput(int Fd);
+
+intptr_t GetHandleFromFd(int fd);
+
+void MkDir(const std::string &Path);
+void RmDir(const std::string &Path);
+
+const std::string &getDevNull();
+
+} // namespace fuzzer
+
+#endif // LLVM_FUZZER_IO_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerIO.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerIOPosix.cpp
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerIOPosix.cpp (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerIOPosix.cpp (revision 351984)
@@ -0,0 +1,184 @@
+//===- FuzzerIOPosix.cpp - IO utils for Posix. ----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// IO functions implementation using Posix API.
+//===----------------------------------------------------------------------===//
+#include "FuzzerDefs.h"
+#if LIBFUZZER_POSIX || LIBFUZZER_FUCHSIA
+
+#include "FuzzerExtFunctions.h"
+#include "FuzzerIO.h"
+#include <cstdarg>
+#include <cstdio>
+#include <dirent.h>
+#include <fstream>
+#include <iterator>
+#include <libgen.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+namespace fuzzer {
+
+bool IsFile(const std::string &Path) {
+ struct stat St;
+ if (stat(Path.c_str(), &St))
+ return false;
+ return S_ISREG(St.st_mode);
+}
+
+static bool IsDirectory(const std::string &Path) {
+ struct stat St;
+ if (stat(Path.c_str(), &St))
+ return false;
+ return S_ISDIR(St.st_mode);
+}
+
+size_t FileSize(const std::string &Path) {
+ struct stat St;
+ if (stat(Path.c_str(), &St))
+ return 0;
+ return St.st_size;
+}
+
+std::string Basename(const std::string &Path) {
+ size_t Pos = Path.rfind(GetSeparator());
+ if (Pos == std::string::npos) return Path;
+ assert(Pos < Path.size());
+ return Path.substr(Pos + 1);
+}
+
+void ListFilesInDirRecursive(const std::string &Dir, long *Epoch,
+ Vector<std::string> *V, bool TopDir) {
+ auto E = GetEpoch(Dir);
+ if (Epoch)
+ if (E && *Epoch >= E) return;
+
+ DIR *D = opendir(Dir.c_str());
+ if (!D) {
+ Printf("%s: %s; exiting\n", strerror(errno), Dir.c_str());
+ exit(1);
+ }
+ while (auto E = readdir(D)) {
+ std::string Path = DirPlusFile(Dir, E->d_name);
+ if (E->d_type == DT_REG || E->d_type == DT_LNK ||
+ (E->d_type == DT_UNKNOWN && IsFile(Path)))
+ V->push_back(Path);
+ else if ((E->d_type == DT_DIR ||
+ (E->d_type == DT_UNKNOWN && IsDirectory(Path))) &&
+ *E->d_name != '.')
+ ListFilesInDirRecursive(Path, Epoch, V, false);
+ }
+ closedir(D);
+ if (Epoch && TopDir)
+ *Epoch = E;
+}
+
+
+void IterateDirRecursive(const std::string &Dir,
+ void (*DirPreCallback)(const std::string &Dir),
+ void (*DirPostCallback)(const std::string &Dir),
+ void (*FileCallback)(const std::string &Dir)) {
+ DirPreCallback(Dir);
+ DIR *D = opendir(Dir.c_str());
+ if (!D) return;
+ while (auto E = readdir(D)) {
+ std::string Path = DirPlusFile(Dir, E->d_name);
+ if (E->d_type == DT_REG || E->d_type == DT_LNK ||
+ (E->d_type == DT_UNKNOWN && IsFile(Path)))
+ FileCallback(Path);
+ else if ((E->d_type == DT_DIR ||
+ (E->d_type == DT_UNKNOWN && IsDirectory(Path))) &&
+ *E->d_name != '.')
+ IterateDirRecursive(Path, DirPreCallback, DirPostCallback, FileCallback);
+ }
+ closedir(D);
+ DirPostCallback(Dir);
+}
+
+char GetSeparator() {
+ return '/';
+}
+
+FILE* OpenFile(int Fd, const char* Mode) {
+ return fdopen(Fd, Mode);
+}
+
+int CloseFile(int fd) {
+ return close(fd);
+}
+
+int DuplicateFile(int Fd) {
+ return dup(Fd);
+}
+
+void RemoveFile(const std::string &Path) {
+ unlink(Path.c_str());
+}
+
+void RenameFile(const std::string &OldPath, const std::string &NewPath) {
+ rename(OldPath.c_str(), NewPath.c_str());
+}
+
+void DiscardOutput(int Fd) {
+ FILE* Temp = fopen("/dev/null", "w");
+ if (!Temp)
+ return;
+ dup2(fileno(Temp), Fd);
+ fclose(Temp);
+}
+
+intptr_t GetHandleFromFd(int fd) {
+ return static_cast<intptr_t>(fd);
+}
+
+std::string DirName(const std::string &FileName) {
+ char *Tmp = new char[FileName.size() + 1];
+ memcpy(Tmp, FileName.c_str(), FileName.size() + 1);
+ std::string Res = dirname(Tmp);
+ delete [] Tmp;
+ return Res;
+}
+
+std::string TmpDir() {
+ if (auto Env = getenv("TMPDIR"))
+ return Env;
+ return "/tmp";
+}
+
+bool IsInterestingCoverageFile(const std::string &FileName) {
+ if (FileName.find("compiler-rt/lib/") != std::string::npos)
+ return false; // sanitizer internal.
+ if (FileName.find("/usr/lib/") != std::string::npos)
+ return false;
+ if (FileName.find("/usr/include/") != std::string::npos)
+ return false;
+ if (FileName == "<null>")
+ return false;
+ return true;
+}
+
+void RawPrint(const char *Str) {
+ write(2, Str, strlen(Str));
+}
+
+void MkDir(const std::string &Path) {
+ mkdir(Path.c_str(), 0700);
+}
+
+void RmDir(const std::string &Path) {
+ rmdir(Path.c_str());
+}
+
+const std::string &getDevNull() {
+ static const std::string devNull = "/dev/null";
+ return devNull;
+}
+
+} // namespace fuzzer
+
+#endif // LIBFUZZER_POSIX || LIBFUZZER_FUCHSIA
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerIOPosix.cpp
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerIOWindows.cpp
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerIOWindows.cpp (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerIOWindows.cpp (revision 351984)
@@ -0,0 +1,421 @@
+//===- FuzzerIOWindows.cpp - IO utils for Windows. ------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// IO functions implementation for Windows.
+//===----------------------------------------------------------------------===//
+#include "FuzzerDefs.h"
+#if LIBFUZZER_WINDOWS
+
+#include "FuzzerExtFunctions.h"
+#include "FuzzerIO.h"
+#include <cstdarg>
+#include <cstdio>
+#include <fstream>
+#include <io.h>
+#include <iterator>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <windows.h>
+
+namespace fuzzer {
+
+static bool IsFile(const std::string &Path, const DWORD &FileAttributes) {
+  if (FileAttributes & FILE_ATTRIBUTE_NORMAL)
+ return true;
+
+ if (FileAttributes & FILE_ATTRIBUTE_DIRECTORY)
+ return false;
+
+ HANDLE FileHandle(
+ CreateFileA(Path.c_str(), 0, FILE_SHARE_READ, NULL, OPEN_EXISTING,
+ FILE_FLAG_BACKUP_SEMANTICS, 0));
+
+ if (FileHandle == INVALID_HANDLE_VALUE) {
+ Printf("CreateFileA() failed for \"%s\" (Error code: %lu).\n", Path.c_str(),
+ GetLastError());
+ return false;
+ }
+
+ DWORD FileType = GetFileType(FileHandle);
+
+ if (FileType == FILE_TYPE_UNKNOWN) {
+ Printf("GetFileType() failed for \"%s\" (Error code: %lu).\n", Path.c_str(),
+ GetLastError());
+ CloseHandle(FileHandle);
+ return false;
+ }
+
+ if (FileType != FILE_TYPE_DISK) {
+ CloseHandle(FileHandle);
+ return false;
+ }
+
+ CloseHandle(FileHandle);
+ return true;
+}
+
+bool IsFile(const std::string &Path) {
+ DWORD Att = GetFileAttributesA(Path.c_str());
+
+ if (Att == INVALID_FILE_ATTRIBUTES) {
+ Printf("GetFileAttributesA() failed for \"%s\" (Error code: %lu).\n",
+ Path.c_str(), GetLastError());
+ return false;
+ }
+
+ return IsFile(Path, Att);
+}
+
+static bool IsDir(DWORD FileAttrs) {
+ if (FileAttrs == INVALID_FILE_ATTRIBUTES) return false;
+ return FileAttrs & FILE_ATTRIBUTE_DIRECTORY;
+}
+
+std::string Basename(const std::string &Path) {
+ size_t Pos = Path.find_last_of("/\\");
+ if (Pos == std::string::npos) return Path;
+ assert(Pos < Path.size());
+ return Path.substr(Pos + 1);
+}
+
+size_t FileSize(const std::string &Path) {
+ WIN32_FILE_ATTRIBUTE_DATA attr;
+ if (!GetFileAttributesExA(Path.c_str(), GetFileExInfoStandard, &attr)) {
+ DWORD LastError = GetLastError();
+ if (LastError != ERROR_FILE_NOT_FOUND)
+ Printf("GetFileAttributesExA() failed for \"%s\" (Error code: %lu).\n",
+ Path.c_str(), LastError);
+ return 0;
+ }
+ ULARGE_INTEGER size;
+ size.HighPart = attr.nFileSizeHigh;
+ size.LowPart = attr.nFileSizeLow;
+ return size.QuadPart;
+}
+
+void ListFilesInDirRecursive(const std::string &Dir, long *Epoch,
+ Vector<std::string> *V, bool TopDir) {
+ auto E = GetEpoch(Dir);
+ if (Epoch)
+ if (E && *Epoch >= E) return;
+
+ std::string Path(Dir);
+ assert(!Path.empty());
+ if (Path.back() != '\\')
+ Path.push_back('\\');
+ Path.push_back('*');
+
+ // Get the first directory entry.
+ WIN32_FIND_DATAA FindInfo;
+ HANDLE FindHandle(FindFirstFileA(Path.c_str(), &FindInfo));
+  if (FindHandle == INVALID_HANDLE_VALUE) {
+ if (GetLastError() == ERROR_FILE_NOT_FOUND)
+ return;
+ Printf("No such file or directory: %s; exiting\n", Dir.c_str());
+ exit(1);
+ }
+
+ do {
+ std::string FileName = DirPlusFile(Dir, FindInfo.cFileName);
+
+ if (FindInfo.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) {
+ size_t FilenameLen = strlen(FindInfo.cFileName);
+ if ((FilenameLen == 1 && FindInfo.cFileName[0] == '.') ||
+ (FilenameLen == 2 && FindInfo.cFileName[0] == '.' &&
+ FindInfo.cFileName[1] == '.'))
+ continue;
+
+ ListFilesInDirRecursive(FileName, Epoch, V, false);
+ }
+ else if (IsFile(FileName, FindInfo.dwFileAttributes))
+ V->push_back(FileName);
+ } while (FindNextFileA(FindHandle, &FindInfo));
+
+ DWORD LastError = GetLastError();
+ if (LastError != ERROR_NO_MORE_FILES)
+ Printf("FindNextFileA failed (Error code: %lu).\n", LastError);
+
+ FindClose(FindHandle);
+
+ if (Epoch && TopDir)
+ *Epoch = E;
+}
+
+void IterateDirRecursive(const std::string &Dir,
+ void (*DirPreCallback)(const std::string &Dir),
+ void (*DirPostCallback)(const std::string &Dir),
+ void (*FileCallback)(const std::string &Dir)) {
+ // TODO(metzman): Implement ListFilesInDirRecursive via this function.
+ DirPreCallback(Dir);
+
+ DWORD DirAttrs = GetFileAttributesA(Dir.c_str());
+ if (!IsDir(DirAttrs)) return;
+
+ std::string TargetDir(Dir);
+ assert(!TargetDir.empty());
+ if (TargetDir.back() != '\\') TargetDir.push_back('\\');
+ TargetDir.push_back('*');
+
+ WIN32_FIND_DATAA FindInfo;
+ // Find the directory's first file.
+ HANDLE FindHandle = FindFirstFileA(TargetDir.c_str(), &FindInfo);
+ if (FindHandle == INVALID_HANDLE_VALUE) {
+ DWORD LastError = GetLastError();
+ if (LastError != ERROR_FILE_NOT_FOUND) {
+ // If the directory isn't empty, then something abnormal is going on.
+ Printf("FindFirstFileA failed for %s (Error code: %lu).\n", Dir.c_str(),
+ LastError);
+ }
+ return;
+ }
+
+ do {
+ std::string Path = DirPlusFile(Dir, FindInfo.cFileName);
+ DWORD PathAttrs = FindInfo.dwFileAttributes;
+ if (IsDir(PathAttrs)) {
+ // Is Path the current directory (".") or the parent ("..")?
+ if (strcmp(FindInfo.cFileName, ".") == 0 ||
+ strcmp(FindInfo.cFileName, "..") == 0)
+ continue;
+ IterateDirRecursive(Path, DirPreCallback, DirPostCallback, FileCallback);
+ } else if (PathAttrs != INVALID_FILE_ATTRIBUTES) {
+ FileCallback(Path);
+ }
+ } while (FindNextFileA(FindHandle, &FindInfo));
+
+ DWORD LastError = GetLastError();
+ if (LastError != ERROR_NO_MORE_FILES)
+ Printf("FindNextFileA failed for %s (Error code: %lu).\n", Dir.c_str(),
+ LastError);
+
+ FindClose(FindHandle);
+ DirPostCallback(Dir);
+}
+
+char GetSeparator() {
+ return '\\';
+}
+
+FILE* OpenFile(int Fd, const char* Mode) {
+ return _fdopen(Fd, Mode);
+}
+
+int CloseFile(int Fd) {
+ return _close(Fd);
+}
+
+int DuplicateFile(int Fd) {
+ return _dup(Fd);
+}
+
+void RemoveFile(const std::string &Path) {
+ _unlink(Path.c_str());
+}
+
+void RenameFile(const std::string &OldPath, const std::string &NewPath) {
+ rename(OldPath.c_str(), NewPath.c_str());
+}
+
+void DiscardOutput(int Fd) {
+ FILE* Temp = fopen("nul", "w");
+ if (!Temp)
+ return;
+ _dup2(_fileno(Temp), Fd);
+ fclose(Temp);
+}
+
+intptr_t GetHandleFromFd(int fd) {
+ return _get_osfhandle(fd);
+}
+
+static bool IsSeparator(char C) {
+ return C == '\\' || C == '/';
+}
+
+// Parse a disk designator, like "C:\". If Relative == true, "C:" is also
+// accepted.
+// Returns number of characters considered if successful.
+static size_t ParseDrive(const std::string &FileName, const size_t Offset,
+ bool Relative = true) {
+ if (Offset + 1 >= FileName.size() || FileName[Offset + 1] != ':')
+ return 0;
+ if (Offset + 2 >= FileName.size() || !IsSeparator(FileName[Offset + 2])) {
+ if (!Relative) // Accept relative path?
+ return 0;
+ else
+ return 2;
+ }
+ return 3;
+}
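+// Illustrative results (a sketch, not exhaustive):
+//   ParseDrive("C:\\Dir", 0) == 3  // "C:\" consumed.
+//   ParseDrive("C:Dir", 0)   == 2  // Relative form, accepted by default.
+//   ParseDrive("Dir", 0)     == 0  // No drive designator.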
+
+// Parse a file name, like: SomeFile.txt
+// Returns number of characters considered if successful.
+static size_t ParseFileName(const std::string &FileName, const size_t Offset) {
+ size_t Pos = Offset;
+ const size_t End = FileName.size();
+ for(; Pos < End && !IsSeparator(FileName[Pos]); ++Pos)
+ ;
+ return Pos - Offset;
+}
+
+// Parse a directory ending in separator, like: `SomeDir\`
+// Returns number of characters considered if successful.
+static size_t ParseDir(const std::string &FileName, const size_t Offset) {
+ size_t Pos = Offset;
+ const size_t End = FileName.size();
+ if (Pos >= End || IsSeparator(FileName[Pos]))
+ return 0;
+ for(; Pos < End && !IsSeparator(FileName[Pos]); ++Pos)
+ ;
+ if (Pos >= End)
+ return 0;
+ ++Pos; // Include separator.
+ return Pos - Offset;
+}
+
+// Parse a servername and share, like: `SomeServer\SomeShare\`
+// Returns number of characters considered if successful.
+static size_t ParseServerAndShare(const std::string &FileName,
+ const size_t Offset) {
+ size_t Pos = Offset, Res;
+ if (!(Res = ParseDir(FileName, Pos)))
+ return 0;
+ Pos += Res;
+ if (!(Res = ParseDir(FileName, Pos)))
+ return 0;
+ Pos += Res;
+ return Pos - Offset;
+}
+
+// Parse the given Ref string starting at position Offset, requiring an exact
+// match of the string Patt.
+// Returns number of characters considered if successful.
+static size_t ParseCustomString(const std::string &Ref, size_t Offset,
+ const char *Patt) {
+ size_t Len = strlen(Patt);
+ if (Offset + Len > Ref.size())
+ return 0;
+ return Ref.compare(Offset, Len, Patt) == 0 ? Len : 0;
+}
+
+// Parse a location, like:
+// \\?\UNC\Server\Share\ \\?\C:\ \\Server\Share\ \ C:\ C:
+// Returns number of characters considered if successful.
+static size_t ParseLocation(const std::string &FileName) {
+ size_t Pos = 0, Res;
+
+ if ((Res = ParseCustomString(FileName, Pos, R"(\\?\)"))) {
+ Pos += Res;
+ if ((Res = ParseCustomString(FileName, Pos, R"(UNC\)"))) {
+ Pos += Res;
+ if ((Res = ParseServerAndShare(FileName, Pos)))
+ return Pos + Res;
+ return 0;
+ }
+ if ((Res = ParseDrive(FileName, Pos, false)))
+ return Pos + Res;
+ return 0;
+ }
+
+ if (Pos < FileName.size() && IsSeparator(FileName[Pos])) {
+ ++Pos;
+ if (Pos < FileName.size() && IsSeparator(FileName[Pos])) {
+ ++Pos;
+ if ((Res = ParseServerAndShare(FileName, Pos)))
+ return Pos + Res;
+ return 0;
+ }
+ return Pos;
+ }
+
+ if ((Res = ParseDrive(FileName, Pos)))
+ return Pos + Res;
+
+ return Pos;
+}
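+// Illustrative results (a sketch):
+//   ParseLocation("C:\\A\\B")             == 3   // Drive "C:\".
+//   ParseLocation("\\\\Server\\Share\\f") == 15  // "\\Server\Share\".
+//   ParseLocation("relative\\path")       == 0   // No location prefix.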
+
+std::string DirName(const std::string &FileName) {
+ size_t LocationLen = ParseLocation(FileName);
+ size_t DirLen = 0, Res;
+ while ((Res = ParseDir(FileName, LocationLen + DirLen)))
+ DirLen += Res;
+ size_t FileLen = ParseFileName(FileName, LocationLen + DirLen);
+
+ if (LocationLen + DirLen + FileLen != FileName.size()) {
+ Printf("DirName() failed for \"%s\", invalid path.\n", FileName.c_str());
+ exit(1);
+ }
+
+ if (DirLen) {
+ --DirLen; // Remove trailing separator.
+ if (!FileLen) { // Path ended in separator.
+ assert(DirLen);
+ // Remove file name from Dir.
+ while (DirLen && !IsSeparator(FileName[LocationLen + DirLen - 1]))
+ --DirLen;
+ if (DirLen) // Remove trailing separator.
+ --DirLen;
+ }
+ }
+
+ if (!LocationLen) { // Relative path.
+ if (!DirLen)
+ return ".";
+ return std::string(".\\").append(FileName, 0, DirLen);
+ }
+
+ return FileName.substr(0, LocationLen + DirLen);
+}
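+// Illustrative results (a sketch):
+//   DirName("C:\\A\\B")   == "C:\\A"
+//   DirName("C:\\A\\B\\") == "C:\\A"
+//   DirName("file.txt")   == "."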
+
+std::string TmpDir() {
+ std::string Tmp;
+ Tmp.resize(MAX_PATH + 1);
+ DWORD Size = GetTempPathA(Tmp.size(), &Tmp[0]);
+ if (Size == 0) {
+ Printf("Couldn't get Tmp path.\n");
+ exit(1);
+ }
+ Tmp.resize(Size);
+ return Tmp;
+}
+
+bool IsInterestingCoverageFile(const std::string &FileName) {
+ if (FileName.find("Program Files") != std::string::npos)
+ return false;
+ if (FileName.find("compiler-rt\\lib\\") != std::string::npos)
+ return false; // sanitizer internal.
+ if (FileName == "<null>")
+ return false;
+ return true;
+}
+
+void RawPrint(const char *Str) {
+ _write(2, Str, strlen(Str));
+}
+
+void MkDir(const std::string &Path) {
+ if (CreateDirectoryA(Path.c_str(), nullptr)) return;
+ Printf("CreateDirectoryA failed for %s (Error code: %lu).\n", Path.c_str(),
+ GetLastError());
+}
+
+void RmDir(const std::string &Path) {
+ if (RemoveDirectoryA(Path.c_str())) return;
+ Printf("RemoveDirectoryA failed for %s (Error code: %lu).\n", Path.c_str(),
+ GetLastError());
+}
+
+const std::string &getDevNull() {
+ static const std::string devNull = "NUL";
+ return devNull;
+}
+
+} // namespace fuzzer
+
+#endif // LIBFUZZER_WINDOWS
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerIOWindows.cpp
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerInterface.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerInterface.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerInterface.h (revision 351984)
@@ -0,0 +1,79 @@
+//===- FuzzerInterface.h - Interface header for the Fuzzer ------*- C++ -* ===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// Define the interface between libFuzzer and the library being tested.
+//===----------------------------------------------------------------------===//
+
+// NOTE: the libFuzzer interface is thin and in the majority of cases
+// you should not include this file into your target. In 95% of cases
+// all you need is to define the following function in your file:
+// extern "C" int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size);
+
+// WARNING: keep the interface in C.
+
+#ifndef LLVM_FUZZER_INTERFACE_H
+#define LLVM_FUZZER_INTERFACE_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif // __cplusplus
+
+// Define FUZZER_INTERFACE_VISIBILITY to set default visibility in a way that
+// doesn't break MSVC.
+#if defined(_WIN32)
+#define FUZZER_INTERFACE_VISIBILITY __declspec(dllexport)
+#else
+#define FUZZER_INTERFACE_VISIBILITY __attribute__((visibility("default")))
+#endif
+
+// Mandatory user-provided target function.
+// Executes the code under test with [Data, Data+Size) as the input.
+// libFuzzer will invoke this function *many* times with different inputs.
+// Must return 0.
+FUZZER_INTERFACE_VISIBILITY int
+LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size);
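+//
+// A minimal example target (for illustration only; the "FUZ" trigger is
+// hypothetical):
+//
+//   extern "C" int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) {
+//     if (Size >= 3 && Data[0] == 'F' && Data[1] == 'U' && Data[2] == 'Z')
+//       __builtin_trap();  // A simulated bug for the fuzzer to find.
+//     return 0;            // Must always return 0.
+//   }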
+
+// Optional user-provided initialization function.
+// If provided, this function will be called by libFuzzer once at startup.
+// It may read and modify argc/argv.
+// Must return 0.
+FUZZER_INTERFACE_VISIBILITY int LLVMFuzzerInitialize(int *argc, char ***argv);
+
+// Optional user-provided custom mutator.
+// Mutates raw data in [Data, Data+Size) inplace.
+// Returns the new size, which is not greater than MaxSize.
+// Given the same Seed produces the same mutation.
+FUZZER_INTERFACE_VISIBILITY size_t
+LLVMFuzzerCustomMutator(uint8_t *Data, size_t Size, size_t MaxSize,
+ unsigned int Seed);
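+//
+// A sketch of a custom mutator (illustration only) that delegates to
+// LLVMFuzzerMutate, declared below; the leading-'M' constraint is a made-up
+// example of keeping inputs in an expected format:
+//
+//   extern "C" size_t LLVMFuzzerCustomMutator(uint8_t *Data, size_t Size,
+//                                             size_t MaxSize,
+//                                             unsigned int Seed) {
+//     size_t NewSize = LLVMFuzzerMutate(Data, Size, MaxSize);
+//     if (NewSize > 0)
+//       Data[0] = 'M';  // Hypothetical format requirement.
+//     return NewSize;
+//   }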
+
+// Optional user-provided custom cross-over function.
+// Combines pieces of Data1 & Data2 together into Out.
+// Returns the new size, which is not greater than MaxOutSize.
+// Should produce the same mutation given the same Seed.
+FUZZER_INTERFACE_VISIBILITY size_t
+LLVMFuzzerCustomCrossOver(const uint8_t *Data1, size_t Size1,
+ const uint8_t *Data2, size_t Size2, uint8_t *Out,
+ size_t MaxOutSize, unsigned int Seed);
+
+// Experimental, may go away in future.
+// libFuzzer-provided function to be used inside LLVMFuzzerCustomMutator.
+// Mutates raw data in [Data, Data+Size) inplace.
+// Returns the new size, which is not greater than MaxSize.
+FUZZER_INTERFACE_VISIBILITY size_t
+LLVMFuzzerMutate(uint8_t *Data, size_t Size, size_t MaxSize);
+
+#undef FUZZER_INTERFACE_VISIBILITY
+
+#ifdef __cplusplus
+} // extern "C"
+#endif // __cplusplus
+
+#endif // LLVM_FUZZER_INTERFACE_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerInterface.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerInternal.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerInternal.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerInternal.h (revision 351984)
@@ -0,0 +1,171 @@
+//===- FuzzerInternal.h - Internal header for the Fuzzer --------*- C++ -* ===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// Define the main class fuzzer::Fuzzer and most functions.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_FUZZER_INTERNAL_H
+#define LLVM_FUZZER_INTERNAL_H
+
+#include "FuzzerDataFlowTrace.h"
+#include "FuzzerDefs.h"
+#include "FuzzerExtFunctions.h"
+#include "FuzzerInterface.h"
+#include "FuzzerOptions.h"
+#include "FuzzerSHA1.h"
+#include "FuzzerValueBitMap.h"
+#include <algorithm>
+#include <atomic>
+#include <chrono>
+#include <climits>
+#include <cstdlib>
+#include <string.h>
+
+namespace fuzzer {
+
+using namespace std::chrono;
+
+class Fuzzer {
+public:
+
+ Fuzzer(UserCallback CB, InputCorpus &Corpus, MutationDispatcher &MD,
+ FuzzingOptions Options);
+ ~Fuzzer();
+ void Loop(Vector<SizedFile> &CorporaFiles);
+ void ReadAndExecuteSeedCorpora(Vector<SizedFile> &CorporaFiles);
+ void MinimizeCrashLoop(const Unit &U);
+ void RereadOutputCorpus(size_t MaxSize);
+
+ size_t secondsSinceProcessStartUp() {
+ return duration_cast<seconds>(system_clock::now() - ProcessStartTime)
+ .count();
+ }
+
+ bool TimedOut() {
+ return Options.MaxTotalTimeSec > 0 &&
+ secondsSinceProcessStartUp() >
+ static_cast<size_t>(Options.MaxTotalTimeSec);
+ }
+
+ size_t execPerSec() {
+ size_t Seconds = secondsSinceProcessStartUp();
+ return Seconds ? TotalNumberOfRuns / Seconds : 0;
+ }
+
+ size_t getTotalNumberOfRuns() { return TotalNumberOfRuns; }
+
+ static void StaticAlarmCallback();
+ static void StaticCrashSignalCallback();
+ static void StaticExitCallback();
+ static void StaticInterruptCallback();
+ static void StaticFileSizeExceedCallback();
+ static void StaticGracefulExitCallback();
+
+ void ExecuteCallback(const uint8_t *Data, size_t Size);
+ bool RunOne(const uint8_t *Data, size_t Size, bool MayDeleteFile = false,
+ InputInfo *II = nullptr, bool *FoundUniqFeatures = nullptr);
+
+ // Merge Corpora[1:] into Corpora[0].
+ void Merge(const Vector<std::string> &Corpora);
+ void CrashResistantMergeInternalStep(const std::string &ControlFilePath);
+ MutationDispatcher &GetMD() { return MD; }
+ void PrintFinalStats();
+ void SetMaxInputLen(size_t MaxInputLen);
+ void SetMaxMutationLen(size_t MaxMutationLen);
+ void RssLimitCallback();
+
+ bool InFuzzingThread() const { return IsMyThread; }
+ size_t GetCurrentUnitInFuzzingThead(const uint8_t **Data) const;
+ void TryDetectingAMemoryLeak(const uint8_t *Data, size_t Size,
+ bool DuringInitialCorpusExecution);
+
+ void HandleMalloc(size_t Size);
+ static void MaybeExitGracefully();
+ std::string WriteToOutputCorpus(const Unit &U);
+
+private:
+ void AlarmCallback();
+ void CrashCallback();
+ void ExitCallback();
+ void CrashOnOverwrittenData();
+ void InterruptCallback();
+ void MutateAndTestOne();
+ void PurgeAllocator();
+ void ReportNewCoverage(InputInfo *II, const Unit &U);
+ void PrintPulseAndReportSlowInput(const uint8_t *Data, size_t Size);
+ void WriteUnitToFileWithPrefix(const Unit &U, const char *Prefix);
+ void PrintStats(const char *Where, const char *End = "\n", size_t Units = 0);
+ void PrintStatusForNewUnit(const Unit &U, const char *Text);
+ void CheckExitOnSrcPosOrItem();
+
+ static void StaticDeathCallback();
+ void DumpCurrentUnit(const char *Prefix);
+ void DeathCallback();
+
+ void AllocateCurrentUnitData();
+ uint8_t *CurrentUnitData = nullptr;
+ std::atomic<size_t> CurrentUnitSize;
+ uint8_t BaseSha1[kSHA1NumBytes]; // Checksum of the base unit.
+
+ bool GracefulExitRequested = false;
+
+ size_t TotalNumberOfRuns = 0;
+ size_t NumberOfNewUnitsAdded = 0;
+
+ size_t LastCorpusUpdateRun = 0;
+
+ bool HasMoreMallocsThanFrees = false;
+ size_t NumberOfLeakDetectionAttempts = 0;
+
+ system_clock::time_point LastAllocatorPurgeAttemptTime = system_clock::now();
+
+ UserCallback CB;
+ InputCorpus &Corpus;
+ MutationDispatcher &MD;
+ FuzzingOptions Options;
+ DataFlowTrace DFT;
+
+ system_clock::time_point ProcessStartTime = system_clock::now();
+ system_clock::time_point UnitStartTime, UnitStopTime;
+ long TimeOfLongestUnitInSeconds = 0;
+ long EpochOfLastReadOfOutputCorpus = 0;
+
+ size_t MaxInputLen = 0;
+ size_t MaxMutationLen = 0;
+ size_t TmpMaxMutationLen = 0;
+
+ Vector<uint32_t> UniqFeatureSetTmp;
+
+ // Need to know our own thread.
+ static thread_local bool IsMyThread;
+};
+
+struct ScopedEnableMsanInterceptorChecks {
+ ScopedEnableMsanInterceptorChecks() {
+ if (EF->__msan_scoped_enable_interceptor_checks)
+ EF->__msan_scoped_enable_interceptor_checks();
+ }
+ ~ScopedEnableMsanInterceptorChecks() {
+ if (EF->__msan_scoped_disable_interceptor_checks)
+ EF->__msan_scoped_disable_interceptor_checks();
+ }
+};
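+// Usage sketch: instantiate as an RAII guard so the checks are enabled for
+// exactly the guarded scope, e.g.:
+//   { ScopedEnableMsanInterceptorChecks S; /* run the user callback */ }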
+
+struct ScopedDisableMsanInterceptorChecks {
+ ScopedDisableMsanInterceptorChecks() {
+ if (EF->__msan_scoped_disable_interceptor_checks)
+ EF->__msan_scoped_disable_interceptor_checks();
+ }
+ ~ScopedDisableMsanInterceptorChecks() {
+ if (EF->__msan_scoped_enable_interceptor_checks)
+ EF->__msan_scoped_enable_interceptor_checks();
+ }
+};
+
+} // namespace fuzzer
+
+#endif // LLVM_FUZZER_INTERNAL_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerInternal.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerLoop.cpp
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerLoop.cpp (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerLoop.cpp (revision 351984)
@@ -0,0 +1,867 @@
+//===- FuzzerLoop.cpp - Fuzzer's main loop --------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// Fuzzer's main loop.
+//===----------------------------------------------------------------------===//
+
+#include "FuzzerCorpus.h"
+#include "FuzzerIO.h"
+#include "FuzzerInternal.h"
+#include "FuzzerMutate.h"
+#include "FuzzerRandom.h"
+#include "FuzzerTracePC.h"
+#include <algorithm>
+#include <cstring>
+#include <memory>
+#include <mutex>
+#include <set>
+
+#if defined(__has_include)
+#if __has_include(<sanitizer/lsan_interface.h>)
+#include <sanitizer/lsan_interface.h>
+#endif
+#endif
+
+#define NO_SANITIZE_MEMORY
+#if defined(__has_feature)
+#if __has_feature(memory_sanitizer)
+#undef NO_SANITIZE_MEMORY
+#define NO_SANITIZE_MEMORY __attribute__((no_sanitize_memory))
+#endif
+#endif
+
+namespace fuzzer {
+static const size_t kMaxUnitSizeToPrint = 256;
+
+thread_local bool Fuzzer::IsMyThread;
+
+bool RunningUserCallback = false;
+
+// Only one Fuzzer per process.
+static Fuzzer *F;
+
+// Leak detection is expensive, so we first check if there were more mallocs
+// than frees (using the sanitizer malloc hooks) and only then try to call lsan.
+struct MallocFreeTracer {
+ void Start(int TraceLevel) {
+ this->TraceLevel = TraceLevel;
+ if (TraceLevel)
+ Printf("MallocFreeTracer: START\n");
+ Mallocs = 0;
+ Frees = 0;
+ }
+ // Returns true if there were more mallocs than frees.
+ bool Stop() {
+ if (TraceLevel)
+ Printf("MallocFreeTracer: STOP %zd %zd (%s)\n", Mallocs.load(),
+ Frees.load(), Mallocs == Frees ? "same" : "DIFFERENT");
+ bool Result = Mallocs > Frees;
+ Mallocs = 0;
+ Frees = 0;
+ TraceLevel = 0;
+ return Result;
+ }
+ std::atomic<size_t> Mallocs;
+ std::atomic<size_t> Frees;
+ int TraceLevel = 0;
+
+ std::recursive_mutex TraceMutex;
+ bool TraceDisabled = false;
+};
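+// Usage sketch (this mirrors ExecuteCallback below): bracket the user
+// callback with Start()/Stop() and treat a true result as a leak hint:
+//   AllocTracer.Start(Options.TraceMalloc);
+//   CB(DataCopy, Size);          // Run the target once.
+//   bool MaybeLeaky = AllocTracer.Stop();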
+
+static MallocFreeTracer AllocTracer;
+
+// Locks printing and avoids nested hooks triggered from mallocs/frees in
+// sanitizer.
+class TraceLock {
+public:
+ TraceLock() : Lock(AllocTracer.TraceMutex) {
+ AllocTracer.TraceDisabled = !AllocTracer.TraceDisabled;
+ }
+ ~TraceLock() { AllocTracer.TraceDisabled = !AllocTracer.TraceDisabled; }
+
+ bool IsDisabled() const {
+    // The constructor already toggled the flag, so its value is inverted here.
+ return !AllocTracer.TraceDisabled;
+ }
+
+private:
+ std::lock_guard<std::recursive_mutex> Lock;
+};
+
+ATTRIBUTE_NO_SANITIZE_MEMORY
+void MallocHook(const volatile void *ptr, size_t size) {
+ size_t N = AllocTracer.Mallocs++;
+ F->HandleMalloc(size);
+ if (int TraceLevel = AllocTracer.TraceLevel) {
+ TraceLock Lock;
+ if (Lock.IsDisabled())
+ return;
+ Printf("MALLOC[%zd] %p %zd\n", N, ptr, size);
+ if (TraceLevel >= 2 && EF)
+ PrintStackTrace();
+ }
+}
+
+ATTRIBUTE_NO_SANITIZE_MEMORY
+void FreeHook(const volatile void *ptr) {
+ size_t N = AllocTracer.Frees++;
+ if (int TraceLevel = AllocTracer.TraceLevel) {
+ TraceLock Lock;
+ if (Lock.IsDisabled())
+ return;
+ Printf("FREE[%zd] %p\n", N, ptr);
+ if (TraceLevel >= 2 && EF)
+ PrintStackTrace();
+ }
+}
+
+// Crash on a single malloc that exceeds the rss limit.
+void Fuzzer::HandleMalloc(size_t Size) {
+ if (!Options.MallocLimitMb || (Size >> 20) < (size_t)Options.MallocLimitMb)
+ return;
+ Printf("==%d== ERROR: libFuzzer: out-of-memory (malloc(%zd))\n", GetPid(),
+ Size);
+ Printf(" To change the out-of-memory limit use -rss_limit_mb=<N>\n\n");
+ PrintStackTrace();
+ DumpCurrentUnit("oom-");
+ Printf("SUMMARY: libFuzzer: out-of-memory\n");
+ PrintFinalStats();
+ _Exit(Options.OOMExitCode); // Stop right now.
+}
+
+Fuzzer::Fuzzer(UserCallback CB, InputCorpus &Corpus, MutationDispatcher &MD,
+ FuzzingOptions Options)
+ : CB(CB), Corpus(Corpus), MD(MD), Options(Options) {
+ if (EF->__sanitizer_set_death_callback)
+ EF->__sanitizer_set_death_callback(StaticDeathCallback);
+ assert(!F);
+ F = this;
+ TPC.ResetMaps();
+ IsMyThread = true;
+ if (Options.DetectLeaks && EF->__sanitizer_install_malloc_and_free_hooks)
+ EF->__sanitizer_install_malloc_and_free_hooks(MallocHook, FreeHook);
+ TPC.SetUseCounters(Options.UseCounters);
+ TPC.SetUseValueProfileMask(Options.UseValueProfile);
+
+ if (Options.Verbosity)
+ TPC.PrintModuleInfo();
+ if (!Options.OutputCorpus.empty() && Options.ReloadIntervalSec)
+ EpochOfLastReadOfOutputCorpus = GetEpoch(Options.OutputCorpus);
+ MaxInputLen = MaxMutationLen = Options.MaxLen;
+ TmpMaxMutationLen = 0; // Will be set once we load the corpus.
+ AllocateCurrentUnitData();
+ CurrentUnitSize = 0;
+ memset(BaseSha1, 0, sizeof(BaseSha1));
+}
+
+Fuzzer::~Fuzzer() {}
+
+void Fuzzer::AllocateCurrentUnitData() {
+ if (CurrentUnitData || MaxInputLen == 0)
+ return;
+ CurrentUnitData = new uint8_t[MaxInputLen];
+}
+
+void Fuzzer::StaticDeathCallback() {
+ assert(F);
+ F->DeathCallback();
+}
+
+void Fuzzer::DumpCurrentUnit(const char *Prefix) {
+ if (!CurrentUnitData)
+ return; // Happens when running individual inputs.
+ ScopedDisableMsanInterceptorChecks S;
+ MD.PrintMutationSequence();
+ Printf("; base unit: %s\n", Sha1ToString(BaseSha1).c_str());
+ size_t UnitSize = CurrentUnitSize;
+ if (UnitSize <= kMaxUnitSizeToPrint) {
+ PrintHexArray(CurrentUnitData, UnitSize, "\n");
+ PrintASCII(CurrentUnitData, UnitSize, "\n");
+ }
+ WriteUnitToFileWithPrefix({CurrentUnitData, CurrentUnitData + UnitSize},
+ Prefix);
+}
+
+NO_SANITIZE_MEMORY
+void Fuzzer::DeathCallback() {
+ DumpCurrentUnit("crash-");
+ PrintFinalStats();
+}
+
+void Fuzzer::StaticAlarmCallback() {
+ assert(F);
+ F->AlarmCallback();
+}
+
+void Fuzzer::StaticCrashSignalCallback() {
+ assert(F);
+ F->CrashCallback();
+}
+
+void Fuzzer::StaticExitCallback() {
+ assert(F);
+ F->ExitCallback();
+}
+
+void Fuzzer::StaticInterruptCallback() {
+ assert(F);
+ F->InterruptCallback();
+}
+
+void Fuzzer::StaticGracefulExitCallback() {
+ assert(F);
+ F->GracefulExitRequested = true;
+ Printf("INFO: signal received, trying to exit gracefully\n");
+}
+
+void Fuzzer::StaticFileSizeExceedCallback() {
+ Printf("==%lu== ERROR: libFuzzer: file size exceeded\n", GetPid());
+ exit(1);
+}
+
+void Fuzzer::CrashCallback() {
+ if (EF->__sanitizer_acquire_crash_state &&
+ !EF->__sanitizer_acquire_crash_state())
+ return;
+ Printf("==%lu== ERROR: libFuzzer: deadly signal\n", GetPid());
+ PrintStackTrace();
+ Printf("NOTE: libFuzzer has rudimentary signal handlers.\n"
+ " Combine libFuzzer with AddressSanitizer or similar for better "
+ "crash reports.\n");
+ Printf("SUMMARY: libFuzzer: deadly signal\n");
+ DumpCurrentUnit("crash-");
+ PrintFinalStats();
+ _Exit(Options.ErrorExitCode); // Stop right now.
+}
+
+void Fuzzer::ExitCallback() {
+ if (!RunningUserCallback)
+    return; // This exit did not come from the user callback.
+ if (EF->__sanitizer_acquire_crash_state &&
+ !EF->__sanitizer_acquire_crash_state())
+ return;
+ Printf("==%lu== ERROR: libFuzzer: fuzz target exited\n", GetPid());
+ PrintStackTrace();
+ Printf("SUMMARY: libFuzzer: fuzz target exited\n");
+ DumpCurrentUnit("crash-");
+ PrintFinalStats();
+ _Exit(Options.ErrorExitCode);
+}
+
+void Fuzzer::MaybeExitGracefully() {
+ if (!F->GracefulExitRequested) return;
+ Printf("==%lu== INFO: libFuzzer: exiting as requested\n", GetPid());
+ RmDirRecursive(TempPath(".dir"));
+ F->PrintFinalStats();
+ _Exit(0);
+}
+
+void Fuzzer::InterruptCallback() {
+ Printf("==%lu== libFuzzer: run interrupted; exiting\n", GetPid());
+ PrintFinalStats();
+ ScopedDisableMsanInterceptorChecks S; // RmDirRecursive may call opendir().
+ RmDirRecursive(TempPath(".dir"));
+ // Stop right now, don't perform any at-exit actions.
+ _Exit(Options.InterruptExitCode);
+}
+
+NO_SANITIZE_MEMORY
+void Fuzzer::AlarmCallback() {
+ assert(Options.UnitTimeoutSec > 0);
+  // On Windows, the alarm callback is executed by a different thread.
+  // NetBSD's current behavior requires the same handling.
+#if !LIBFUZZER_WINDOWS && !LIBFUZZER_NETBSD
+ if (!InFuzzingThread())
+ return;
+#endif
+ if (!RunningUserCallback)
+ return; // We have not started running units yet.
+ size_t Seconds =
+ duration_cast<seconds>(system_clock::now() - UnitStartTime).count();
+ if (Seconds == 0)
+ return;
+ if (Options.Verbosity >= 2)
+ Printf("AlarmCallback %zd\n", Seconds);
+ if (Seconds >= (size_t)Options.UnitTimeoutSec) {
+ if (EF->__sanitizer_acquire_crash_state &&
+ !EF->__sanitizer_acquire_crash_state())
+ return;
+ Printf("ALARM: working on the last Unit for %zd seconds\n", Seconds);
+ Printf(" and the timeout value is %d (use -timeout=N to change)\n",
+ Options.UnitTimeoutSec);
+ DumpCurrentUnit("timeout-");
+ Printf("==%lu== ERROR: libFuzzer: timeout after %d seconds\n", GetPid(),
+ Seconds);
+ PrintStackTrace();
+ Printf("SUMMARY: libFuzzer: timeout\n");
+ PrintFinalStats();
+ _Exit(Options.TimeoutExitCode); // Stop right now.
+ }
+}
+
+void Fuzzer::RssLimitCallback() {
+ if (EF->__sanitizer_acquire_crash_state &&
+ !EF->__sanitizer_acquire_crash_state())
+ return;
+ Printf(
+ "==%lu== ERROR: libFuzzer: out-of-memory (used: %zdMb; limit: %zdMb)\n",
+ GetPid(), GetPeakRSSMb(), Options.RssLimitMb);
+ Printf(" To change the out-of-memory limit use -rss_limit_mb=<N>\n\n");
+ PrintMemoryProfile();
+ DumpCurrentUnit("oom-");
+ Printf("SUMMARY: libFuzzer: out-of-memory\n");
+ PrintFinalStats();
+ _Exit(Options.OOMExitCode); // Stop right now.
+}
+
+void Fuzzer::PrintStats(const char *Where, const char *End, size_t Units) {
+ size_t ExecPerSec = execPerSec();
+ if (!Options.Verbosity)
+ return;
+ Printf("#%zd\t%s", TotalNumberOfRuns, Where);
+ if (size_t N = TPC.GetTotalPCCoverage())
+ Printf(" cov: %zd", N);
+ if (size_t N = Corpus.NumFeatures())
+ Printf(" ft: %zd", N);
+ if (!Corpus.empty()) {
+ Printf(" corp: %zd", Corpus.NumActiveUnits());
+ if (size_t N = Corpus.SizeInBytes()) {
+ if (N < (1 << 14))
+ Printf("/%zdb", N);
+ else if (N < (1 << 24))
+ Printf("/%zdKb", N >> 10);
+ else
+ Printf("/%zdMb", N >> 20);
+ }
+ if (size_t FF = Corpus.NumInputsThatTouchFocusFunction())
+ Printf(" focus: %zd", FF);
+ }
+ if (TmpMaxMutationLen)
+ Printf(" lim: %zd", TmpMaxMutationLen);
+ if (Units)
+ Printf(" units: %zd", Units);
+
+ Printf(" exec/s: %zd", ExecPerSec);
+ Printf(" rss: %zdMb", GetPeakRSSMb());
+ Printf("%s", End);
+}
+
+void Fuzzer::PrintFinalStats() {
+ if (Options.PrintCoverage)
+ TPC.PrintCoverage();
+ if (Options.PrintCorpusStats)
+ Corpus.PrintStats();
+ if (!Options.PrintFinalStats)
+ return;
+ size_t ExecPerSec = execPerSec();
+ Printf("stat::number_of_executed_units: %zd\n", TotalNumberOfRuns);
+ Printf("stat::average_exec_per_sec: %zd\n", ExecPerSec);
+ Printf("stat::new_units_added: %zd\n", NumberOfNewUnitsAdded);
+ Printf("stat::slowest_unit_time_sec: %zd\n", TimeOfLongestUnitInSeconds);
+ Printf("stat::peak_rss_mb: %zd\n", GetPeakRSSMb());
+}
+
+void Fuzzer::SetMaxInputLen(size_t MaxInputLen) {
+ assert(this->MaxInputLen == 0); // Can only reset MaxInputLen from 0 to non-0.
+ assert(MaxInputLen);
+ this->MaxInputLen = MaxInputLen;
+ this->MaxMutationLen = MaxInputLen;
+ AllocateCurrentUnitData();
+ Printf("INFO: -max_len is not provided; "
+ "libFuzzer will not generate inputs larger than %zd bytes\n",
+ MaxInputLen);
+}
+
+void Fuzzer::SetMaxMutationLen(size_t MaxMutationLen) {
+ assert(MaxMutationLen && MaxMutationLen <= MaxInputLen);
+ this->MaxMutationLen = MaxMutationLen;
+}
+
+void Fuzzer::CheckExitOnSrcPosOrItem() {
+ if (!Options.ExitOnSrcPos.empty()) {
+ static auto *PCsSet = new Set<uintptr_t>;
+ auto HandlePC = [&](const TracePC::PCTableEntry *TE) {
+ if (!PCsSet->insert(TE->PC).second)
+ return;
+ std::string Descr = DescribePC("%F %L", TE->PC + 1);
+ if (Descr.find(Options.ExitOnSrcPos) != std::string::npos) {
+ Printf("INFO: found line matching '%s', exiting.\n",
+ Options.ExitOnSrcPos.c_str());
+ _Exit(0);
+ }
+ };
+ TPC.ForEachObservedPC(HandlePC);
+ }
+ if (!Options.ExitOnItem.empty()) {
+ if (Corpus.HasUnit(Options.ExitOnItem)) {
+ Printf("INFO: found item with checksum '%s', exiting.\n",
+ Options.ExitOnItem.c_str());
+ _Exit(0);
+ }
+ }
+}
+
+void Fuzzer::RereadOutputCorpus(size_t MaxSize) {
+ if (Options.OutputCorpus.empty() || !Options.ReloadIntervalSec)
+ return;
+ Vector<Unit> AdditionalCorpus;
+ ReadDirToVectorOfUnits(Options.OutputCorpus.c_str(), &AdditionalCorpus,
+ &EpochOfLastReadOfOutputCorpus, MaxSize,
+ /*ExitOnError*/ false);
+ if (Options.Verbosity >= 2)
+ Printf("Reload: read %zd new units.\n", AdditionalCorpus.size());
+ bool Reloaded = false;
+ for (auto &U : AdditionalCorpus) {
+ if (U.size() > MaxSize)
+ U.resize(MaxSize);
+ if (!Corpus.HasUnit(U)) {
+ if (RunOne(U.data(), U.size())) {
+ CheckExitOnSrcPosOrItem();
+ Reloaded = true;
+ }
+ }
+ }
+ if (Reloaded)
+ PrintStats("RELOAD");
+}
+
+void Fuzzer::PrintPulseAndReportSlowInput(const uint8_t *Data, size_t Size) {
+ auto TimeOfUnit =
+ duration_cast<seconds>(UnitStopTime - UnitStartTime).count();
+ if (!(TotalNumberOfRuns & (TotalNumberOfRuns - 1)) &&
+ secondsSinceProcessStartUp() >= 2)
+ PrintStats("pulse ");
+ if (TimeOfUnit > TimeOfLongestUnitInSeconds * 1.1 &&
+ TimeOfUnit >= Options.ReportSlowUnits) {
+ TimeOfLongestUnitInSeconds = TimeOfUnit;
+ Printf("Slowest unit: %zd s:\n", TimeOfLongestUnitInSeconds);
+ WriteUnitToFileWithPrefix({Data, Data + Size}, "slow-unit-");
+ }
+}
+
+static void WriteFeatureSetToFile(const std::string &FeaturesDir,
+ const std::string &FileName,
+ const Vector<uint32_t> &FeatureSet) {
+ if (FeaturesDir.empty() || FeatureSet.empty()) return;
+ WriteToFile(reinterpret_cast<const uint8_t *>(FeatureSet.data()),
+ FeatureSet.size() * sizeof(FeatureSet[0]),
+ DirPlusFile(FeaturesDir, FileName));
+}
+
+static void RenameFeatureSetFile(const std::string &FeaturesDir,
+ const std::string &OldFile,
+ const std::string &NewFile) {
+ if (FeaturesDir.empty()) return;
+ RenameFile(DirPlusFile(FeaturesDir, OldFile),
+ DirPlusFile(FeaturesDir, NewFile));
+}
+
+bool Fuzzer::RunOne(const uint8_t *Data, size_t Size, bool MayDeleteFile,
+ InputInfo *II, bool *FoundUniqFeatures) {
+ if (!Size)
+ return false;
+
+ ExecuteCallback(Data, Size);
+
+ UniqFeatureSetTmp.clear();
+ size_t FoundUniqFeaturesOfII = 0;
+ size_t NumUpdatesBefore = Corpus.NumFeatureUpdates();
+ TPC.CollectFeatures([&](size_t Feature) {
+ if (Corpus.AddFeature(Feature, Size, Options.Shrink))
+ UniqFeatureSetTmp.push_back(Feature);
+ if (Options.ReduceInputs && II)
+ if (std::binary_search(II->UniqFeatureSet.begin(),
+ II->UniqFeatureSet.end(), Feature))
+ FoundUniqFeaturesOfII++;
+ });
+ if (FoundUniqFeatures)
+ *FoundUniqFeatures = FoundUniqFeaturesOfII;
+ PrintPulseAndReportSlowInput(Data, Size);
+ size_t NumNewFeatures = Corpus.NumFeatureUpdates() - NumUpdatesBefore;
+ if (NumNewFeatures) {
+ TPC.UpdateObservedPCs();
+ auto NewII = Corpus.AddToCorpus({Data, Data + Size}, NumNewFeatures,
+ MayDeleteFile, TPC.ObservedFocusFunction(),
+ UniqFeatureSetTmp, DFT, II);
+ WriteFeatureSetToFile(Options.FeaturesDir, Sha1ToString(NewII->Sha1),
+ NewII->UniqFeatureSet);
+ return true;
+ }
+ if (II && FoundUniqFeaturesOfII &&
+ II->DataFlowTraceForFocusFunction.empty() &&
+ FoundUniqFeaturesOfII == II->UniqFeatureSet.size() &&
+ II->U.size() > Size) {
+ auto OldFeaturesFile = Sha1ToString(II->Sha1);
+ Corpus.Replace(II, {Data, Data + Size});
+ RenameFeatureSetFile(Options.FeaturesDir, OldFeaturesFile,
+ Sha1ToString(II->Sha1));
+ return true;
+ }
+ return false;
+}
+
+size_t Fuzzer::GetCurrentUnitInFuzzingThead(const uint8_t **Data) const {
+ assert(InFuzzingThread());
+ *Data = CurrentUnitData;
+ return CurrentUnitSize;
+}
+
+void Fuzzer::CrashOnOverwrittenData() {
+ Printf("==%d== ERROR: libFuzzer: fuzz target overwrites it's const input\n",
+ GetPid());
+ DumpCurrentUnit("crash-");
+ Printf("SUMMARY: libFuzzer: out-of-memory\n");
+ _Exit(Options.ErrorExitCode); // Stop right now.
+}
+
+// Compare two arrays, but not all bytes if the arrays are large.
+static bool LooseMemeq(const uint8_t *A, const uint8_t *B, size_t Size) {
+ const size_t Limit = 64;
+  if (Size <= Limit)
+ return !memcmp(A, B, Size);
+ // Compare first and last Limit/2 bytes.
+ return !memcmp(A, B, Limit / 2) &&
+ !memcmp(A + Size - Limit / 2, B + Size - Limit / 2, Limit / 2);
+}
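+// For example, with Size == 200 only bytes [0, 32) and [168, 200) are
+// compared; a stray write to the middle of a large input may go undetected,
+// a deliberate speed/accuracy trade-off.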
+
+void Fuzzer::ExecuteCallback(const uint8_t *Data, size_t Size) {
+ TPC.RecordInitialStack();
+ TotalNumberOfRuns++;
+ assert(InFuzzingThread());
+ // We copy the contents of Unit into a separate heap buffer
+ // so that we reliably find buffer overflows in it.
+ uint8_t *DataCopy = new uint8_t[Size];
+ memcpy(DataCopy, Data, Size);
+ if (EF->__msan_unpoison)
+ EF->__msan_unpoison(DataCopy, Size);
+ if (EF->__msan_unpoison_param)
+ EF->__msan_unpoison_param(2);
+ if (CurrentUnitData && CurrentUnitData != Data)
+ memcpy(CurrentUnitData, Data, Size);
+ CurrentUnitSize = Size;
+ {
+ ScopedEnableMsanInterceptorChecks S;
+ AllocTracer.Start(Options.TraceMalloc);
+ UnitStartTime = system_clock::now();
+ TPC.ResetMaps();
+ RunningUserCallback = true;
+ int Res = CB(DataCopy, Size);
+ RunningUserCallback = false;
+ UnitStopTime = system_clock::now();
+ (void)Res;
+ assert(Res == 0);
+ HasMoreMallocsThanFrees = AllocTracer.Stop();
+ }
+ if (!LooseMemeq(DataCopy, Data, Size))
+ CrashOnOverwrittenData();
+ CurrentUnitSize = 0;
+ delete[] DataCopy;
+}
+
+std::string Fuzzer::WriteToOutputCorpus(const Unit &U) {
+ if (Options.OnlyASCII)
+ assert(IsASCII(U));
+ if (Options.OutputCorpus.empty())
+ return "";
+ std::string Path = DirPlusFile(Options.OutputCorpus, Hash(U));
+ WriteToFile(U, Path);
+ if (Options.Verbosity >= 2)
+ Printf("Written %zd bytes to %s\n", U.size(), Path.c_str());
+ return Path;
+}
+
+void Fuzzer::WriteUnitToFileWithPrefix(const Unit &U, const char *Prefix) {
+ if (!Options.SaveArtifacts)
+ return;
+ std::string Path = Options.ArtifactPrefix + Prefix + Hash(U);
+ if (!Options.ExactArtifactPath.empty())
+ Path = Options.ExactArtifactPath; // Overrides ArtifactPrefix.
+ WriteToFile(U, Path);
+ Printf("artifact_prefix='%s'; Test unit written to %s\n",
+ Options.ArtifactPrefix.c_str(), Path.c_str());
+ if (U.size() <= kMaxUnitSizeToPrint)
+ Printf("Base64: %s\n", Base64(U).c_str());
+}
+
+void Fuzzer::PrintStatusForNewUnit(const Unit &U, const char *Text) {
+ if (!Options.PrintNEW)
+ return;
+ PrintStats(Text, "");
+ if (Options.Verbosity) {
+ Printf(" L: %zd/%zd ", U.size(), Corpus.MaxInputSize());
+ MD.PrintMutationSequence();
+ Printf("\n");
+ }
+}
+
+void Fuzzer::ReportNewCoverage(InputInfo *II, const Unit &U) {
+ II->NumSuccessfullMutations++;
+ MD.RecordSuccessfulMutationSequence();
+ PrintStatusForNewUnit(U, II->Reduced ? "REDUCE" : "NEW ");
+ WriteToOutputCorpus(U);
+ NumberOfNewUnitsAdded++;
+ CheckExitOnSrcPosOrItem(); // Check only after the unit is saved to corpus.
+ LastCorpusUpdateRun = TotalNumberOfRuns;
+}
+
+// Tries detecting a memory leak on the particular input that we have just
+// executed before calling this function.
+void Fuzzer::TryDetectingAMemoryLeak(const uint8_t *Data, size_t Size,
+ bool DuringInitialCorpusExecution) {
+ if (!HasMoreMallocsThanFrees)
+ return; // mallocs==frees, a leak is unlikely.
+ if (!Options.DetectLeaks)
+ return;
+ if (!DuringInitialCorpusExecution &&
+ TotalNumberOfRuns >= Options.MaxNumberOfRuns)
+ return;
+  if (!EF->__lsan_enable || !EF->__lsan_disable ||
+      !EF->__lsan_do_recoverable_leak_check)
+ return; // No lsan.
+ // Run the target once again, but with lsan disabled so that if there is
+ // a real leak we do not report it twice.
+ EF->__lsan_disable();
+ ExecuteCallback(Data, Size);
+ EF->__lsan_enable();
+ if (!HasMoreMallocsThanFrees)
+ return; // a leak is unlikely.
+ if (NumberOfLeakDetectionAttempts++ > 1000) {
+ Options.DetectLeaks = false;
+ Printf("INFO: libFuzzer disabled leak detection after every mutation.\n"
+ " Most likely the target function accumulates allocated\n"
+ " memory in a global state w/o actually leaking it.\n"
+ " You may try running this binary with -trace_malloc=[12]"
+ " to get a trace of mallocs and frees.\n"
+ " If LeakSanitizer is enabled in this process it will still\n"
+ " run on the process shutdown.\n");
+ return;
+ }
+ // Now perform the actual lsan pass. This is expensive and we must ensure
+ // we don't call it too often.
+ if (EF->__lsan_do_recoverable_leak_check()) { // Leak is found, report it.
+ if (DuringInitialCorpusExecution)
+ Printf("\nINFO: a leak has been found in the initial corpus.\n\n");
+ Printf("INFO: to ignore leaks on libFuzzer side use -detect_leaks=0.\n\n");
+ CurrentUnitSize = Size;
+ DumpCurrentUnit("leak-");
+ PrintFinalStats();
+ _Exit(Options.ErrorExitCode); // not exit() to disable lsan further on.
+ }
+}
+
+void Fuzzer::MutateAndTestOne() {
+ MD.StartMutationSequence();
+
+ auto &II = Corpus.ChooseUnitToMutate(MD.GetRand());
+ if (Options.DoCrossOver)
+ MD.SetCrossOverWith(&Corpus.ChooseUnitToMutate(MD.GetRand()).U);
+ const auto &U = II.U;
+ memcpy(BaseSha1, II.Sha1, sizeof(BaseSha1));
+ assert(CurrentUnitData);
+ size_t Size = U.size();
+ assert(Size <= MaxInputLen && "Oversized Unit");
+ memcpy(CurrentUnitData, U.data(), Size);
+
+ assert(MaxMutationLen > 0);
+
+ size_t CurrentMaxMutationLen =
+ Min(MaxMutationLen, Max(U.size(), TmpMaxMutationLen));
+ assert(CurrentMaxMutationLen > 0);
+
+ for (int i = 0; i < Options.MutateDepth; i++) {
+ if (TotalNumberOfRuns >= Options.MaxNumberOfRuns)
+ break;
+ MaybeExitGracefully();
+ size_t NewSize = 0;
+ if (II.HasFocusFunction && !II.DataFlowTraceForFocusFunction.empty() &&
+ Size <= CurrentMaxMutationLen)
+ NewSize = MD.MutateWithMask(CurrentUnitData, Size, Size,
+ II.DataFlowTraceForFocusFunction);
+
+ // If MutateWithMask either failed or wasn't called, call default Mutate.
+ if (!NewSize)
+ NewSize = MD.Mutate(CurrentUnitData, Size, CurrentMaxMutationLen);
+ assert(NewSize > 0 && "Mutator returned empty unit");
+    assert(NewSize <= CurrentMaxMutationLen && "Mutator returned oversized unit");
+ Size = NewSize;
+ II.NumExecutedMutations++;
+
+ bool FoundUniqFeatures = false;
+ bool NewCov = RunOne(CurrentUnitData, Size, /*MayDeleteFile=*/true, &II,
+ &FoundUniqFeatures);
+ TryDetectingAMemoryLeak(CurrentUnitData, Size,
+ /*DuringInitialCorpusExecution*/ false);
+ if (NewCov) {
+ ReportNewCoverage(&II, {CurrentUnitData, CurrentUnitData + Size});
+ break; // We will mutate this input more in the next rounds.
+ }
+ if (Options.ReduceDepth && !FoundUniqFeatures)
+ break;
+ }
+}
+
+void Fuzzer::PurgeAllocator() {
+ if (Options.PurgeAllocatorIntervalSec < 0 || !EF->__sanitizer_purge_allocator)
+ return;
+ if (duration_cast<seconds>(system_clock::now() -
+ LastAllocatorPurgeAttemptTime)
+ .count() < Options.PurgeAllocatorIntervalSec)
+ return;
+
+ if (Options.RssLimitMb <= 0 ||
+ GetPeakRSSMb() > static_cast<size_t>(Options.RssLimitMb) / 2)
+ EF->__sanitizer_purge_allocator();
+
+ LastAllocatorPurgeAttemptTime = system_clock::now();
+}
+
+void Fuzzer::ReadAndExecuteSeedCorpora(Vector<SizedFile> &CorporaFiles) {
+ const size_t kMaxSaneLen = 1 << 20;
+ const size_t kMinDefaultLen = 4096;
+ size_t MaxSize = 0;
+ size_t MinSize = -1;
+ size_t TotalSize = 0;
+ for (auto &File : CorporaFiles) {
+ MaxSize = Max(File.Size, MaxSize);
+ MinSize = Min(File.Size, MinSize);
+ TotalSize += File.Size;
+ }
+ if (Options.MaxLen == 0)
+ SetMaxInputLen(std::min(std::max(kMinDefaultLen, MaxSize), kMaxSaneLen));
+ assert(MaxInputLen > 0);
+
+ // Test the callback with empty input and never try it again.
+ uint8_t dummy = 0;
+ ExecuteCallback(&dummy, 0);
+
+ // Protect lazy counters here, after the once-init code has been executed.
+ if (Options.LazyCounters)
+ TPC.ProtectLazyCounters();
+
+ if (CorporaFiles.empty()) {
+ Printf("INFO: A corpus is not provided, starting from an empty corpus\n");
+ Unit U({'\n'}); // Valid ASCII input.
+ RunOne(U.data(), U.size());
+ } else {
+ Printf("INFO: seed corpus: files: %zd min: %zdb max: %zdb total: %zdb"
+ " rss: %zdMb\n",
+ CorporaFiles.size(), MinSize, MaxSize, TotalSize, GetPeakRSSMb());
+ if (Options.ShuffleAtStartUp)
+ std::shuffle(CorporaFiles.begin(), CorporaFiles.end(), MD.GetRand());
+
+ if (Options.PreferSmall) {
+ std::stable_sort(CorporaFiles.begin(), CorporaFiles.end());
+ assert(CorporaFiles.front().Size <= CorporaFiles.back().Size);
+ }
+
+ // Load and execute inputs one by one.
+ for (auto &SF : CorporaFiles) {
+ auto U = FileToVector(SF.File, MaxInputLen, /*ExitOnError=*/false);
+ assert(U.size() <= MaxInputLen);
+ RunOne(U.data(), U.size());
+ CheckExitOnSrcPosOrItem();
+ TryDetectingAMemoryLeak(U.data(), U.size(),
+ /*DuringInitialCorpusExecution*/ true);
+ }
+ }
+
+ PrintStats("INITED");
+ if (!Options.FocusFunction.empty())
+ Printf("INFO: %zd/%zd inputs touch the focus function\n",
+ Corpus.NumInputsThatTouchFocusFunction(), Corpus.size());
+ if (!Options.DataFlowTrace.empty())
+ Printf("INFO: %zd/%zd inputs have the Data Flow Trace\n",
+ Corpus.NumInputsWithDataFlowTrace(), Corpus.size());
+
+ if (Corpus.empty() && Options.MaxNumberOfRuns) {
+ Printf("ERROR: no interesting inputs were found. "
+ "Is the code instrumented for coverage? Exiting.\n");
+ exit(1);
+ }
+}
+
+void Fuzzer::Loop(Vector<SizedFile> &CorporaFiles) {
+ auto FocusFunctionOrAuto = Options.FocusFunction;
+ DFT.Init(Options.DataFlowTrace, &FocusFunctionOrAuto, CorporaFiles,
+ MD.GetRand());
+ TPC.SetFocusFunction(FocusFunctionOrAuto);
+ ReadAndExecuteSeedCorpora(CorporaFiles);
+ DFT.Clear(); // No need for DFT any more.
+ TPC.SetPrintNewPCs(Options.PrintNewCovPcs);
+ TPC.SetPrintNewFuncs(Options.PrintNewCovFuncs);
+ system_clock::time_point LastCorpusReload = system_clock::now();
+
+ TmpMaxMutationLen =
+ Min(MaxMutationLen, Max(size_t(4), Corpus.MaxInputSize()));
+
+ while (true) {
+ auto Now = system_clock::now();
+ if (!Options.StopFile.empty() &&
+ !FileToVector(Options.StopFile, 1, false).empty())
+ break;
+ if (duration_cast<seconds>(Now - LastCorpusReload).count() >=
+ Options.ReloadIntervalSec) {
+ RereadOutputCorpus(MaxInputLen);
+ LastCorpusReload = system_clock::now();
+ }
+ if (TotalNumberOfRuns >= Options.MaxNumberOfRuns)
+ break;
+ if (TimedOut())
+ break;
+
+ // Update TmpMaxMutationLen
+ if (Options.LenControl) {
+ if (TmpMaxMutationLen < MaxMutationLen &&
+ TotalNumberOfRuns - LastCorpusUpdateRun >
+ Options.LenControl * Log(TmpMaxMutationLen)) {
+ TmpMaxMutationLen =
+ Min(MaxMutationLen, TmpMaxMutationLen + Log(TmpMaxMutationLen));
+ LastCorpusUpdateRun = TotalNumberOfRuns;
+ }
+ } else {
+ TmpMaxMutationLen = MaxMutationLen;
+ }
+
+ // Perform several mutations and runs.
+ MutateAndTestOne();
+
+ PurgeAllocator();
+ }
+
+ PrintStats("DONE ", "\n");
+ MD.PrintRecommendedDictionary();
+}
+
+void Fuzzer::MinimizeCrashLoop(const Unit &U) {
+ if (U.size() <= 1)
+ return;
+ while (!TimedOut() && TotalNumberOfRuns < Options.MaxNumberOfRuns) {
+ MD.StartMutationSequence();
+ memcpy(CurrentUnitData, U.data(), U.size());
+ for (int i = 0; i < Options.MutateDepth; i++) {
+ size_t NewSize = MD.Mutate(CurrentUnitData, U.size(), MaxMutationLen);
+ assert(NewSize > 0 && NewSize <= MaxMutationLen);
+ ExecuteCallback(CurrentUnitData, NewSize);
+ PrintPulseAndReportSlowInput(CurrentUnitData, NewSize);
+ TryDetectingAMemoryLeak(CurrentUnitData, NewSize,
+ /*DuringInitialCorpusExecution*/ false);
+ }
+ }
+}
+
+} // namespace fuzzer
+
+extern "C" {
+
+ATTRIBUTE_INTERFACE size_t
+LLVMFuzzerMutate(uint8_t *Data, size_t Size, size_t MaxSize) {
+ assert(fuzzer::F);
+ return fuzzer::F->GetMD().DefaultMutate(Data, Size, MaxSize);
+}
+
+} // extern "C"
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerLoop.cpp
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerMain.cpp
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerMain.cpp (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerMain.cpp (revision 351984)
@@ -0,0 +1,20 @@
+//===- FuzzerMain.cpp - main() function and flags -------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// main() and flags.
+//===----------------------------------------------------------------------===//
+
+#include "FuzzerDefs.h"
+
+extern "C" {
+// This function should be defined by the user.
+int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size);
+} // extern "C"
+
+ATTRIBUTE_INTERFACE int main(int argc, char **argv) {
+ return fuzzer::FuzzerDriver(&argc, &argv, LLVMFuzzerTestOneInput);
+}
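+
+// Typical build-and-run sketch (assumes a clang with -fsanitize=fuzzer
+// support; flags other than -fsanitize are illustrative):
+//   clang++ -g -O1 -fsanitize=fuzzer,address my_target.cpp -o my_fuzzer
+//   ./my_fuzzer CORPUS_DIR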
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerMain.cpp
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerMerge.cpp
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerMerge.cpp (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerMerge.cpp (revision 351984)
@@ -0,0 +1,362 @@
+//===- FuzzerMerge.cpp - merging corpora ----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// Merging corpora.
+//===----------------------------------------------------------------------===//
+
+#include "FuzzerCommand.h"
+#include "FuzzerMerge.h"
+#include "FuzzerIO.h"
+#include "FuzzerInternal.h"
+#include "FuzzerTracePC.h"
+#include "FuzzerUtil.h"
+
+#include <fstream>
+#include <iterator>
+#include <set>
+#include <sstream>
+
+namespace fuzzer {
+
+bool Merger::Parse(const std::string &Str, bool ParseCoverage) {
+ std::istringstream SS(Str);
+ return Parse(SS, ParseCoverage);
+}
+
+void Merger::ParseOrExit(std::istream &IS, bool ParseCoverage) {
+ if (!Parse(IS, ParseCoverage)) {
+ Printf("MERGE: failed to parse the control file (unexpected error)\n");
+ exit(1);
+ }
+}
+
+// An example control file:
+//
+// 3 # The number of inputs
+// 1 # The number of inputs in the first corpus, <= the previous number
+// file0
+// file1
+// file2 # One file name per line.
+// STARTED 0 123 # FileID, file size
+// FT 0 1 4 6 8 # FileID COV1 COV2 ...
+// COV 0 7 8 9 # FileID COV1 COV2 ...
+// STARTED 1 456 # If FT is missing, the input crashed while processing.
+// STARTED 2 567
+// FT 2 8 9
+// COV 2 11 12
+bool Merger::Parse(std::istream &IS, bool ParseCoverage) {
+ LastFailure.clear();
+ std::string Line;
+
+ // Parse NumFiles.
+ if (!std::getline(IS, Line, '\n')) return false;
+ std::istringstream L1(Line);
+ size_t NumFiles = 0;
+ L1 >> NumFiles;
+ if (NumFiles == 0 || NumFiles > 10000000) return false;
+
+ // Parse NumFilesInFirstCorpus.
+ if (!std::getline(IS, Line, '\n')) return false;
+ std::istringstream L2(Line);
+ NumFilesInFirstCorpus = NumFiles + 1;
+ L2 >> NumFilesInFirstCorpus;
+ if (NumFilesInFirstCorpus > NumFiles) return false;
+
+ // Parse file names.
+ Files.resize(NumFiles);
+ for (size_t i = 0; i < NumFiles; i++)
+ if (!std::getline(IS, Files[i].Name, '\n'))
+ return false;
+
+ // Parse STARTED, FT, and COV lines.
+ size_t ExpectedStartMarker = 0;
+ const size_t kInvalidStartMarker = -1;
+ size_t LastSeenStartMarker = kInvalidStartMarker;
+ Vector<uint32_t> TmpFeatures;
+ Set<uint32_t> PCs;
+ while (std::getline(IS, Line, '\n')) {
+ std::istringstream ISS1(Line);
+ std::string Marker;
+ size_t N;
+ ISS1 >> Marker;
+ ISS1 >> N;
+ if (Marker == "STARTED") {
+ // STARTED FILE_ID FILE_SIZE
+ if (ExpectedStartMarker != N)
+ return false;
+ ISS1 >> Files[ExpectedStartMarker].Size;
+ LastSeenStartMarker = ExpectedStartMarker;
+ assert(ExpectedStartMarker < Files.size());
+ ExpectedStartMarker++;
+ } else if (Marker == "FT") {
+ // FT FILE_ID COV1 COV2 COV3 ...
+ size_t CurrentFileIdx = N;
+ if (CurrentFileIdx != LastSeenStartMarker)
+ return false;
+ LastSeenStartMarker = kInvalidStartMarker;
+ if (ParseCoverage) {
+ TmpFeatures.clear(); // use a vector from outer scope to avoid resizes.
+ while (ISS1 >> N)
+ TmpFeatures.push_back(N);
+ std::sort(TmpFeatures.begin(), TmpFeatures.end());
+ Files[CurrentFileIdx].Features = TmpFeatures;
+ }
+ } else if (Marker == "COV") {
+ size_t CurrentFileIdx = N;
+ if (ParseCoverage)
+ while (ISS1 >> N)
+ if (PCs.insert(N).second)
+ Files[CurrentFileIdx].Cov.push_back(N);
+ } else {
+ return false;
+ }
+ }
+ if (LastSeenStartMarker != kInvalidStartMarker)
+ LastFailure = Files[LastSeenStartMarker].Name;
+
+ FirstNotProcessedFile = ExpectedStartMarker;
+ return true;
+}
+
+size_t Merger::ApproximateMemoryConsumption() const {
+ size_t Res = 0;
+ for (const auto &F: Files)
+ Res += sizeof(F) + F.Features.size() * sizeof(F.Features[0]);
+ return Res;
+}
+
+// Decides which files need to be merged (add those to NewFiles).
+// Returns the number of new features added.
+size_t Merger::Merge(const Set<uint32_t> &InitialFeatures,
+ Set<uint32_t> *NewFeatures,
+ const Set<uint32_t> &InitialCov, Set<uint32_t> *NewCov,
+ Vector<std::string> *NewFiles) {
+ NewFiles->clear();
+ assert(NumFilesInFirstCorpus <= Files.size());
+ Set<uint32_t> AllFeatures = InitialFeatures;
+
+ // What features are in the initial corpus?
+ for (size_t i = 0; i < NumFilesInFirstCorpus; i++) {
+ auto &Cur = Files[i].Features;
+ AllFeatures.insert(Cur.begin(), Cur.end());
+ }
+ // Remove all features that we already know from all other inputs.
+ for (size_t i = NumFilesInFirstCorpus; i < Files.size(); i++) {
+ auto &Cur = Files[i].Features;
+ Vector<uint32_t> Tmp;
+ std::set_difference(Cur.begin(), Cur.end(), AllFeatures.begin(),
+ AllFeatures.end(), std::inserter(Tmp, Tmp.begin()));
+ Cur.swap(Tmp);
+ }
+
+ // Sort. Give preference to
+ // * smaller files
+ // * files with more features.
+ std::sort(Files.begin() + NumFilesInFirstCorpus, Files.end(),
+ [&](const MergeFileInfo &a, const MergeFileInfo &b) -> bool {
+ if (a.Size != b.Size)
+ return a.Size < b.Size;
+ return a.Features.size() > b.Features.size();
+ });
+
+ // One greedy pass: add the file's features to AllFeatures.
+ // If new features were added, add this file to NewFiles.
+ for (size_t i = NumFilesInFirstCorpus; i < Files.size(); i++) {
+ auto &Cur = Files[i].Features;
+ // Printf("%s -> sz %zd ft %zd\n", Files[i].Name.c_str(),
+ // Files[i].Size, Cur.size());
+ bool FoundNewFeatures = false;
+ for (auto Fe: Cur) {
+ if (AllFeatures.insert(Fe).second) {
+ FoundNewFeatures = true;
+ NewFeatures->insert(Fe);
+ }
+ }
+ if (FoundNewFeatures)
+ NewFiles->push_back(Files[i].Name);
+ for (auto Cov : Files[i].Cov)
+ if (InitialCov.find(Cov) == InitialCov.end())
+ NewCov->insert(Cov);
+ }
+ return NewFeatures->size();
+}
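+
+// Worked example for the greedy pass above (values made up): with
+// InitialFeatures = {1, 2} and two non-initial files A (Features = {2, 3})
+// and B (Features = {3}), the set-difference step reduces both A and B to
+// {3}; whichever file sorts first contributes feature 3 and is added to
+// NewFiles, and the other file then adds nothing.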
+
+Set<uint32_t> Merger::AllFeatures() const {
+ Set<uint32_t> S;
+ for (auto &File : Files)
+ S.insert(File.Features.begin(), File.Features.end());
+ return S;
+}
+
+// Inner process. May crash if the target crashes.
+void Fuzzer::CrashResistantMergeInternalStep(const std::string &CFPath) {
+ Printf("MERGE-INNER: using the control file '%s'\n", CFPath.c_str());
+ Merger M;
+ std::ifstream IF(CFPath);
+ M.ParseOrExit(IF, false);
+ IF.close();
+ if (!M.LastFailure.empty())
+ Printf("MERGE-INNER: '%s' caused a failure at the previous merge step\n",
+ M.LastFailure.c_str());
+
+ Printf("MERGE-INNER: %zd total files;"
+ " %zd processed earlier; will process %zd files now\n",
+ M.Files.size(), M.FirstNotProcessedFile,
+ M.Files.size() - M.FirstNotProcessedFile);
+
+ std::ofstream OF(CFPath, std::ofstream::out | std::ofstream::app);
+ Set<size_t> AllFeatures;
+ Set<const TracePC::PCTableEntry *> AllPCs;
+ for (size_t i = M.FirstNotProcessedFile; i < M.Files.size(); i++) {
+ Fuzzer::MaybeExitGracefully();
+ auto U = FileToVector(M.Files[i].Name);
+ if (U.size() > MaxInputLen) {
+ U.resize(MaxInputLen);
+ U.shrink_to_fit();
+ }
+    // Write the pre-run marker.
+ OF << "STARTED " << i << " " << U.size() << "\n";
+ OF.flush(); // Flush is important since Command::Execute may crash.
+ // Run.
+ TPC.ResetMaps();
+ ExecuteCallback(U.data(), U.size());
+ // Collect coverage. We are iterating over the files in this order:
+ // * First, files in the initial corpus ordered by size, smallest first.
+ // * Then, all other files, smallest first.
+    // So it makes no sense to record all features for all files; instead we
+    // record only the features that were not seen before.
+ Set<size_t> UniqFeatures;
+ TPC.CollectFeatures([&](size_t Feature) {
+ if (AllFeatures.insert(Feature).second)
+ UniqFeatures.insert(Feature);
+ });
+ TPC.UpdateObservedPCs();
+ // Show stats.
+ if (!(TotalNumberOfRuns & (TotalNumberOfRuns - 1)))
+ PrintStats("pulse ");
+ // Write the post-run marker and the coverage.
+ OF << "FT " << i;
+ for (size_t F : UniqFeatures)
+ OF << " " << F;
+ OF << "\n";
+ OF << "COV " << i;
+ TPC.ForEachObservedPC([&](const TracePC::PCTableEntry *TE) {
+ if (AllPCs.insert(TE).second)
+ OF << " " << TPC.PCTableEntryIdx(TE);
+ });
+ OF << "\n";
+ OF.flush();
+ }
+ PrintStats("DONE ");
+}
+
+static void WriteNewControlFile(const std::string &CFPath,
+ const Vector<SizedFile> &OldCorpus,
+ const Vector<SizedFile> &NewCorpus) {
+ RemoveFile(CFPath);
+ std::ofstream ControlFile(CFPath);
+ ControlFile << (OldCorpus.size() + NewCorpus.size()) << "\n";
+ ControlFile << OldCorpus.size() << "\n";
+ for (auto &SF: OldCorpus)
+ ControlFile << SF.File << "\n";
+ for (auto &SF: NewCorpus)
+ ControlFile << SF.File << "\n";
+ if (!ControlFile) {
+ Printf("MERGE-OUTER: failed to write to the control file: %s\n",
+ CFPath.c_str());
+ exit(1);
+ }
+}
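+
+// For example (paths made up), OldCorpus = {./c/seed1} and
+// NewCorpus = {./n/new1, ./n/new2} yield this initial control file:
+//   3
+//   1
+//   ./c/seed1
+//   ./n/new1
+//   ./n/new2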
+
+// Outer process. Does not call the target code and thus should not fail.
+void CrashResistantMerge(const Vector<std::string> &Args,
+ const Vector<SizedFile> &OldCorpus,
+ const Vector<SizedFile> &NewCorpus,
+ Vector<std::string> *NewFiles,
+ const Set<uint32_t> &InitialFeatures,
+ Set<uint32_t> *NewFeatures,
+ const Set<uint32_t> &InitialCov,
+ Set<uint32_t> *NewCov,
+ const std::string &CFPath,
+ bool V /*Verbose*/) {
+ if (NewCorpus.empty() && OldCorpus.empty()) return; // Nothing to merge.
+ size_t NumAttempts = 0;
+ if (FileSize(CFPath)) {
+ VPrintf(V, "MERGE-OUTER: non-empty control file provided: '%s'\n",
+ CFPath.c_str());
+ Merger M;
+ std::ifstream IF(CFPath);
+ if (M.Parse(IF, /*ParseCoverage=*/false)) {
+ VPrintf(V, "MERGE-OUTER: control file ok, %zd files total,"
+ " first not processed file %zd\n",
+ M.Files.size(), M.FirstNotProcessedFile);
+ if (!M.LastFailure.empty())
+ VPrintf(V, "MERGE-OUTER: '%s' will be skipped as unlucky "
+ "(merge has stumbled on it the last time)\n",
+ M.LastFailure.c_str());
+ if (M.FirstNotProcessedFile >= M.Files.size()) {
+ VPrintf(
+ V, "MERGE-OUTER: nothing to do, merge has been completed before\n");
+ exit(0);
+ }
+
+ NumAttempts = M.Files.size() - M.FirstNotProcessedFile;
+ } else {
+ VPrintf(V, "MERGE-OUTER: bad control file, will overwrite it\n");
+ }
+ }
+
+ if (!NumAttempts) {
+    // The supplied control file is empty or bad; create a fresh one.
+ NumAttempts = OldCorpus.size() + NewCorpus.size();
+ VPrintf(V, "MERGE-OUTER: %zd files, %zd in the initial corpus\n",
+ NumAttempts, OldCorpus.size());
+ WriteNewControlFile(CFPath, OldCorpus, NewCorpus);
+ }
+
+ // Execute the inner process until it passes.
+ // Every inner process should execute at least one input.
+ Command BaseCmd(Args);
+ BaseCmd.removeFlag("merge");
+ BaseCmd.removeFlag("fork");
+ BaseCmd.removeFlag("collect_data_flow");
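+  // Roughly, each attempt below re-runs the target as (flag syntax assumed):
+  //   ./fuzzer <args> -merge_control_file=<CFPath> -merge_inner=1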
+ for (size_t Attempt = 1; Attempt <= NumAttempts; Attempt++) {
+ Fuzzer::MaybeExitGracefully();
+ VPrintf(V, "MERGE-OUTER: attempt %zd\n", Attempt);
+ Command Cmd(BaseCmd);
+ Cmd.addFlag("merge_control_file", CFPath);
+ Cmd.addFlag("merge_inner", "1");
+ if (!V) {
+ Cmd.setOutputFile(getDevNull());
+ Cmd.combineOutAndErr();
+ }
+ auto ExitCode = ExecuteCommand(Cmd);
+ if (!ExitCode) {
+      VPrintf(V, "MERGE-OUTER: succeeded in %zd attempt(s)\n", Attempt);
+ break;
+ }
+ }
+ // Read the control file and do the merge.
+ Merger M;
+ std::ifstream IF(CFPath);
+ IF.seekg(0, IF.end);
+ VPrintf(V, "MERGE-OUTER: the control file has %zd bytes\n",
+ (size_t)IF.tellg());
+ IF.seekg(0, IF.beg);
+ M.ParseOrExit(IF, true);
+ IF.close();
+ VPrintf(V,
+ "MERGE-OUTER: consumed %zdMb (%zdMb rss) to parse the control file\n",
+ M.ApproximateMemoryConsumption() >> 20, GetPeakRSSMb());
+ M.Merge(InitialFeatures, NewFeatures, InitialCov, NewCov, NewFiles);
+ VPrintf(V, "MERGE-OUTER: %zd new files with %zd new features added; "
+ "%zd new coverage edges\n",
+ NewFiles->size(), NewFeatures->size(), NewCov->size());
+}
+
+} // namespace fuzzer
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerMerge.cpp
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerMerge.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerMerge.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerMerge.h (revision 351984)
@@ -0,0 +1,86 @@
+//===- FuzzerMerge.h - merging corpora --------------------------*- C++ -* ===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// Merging Corpora.
+//
+// The task:
+// Take the existing corpus (possibly empty) and merge new inputs into
+// it so that only inputs with new coverage ('features') are added.
+// The process should tolerate crashes, OOMs, leaks, etc.
+//
+// Algorithm:
+// The outer process collects the set of files and writes their names
+// into a temporary "control" file, then repeatedly launches the inner
+// process until all inputs are processed.
+// The outer process does not actually execute the target code.
+//
+// The inner process reads the control file and sees a) list of all the inputs
+// and b) the last processed input. Then it starts processing the inputs one
+// by one. Before processing every input it writes one line to the control
+// file:
+// STARTED INPUT_ID INPUT_SIZE
+// After processing an input it writes the following lines:
+// FT INPUT_ID Feature1 Feature2 Feature3 ...
+// COV INPUT_ID Coverage1 Coverage2 Coverage3 ...
+// If a crash happens while processing an input the last line in the control
+// file will be "STARTED INPUT_ID" and so the next process will know
+// where to resume.
+//
+// Once all inputs are processed by the inner process(es) the outer process
+// reads the control file and does the merge based entirely on its contents.
+// It uses a single-pass greedy algorithm, choosing first the smallest inputs
+// and, within the same size, the inputs that have more new features.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_FUZZER_MERGE_H
+#define LLVM_FUZZER_MERGE_H
+
+#include "FuzzerDefs.h"
+
+#include <istream>
+#include <ostream>
+#include <set>
+#include <vector>
+
+namespace fuzzer {
+
+struct MergeFileInfo {
+ std::string Name;
+ size_t Size = 0;
+ Vector<uint32_t> Features, Cov;
+};
+
+struct Merger {
+ Vector<MergeFileInfo> Files;
+ size_t NumFilesInFirstCorpus = 0;
+ size_t FirstNotProcessedFile = 0;
+ std::string LastFailure;
+
+ bool Parse(std::istream &IS, bool ParseCoverage);
+ bool Parse(const std::string &Str, bool ParseCoverage);
+ void ParseOrExit(std::istream &IS, bool ParseCoverage);
+ size_t Merge(const Set<uint32_t> &InitialFeatures, Set<uint32_t> *NewFeatures,
+ const Set<uint32_t> &InitialCov, Set<uint32_t> *NewCov,
+ Vector<std::string> *NewFiles);
+ size_t ApproximateMemoryConsumption() const;
+ Set<uint32_t> AllFeatures() const;
+};
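+
+// Minimal usage sketch (illustrative only; names are placeholders):
+//   Merger M;
+//   std::ifstream IF(ControlFilePath);
+//   if (M.Parse(IF, /*ParseCoverage=*/true))
+//     M.Merge(InitialFeatures, &NewFeatures, InitialCov, &NewCov, &NewFiles);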
+
+void CrashResistantMerge(const Vector<std::string> &Args,
+ const Vector<SizedFile> &OldCorpus,
+ const Vector<SizedFile> &NewCorpus,
+ Vector<std::string> *NewFiles,
+ const Set<uint32_t> &InitialFeatures,
+ Set<uint32_t> *NewFeatures,
+ const Set<uint32_t> &InitialCov,
+ Set<uint32_t> *NewCov,
+ const std::string &CFPath,
+ bool Verbose);
+
+} // namespace fuzzer
+
+#endif // LLVM_FUZZER_MERGE_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerMerge.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerMutate.cpp
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerMutate.cpp (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerMutate.cpp (revision 351984)
@@ -0,0 +1,562 @@
+//===- FuzzerMutate.cpp - Mutate a test input -----------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// Mutate a test input.
+//===----------------------------------------------------------------------===//
+
+#include "FuzzerDefs.h"
+#include "FuzzerExtFunctions.h"
+#include "FuzzerIO.h"
+#include "FuzzerMutate.h"
+#include "FuzzerOptions.h"
+#include "FuzzerTracePC.h"
+
+namespace fuzzer {
+
+const size_t Dictionary::kMaxDictSize;
+
+static void PrintASCII(const Word &W, const char *PrintAfter) {
+ PrintASCII(W.data(), W.size(), PrintAfter);
+}
+
+MutationDispatcher::MutationDispatcher(Random &Rand,
+ const FuzzingOptions &Options)
+ : Rand(Rand), Options(Options) {
+ DefaultMutators.insert(
+ DefaultMutators.begin(),
+ {
+ {&MutationDispatcher::Mutate_EraseBytes, "EraseBytes"},
+ {&MutationDispatcher::Mutate_InsertByte, "InsertByte"},
+ {&MutationDispatcher::Mutate_InsertRepeatedBytes,
+ "InsertRepeatedBytes"},
+ {&MutationDispatcher::Mutate_ChangeByte, "ChangeByte"},
+ {&MutationDispatcher::Mutate_ChangeBit, "ChangeBit"},
+ {&MutationDispatcher::Mutate_ShuffleBytes, "ShuffleBytes"},
+ {&MutationDispatcher::Mutate_ChangeASCIIInteger, "ChangeASCIIInt"},
+ {&MutationDispatcher::Mutate_ChangeBinaryInteger, "ChangeBinInt"},
+ {&MutationDispatcher::Mutate_CopyPart, "CopyPart"},
+ {&MutationDispatcher::Mutate_CrossOver, "CrossOver"},
+ {&MutationDispatcher::Mutate_AddWordFromManualDictionary,
+ "ManualDict"},
+ {&MutationDispatcher::Mutate_AddWordFromPersistentAutoDictionary,
+ "PersAutoDict"},
+ });
+ if(Options.UseCmp)
+ DefaultMutators.push_back(
+ {&MutationDispatcher::Mutate_AddWordFromTORC, "CMP"});
+
+ if (EF->LLVMFuzzerCustomMutator)
+ Mutators.push_back({&MutationDispatcher::Mutate_Custom, "Custom"});
+ else
+ Mutators = DefaultMutators;
+
+ if (EF->LLVMFuzzerCustomCrossOver)
+ Mutators.push_back(
+ {&MutationDispatcher::Mutate_CustomCrossOver, "CustomCrossOver"});
+}
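+
+// Note (added commentary): when LLVMFuzzerCustomMutator is defined, it
+// replaces the built-in mutators above entirely; a custom mutator may still
+// reach them through LLVMFuzzerMutate, which forwards to DefaultMutate.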
+
+static char RandCh(Random &Rand) {
+ if (Rand.RandBool()) return Rand(256);
+ const char Special[] = "!*'();:@&=+$,/?%#[]012Az-`~.\xff\x00";
+ return Special[Rand(sizeof(Special) - 1)];
+}
+
+size_t MutationDispatcher::Mutate_Custom(uint8_t *Data, size_t Size,
+ size_t MaxSize) {
+ return EF->LLVMFuzzerCustomMutator(Data, Size, MaxSize, Rand.Rand());
+}
+
+size_t MutationDispatcher::Mutate_CustomCrossOver(uint8_t *Data, size_t Size,
+ size_t MaxSize) {
+ if (Size == 0)
+ return 0;
+ if (!CrossOverWith) return 0;
+ const Unit &Other = *CrossOverWith;
+ if (Other.empty())
+ return 0;
+ CustomCrossOverInPlaceHere.resize(MaxSize);
+ auto &U = CustomCrossOverInPlaceHere;
+ size_t NewSize = EF->LLVMFuzzerCustomCrossOver(
+ Data, Size, Other.data(), Other.size(), U.data(), U.size(), Rand.Rand());
+ if (!NewSize)
+ return 0;
+  assert(NewSize <= MaxSize && "CustomCrossOver returned oversized unit");
+ memcpy(Data, U.data(), NewSize);
+ return NewSize;
+}
+
+size_t MutationDispatcher::Mutate_ShuffleBytes(uint8_t *Data, size_t Size,
+ size_t MaxSize) {
+ if (Size > MaxSize || Size == 0) return 0;
+ size_t ShuffleAmount =
+ Rand(std::min(Size, (size_t)8)) + 1; // [1,8] and <= Size.
+ size_t ShuffleStart = Rand(Size - ShuffleAmount);
+ assert(ShuffleStart + ShuffleAmount <= Size);
+ std::shuffle(Data + ShuffleStart, Data + ShuffleStart + ShuffleAmount, Rand);
+ return Size;
+}
+
+size_t MutationDispatcher::Mutate_EraseBytes(uint8_t *Data, size_t Size,
+ size_t MaxSize) {
+ if (Size <= 1) return 0;
+ size_t N = Rand(Size / 2) + 1;
+ assert(N < Size);
+ size_t Idx = Rand(Size - N + 1);
+ // Erase Data[Idx:Idx+N].
+ memmove(Data + Idx, Data + Idx + N, Size - Idx - N);
+ // Printf("Erase: %zd %zd => %zd; Idx %zd\n", N, Size, Size - N, Idx);
+ return Size - N;
+}
+
+size_t MutationDispatcher::Mutate_InsertByte(uint8_t *Data, size_t Size,
+ size_t MaxSize) {
+ if (Size >= MaxSize) return 0;
+ size_t Idx = Rand(Size + 1);
+ // Insert new value at Data[Idx].
+ memmove(Data + Idx + 1, Data + Idx, Size - Idx);
+ Data[Idx] = RandCh(Rand);
+ return Size + 1;
+}
+
+size_t MutationDispatcher::Mutate_InsertRepeatedBytes(uint8_t *Data,
+ size_t Size,
+ size_t MaxSize) {
+ const size_t kMinBytesToInsert = 3;
+ if (Size + kMinBytesToInsert >= MaxSize) return 0;
+ size_t MaxBytesToInsert = std::min(MaxSize - Size, (size_t)128);
+ size_t N = Rand(MaxBytesToInsert - kMinBytesToInsert + 1) + kMinBytesToInsert;
+ assert(Size + N <= MaxSize && N);
+ size_t Idx = Rand(Size + 1);
+ // Insert new values at Data[Idx].
+ memmove(Data + Idx + N, Data + Idx, Size - Idx);
+ // Give preference to 0x00 and 0xff.
+ uint8_t Byte = Rand.RandBool() ? Rand(256) : (Rand.RandBool() ? 0 : 255);
+ for (size_t i = 0; i < N; i++)
+ Data[Idx + i] = Byte;
+ return Size + N;
+}
+
+size_t MutationDispatcher::Mutate_ChangeByte(uint8_t *Data, size_t Size,
+ size_t MaxSize) {
+ if (Size > MaxSize) return 0;
+ size_t Idx = Rand(Size);
+ Data[Idx] = RandCh(Rand);
+ return Size;
+}
+
+size_t MutationDispatcher::Mutate_ChangeBit(uint8_t *Data, size_t Size,
+ size_t MaxSize) {
+ if (Size > MaxSize) return 0;
+ size_t Idx = Rand(Size);
+ Data[Idx] ^= 1 << Rand(8);
+ return Size;
+}
+
+size_t MutationDispatcher::Mutate_AddWordFromManualDictionary(uint8_t *Data,
+ size_t Size,
+ size_t MaxSize) {
+ return AddWordFromDictionary(ManualDictionary, Data, Size, MaxSize);
+}
+
+size_t MutationDispatcher::ApplyDictionaryEntry(uint8_t *Data, size_t Size,
+ size_t MaxSize,
+ DictionaryEntry &DE) {
+ const Word &W = DE.GetW();
+ bool UsePositionHint = DE.HasPositionHint() &&
+ DE.GetPositionHint() + W.size() < Size &&
+ Rand.RandBool();
+ if (Rand.RandBool()) { // Insert W.
+ if (Size + W.size() > MaxSize) return 0;
+ size_t Idx = UsePositionHint ? DE.GetPositionHint() : Rand(Size + 1);
+ memmove(Data + Idx + W.size(), Data + Idx, Size - Idx);
+ memcpy(Data + Idx, W.data(), W.size());
+ Size += W.size();
+ } else { // Overwrite some bytes with W.
+ if (W.size() > Size) return 0;
+ size_t Idx = UsePositionHint ? DE.GetPositionHint() : Rand(Size - W.size());
+ memcpy(Data + Idx, W.data(), W.size());
+ }
+ return Size;
+}
+
+// Somewhere in the past we have observed a comparison instruction
+// with arguments Arg1 Arg2. This function tries to guess a dictionary
+// entry that will satisfy that comparison.
+// It first tries to find one of the arguments (possibly swapped) in the
+// input and if it succeeds it creates a DE with a position hint.
+// Otherwise it creates a DE with one of the arguments w/o a position hint.
+DictionaryEntry MutationDispatcher::MakeDictionaryEntryFromCMP(
+ const void *Arg1, const void *Arg2,
+ const void *Arg1Mutation, const void *Arg2Mutation,
+ size_t ArgSize, const uint8_t *Data,
+ size_t Size) {
+ bool HandleFirst = Rand.RandBool();
+ const void *ExistingBytes, *DesiredBytes;
+ Word W;
+ const uint8_t *End = Data + Size;
+ for (int Arg = 0; Arg < 2; Arg++) {
+ ExistingBytes = HandleFirst ? Arg1 : Arg2;
+ DesiredBytes = HandleFirst ? Arg2Mutation : Arg1Mutation;
+ HandleFirst = !HandleFirst;
+ W.Set(reinterpret_cast<const uint8_t*>(DesiredBytes), ArgSize);
+ const size_t kMaxNumPositions = 8;
+ size_t Positions[kMaxNumPositions];
+ size_t NumPositions = 0;
+ for (const uint8_t *Cur = Data;
+ Cur < End && NumPositions < kMaxNumPositions; Cur++) {
+ Cur =
+ (const uint8_t *)SearchMemory(Cur, End - Cur, ExistingBytes, ArgSize);
+ if (!Cur) break;
+ Positions[NumPositions++] = Cur - Data;
+ }
+ if (!NumPositions) continue;
+ return DictionaryEntry(W, Positions[Rand(NumPositions)]);
+ }
+ DictionaryEntry DE(W);
+ return DE;
+}
+
+
+template <class T>
+DictionaryEntry MutationDispatcher::MakeDictionaryEntryFromCMP(
+ T Arg1, T Arg2, const uint8_t *Data, size_t Size) {
+ if (Rand.RandBool()) Arg1 = Bswap(Arg1);
+ if (Rand.RandBool()) Arg2 = Bswap(Arg2);
+ T Arg1Mutation = Arg1 + Rand(-1, 1);
+ T Arg2Mutation = Arg2 + Rand(-1, 1);
+ return MakeDictionaryEntryFromCMP(&Arg1, &Arg2, &Arg1Mutation, &Arg2Mutation,
+ sizeof(Arg1), Data, Size);
+}
+
+DictionaryEntry MutationDispatcher::MakeDictionaryEntryFromCMP(
+ const Word &Arg1, const Word &Arg2, const uint8_t *Data, size_t Size) {
+ return MakeDictionaryEntryFromCMP(Arg1.data(), Arg2.data(), Arg1.data(),
+ Arg2.data(), Arg1.size(), Data, Size);
+}
+
+size_t MutationDispatcher::Mutate_AddWordFromTORC(
+ uint8_t *Data, size_t Size, size_t MaxSize) {
+ Word W;
+ DictionaryEntry DE;
+ switch (Rand(4)) {
+ case 0: {
+ auto X = TPC.TORC8.Get(Rand.Rand());
+ DE = MakeDictionaryEntryFromCMP(X.A, X.B, Data, Size);
+ } break;
+ case 1: {
+ auto X = TPC.TORC4.Get(Rand.Rand());
+ if ((X.A >> 16) == 0 && (X.B >> 16) == 0 && Rand.RandBool())
+ DE = MakeDictionaryEntryFromCMP((uint16_t)X.A, (uint16_t)X.B, Data, Size);
+ else
+ DE = MakeDictionaryEntryFromCMP(X.A, X.B, Data, Size);
+ } break;
+ case 2: {
+ auto X = TPC.TORCW.Get(Rand.Rand());
+ DE = MakeDictionaryEntryFromCMP(X.A, X.B, Data, Size);
+ } break;
+ case 3: if (Options.UseMemmem) {
+ auto X = TPC.MMT.Get(Rand.Rand());
+ DE = DictionaryEntry(X);
+ } break;
+ default:
+ assert(0);
+ }
+ if (!DE.GetW().size()) return 0;
+ Size = ApplyDictionaryEntry(Data, Size, MaxSize, DE);
+ if (!Size) return 0;
+ DictionaryEntry &DERef =
+ CmpDictionaryEntriesDeque[CmpDictionaryEntriesDequeIdx++ %
+ kCmpDictionaryEntriesDequeSize];
+ DERef = DE;
+ CurrentDictionaryEntrySequence.push_back(&DERef);
+ return Size;
+}
+
+size_t MutationDispatcher::Mutate_AddWordFromPersistentAutoDictionary(
+ uint8_t *Data, size_t Size, size_t MaxSize) {
+ return AddWordFromDictionary(PersistentAutoDictionary, Data, Size, MaxSize);
+}
+
+size_t MutationDispatcher::AddWordFromDictionary(Dictionary &D, uint8_t *Data,
+ size_t Size, size_t MaxSize) {
+ if (Size > MaxSize) return 0;
+ if (D.empty()) return 0;
+ DictionaryEntry &DE = D[Rand(D.size())];
+ Size = ApplyDictionaryEntry(Data, Size, MaxSize, DE);
+ if (!Size) return 0;
+ DE.IncUseCount();
+ CurrentDictionaryEntrySequence.push_back(&DE);
+ return Size;
+}
+
+// Overwrites part of To[0,ToSize) with a part of From[0,FromSize).
+// Returns ToSize.
+size_t MutationDispatcher::CopyPartOf(const uint8_t *From, size_t FromSize,
+ uint8_t *To, size_t ToSize) {
+ // Copy From[FromBeg, FromBeg + CopySize) into To[ToBeg, ToBeg + CopySize).
+ size_t ToBeg = Rand(ToSize);
+ size_t CopySize = Rand(ToSize - ToBeg) + 1;
+ assert(ToBeg + CopySize <= ToSize);
+ CopySize = std::min(CopySize, FromSize);
+ size_t FromBeg = Rand(FromSize - CopySize + 1);
+ assert(FromBeg + CopySize <= FromSize);
+ memmove(To + ToBeg, From + FromBeg, CopySize);
+ return ToSize;
+}
+
+// Inserts part of From[0,FromSize) into To.
+// Returns new size of To on success or 0 on failure.
+size_t MutationDispatcher::InsertPartOf(const uint8_t *From, size_t FromSize,
+ uint8_t *To, size_t ToSize,
+ size_t MaxToSize) {
+ if (ToSize >= MaxToSize) return 0;
+ size_t AvailableSpace = MaxToSize - ToSize;
+ size_t MaxCopySize = std::min(AvailableSpace, FromSize);
+ size_t CopySize = Rand(MaxCopySize) + 1;
+ size_t FromBeg = Rand(FromSize - CopySize + 1);
+ assert(FromBeg + CopySize <= FromSize);
+ size_t ToInsertPos = Rand(ToSize + 1);
+ assert(ToInsertPos + CopySize <= MaxToSize);
+ size_t TailSize = ToSize - ToInsertPos;
+ if (To == From) {
+ MutateInPlaceHere.resize(MaxToSize);
+ memcpy(MutateInPlaceHere.data(), From + FromBeg, CopySize);
+ memmove(To + ToInsertPos + CopySize, To + ToInsertPos, TailSize);
+ memmove(To + ToInsertPos, MutateInPlaceHere.data(), CopySize);
+ } else {
+ memmove(To + ToInsertPos + CopySize, To + ToInsertPos, TailSize);
+ memmove(To + ToInsertPos, From + FromBeg, CopySize);
+ }
+ return ToSize + CopySize;
+}
+
+size_t MutationDispatcher::Mutate_CopyPart(uint8_t *Data, size_t Size,
+ size_t MaxSize) {
+ if (Size > MaxSize || Size == 0) return 0;
+ // If Size == MaxSize, `InsertPartOf(...)` will
+ // fail so there's no point using it in this case.
+ if (Size == MaxSize || Rand.RandBool())
+ return CopyPartOf(Data, Size, Data, Size);
+ else
+ return InsertPartOf(Data, Size, Data, Size, MaxSize);
+}
+
+size_t MutationDispatcher::Mutate_ChangeASCIIInteger(uint8_t *Data, size_t Size,
+ size_t MaxSize) {
+ if (Size > MaxSize) return 0;
+ size_t B = Rand(Size);
+ while (B < Size && !isdigit(Data[B])) B++;
+ if (B == Size) return 0;
+ size_t E = B;
+ while (E < Size && isdigit(Data[E])) E++;
+ assert(B < E);
+ // now we have digits in [B, E).
+  // strtol and friends don't accept non-zero-terminated data; parse it manually.
+ uint64_t Val = Data[B] - '0';
+ for (size_t i = B + 1; i < E; i++)
+ Val = Val * 10 + Data[i] - '0';
+
+ // Mutate the integer value.
+ switch(Rand(5)) {
+ case 0: Val++; break;
+ case 1: Val--; break;
+ case 2: Val /= 2; break;
+ case 3: Val *= 2; break;
+ case 4: Val = Rand(Val * Val); break;
+ default: assert(0);
+ }
+ // Just replace the bytes with the new ones, don't bother moving bytes.
+ for (size_t i = B; i < E; i++) {
+ size_t Idx = E + B - i - 1;
+ assert(Idx >= B && Idx < E);
+ Data[Idx] = (Val % 10) + '0';
+ Val /= 10;
+ }
+ return Size;
+}
+
+template<class T>
+size_t ChangeBinaryInteger(uint8_t *Data, size_t Size, Random &Rand) {
+ if (Size < sizeof(T)) return 0;
+ size_t Off = Rand(Size - sizeof(T) + 1);
+ assert(Off + sizeof(T) <= Size);
+ T Val;
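+  // With probability 1/4, and only for offsets near the start (Off < 64),
+  // plant the input size itself, possibly byte-swapped: a cheap heuristic
+  // for hitting length fields. (Added commentary.)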
+ if (Off < 64 && !Rand(4)) {
+ Val = Size;
+ if (Rand.RandBool())
+ Val = Bswap(Val);
+ } else {
+ memcpy(&Val, Data + Off, sizeof(Val));
+ T Add = Rand(21);
+ Add -= 10;
+ if (Rand.RandBool())
+      Val = Bswap(T(Bswap(Val) + Add)); // Add assuming different endianness.
+    else
+      Val = Val + Add; // Add assuming current endianness.
+ if (Add == 0 || Rand.RandBool()) // Maybe negate.
+ Val = -Val;
+ }
+ memcpy(Data + Off, &Val, sizeof(Val));
+ return Size;
+}
+
+size_t MutationDispatcher::Mutate_ChangeBinaryInteger(uint8_t *Data,
+ size_t Size,
+ size_t MaxSize) {
+ if (Size > MaxSize) return 0;
+ switch (Rand(4)) {
+ case 3: return ChangeBinaryInteger<uint64_t>(Data, Size, Rand);
+ case 2: return ChangeBinaryInteger<uint32_t>(Data, Size, Rand);
+ case 1: return ChangeBinaryInteger<uint16_t>(Data, Size, Rand);
+ case 0: return ChangeBinaryInteger<uint8_t>(Data, Size, Rand);
+ default: assert(0);
+ }
+ return 0;
+}
+
+size_t MutationDispatcher::Mutate_CrossOver(uint8_t *Data, size_t Size,
+ size_t MaxSize) {
+ if (Size > MaxSize) return 0;
+ if (Size == 0) return 0;
+ if (!CrossOverWith) return 0;
+ const Unit &O = *CrossOverWith;
+ if (O.empty()) return 0;
+ MutateInPlaceHere.resize(MaxSize);
+ auto &U = MutateInPlaceHere;
+ size_t NewSize = 0;
+ switch(Rand(3)) {
+ case 0:
+ NewSize = CrossOver(Data, Size, O.data(), O.size(), U.data(), U.size());
+ break;
+ case 1:
+ NewSize = InsertPartOf(O.data(), O.size(), U.data(), U.size(), MaxSize);
+ if (!NewSize)
+ NewSize = CopyPartOf(O.data(), O.size(), U.data(), U.size());
+ break;
+ case 2:
+ NewSize = CopyPartOf(O.data(), O.size(), U.data(), U.size());
+ break;
+ default: assert(0);
+ }
+ assert(NewSize > 0 && "CrossOver returned empty unit");
+  assert(NewSize <= MaxSize && "CrossOver returned oversized unit");
+ memcpy(Data, U.data(), NewSize);
+ return NewSize;
+}
+
+void MutationDispatcher::StartMutationSequence() {
+ CurrentMutatorSequence.clear();
+ CurrentDictionaryEntrySequence.clear();
+}
+
+// Copy successful dictionary entries to PersistentAutoDictionary.
+void MutationDispatcher::RecordSuccessfulMutationSequence() {
+ for (auto DE : CurrentDictionaryEntrySequence) {
+ // PersistentAutoDictionary.AddWithSuccessCountOne(DE);
+ DE->IncSuccessCount();
+ assert(DE->GetW().size());
+ // Linear search is fine here as this happens seldom.
+ if (!PersistentAutoDictionary.ContainsWord(DE->GetW()))
+ PersistentAutoDictionary.push_back({DE->GetW(), 1});
+ }
+}
+
+void MutationDispatcher::PrintRecommendedDictionary() {
+ Vector<DictionaryEntry> V;
+ for (auto &DE : PersistentAutoDictionary)
+ if (!ManualDictionary.ContainsWord(DE.GetW()))
+ V.push_back(DE);
+ if (V.empty()) return;
+ Printf("###### Recommended dictionary. ######\n");
+ for (auto &DE: V) {
+ assert(DE.GetW().size());
+ Printf("\"");
+ PrintASCII(DE.GetW(), "\"");
+ Printf(" # Uses: %zd\n", DE.GetUseCount());
+ }
+ Printf("###### End of recommended dictionary. ######\n");
+}
+
+void MutationDispatcher::PrintMutationSequence() {
+ Printf("MS: %zd ", CurrentMutatorSequence.size());
+ for (auto M : CurrentMutatorSequence)
+ Printf("%s-", M.Name);
+ if (!CurrentDictionaryEntrySequence.empty()) {
+ Printf(" DE: ");
+ for (auto DE : CurrentDictionaryEntrySequence) {
+ Printf("\"");
+ PrintASCII(DE->GetW(), "\"-");
+ }
+ }
+}
+
+size_t MutationDispatcher::Mutate(uint8_t *Data, size_t Size, size_t MaxSize) {
+ return MutateImpl(Data, Size, MaxSize, Mutators);
+}
+
+size_t MutationDispatcher::DefaultMutate(uint8_t *Data, size_t Size,
+ size_t MaxSize) {
+ return MutateImpl(Data, Size, MaxSize, DefaultMutators);
+}
+
+// Mutates Data in place, returns new size.
+size_t MutationDispatcher::MutateImpl(uint8_t *Data, size_t Size,
+ size_t MaxSize,
+ Vector<Mutator> &Mutators) {
+ assert(MaxSize > 0);
+ // Some mutations may fail (e.g. can't insert more bytes if Size == MaxSize),
+ // in which case they will return 0.
+ // Try several times before returning un-mutated data.
+ for (int Iter = 0; Iter < 100; Iter++) {
+ auto M = Mutators[Rand(Mutators.size())];
+ size_t NewSize = (this->*(M.Fn))(Data, Size, MaxSize);
+ if (NewSize && NewSize <= MaxSize) {
+ if (Options.OnlyASCII)
+ ToASCII(Data, NewSize);
+ CurrentMutatorSequence.push_back(M);
+ return NewSize;
+ }
+ }
+ *Data = ' ';
+ return 1; // Fallback, should not happen frequently.
+}
+
+// Mask represents the set of Data bytes that are worth mutating.
+size_t MutationDispatcher::MutateWithMask(uint8_t *Data, size_t Size,
+ size_t MaxSize,
+ const Vector<uint8_t> &Mask) {
+ size_t MaskedSize = std::min(Size, Mask.size());
+ // * Copy the worthy bytes into a temporary array T
+ // * Mutate T
+ // * Copy T back.
+ // This is totally unoptimized.
+ auto &T = MutateWithMaskTemp;
+ if (T.size() < Size)
+ T.resize(Size);
+ size_t OneBits = 0;
+ for (size_t I = 0; I < MaskedSize; I++)
+ if (Mask[I])
+ T[OneBits++] = Data[I];
+
+ if (!OneBits) return 0;
+ assert(!T.empty());
+ size_t NewSize = Mutate(T.data(), OneBits, OneBits);
+ assert(NewSize <= OneBits);
+ (void)NewSize;
+ // Even if NewSize < OneBits we still use all OneBits bytes.
+ for (size_t I = 0, J = 0; I < MaskedSize; I++)
+ if (Mask[I])
+ Data[I] = T[J++];
+ return Size;
+}
+
+void MutationDispatcher::AddWordToManualDictionary(const Word &W) {
+ ManualDictionary.push_back(
+ {W, std::numeric_limits<size_t>::max()});
+}
+
+} // namespace fuzzer
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerMutate.cpp
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerMutate.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerMutate.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerMutate.h (revision 351984)
@@ -0,0 +1,156 @@
+//===- FuzzerMutate.h - Internal header for the Fuzzer ----------*- C++ -* ===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// fuzzer::MutationDispatcher
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_FUZZER_MUTATE_H
+#define LLVM_FUZZER_MUTATE_H
+
+#include "FuzzerDefs.h"
+#include "FuzzerDictionary.h"
+#include "FuzzerOptions.h"
+#include "FuzzerRandom.h"
+
+namespace fuzzer {
+
+class MutationDispatcher {
+public:
+ MutationDispatcher(Random &Rand, const FuzzingOptions &Options);
+ ~MutationDispatcher() {}
+ /// Indicate that we are about to start a new sequence of mutations.
+ void StartMutationSequence();
+ /// Print the current sequence of mutations.
+ void PrintMutationSequence();
+ /// Indicate that the current sequence of mutations was successful.
+ void RecordSuccessfulMutationSequence();
+ /// Mutates data by invoking user-provided mutator.
+ size_t Mutate_Custom(uint8_t *Data, size_t Size, size_t MaxSize);
+ /// Mutates data by invoking user-provided crossover.
+ size_t Mutate_CustomCrossOver(uint8_t *Data, size_t Size, size_t MaxSize);
+ /// Mutates data by shuffling bytes.
+ size_t Mutate_ShuffleBytes(uint8_t *Data, size_t Size, size_t MaxSize);
+ /// Mutates data by erasing bytes.
+ size_t Mutate_EraseBytes(uint8_t *Data, size_t Size, size_t MaxSize);
+ /// Mutates data by inserting a byte.
+ size_t Mutate_InsertByte(uint8_t *Data, size_t Size, size_t MaxSize);
+ /// Mutates data by inserting several repeated bytes.
+ size_t Mutate_InsertRepeatedBytes(uint8_t *Data, size_t Size, size_t MaxSize);
+  /// Mutates data by changing one byte.
+  size_t Mutate_ChangeByte(uint8_t *Data, size_t Size, size_t MaxSize);
+  /// Mutates data by changing one bit.
+ size_t Mutate_ChangeBit(uint8_t *Data, size_t Size, size_t MaxSize);
+ /// Mutates data by copying/inserting a part of data into a different place.
+ size_t Mutate_CopyPart(uint8_t *Data, size_t Size, size_t MaxSize);
+
+ /// Mutates data by adding a word from the manual dictionary.
+ size_t Mutate_AddWordFromManualDictionary(uint8_t *Data, size_t Size,
+ size_t MaxSize);
+
+ /// Mutates data by adding a word from the TORC.
+ size_t Mutate_AddWordFromTORC(uint8_t *Data, size_t Size, size_t MaxSize);
+
+ /// Mutates data by adding a word from the persistent automatic dictionary.
+ size_t Mutate_AddWordFromPersistentAutoDictionary(uint8_t *Data, size_t Size,
+ size_t MaxSize);
+
+ /// Tries to find an ASCII integer in Data, changes it to another ASCII int.
+ size_t Mutate_ChangeASCIIInteger(uint8_t *Data, size_t Size, size_t MaxSize);
+ /// Change a 1-, 2-, 4-, or 8-byte integer in interesting ways.
+ size_t Mutate_ChangeBinaryInteger(uint8_t *Data, size_t Size, size_t MaxSize);
+
+ /// CrossOver Data with CrossOverWith.
+ size_t Mutate_CrossOver(uint8_t *Data, size_t Size, size_t MaxSize);
+
+ /// Applies one of the configured mutations.
+ /// Returns the new size of data which could be up to MaxSize.
+ size_t Mutate(uint8_t *Data, size_t Size, size_t MaxSize);
+
+ /// Applies one of the configured mutations to the bytes of Data
+ /// that have '1' in Mask.
+ /// Mask.size() should be >= Size.
+ size_t MutateWithMask(uint8_t *Data, size_t Size, size_t MaxSize,
+ const Vector<uint8_t> &Mask);
+
+ /// Applies one of the default mutations. Provided as a service
+ /// to mutation authors.
+ size_t DefaultMutate(uint8_t *Data, size_t Size, size_t MaxSize);
+
+ /// Creates a cross-over of two pieces of Data, returns its size.
+ size_t CrossOver(const uint8_t *Data1, size_t Size1, const uint8_t *Data2,
+ size_t Size2, uint8_t *Out, size_t MaxOutSize);
+
+ void AddWordToManualDictionary(const Word &W);
+
+ void PrintRecommendedDictionary();
+
+ void SetCrossOverWith(const Unit *U) { CrossOverWith = U; }
+
+ Random &GetRand() { return Rand; }
+
+ private:
+ struct Mutator {
+ size_t (MutationDispatcher::*Fn)(uint8_t *Data, size_t Size, size_t Max);
+ const char *Name;
+ };
+
+ size_t AddWordFromDictionary(Dictionary &D, uint8_t *Data, size_t Size,
+ size_t MaxSize);
+ size_t MutateImpl(uint8_t *Data, size_t Size, size_t MaxSize,
+ Vector<Mutator> &Mutators);
+
+ size_t InsertPartOf(const uint8_t *From, size_t FromSize, uint8_t *To,
+ size_t ToSize, size_t MaxToSize);
+ size_t CopyPartOf(const uint8_t *From, size_t FromSize, uint8_t *To,
+ size_t ToSize);
+ size_t ApplyDictionaryEntry(uint8_t *Data, size_t Size, size_t MaxSize,
+ DictionaryEntry &DE);
+
+ template <class T>
+ DictionaryEntry MakeDictionaryEntryFromCMP(T Arg1, T Arg2,
+ const uint8_t *Data, size_t Size);
+ DictionaryEntry MakeDictionaryEntryFromCMP(const Word &Arg1, const Word &Arg2,
+ const uint8_t *Data, size_t Size);
+ DictionaryEntry MakeDictionaryEntryFromCMP(const void *Arg1, const void *Arg2,
+ const void *Arg1Mutation,
+ const void *Arg2Mutation,
+ size_t ArgSize,
+ const uint8_t *Data, size_t Size);
+
+ Random &Rand;
+ const FuzzingOptions Options;
+
+ // Dictionary provided by the user via -dict=DICT_FILE.
+ Dictionary ManualDictionary;
+ // Temporary dictionary modified by the fuzzer itself,
+ // recreated periodically.
+ Dictionary TempAutoDictionary;
+ // Persistent dictionary modified by the fuzzer, consists of
+ // entries that led to successful discoveries in the past mutations.
+ Dictionary PersistentAutoDictionary;
+
+ Vector<DictionaryEntry *> CurrentDictionaryEntrySequence;
+
+ static const size_t kCmpDictionaryEntriesDequeSize = 16;
+ DictionaryEntry CmpDictionaryEntriesDeque[kCmpDictionaryEntriesDequeSize];
+ size_t CmpDictionaryEntriesDequeIdx = 0;
+
+ const Unit *CrossOverWith = nullptr;
+ Vector<uint8_t> MutateInPlaceHere;
+ Vector<uint8_t> MutateWithMaskTemp;
+ // CustomCrossOver needs its own buffer as a custom implementation may call
+ // LLVMFuzzerMutate, which in turn may resize MutateInPlaceHere.
+ Vector<uint8_t> CustomCrossOverInPlaceHere;
+
+ Vector<Mutator> Mutators;
+ Vector<Mutator> DefaultMutators;
+ Vector<Mutator> CurrentMutatorSequence;
+};
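+
+// Usage sketch (illustrative only; Seed, Options, Data, Size, MaxSize are
+// placeholders):
+//   Random Rand(Seed);
+//   MutationDispatcher MD(Rand, Options);
+//   MD.StartMutationSequence();
+//   size_t NewSize = MD.Mutate(Data, Size, MaxSize);
+//   // ... run the target; if the input found new coverage:
+//   MD.RecordSuccessfulMutationSequence();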
+
+} // namespace fuzzer
+
+#endif // LLVM_FUZZER_MUTATE_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerMutate.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerOptions.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerOptions.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerOptions.h (revision 351984)
@@ -0,0 +1,83 @@
+//===- FuzzerOptions.h - Internal header for the Fuzzer --------*- C++ -* ===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// fuzzer::FuzzingOptions
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_FUZZER_OPTIONS_H
+#define LLVM_FUZZER_OPTIONS_H
+
+#include "FuzzerDefs.h"
+
+namespace fuzzer {
+
+struct FuzzingOptions {
+ int Verbosity = 1;
+ size_t MaxLen = 0;
+ size_t LenControl = 1000;
+ int UnitTimeoutSec = 300;
+ int TimeoutExitCode = 70;
+ int OOMExitCode = 71;
+ int InterruptExitCode = 72;
+ int ErrorExitCode = 77;
+ bool IgnoreTimeouts = true;
+ bool IgnoreOOMs = true;
+ bool IgnoreCrashes = false;
+ int MaxTotalTimeSec = 0;
+ int RssLimitMb = 0;
+ int MallocLimitMb = 0;
+ bool DoCrossOver = true;
+ int MutateDepth = 5;
+ bool ReduceDepth = false;
+ bool UseCounters = false;
+ bool UseMemmem = true;
+ bool UseCmp = false;
+ int UseValueProfile = false;
+ bool Shrink = false;
+ bool ReduceInputs = false;
+ int ReloadIntervalSec = 1;
+ bool ShuffleAtStartUp = true;
+ bool PreferSmall = true;
+ size_t MaxNumberOfRuns = -1L;
+ int ReportSlowUnits = 10;
+ bool OnlyASCII = false;
+ std::string OutputCorpus;
+ std::string ArtifactPrefix = "./";
+ std::string ExactArtifactPath;
+ std::string ExitOnSrcPos;
+ std::string ExitOnItem;
+ std::string FocusFunction;
+ std::string DataFlowTrace;
+ std::string CollectDataFlow;
+ std::string FeaturesDir;
+ std::string StopFile;
+ bool SaveArtifacts = true;
+  bool PrintNEW = true; // Print a status line when new units are found.
+ bool PrintNewCovPcs = false;
+ int PrintNewCovFuncs = 0;
+ bool PrintFinalStats = false;
+ bool PrintCorpusStats = false;
+ bool PrintCoverage = false;
+ bool DumpCoverage = false;
+ bool DetectLeaks = true;
+ int PurgeAllocatorIntervalSec = 1;
+ int TraceMalloc = 0;
+ bool HandleAbrt = false;
+ bool HandleBus = false;
+ bool HandleFpe = false;
+ bool HandleIll = false;
+ bool HandleInt = false;
+ bool HandleSegv = false;
+ bool HandleTerm = false;
+ bool HandleXfsz = false;
+ bool HandleUsr1 = false;
+ bool HandleUsr2 = false;
+ bool LazyCounters = false;
+};
+
+} // namespace fuzzer
+
+#endif // LLVM_FUZZER_OPTIONS_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerOptions.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerRandom.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerRandom.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerRandom.h (revision 351984)
@@ -0,0 +1,38 @@
+//===- FuzzerRandom.h - Internal header for the Fuzzer ----------*- C++ -* ===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// fuzzer::Random
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_FUZZER_RANDOM_H
+#define LLVM_FUZZER_RANDOM_H
+
+#include <cassert>
+#include <cmath>
+#include <random>
+
+namespace fuzzer {
+class Random : public std::minstd_rand {
+ public:
+ Random(unsigned int seed) : std::minstd_rand(seed) {}
+ result_type operator()() { return this->std::minstd_rand::operator()(); }
+ size_t Rand() { return this->operator()(); }
+ size_t RandBool() { return Rand() % 2; }
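+  // Skewed towards n - 1: T is uniform in [0, n*n) and the result is
+  // sqrt(T), so P(result == k) = (2k + 1) / n^2, roughly linear in k.
+  // (Added commentary.)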
+ size_t SkewTowardsLast(size_t n) {
+ size_t T = this->operator()(n * n);
+ size_t Res = sqrt(T);
+ return Res;
+ }
+ size_t operator()(size_t n) { return n ? Rand() % n : 0; }
+ intptr_t operator()(intptr_t From, intptr_t To) {
+ assert(From < To);
+ intptr_t RangeSize = To - From + 1;
+ return operator()(RangeSize) + From;
+ }
+};
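+
+// Usage sketch (illustrative only):
+//   Random Rand(Seed);
+//   size_t Idx = Rand(N);        // Uniform in [0, N); 0 when N == 0.
+//   bool Heads = Rand.RandBool();
+//   intptr_t D = Rand(-10, 10);  // Uniform in [-10, 10].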
+
+} // namespace fuzzer
+
+#endif // LLVM_FUZZER_RANDOM_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerRandom.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerSHA1.cpp
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerSHA1.cpp (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerSHA1.cpp (revision 351984)
@@ -0,0 +1,222 @@
+//===- FuzzerSHA1.cpp - Private copy of the SHA1 implementation -*- C++ -* ===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// This code is taken from the public domain
+// (http://oauth.googlecode.com/svn/code/c/liboauth/src/sha1.c)
+// and modified by adding anonymous namespace, adding an interface
+// function fuzzer::ComputeSHA1() and removing unnecessary code.
+//
+// lib/Fuzzer can not use SHA1 implementation from openssl because
+// openssl may not be available and because we may be fuzzing openssl itself.
+// For the same reason we do not want to depend on SHA1 from LLVM tree.
+//===----------------------------------------------------------------------===//
+
+#include "FuzzerSHA1.h"
+#include "FuzzerDefs.h"
+
+/* This code is public-domain - it is based on libcrypt
+ * placed in the public domain by Wei Dai and other contributors.
+ */
+
+#include <iomanip>
+#include <sstream>
+#include <stdint.h>
+#include <string.h>
+
+namespace { // Added for LibFuzzer
+
+#ifdef __BIG_ENDIAN__
+# define SHA_BIG_ENDIAN
+// Windows is always little endian and MSVC doesn't have <endian.h>
+#elif defined __LITTLE_ENDIAN__ || LIBFUZZER_WINDOWS
+/* override */
+#elif defined __BYTE_ORDER
+# if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+# define SHA_BIG_ENDIAN
+# endif
+#else // ! defined __LITTLE_ENDIAN__
+# include <endian.h> // machine/endian.h
+# if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+# define SHA_BIG_ENDIAN
+# endif
+#endif
+
+
+/* header */
+
+#define HASH_LENGTH 20
+#define BLOCK_LENGTH 64
+
+typedef struct sha1nfo {
+ uint32_t buffer[BLOCK_LENGTH/4];
+ uint32_t state[HASH_LENGTH/4];
+ uint32_t byteCount;
+ uint8_t bufferOffset;
+ uint8_t keyBuffer[BLOCK_LENGTH];
+ uint8_t innerHash[HASH_LENGTH];
+} sha1nfo;
+
+/* public API - prototypes */
+
+/** Initialize the SHA-1 state. */
+void sha1_init(sha1nfo *s);
+/** Hash one byte. */
+void sha1_writebyte(sha1nfo *s, uint8_t data);
+/** Hash len bytes from data. */
+void sha1_write(sha1nfo *s, const char *data, size_t len);
+/** Pad the final block and return a pointer to the 20-byte digest. */
+uint8_t* sha1_result(sha1nfo *s);
+
+
+/* code */
+#define SHA1_K0 0x5a827999
+#define SHA1_K20 0x6ed9eba1
+#define SHA1_K40 0x8f1bbcdc
+#define SHA1_K60 0xca62c1d6
+
+void sha1_init(sha1nfo *s) {
+ s->state[0] = 0x67452301;
+ s->state[1] = 0xefcdab89;
+ s->state[2] = 0x98badcfe;
+ s->state[3] = 0x10325476;
+ s->state[4] = 0xc3d2e1f0;
+ s->byteCount = 0;
+ s->bufferOffset = 0;
+}
+
+uint32_t sha1_rol32(uint32_t number, uint8_t bits) {
+ return ((number << bits) | (number >> (32-bits)));
+}
+
+void sha1_hashBlock(sha1nfo *s) {
+ uint8_t i;
+ uint32_t a,b,c,d,e,t;
+
+ a=s->state[0];
+ b=s->state[1];
+ c=s->state[2];
+ d=s->state[3];
+ e=s->state[4];
+ for (i=0; i<80; i++) {
+ if (i>=16) {
+ t = s->buffer[(i+13)&15] ^ s->buffer[(i+8)&15] ^ s->buffer[(i+2)&15] ^ s->buffer[i&15];
+ s->buffer[i&15] = sha1_rol32(t,1);
+ }
+ if (i<20) {
+ t = (d ^ (b & (c ^ d))) + SHA1_K0;
+ } else if (i<40) {
+ t = (b ^ c ^ d) + SHA1_K20;
+ } else if (i<60) {
+ t = ((b & c) | (d & (b | c))) + SHA1_K40;
+ } else {
+ t = (b ^ c ^ d) + SHA1_K60;
+ }
+ t+=sha1_rol32(a,5) + e + s->buffer[i&15];
+ e=d;
+ d=c;
+ c=sha1_rol32(b,30);
+ b=a;
+ a=t;
+ }
+ s->state[0] += a;
+ s->state[1] += b;
+ s->state[2] += c;
+ s->state[3] += d;
+ s->state[4] += e;
+}
+
+void sha1_addUncounted(sha1nfo *s, uint8_t data) {
+ uint8_t * const b = (uint8_t*) s->buffer;
+#ifdef SHA_BIG_ENDIAN
+ b[s->bufferOffset] = data;
+#else
+ b[s->bufferOffset ^ 3] = data;
+#endif
+ s->bufferOffset++;
+ if (s->bufferOffset == BLOCK_LENGTH) {
+ sha1_hashBlock(s);
+ s->bufferOffset = 0;
+ }
+}
+
+void sha1_writebyte(sha1nfo *s, uint8_t data) {
+ ++s->byteCount;
+ sha1_addUncounted(s, data);
+}
+
+void sha1_write(sha1nfo *s, const char *data, size_t len) {
+ for (;len--;) sha1_writebyte(s, (uint8_t) *data++);
+}
+
+void sha1_pad(sha1nfo *s) {
+ // Implement SHA-1 padding (fips180-2 §5.1.1)
+
+ // Pad with 0x80 followed by 0x00 until the end of the block
+ sha1_addUncounted(s, 0x80);
+ while (s->bufferOffset != 56) sha1_addUncounted(s, 0x00);
+
+ // Append length in the last 8 bytes
+ sha1_addUncounted(s, 0); // We're only using 32 bit lengths
+ sha1_addUncounted(s, 0); // But SHA-1 supports 64 bit lengths
+ sha1_addUncounted(s, 0); // So zero pad the top bits
+ sha1_addUncounted(s, s->byteCount >> 29); // Shifting to multiply by 8
+ sha1_addUncounted(s, s->byteCount >> 21); // as SHA-1 supports bitstreams as well as
+  sha1_addUncounted(s, s->byteCount >> 13); // bytes.
+ sha1_addUncounted(s, s->byteCount >> 5);
+ sha1_addUncounted(s, s->byteCount << 3);
+}
+
+uint8_t* sha1_result(sha1nfo *s) {
+ // Pad to complete the last block
+ sha1_pad(s);
+
+#ifndef SHA_BIG_ENDIAN
+ // Swap byte order back
+ int i;
+ for (i=0; i<5; i++) {
+ s->state[i]=
+ (((s->state[i])<<24)& 0xff000000)
+ | (((s->state[i])<<8) & 0x00ff0000)
+ | (((s->state[i])>>8) & 0x0000ff00)
+ | (((s->state[i])>>24)& 0x000000ff);
+ }
+#endif
+
+ // Return pointer to hash (20 characters)
+ return (uint8_t*) s->state;
+}
+
+} // namespace; Added for LibFuzzer
+
+namespace fuzzer {
+
+// The rest is added for LibFuzzer
+void ComputeSHA1(const uint8_t *Data, size_t Len, uint8_t *Out) {
+ sha1nfo s;
+ sha1_init(&s);
+ sha1_write(&s, (const char*)Data, Len);
+ memcpy(Out, sha1_result(&s), HASH_LENGTH);
+}
+
+std::string Sha1ToString(const uint8_t Sha1[kSHA1NumBytes]) {
+ std::stringstream SS;
+ for (int i = 0; i < kSHA1NumBytes; i++)
+ SS << std::hex << std::setfill('0') << std::setw(2) << (unsigned)Sha1[i];
+ return SS.str();
+}
+
+std::string Hash(const Unit &U) {
+ uint8_t Hash[kSHA1NumBytes];
+ ComputeSHA1(U.data(), U.size(), Hash);
+ return Sha1ToString(Hash);
+}
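+
+// Example (illustrative): Hash of an empty Unit is the well-known SHA-1 of
+// the empty string, "da39a3ee5e6b4b0d3255bfef95601890afd80709".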
+
+} // namespace fuzzer
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerSHA1.cpp
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerSHA1.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerSHA1.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerSHA1.h (revision 351984)
@@ -0,0 +1,32 @@
+//===- FuzzerSHA1.h - Internal header for the SHA1 utils --------*- C++ -* ===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// SHA1 utils.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_FUZZER_SHA1_H
+#define LLVM_FUZZER_SHA1_H
+
+#include "FuzzerDefs.h"
+#include <cstddef>
+#include <stdint.h>
+
+namespace fuzzer {
+
+// Private copy of SHA1 implementation.
+static const int kSHA1NumBytes = 20;
+
+// Computes SHA1 hash of 'Len' bytes in 'Data', writes kSHA1NumBytes to 'Out'.
+void ComputeSHA1(const uint8_t *Data, size_t Len, uint8_t *Out);
+
+std::string Sha1ToString(const uint8_t Sha1[kSHA1NumBytes]);
+
+std::string Hash(const Unit &U);
+
+} // namespace fuzzer
+
+#endif // LLVM_FUZZER_SHA1_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerSHA1.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerTracePC.cpp
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerTracePC.cpp (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerTracePC.cpp (revision 351984)
@@ -0,0 +1,689 @@
+//===- FuzzerTracePC.cpp - PC tracing -------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// Trace PCs.
+// This module implements __sanitizer_cov_trace_pc_guard[_init],
+// the callback required for -fsanitize-coverage=trace-pc-guard instrumentation.
+//
+//===----------------------------------------------------------------------===//
+
+#include "FuzzerTracePC.h"
+#include "FuzzerBuiltins.h"
+#include "FuzzerBuiltinsMsvc.h"
+#include "FuzzerCorpus.h"
+#include "FuzzerDefs.h"
+#include "FuzzerDictionary.h"
+#include "FuzzerExtFunctions.h"
+#include "FuzzerIO.h"
+#include "FuzzerUtil.h"
+#include "FuzzerValueBitMap.h"
+#include <set>
+
+// Used by -fsanitize-coverage=stack-depth to track stack depth
+ATTRIBUTES_INTERFACE_TLS_INITIAL_EXEC uintptr_t __sancov_lowest_stack;
+
+namespace fuzzer {
+
+TracePC TPC;
+
+size_t TracePC::GetTotalPCCoverage() {
+ return ObservedPCs.size();
+}
+
+
+void TracePC::HandleInline8bitCountersInit(uint8_t *Start, uint8_t *Stop) {
+ if (Start == Stop) return;
+ if (NumModules &&
+ Modules[NumModules - 1].Start() == Start)
+ return;
+ assert(NumModules <
+ sizeof(Modules) / sizeof(Modules[0]));
+ auto &M = Modules[NumModules++];
+ uint8_t *AlignedStart = RoundUpByPage(Start);
+ uint8_t *AlignedStop = RoundDownByPage(Stop);
+ size_t NumFullPages = AlignedStop > AlignedStart ?
+ (AlignedStop - AlignedStart) / PageSize() : 0;
+ bool NeedFirst = Start < AlignedStart || !NumFullPages;
+ bool NeedLast = Stop > AlignedStop && AlignedStop >= AlignedStart;
+  M.NumRegions = NumFullPages + NeedFirst + NeedLast;
+ assert(M.NumRegions > 0);
+ M.Regions = new Module::Region[M.NumRegions];
+ assert(M.Regions);
+ size_t R = 0;
+ if (NeedFirst)
+ M.Regions[R++] = {Start, std::min(Stop, AlignedStart), true, false};
+ for (uint8_t *P = AlignedStart; P < AlignedStop; P += PageSize())
+ M.Regions[R++] = {P, P + PageSize(), true, true};
+ if (NeedLast)
+ M.Regions[R++] = {AlignedStop, Stop, true, false};
+ assert(R == M.NumRegions);
+ assert(M.Size() == (size_t)(Stop - Start));
+ assert(M.Stop() == Stop);
+ assert(M.Start() == Start);
+ NumInline8bitCounters += M.Size();
+}
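+
+// Illustrative region layout for the function above (addresses made up,
+// 4096-byte pages): a counter array [0x1ff0, 0x4010) becomes
+//   [0x1ff0, 0x2000)  partial first page  (OneFullPage = false)
+//   [0x2000, 0x3000)  full page           (OneFullPage = true)
+//   [0x3000, 0x4000)  full page           (OneFullPage = true)
+//   [0x4000, 0x4010)  partial last page   (OneFullPage = false)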
+
+// Mark all full page counter regions as PROT_NONE and set Enabled=false.
+// The first time the instrumented code hits such a protected/disabled
+// counter region we should catch a SEGV and call UnprotectLazyCounters,
+// which will mark the page as PROT_READ|PROT_WRITE and set Enabled=true.
+//
+// Whenever other functions iterate over the counters they should ignore
+// regions with Enabled=false.
+void TracePC::ProtectLazyCounters() {
+ size_t NumPagesProtected = 0;
+ IterateCounterRegions([&](Module::Region &R) {
+ if (!R.OneFullPage) return;
+ if (Mprotect(R.Start, R.Stop - R.Start, false)) {
+ R.Enabled = false;
+ NumPagesProtected++;
+ }
+ });
+ if (NumPagesProtected)
+    Printf("INFO: %zd pages of counters were protected;"
+ " libFuzzer's SEGV handler must be installed\n",
+ NumPagesProtected);
+}
+
+bool TracePC::UnprotectLazyCounters(void *CounterPtr) {
+ // Printf("UnprotectLazyCounters: %p\n", CounterPtr);
+ if (!CounterPtr)
+ return false;
+ bool Done = false;
+ uint8_t *Addr = reinterpret_cast<uint8_t *>(CounterPtr);
+ IterateCounterRegions([&](Module::Region &R) {
+ if (!R.OneFullPage || R.Enabled || Done) return;
+ if (Addr >= R.Start && Addr < R.Stop)
+ if (Mprotect(R.Start, R.Stop - R.Start, true)) {
+ R.Enabled = true;
+ Done = true;
+ }
+ });
+ return Done;
+}
+
+void TracePC::HandlePCsInit(const uintptr_t *Start, const uintptr_t *Stop) {
+ const PCTableEntry *B = reinterpret_cast<const PCTableEntry *>(Start);
+ const PCTableEntry *E = reinterpret_cast<const PCTableEntry *>(Stop);
+ if (NumPCTables && ModulePCTable[NumPCTables - 1].Start == B) return;
+ assert(NumPCTables < sizeof(ModulePCTable) / sizeof(ModulePCTable[0]));
+ ModulePCTable[NumPCTables++] = {B, E};
+ NumPCsInPCTables += E - B;
+}
+
+void TracePC::PrintModuleInfo() {
+ if (NumModules) {
+ Printf("INFO: Loaded %zd modules (%zd inline 8-bit counters): ",
+ NumModules, NumInline8bitCounters);
+ for (size_t i = 0; i < NumModules; i++)
+ Printf("%zd [%p, %p), ", Modules[i].Size(), Modules[i].Start(),
+ Modules[i].Stop());
+ Printf("\n");
+ }
+ if (NumPCTables) {
+ Printf("INFO: Loaded %zd PC tables (%zd PCs): ", NumPCTables,
+ NumPCsInPCTables);
+ for (size_t i = 0; i < NumPCTables; i++) {
+ Printf("%zd [%p,%p), ", ModulePCTable[i].Stop - ModulePCTable[i].Start,
+ ModulePCTable[i].Start, ModulePCTable[i].Stop);
+ }
+ Printf("\n");
+
+ if (NumInline8bitCounters && NumInline8bitCounters != NumPCsInPCTables) {
+ Printf("ERROR: The size of coverage PC tables does not match the\n"
+ "number of instrumented PCs. This might be a compiler bug,\n"
+ "please contact the libFuzzer developers.\n"
+ "Also check https://bugs.llvm.org/show_bug.cgi?id=34636\n"
+ "for possible workarounds (tl;dr: don't use the old GNU ld)\n");
+ _Exit(1);
+ }
+ }
+ if (size_t NumExtraCounters = ExtraCountersEnd() - ExtraCountersBegin())
+ Printf("INFO: %zd Extra Counters\n", NumExtraCounters);
+}
+
+ATTRIBUTE_NO_SANITIZE_ALL
+void TracePC::HandleCallerCallee(uintptr_t Caller, uintptr_t Callee) {
+ const uintptr_t kBits = 12;
+ const uintptr_t kMask = (1 << kBits) - 1;
+ uintptr_t Idx = (Caller & kMask) | ((Callee & kMask) << kBits);
+ ValueProfileMap.AddValueModPrime(Idx);
+}
+
+/// \return the address of the previous instruction.
+/// Note: the logic is copied from `sanitizer_common/sanitizer_stacktrace.h`
+inline ALWAYS_INLINE uintptr_t GetPreviousInstructionPc(uintptr_t PC) {
+#if defined(__arm__)
+ // T32 (Thumb) branch instructions might be 16 or 32 bit long,
+ // so we return (pc-2) in that case in order to be safe.
+ // For A32 mode we return (pc-4) because all instructions are 32 bit long.
+ return (PC - 3) & (~1);
+#elif defined(__powerpc__) || defined(__powerpc64__) || defined(__aarch64__)
+ // PCs are always 4 byte aligned.
+ return PC - 4;
+#elif defined(__sparc__) || defined(__mips__)
+ return PC - 8;
+#else
+ return PC - 1;
+#endif
+}
+
+/// \return the address of the next instruction.
+/// Note: the logic is copied from `sanitizer_common/sanitizer_stacktrace.cc`
+ALWAYS_INLINE uintptr_t TracePC::GetNextInstructionPc(uintptr_t PC) {
+#if defined(__mips__)
+ return PC + 8;
+#elif defined(__powerpc__) || defined(__sparc__) || defined(__arm__) || \
+ defined(__aarch64__)
+ return PC + 4;
+#else
+ return PC + 1;
+#endif
+}
+
+void TracePC::UpdateObservedPCs() {
+ Vector<uintptr_t> CoveredFuncs;
+ auto ObservePC = [&](const PCTableEntry *TE) {
+ if (ObservedPCs.insert(TE).second && DoPrintNewPCs) {
+ PrintPC("\tNEW_PC: %p %F %L", "\tNEW_PC: %p",
+ GetNextInstructionPc(TE->PC));
+ Printf("\n");
+ }
+ };
+
+ auto Observe = [&](const PCTableEntry *TE) {
+ if (PcIsFuncEntry(TE))
+ if (++ObservedFuncs[TE->PC] == 1 && NumPrintNewFuncs)
+ CoveredFuncs.push_back(TE->PC);
+ ObservePC(TE);
+ };
+
+ if (NumPCsInPCTables) {
+ if (NumInline8bitCounters == NumPCsInPCTables) {
+ for (size_t i = 0; i < NumModules; i++) {
+ auto &M = Modules[i];
+ assert(M.Size() ==
+ (size_t)(ModulePCTable[i].Stop - ModulePCTable[i].Start));
+ for (size_t r = 0; r < M.NumRegions; r++) {
+ auto &R = M.Regions[r];
+ if (!R.Enabled) continue;
+ for (uint8_t *P = R.Start; P < R.Stop; P++)
+ if (*P)
+ Observe(&ModulePCTable[i].Start[M.Idx(P)]);
+ }
+ }
+ }
+ }
+
+ for (size_t i = 0, N = Min(CoveredFuncs.size(), NumPrintNewFuncs); i < N;
+ i++) {
+ Printf("\tNEW_FUNC[%zd/%zd]: ", i + 1, CoveredFuncs.size());
+ PrintPC("%p %F %L", "%p", GetNextInstructionPc(CoveredFuncs[i]));
+ Printf("\n");
+ }
+}
+
+uintptr_t TracePC::PCTableEntryIdx(const PCTableEntry *TE) {
+ size_t TotalTEs = 0;
+ for (size_t i = 0; i < NumPCTables; i++) {
+ auto &M = ModulePCTable[i];
+ if (TE >= M.Start && TE < M.Stop)
+ return TotalTEs + TE - M.Start;
+ TotalTEs += M.Stop - M.Start;
+ }
+ assert(0);
+ return 0;
+}
+
+const TracePC::PCTableEntry *TracePC::PCTableEntryByIdx(uintptr_t Idx) {
+ for (size_t i = 0; i < NumPCTables; i++) {
+ auto &M = ModulePCTable[i];
+ size_t Size = M.Stop - M.Start;
+ if (Idx < Size) return &M.Start[Idx];
+ Idx -= Size;
+ }
+ return nullptr;
+}
+
+static std::string GetModuleName(uintptr_t PC) {
+ char ModulePathRaw[4096] = ""; // What's PATH_MAX in portable C++?
+ void *OffsetRaw = nullptr;
+ if (!EF->__sanitizer_get_module_and_offset_for_pc(
+ reinterpret_cast<void *>(PC), ModulePathRaw,
+ sizeof(ModulePathRaw), &OffsetRaw))
+ return "";
+ return ModulePathRaw;
+}
+
+template<class CallBack>
+void TracePC::IterateCoveredFunctions(CallBack CB) {
+ for (size_t i = 0; i < NumPCTables; i++) {
+ auto &M = ModulePCTable[i];
+ assert(M.Start < M.Stop);
+ auto ModuleName = GetModuleName(M.Start->PC);
+ for (auto NextFE = M.Start; NextFE < M.Stop; ) {
+ auto FE = NextFE;
+ assert(PcIsFuncEntry(FE) && "Not a function entry point");
+ do {
+ NextFE++;
+ } while (NextFE < M.Stop && !(PcIsFuncEntry(NextFE)));
+ CB(FE, NextFE, ObservedFuncs[FE->PC]);
+ }
+ }
+}
+
+void TracePC::SetFocusFunction(const std::string &FuncName) {
+ // This function should be called once.
+ assert(!FocusFunctionCounterPtr);
+ if (FuncName.empty())
+ return;
+ for (size_t M = 0; M < NumModules; M++) {
+ auto &PCTE = ModulePCTable[M];
+ size_t N = PCTE.Stop - PCTE.Start;
+ for (size_t I = 0; I < N; I++) {
+ if (!(PcIsFuncEntry(&PCTE.Start[I]))) continue; // not a function entry.
+ auto Name = DescribePC("%F", GetNextInstructionPc(PCTE.Start[I].PC));
+ if (Name[0] == 'i' && Name[1] == 'n' && Name[2] == ' ')
+ Name = Name.substr(3, std::string::npos);
+ if (FuncName != Name) continue;
+ Printf("INFO: Focus function is set to '%s'\n", Name.c_str());
+ FocusFunctionCounterPtr = Modules[M].Start() + I;
+ return;
+ }
+ }
+}
+
+bool TracePC::ObservedFocusFunction() {
+ return FocusFunctionCounterPtr && *FocusFunctionCounterPtr;
+}
+
+void TracePC::PrintCoverage() {
+ if (!EF->__sanitizer_symbolize_pc ||
+ !EF->__sanitizer_get_module_and_offset_for_pc) {
+ Printf("INFO: __sanitizer_symbolize_pc or "
+ "__sanitizer_get_module_and_offset_for_pc is not available,"
+ " not printing coverage\n");
+ return;
+ }
+ Printf("COVERAGE:\n");
+ auto CoveredFunctionCallback = [&](const PCTableEntry *First,
+ const PCTableEntry *Last,
+ uintptr_t Counter) {
+ assert(First < Last);
+ auto VisualizePC = GetNextInstructionPc(First->PC);
+ std::string FileStr = DescribePC("%s", VisualizePC);
+ if (!IsInterestingCoverageFile(FileStr))
+ return;
+ std::string FunctionStr = DescribePC("%F", VisualizePC);
+ if (FunctionStr.find("in ") == 0)
+ FunctionStr = FunctionStr.substr(3);
+ std::string LineStr = DescribePC("%l", VisualizePC);
+ size_t NumEdges = Last - First;
+ Vector<uintptr_t> UncoveredPCs;
+ for (auto TE = First; TE < Last; TE++)
+ if (!ObservedPCs.count(TE))
+ UncoveredPCs.push_back(TE->PC);
+ Printf("%sCOVERED_FUNC: hits: %zd", Counter ? "" : "UN", Counter);
+ Printf(" edges: %zd/%zd", NumEdges - UncoveredPCs.size(), NumEdges);
+ Printf(" %s %s:%s\n", FunctionStr.c_str(), FileStr.c_str(),
+ LineStr.c_str());
+ if (Counter)
+ for (auto PC : UncoveredPCs)
+ Printf(" UNCOVERED_PC: %s\n",
+ DescribePC("%s:%l", GetNextInstructionPc(PC)).c_str());
+ };
+
+ IterateCoveredFunctions(CoveredFunctionCallback);
+}
+
+// Value profile.
+// We keep track of various values that affect control flow.
+// These values are inserted into a bit-set-based hash map.
+// Every new bit in the map is treated as a new coverage.
+//
+// For memcmp/strcmp/etc the interesting value is the length of the common
+// prefix of the parameters.
+// For cmp instructions the interesting value is a XOR of the parameters.
+// The interesting value is mixed up with the PC and is then added to the map.
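+//
+// For illustration: HandleCmp(PC, 0xff00, 0x0f00) computes ArgXor == 0xf000,
+// so HammingDistance == Popcountll(0xf000) == 4 and the feature PC * 128 + 4
+// is added to the map (a second feature encodes the absolute distance).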
+
+ATTRIBUTE_NO_SANITIZE_ALL
+void TracePC::AddValueForMemcmp(void *caller_pc, const void *s1, const void *s2,
+ size_t n, bool StopAtZero) {
+ if (!n) return;
+ size_t Len = std::min(n, Word::GetMaxSize());
+ const uint8_t *A1 = reinterpret_cast<const uint8_t *>(s1);
+ const uint8_t *A2 = reinterpret_cast<const uint8_t *>(s2);
+ uint8_t B1[Word::kMaxSize];
+ uint8_t B2[Word::kMaxSize];
+ // Copy the data into locals in this non-msan-instrumented function
+ // to avoid msan complaining further.
+ size_t Hash = 0; // Compute some simple hash of both strings.
+ for (size_t i = 0; i < Len; i++) {
+ B1[i] = A1[i];
+ B2[i] = A2[i];
+ size_t T = B1[i];
+ Hash ^= (T << 8) | B2[i];
+ }
+ size_t I = 0;
+ uint8_t HammingDistance = 0;
+ for (; I < Len; I++) {
+ if (B1[I] != B2[I] || (StopAtZero && B1[I] == 0)) {
+ HammingDistance = Popcountll(B1[I] ^ B2[I]);
+ break;
+ }
+ }
+ size_t PC = reinterpret_cast<size_t>(caller_pc);
+ size_t Idx = (PC & 4095) | (I << 12);
+ Idx += HammingDistance;
+ ValueProfileMap.AddValue(Idx);
+ TORCW.Insert(Idx ^ Hash, Word(B1, Len), Word(B2, Len));
+}
+
+template <class T>
+ATTRIBUTE_TARGET_POPCNT ALWAYS_INLINE
+ATTRIBUTE_NO_SANITIZE_ALL
+void TracePC::HandleCmp(uintptr_t PC, T Arg1, T Arg2) {
+ uint64_t ArgXor = Arg1 ^ Arg2;
+ if (sizeof(T) == 4)
+ TORC4.Insert(ArgXor, Arg1, Arg2);
+ else if (sizeof(T) == 8)
+ TORC8.Insert(ArgXor, Arg1, Arg2);
+ uint64_t HammingDistance = Popcountll(ArgXor); // [0,64]
+ uint64_t AbsoluteDistance = (Arg1 == Arg2 ? 0 : Clzll(Arg1 - Arg2) + 1);
+ ValueProfileMap.AddValue(PC * 128 + HammingDistance);
+ ValueProfileMap.AddValue(PC * 128 + 64 + AbsoluteDistance);
+}
+
+static size_t InternalStrnlen(const char *S, size_t MaxLen) {
+ size_t Len = 0;
+ for (; Len < MaxLen && S[Len]; Len++) {}
+ return Len;
+}
+
+// Finds min of (strlen(S1), strlen(S2)).
+// Needed because one of these strings may actually not be zero-terminated.
+static size_t InternalStrnlen2(const char *S1, const char *S2) {
+ size_t Len = 0;
+ for (; S1[Len] && S2[Len]; Len++) {}
+ return Len;
+}
+
+void TracePC::ClearInlineCounters() {
+ IterateCounterRegions([](const Module::Region &R){
+ if (R.Enabled)
+ memset(R.Start, 0, R.Stop - R.Start);
+ });
+}
+
+ATTRIBUTE_NO_SANITIZE_ALL
+void TracePC::RecordInitialStack() {
+ int stack;
+ __sancov_lowest_stack = InitialStack = reinterpret_cast<uintptr_t>(&stack);
+}
+
+uintptr_t TracePC::GetMaxStackOffset() const {
+ return InitialStack - __sancov_lowest_stack; // Stack grows down
+}
+
+void WarnAboutDeprecatedInstrumentation(const char *flag) {
+ // Use RawPrint because Printf cannot be used on Windows before OutputFile is
+ // initialized.
+ RawPrint(flag);
+ RawPrint(
+ " is no longer supported by libFuzzer.\n"
+ "Please either migrate to a compiler that supports -fsanitize=fuzzer\n"
+ "or use an older version of libFuzzer\n");
+ exit(1);
+}
+
+} // namespace fuzzer
+
+extern "C" {
+ATTRIBUTE_INTERFACE
+ATTRIBUTE_NO_SANITIZE_ALL
+void __sanitizer_cov_trace_pc_guard(uint32_t *Guard) {
+ fuzzer::WarnAboutDeprecatedInstrumentation(
+ "-fsanitize-coverage=trace-pc-guard");
+}
+
+// Best-effort support for -fsanitize-coverage=trace-pc, which is available
+// in both Clang and GCC.
+ATTRIBUTE_INTERFACE
+ATTRIBUTE_NO_SANITIZE_ALL
+void __sanitizer_cov_trace_pc() {
+ fuzzer::WarnAboutDeprecatedInstrumentation("-fsanitize-coverage=trace-pc");
+}
+
+ATTRIBUTE_INTERFACE
+void __sanitizer_cov_trace_pc_guard_init(uint32_t *Start, uint32_t *Stop) {
+ fuzzer::WarnAboutDeprecatedInstrumentation(
+ "-fsanitize-coverage=trace-pc-guard");
+}
+
+ATTRIBUTE_INTERFACE
+void __sanitizer_cov_8bit_counters_init(uint8_t *Start, uint8_t *Stop) {
+ fuzzer::TPC.HandleInline8bitCountersInit(Start, Stop);
+}
+
+ATTRIBUTE_INTERFACE
+void __sanitizer_cov_pcs_init(const uintptr_t *pcs_beg,
+ const uintptr_t *pcs_end) {
+ fuzzer::TPC.HandlePCsInit(pcs_beg, pcs_end);
+}
+
+ATTRIBUTE_INTERFACE
+ATTRIBUTE_NO_SANITIZE_ALL
+void __sanitizer_cov_trace_pc_indir(uintptr_t Callee) {
+ uintptr_t PC = reinterpret_cast<uintptr_t>(GET_CALLER_PC());
+ fuzzer::TPC.HandleCallerCallee(PC, Callee);
+}
+
+ATTRIBUTE_INTERFACE
+ATTRIBUTE_NO_SANITIZE_ALL
+ATTRIBUTE_TARGET_POPCNT
+void __sanitizer_cov_trace_cmp8(uint64_t Arg1, uint64_t Arg2) {
+ uintptr_t PC = reinterpret_cast<uintptr_t>(GET_CALLER_PC());
+ fuzzer::TPC.HandleCmp(PC, Arg1, Arg2);
+}
+
+ATTRIBUTE_INTERFACE
+ATTRIBUTE_NO_SANITIZE_ALL
+ATTRIBUTE_TARGET_POPCNT
+// For now, the __sanitizer_cov_trace_const_cmp[1248] callbacks just mimic
+// the behaviour of the __sanitizer_cov_trace_cmp[1248] ones. This, however,
+// should be changed later to make full use of instrumentation.
+void __sanitizer_cov_trace_const_cmp8(uint64_t Arg1, uint64_t Arg2) {
+ uintptr_t PC = reinterpret_cast<uintptr_t>(GET_CALLER_PC());
+ fuzzer::TPC.HandleCmp(PC, Arg1, Arg2);
+}
+
+ATTRIBUTE_INTERFACE
+ATTRIBUTE_NO_SANITIZE_ALL
+ATTRIBUTE_TARGET_POPCNT
+void __sanitizer_cov_trace_cmp4(uint32_t Arg1, uint32_t Arg2) {
+ uintptr_t PC = reinterpret_cast<uintptr_t>(GET_CALLER_PC());
+ fuzzer::TPC.HandleCmp(PC, Arg1, Arg2);
+}
+
+ATTRIBUTE_INTERFACE
+ATTRIBUTE_NO_SANITIZE_ALL
+ATTRIBUTE_TARGET_POPCNT
+void __sanitizer_cov_trace_const_cmp4(uint32_t Arg1, uint32_t Arg2) {
+ uintptr_t PC = reinterpret_cast<uintptr_t>(GET_CALLER_PC());
+ fuzzer::TPC.HandleCmp(PC, Arg1, Arg2);
+}
+
+ATTRIBUTE_INTERFACE
+ATTRIBUTE_NO_SANITIZE_ALL
+ATTRIBUTE_TARGET_POPCNT
+void __sanitizer_cov_trace_cmp2(uint16_t Arg1, uint16_t Arg2) {
+ uintptr_t PC = reinterpret_cast<uintptr_t>(GET_CALLER_PC());
+ fuzzer::TPC.HandleCmp(PC, Arg1, Arg2);
+}
+
+ATTRIBUTE_INTERFACE
+ATTRIBUTE_NO_SANITIZE_ALL
+ATTRIBUTE_TARGET_POPCNT
+void __sanitizer_cov_trace_const_cmp2(uint16_t Arg1, uint16_t Arg2) {
+ uintptr_t PC = reinterpret_cast<uintptr_t>(GET_CALLER_PC());
+ fuzzer::TPC.HandleCmp(PC, Arg1, Arg2);
+}
+
+ATTRIBUTE_INTERFACE
+ATTRIBUTE_NO_SANITIZE_ALL
+ATTRIBUTE_TARGET_POPCNT
+void __sanitizer_cov_trace_cmp1(uint8_t Arg1, uint8_t Arg2) {
+ uintptr_t PC = reinterpret_cast<uintptr_t>(GET_CALLER_PC());
+ fuzzer::TPC.HandleCmp(PC, Arg1, Arg2);
+}
+
+ATTRIBUTE_INTERFACE
+ATTRIBUTE_NO_SANITIZE_ALL
+ATTRIBUTE_TARGET_POPCNT
+void __sanitizer_cov_trace_const_cmp1(uint8_t Arg1, uint8_t Arg2) {
+ uintptr_t PC = reinterpret_cast<uintptr_t>(GET_CALLER_PC());
+ fuzzer::TPC.HandleCmp(PC, Arg1, Arg2);
+}
+
+ATTRIBUTE_INTERFACE
+ATTRIBUTE_NO_SANITIZE_ALL
+ATTRIBUTE_TARGET_POPCNT
+void __sanitizer_cov_trace_switch(uint64_t Val, uint64_t *Cases) {
+ uint64_t N = Cases[0];
+ uint64_t ValSizeInBits = Cases[1];
+ uint64_t *Vals = Cases + 2;
+ // Skip the most common and the most boring case: all switch values are small.
+ // We may want to skip this at compile-time, but it will make the
+ // instrumentation less general.
+ if (Vals[N - 1] < 256)
+ return;
+  // Also skip small input values; they won't give a good signal.
+ if (Val < 256)
+ return;
+ uintptr_t PC = reinterpret_cast<uintptr_t>(GET_CALLER_PC());
+ size_t i;
+ uint64_t Smaller = 0;
+ uint64_t Larger = ~(uint64_t)0;
+ // Find two switch values such that Smaller < Val < Larger.
+ // Use 0 and 0xfff..f as the defaults.
+ for (i = 0; i < N; i++) {
+ if (Val < Vals[i]) {
+ Larger = Vals[i];
+ break;
+ }
+ if (Val > Vals[i]) Smaller = Vals[i];
+ }
+
+ // Apply HandleCmp to {Val,Smaller} and {Val, Larger},
+ // use i as the PC modifier for HandleCmp.
+ if (ValSizeInBits == 16) {
+ fuzzer::TPC.HandleCmp(PC + 2 * i, static_cast<uint16_t>(Val),
+ (uint16_t)(Smaller));
+ fuzzer::TPC.HandleCmp(PC + 2 * i + 1, static_cast<uint16_t>(Val),
+ (uint16_t)(Larger));
+ } else if (ValSizeInBits == 32) {
+ fuzzer::TPC.HandleCmp(PC + 2 * i, static_cast<uint32_t>(Val),
+ (uint32_t)(Smaller));
+ fuzzer::TPC.HandleCmp(PC + 2 * i + 1, static_cast<uint32_t>(Val),
+ (uint32_t)(Larger));
+ } else {
+ fuzzer::TPC.HandleCmp(PC + 2*i, Val, Smaller);
+ fuzzer::TPC.HandleCmp(PC + 2*i + 1, Val, Larger);
+ }
+}
+
+ATTRIBUTE_INTERFACE
+ATTRIBUTE_NO_SANITIZE_ALL
+ATTRIBUTE_TARGET_POPCNT
+void __sanitizer_cov_trace_div4(uint32_t Val) {
+ uintptr_t PC = reinterpret_cast<uintptr_t>(GET_CALLER_PC());
+ fuzzer::TPC.HandleCmp(PC, Val, (uint32_t)0);
+}
+
+ATTRIBUTE_INTERFACE
+ATTRIBUTE_NO_SANITIZE_ALL
+ATTRIBUTE_TARGET_POPCNT
+void __sanitizer_cov_trace_div8(uint64_t Val) {
+ uintptr_t PC = reinterpret_cast<uintptr_t>(GET_CALLER_PC());
+ fuzzer::TPC.HandleCmp(PC, Val, (uint64_t)0);
+}
+
+ATTRIBUTE_INTERFACE
+ATTRIBUTE_NO_SANITIZE_ALL
+ATTRIBUTE_TARGET_POPCNT
+void __sanitizer_cov_trace_gep(uintptr_t Idx) {
+ uintptr_t PC = reinterpret_cast<uintptr_t>(GET_CALLER_PC());
+ fuzzer::TPC.HandleCmp(PC, Idx, (uintptr_t)0);
+}
+
+ATTRIBUTE_INTERFACE ATTRIBUTE_NO_SANITIZE_MEMORY
+void __sanitizer_weak_hook_memcmp(void *caller_pc, const void *s1,
+ const void *s2, size_t n, int result) {
+ if (!fuzzer::RunningUserCallback) return;
+ if (result == 0) return; // No reason to mutate.
+ if (n <= 1) return; // Not interesting.
+ fuzzer::TPC.AddValueForMemcmp(caller_pc, s1, s2, n, /*StopAtZero*/false);
+}
+
+ATTRIBUTE_INTERFACE ATTRIBUTE_NO_SANITIZE_MEMORY
+void __sanitizer_weak_hook_strncmp(void *caller_pc, const char *s1,
+ const char *s2, size_t n, int result) {
+ if (!fuzzer::RunningUserCallback) return;
+ if (result == 0) return; // No reason to mutate.
+ size_t Len1 = fuzzer::InternalStrnlen(s1, n);
+ size_t Len2 = fuzzer::InternalStrnlen(s2, n);
+ n = std::min(n, Len1);
+ n = std::min(n, Len2);
+ if (n <= 1) return; // Not interesting.
+ fuzzer::TPC.AddValueForMemcmp(caller_pc, s1, s2, n, /*StopAtZero*/true);
+}
+
+ATTRIBUTE_INTERFACE ATTRIBUTE_NO_SANITIZE_MEMORY
+void __sanitizer_weak_hook_strcmp(void *caller_pc, const char *s1,
+ const char *s2, int result) {
+ if (!fuzzer::RunningUserCallback) return;
+ if (result == 0) return; // No reason to mutate.
+ size_t N = fuzzer::InternalStrnlen2(s1, s2);
+ if (N <= 1) return; // Not interesting.
+ fuzzer::TPC.AddValueForMemcmp(caller_pc, s1, s2, N, /*StopAtZero*/true);
+}
+
+ATTRIBUTE_INTERFACE ATTRIBUTE_NO_SANITIZE_MEMORY
+void __sanitizer_weak_hook_strncasecmp(void *called_pc, const char *s1,
+ const char *s2, size_t n, int result) {
+ if (!fuzzer::RunningUserCallback) return;
+ return __sanitizer_weak_hook_strncmp(called_pc, s1, s2, n, result);
+}
+
+ATTRIBUTE_INTERFACE ATTRIBUTE_NO_SANITIZE_MEMORY
+void __sanitizer_weak_hook_strcasecmp(void *called_pc, const char *s1,
+ const char *s2, int result) {
+ if (!fuzzer::RunningUserCallback) return;
+ return __sanitizer_weak_hook_strcmp(called_pc, s1, s2, result);
+}
+
+ATTRIBUTE_INTERFACE ATTRIBUTE_NO_SANITIZE_MEMORY
+void __sanitizer_weak_hook_strstr(void *called_pc, const char *s1,
+ const char *s2, char *result) {
+ if (!fuzzer::RunningUserCallback) return;
+ fuzzer::TPC.MMT.Add(reinterpret_cast<const uint8_t *>(s2), strlen(s2));
+}
+
+ATTRIBUTE_INTERFACE ATTRIBUTE_NO_SANITIZE_MEMORY
+void __sanitizer_weak_hook_strcasestr(void *called_pc, const char *s1,
+ const char *s2, char *result) {
+ if (!fuzzer::RunningUserCallback) return;
+ fuzzer::TPC.MMT.Add(reinterpret_cast<const uint8_t *>(s2), strlen(s2));
+}
+
+ATTRIBUTE_INTERFACE ATTRIBUTE_NO_SANITIZE_MEMORY
+void __sanitizer_weak_hook_memmem(void *called_pc, const void *s1, size_t len1,
+ const void *s2, size_t len2, void *result) {
+ if (!fuzzer::RunningUserCallback) return;
+ fuzzer::TPC.MMT.Add(reinterpret_cast<const uint8_t *>(s2), len2);
+}
+} // extern "C"
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerTracePC.cpp
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerTracePC.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerTracePC.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerTracePC.h (revision 351984)
@@ -0,0 +1,292 @@
+//===- FuzzerTracePC.h - Internal header for the Fuzzer ---------*- C++ -* ===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// fuzzer::TracePC
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_FUZZER_TRACE_PC
+#define LLVM_FUZZER_TRACE_PC
+
+#include "FuzzerDefs.h"
+#include "FuzzerDictionary.h"
+#include "FuzzerValueBitMap.h"
+
+#include <set>
+#include <unordered_map>
+
+namespace fuzzer {
+
+// TableOfRecentCompares (TORC) remembers the most recently performed
+// comparisons of type T.
+// We record the arguments of CMP instructions in this table unconditionally
+// because it seems cheaper this way than to compute some expensive
+// conditions inside __sanitizer_cov_trace_cmp*.
+// After the unit has been executed we may decide to use the contents of
+// this table to populate a Dictionary.
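+//
+// A minimal usage sketch (illustrative values only):
+//   TableOfRecentCompares<uint64_t, 32> T;
+//   T.Insert(42, 0xCAFE, 0xBEEF); // stored in slot 42 % 32 == 10
+//   auto P = T.Get(42);           // P.A == 0xCAFE, P.B == 0xBEEF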
+template<class T, size_t kSizeT>
+struct TableOfRecentCompares {
+ static const size_t kSize = kSizeT;
+ struct Pair {
+ T A, B;
+ };
+ ATTRIBUTE_NO_SANITIZE_ALL
+ void Insert(size_t Idx, const T &Arg1, const T &Arg2) {
+ Idx = Idx % kSize;
+ Table[Idx].A = Arg1;
+ Table[Idx].B = Arg2;
+ }
+
+ Pair Get(size_t I) { return Table[I % kSize]; }
+
+ Pair Table[kSize];
+};
+
+template <size_t kSizeT>
+struct MemMemTable {
+ static const size_t kSize = kSizeT;
+ Word MemMemWords[kSize];
+ Word EmptyWord;
+
+ void Add(const uint8_t *Data, size_t Size) {
+ if (Size <= 2) return;
+ Size = std::min(Size, Word::GetMaxSize());
+ size_t Idx = SimpleFastHash(Data, Size) % kSize;
+ MemMemWords[Idx].Set(Data, Size);
+ }
+ const Word &Get(size_t Idx) {
+ for (size_t i = 0; i < kSize; i++) {
+ const Word &W = MemMemWords[(Idx + i) % kSize];
+ if (W.size()) return W;
+ }
+ EmptyWord.Set(nullptr, 0);
+ return EmptyWord;
+ }
+};
+
+class TracePC {
+ public:
+ void HandleInline8bitCountersInit(uint8_t *Start, uint8_t *Stop);
+ void HandlePCsInit(const uintptr_t *Start, const uintptr_t *Stop);
+ void HandleCallerCallee(uintptr_t Caller, uintptr_t Callee);
+ template <class T> void HandleCmp(uintptr_t PC, T Arg1, T Arg2);
+ size_t GetTotalPCCoverage();
+ void SetUseCounters(bool UC) { UseCounters = UC; }
+ void SetUseValueProfileMask(uint32_t VPMask) { UseValueProfileMask = VPMask; }
+ void SetPrintNewPCs(bool P) { DoPrintNewPCs = P; }
+ void SetPrintNewFuncs(size_t P) { NumPrintNewFuncs = P; }
+ void UpdateObservedPCs();
+ template <class Callback> void CollectFeatures(Callback CB) const;
+
+ void ResetMaps() {
+ ValueProfileMap.Reset();
+ ClearExtraCounters();
+ ClearInlineCounters();
+ }
+
+ void ClearInlineCounters();
+
+ void UpdateFeatureSet(size_t CurrentElementIdx, size_t CurrentElementSize);
+ void PrintFeatureSet();
+
+ void PrintModuleInfo();
+
+ void PrintCoverage();
+
+ template<class CallBack>
+ void IterateCoveredFunctions(CallBack CB);
+
+ void AddValueForMemcmp(void *caller_pc, const void *s1, const void *s2,
+ size_t n, bool StopAtZero);
+
+ TableOfRecentCompares<uint32_t, 32> TORC4;
+ TableOfRecentCompares<uint64_t, 32> TORC8;
+ TableOfRecentCompares<Word, 32> TORCW;
+ MemMemTable<1024> MMT;
+
+ void RecordInitialStack();
+ uintptr_t GetMaxStackOffset() const;
+
+ template<class CallBack>
+ void ForEachObservedPC(CallBack CB) {
+ for (auto PC : ObservedPCs)
+ CB(PC);
+ }
+
+ void SetFocusFunction(const std::string &FuncName);
+ bool ObservedFocusFunction();
+
+ void ProtectLazyCounters();
+ bool UnprotectLazyCounters(void *CounterPtr);
+
+ struct PCTableEntry {
+ uintptr_t PC, PCFlags;
+ };
+
+ uintptr_t PCTableEntryIdx(const PCTableEntry *TE);
+ const PCTableEntry *PCTableEntryByIdx(uintptr_t Idx);
+ static uintptr_t GetNextInstructionPc(uintptr_t PC);
+ bool PcIsFuncEntry(const PCTableEntry *TE) { return TE->PCFlags & 1; }
+
+private:
+ bool UseCounters = false;
+  uint32_t UseValueProfileMask = 0;
+ bool DoPrintNewPCs = false;
+ size_t NumPrintNewFuncs = 0;
+
+ // Module represents the array of 8-bit counters split into regions
+ // such that every region, except maybe the first and the last one, is one
+ // full page.
+ struct Module {
+ struct Region {
+ uint8_t *Start, *Stop;
+ bool Enabled;
+ bool OneFullPage;
+ };
+ Region *Regions;
+ size_t NumRegions;
+ uint8_t *Start() { return Regions[0].Start; }
+ uint8_t *Stop() { return Regions[NumRegions - 1].Stop; }
+ size_t Size() { return Stop() - Start(); }
+ size_t Idx(uint8_t *P) {
+ assert(P >= Start() && P < Stop());
+ return P - Start();
+ }
+ };
+
+ Module Modules[4096];
+ size_t NumModules; // linker-initialized.
+ size_t NumInline8bitCounters;
+
+ template <class Callback>
+ void IterateCounterRegions(Callback CB) {
+ for (size_t m = 0; m < NumModules; m++)
+ for (size_t r = 0; r < Modules[m].NumRegions; r++)
+ CB(Modules[m].Regions[r]);
+ }
+
+ struct { const PCTableEntry *Start, *Stop; } ModulePCTable[4096];
+ size_t NumPCTables;
+ size_t NumPCsInPCTables;
+
+ Set<const PCTableEntry*> ObservedPCs;
+ std::unordered_map<uintptr_t, uintptr_t> ObservedFuncs; // PC => Counter.
+
+ uint8_t *FocusFunctionCounterPtr = nullptr;
+
+ ValueBitMap ValueProfileMap;
+ uintptr_t InitialStack;
+};
+
+template <class Callback>
+// void Callback(size_t FirstFeature, size_t Idx, uint8_t Value);
+ATTRIBUTE_NO_SANITIZE_ALL
+size_t ForEachNonZeroByte(const uint8_t *Begin, const uint8_t *End,
+ size_t FirstFeature, Callback Handle8bitCounter) {
+ typedef uintptr_t LargeType;
+ const size_t Step = sizeof(LargeType) / sizeof(uint8_t);
+ const size_t StepMask = Step - 1;
+ auto P = Begin;
+ // Iterate by 1 byte until either the alignment boundary or the end.
+ for (; reinterpret_cast<uintptr_t>(P) & StepMask && P < End; P++)
+ if (uint8_t V = *P)
+ Handle8bitCounter(FirstFeature, P - Begin, V);
+
+ // Iterate by Step bytes at a time.
+ for (; P < End; P += Step)
+ if (LargeType Bundle = *reinterpret_cast<const LargeType *>(P))
+ for (size_t I = 0; I < Step; I++, Bundle >>= 8)
+ if (uint8_t V = Bundle & 0xff)
+ Handle8bitCounter(FirstFeature, P - Begin + I, V);
+
+ // Iterate by 1 byte until the end.
+ for (; P < End; P++)
+ if (uint8_t V = *P)
+ Handle8bitCounter(FirstFeature, P - Begin, V);
+ return End - Begin;
+}
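+// For example, scanning 16 counters where only Begin[5] == 3 invokes
+// Handle8bitCounter(FirstFeature, 5, 3) exactly once and returns 16,
+// i.e. End - Begin, regardless of which of the three loops saw the byte.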
+
+// Given a non-zero Counter, returns a number in the range [0,7].
+template<class T>
+unsigned CounterToFeature(T Counter) {
+ // Returns a feature number by placing Counters into buckets as illustrated
+ // below.
+ //
+ // Counter bucket: [1] [2] [3] [4-7] [8-15] [16-31] [32-127] [128+]
+ // Feature number: 0 1 2 3 4 5 6 7
+ //
+ // This is a heuristic taken from AFL (see
+ // http://lcamtuf.coredump.cx/afl/technical_details.txt).
+ //
+ // This implementation may change in the future so clients should
+ // not rely on it.
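+  //
+  // For example, with this bucketing CounterToFeature(1) == 0,
+  // CounterToFeature(5) == 3 (bucket [4-7]), and CounterToFeature(200) == 7.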
+ assert(Counter);
+ unsigned Bit = 0;
+ /**/ if (Counter >= 128) Bit = 7;
+ else if (Counter >= 32) Bit = 6;
+ else if (Counter >= 16) Bit = 5;
+ else if (Counter >= 8) Bit = 4;
+ else if (Counter >= 4) Bit = 3;
+ else if (Counter >= 3) Bit = 2;
+ else if (Counter >= 2) Bit = 1;
+ return Bit;
+}
+
+template <class Callback> // void Callback(size_t Feature)
+ATTRIBUTE_NO_SANITIZE_ADDRESS
+ATTRIBUTE_NOINLINE
+void TracePC::CollectFeatures(Callback HandleFeature) const {
+ auto Handle8bitCounter = [&](size_t FirstFeature,
+ size_t Idx, uint8_t Counter) {
+ if (UseCounters)
+ HandleFeature(FirstFeature + Idx * 8 + CounterToFeature(Counter));
+ else
+ HandleFeature(FirstFeature + Idx);
+ };
+
+ size_t FirstFeature = 0;
+
+ for (size_t i = 0; i < NumModules; i++) {
+ for (size_t r = 0; r < Modules[i].NumRegions; r++) {
+ if (!Modules[i].Regions[r].Enabled) continue;
+ FirstFeature += 8 * ForEachNonZeroByte(Modules[i].Regions[r].Start,
+ Modules[i].Regions[r].Stop,
+ FirstFeature, Handle8bitCounter);
+ }
+ }
+
+ FirstFeature +=
+ 8 * ForEachNonZeroByte(ExtraCountersBegin(), ExtraCountersEnd(),
+ FirstFeature, Handle8bitCounter);
+
+ if (UseValueProfileMask) {
+ ValueProfileMap.ForEach([&](size_t Idx) {
+ HandleFeature(FirstFeature + Idx);
+ });
+ FirstFeature += ValueProfileMap.SizeInBits();
+ }
+
+ // Step function, grows similar to 8 * Log_2(A).
+ auto StackDepthStepFunction = [](uint32_t A) -> uint32_t {
+ if (!A) return A;
+ uint32_t Log2 = Log(A);
+ if (Log2 < 3) return A;
+ Log2 -= 3;
+ return (Log2 + 1) * 8 + ((A >> Log2) & 7);
+ };
+ assert(StackDepthStepFunction(1024) == 64);
+ assert(StackDepthStepFunction(1024 * 4) == 80);
+ assert(StackDepthStepFunction(1024 * 1024) == 144);
+
+ if (auto MaxStackOffset = GetMaxStackOffset())
+ HandleFeature(FirstFeature + StackDepthStepFunction(MaxStackOffset / 8));
+}
+
+extern TracePC TPC;
+
+} // namespace fuzzer
+
+#endif // LLVM_FUZZER_TRACE_PC
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerTracePC.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerUtil.cpp
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerUtil.cpp (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerUtil.cpp (revision 351984)
@@ -0,0 +1,231 @@
+//===- FuzzerUtil.cpp - Misc utils ----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// Misc utils.
+//===----------------------------------------------------------------------===//
+
+#include "FuzzerUtil.h"
+#include "FuzzerIO.h"
+#include "FuzzerInternal.h"
+#include <cassert>
+#include <chrono>
+#include <cstring>
+#include <errno.h>
+#include <mutex>
+#include <signal.h>
+#include <sstream>
+#include <stdio.h>
+#include <sys/types.h>
+#include <thread>
+
+namespace fuzzer {
+
+void PrintHexArray(const uint8_t *Data, size_t Size,
+ const char *PrintAfter) {
+ for (size_t i = 0; i < Size; i++)
+ Printf("0x%x,", (unsigned)Data[i]);
+ Printf("%s", PrintAfter);
+}
+
+void Print(const Unit &v, const char *PrintAfter) {
+ PrintHexArray(v.data(), v.size(), PrintAfter);
+}
+
+void PrintASCIIByte(uint8_t Byte) {
+ if (Byte == '\\')
+ Printf("\\\\");
+ else if (Byte == '"')
+ Printf("\\\"");
+ else if (Byte >= 32 && Byte < 127)
+ Printf("%c", Byte);
+ else
+ Printf("\\x%02x", Byte);
+}
+
+void PrintASCII(const uint8_t *Data, size_t Size, const char *PrintAfter) {
+ for (size_t i = 0; i < Size; i++)
+ PrintASCIIByte(Data[i]);
+ Printf("%s", PrintAfter);
+}
+
+void PrintASCII(const Unit &U, const char *PrintAfter) {
+ PrintASCII(U.data(), U.size(), PrintAfter);
+}
+
+bool ToASCII(uint8_t *Data, size_t Size) {
+ bool Changed = false;
+ for (size_t i = 0; i < Size; i++) {
+ uint8_t &X = Data[i];
+ auto NewX = X;
+ NewX &= 127;
+ if (!isspace(NewX) && !isprint(NewX))
+ NewX = ' ';
+ Changed |= NewX != X;
+ X = NewX;
+ }
+ return Changed;
+}
+
+bool IsASCII(const Unit &U) { return IsASCII(U.data(), U.size()); }
+
+bool IsASCII(const uint8_t *Data, size_t Size) {
+ for (size_t i = 0; i < Size; i++)
+ if (!(isprint(Data[i]) || isspace(Data[i]))) return false;
+ return true;
+}
+
+bool ParseOneDictionaryEntry(const std::string &Str, Unit *U) {
+ U->clear();
+ if (Str.empty()) return false;
+ size_t L = 0, R = Str.size() - 1; // We are parsing the range [L,R].
+ // Skip spaces from both sides.
+ while (L < R && isspace(Str[L])) L++;
+ while (R > L && isspace(Str[R])) R--;
+ if (R - L < 2) return false;
+ // Check the closing "
+ if (Str[R] != '"') return false;
+ R--;
+ // Find the opening "
+ while (L < R && Str[L] != '"') L++;
+ if (L >= R) return false;
+ assert(Str[L] == '\"');
+ L++;
+ assert(L <= R);
+ for (size_t Pos = L; Pos <= R; Pos++) {
+ uint8_t V = (uint8_t)Str[Pos];
+ if (!isprint(V) && !isspace(V)) return false;
+    if (V == '\\') {
+ // Handle '\\'
+ if (Pos + 1 <= R && (Str[Pos + 1] == '\\' || Str[Pos + 1] == '"')) {
+ U->push_back(Str[Pos + 1]);
+ Pos++;
+ continue;
+ }
+ // Handle '\xAB'
+ if (Pos + 3 <= R && Str[Pos + 1] == 'x'
+ && isxdigit(Str[Pos + 2]) && isxdigit(Str[Pos + 3])) {
+ char Hex[] = "0xAA";
+ Hex[2] = Str[Pos + 2];
+ Hex[3] = Str[Pos + 3];
+ U->push_back(strtol(Hex, nullptr, 16));
+ Pos += 3;
+ continue;
+ }
+ return false; // Invalid escape.
+ } else {
+ // Any other character.
+ U->push_back(V);
+ }
+ }
+ return true;
+}
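+// For example, the dictionary line   kw1="foo\x0Abar"   parses to the bytes
+// {'f','o','o','\n','b','a','r'}; escapes other than \\, \" and \xAB are
+// rejected.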
+
+bool ParseDictionaryFile(const std::string &Text, Vector<Unit> *Units) {
+ if (Text.empty()) {
+ Printf("ParseDictionaryFile: file does not exist or is empty\n");
+ return false;
+ }
+ std::istringstream ISS(Text);
+ Units->clear();
+ Unit U;
+ int LineNo = 0;
+ std::string S;
+ while (std::getline(ISS, S, '\n')) {
+ LineNo++;
+ size_t Pos = 0;
+ while (Pos < S.size() && isspace(S[Pos])) Pos++; // Skip spaces.
+ if (Pos == S.size()) continue; // Empty line.
+ if (S[Pos] == '#') continue; // Comment line.
+ if (ParseOneDictionaryEntry(S, &U)) {
+ Units->push_back(U);
+ } else {
+ Printf("ParseDictionaryFile: error in line %d\n\t\t%s\n", LineNo,
+ S.c_str());
+ return false;
+ }
+ }
+ return true;
+}
+
+std::string Base64(const Unit &U) {
+ static const char Table[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ "abcdefghijklmnopqrstuvwxyz"
+ "0123456789+/";
+ std::string Res;
+ size_t i;
+ for (i = 0; i + 2 < U.size(); i += 3) {
+ uint32_t x = (U[i] << 16) + (U[i + 1] << 8) + U[i + 2];
+ Res += Table[(x >> 18) & 63];
+ Res += Table[(x >> 12) & 63];
+ Res += Table[(x >> 6) & 63];
+ Res += Table[x & 63];
+ }
+ if (i + 1 == U.size()) {
+ uint32_t x = (U[i] << 16);
+ Res += Table[(x >> 18) & 63];
+ Res += Table[(x >> 12) & 63];
+ Res += "==";
+ } else if (i + 2 == U.size()) {
+ uint32_t x = (U[i] << 16) + (U[i + 1] << 8);
+ Res += Table[(x >> 18) & 63];
+ Res += Table[(x >> 12) & 63];
+ Res += Table[(x >> 6) & 63];
+ Res += "=";
+ }
+ return Res;
+}
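+// For example, Base64 of the bytes "foo" is "Zm9v", "fo" is "Zm8=", and
+// "f" is "Zg==", following the standard '=' padding rules.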
+
+static std::mutex SymbolizeMutex;
+
+std::string DescribePC(const char *SymbolizedFMT, uintptr_t PC) {
+ std::unique_lock<std::mutex> l(SymbolizeMutex, std::try_to_lock);
+ if (!EF->__sanitizer_symbolize_pc || !l.owns_lock())
+ return "<can not symbolize>";
+ char PcDescr[1024] = {};
+ EF->__sanitizer_symbolize_pc(reinterpret_cast<void*>(PC),
+ SymbolizedFMT, PcDescr, sizeof(PcDescr));
+ PcDescr[sizeof(PcDescr) - 1] = 0; // Just in case.
+ return PcDescr;
+}
+
+void PrintPC(const char *SymbolizedFMT, const char *FallbackFMT, uintptr_t PC) {
+ if (EF->__sanitizer_symbolize_pc)
+ Printf("%s", DescribePC(SymbolizedFMT, PC).c_str());
+ else
+ Printf(FallbackFMT, PC);
+}
+
+void PrintStackTrace() {
+ std::unique_lock<std::mutex> l(SymbolizeMutex, std::try_to_lock);
+ if (EF->__sanitizer_print_stack_trace && l.owns_lock())
+ EF->__sanitizer_print_stack_trace();
+}
+
+void PrintMemoryProfile() {
+ std::unique_lock<std::mutex> l(SymbolizeMutex, std::try_to_lock);
+ if (EF->__sanitizer_print_memory_profile && l.owns_lock())
+ EF->__sanitizer_print_memory_profile(95, 8);
+}
+
+unsigned NumberOfCpuCores() {
+ unsigned N = std::thread::hardware_concurrency();
+ if (!N) {
+ Printf("WARNING: std::thread::hardware_concurrency not well defined for "
+ "your platform. Assuming CPU count of 1.\n");
+ N = 1;
+ }
+ return N;
+}
+
+size_t SimpleFastHash(const uint8_t *Data, size_t Size) {
+ size_t Res = 0;
+ for (size_t i = 0; i < Size; i++)
+ Res = Res * 11 + Data[i];
+ return Res;
+}
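+// For instance, SimpleFastHash of the two bytes {1, 2} evaluates to
+// (0 * 11 + 1) * 11 + 2 == 13.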
+
+} // namespace fuzzer
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerUtil.cpp
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerUtil.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerUtil.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerUtil.h (revision 351984)
@@ -0,0 +1,108 @@
+//===- FuzzerUtil.h - Internal header for the Fuzzer Utils ------*- C++ -* ===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// Util functions.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_FUZZER_UTIL_H
+#define LLVM_FUZZER_UTIL_H
+
+#include "FuzzerBuiltins.h"
+#include "FuzzerBuiltinsMsvc.h"
+#include "FuzzerCommand.h"
+#include "FuzzerDefs.h"
+
+namespace fuzzer {
+
+void PrintHexArray(const Unit &U, const char *PrintAfter = "");
+
+void PrintHexArray(const uint8_t *Data, size_t Size,
+ const char *PrintAfter = "");
+
+void PrintASCII(const uint8_t *Data, size_t Size, const char *PrintAfter = "");
+
+void PrintASCII(const Unit &U, const char *PrintAfter = "");
+
+// Changes the data to contain only ASCII (isprint+isspace) characters.
+// Returns true iff the data has been changed.
+bool ToASCII(uint8_t *Data, size_t Size);
+
+bool IsASCII(const Unit &U);
+
+bool IsASCII(const uint8_t *Data, size_t Size);
+
+std::string Base64(const Unit &U);
+
+void PrintPC(const char *SymbolizedFMT, const char *FallbackFMT, uintptr_t PC);
+
+std::string DescribePC(const char *SymbolizedFMT, uintptr_t PC);
+
+void PrintStackTrace();
+
+void PrintMemoryProfile();
+
+unsigned NumberOfCpuCores();
+
+// Platform specific functions.
+void SetSignalHandler(const FuzzingOptions& Options);
+
+void SleepSeconds(int Seconds);
+
+bool Mprotect(void *Ptr, size_t Size, bool AllowReadWrite);
+
+unsigned long GetPid();
+
+size_t GetPeakRSSMb();
+
+int ExecuteCommand(const Command &Cmd);
+
+FILE *OpenProcessPipe(const char *Command, const char *Mode);
+
+const void *SearchMemory(const void *haystack, size_t haystacklen,
+ const void *needle, size_t needlelen);
+
+std::string CloneArgsWithoutX(const Vector<std::string> &Args,
+ const char *X1, const char *X2);
+
+inline std::string CloneArgsWithoutX(const Vector<std::string> &Args,
+ const char *X) {
+ return CloneArgsWithoutX(Args, X, X);
+}
+
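+// Splits S at the first occurrence of X: returns the part of S before X and
+// the rest of S starting at X; if X does not occur, returns {S, ""}.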
+inline std::pair<std::string, std::string> SplitBefore(std::string X,
+ std::string S) {
+ auto Pos = S.find(X);
+ if (Pos == std::string::npos)
+ return std::make_pair(S, "");
+ return std::make_pair(S.substr(0, Pos), S.substr(Pos));
+}
+
+std::string DisassembleCmd(const std::string &FileName);
+
+std::string SearchRegexCmd(const std::string &Regex);
+
+size_t SimpleFastHash(const uint8_t *Data, size_t Size);
+
+inline uint32_t Log(uint32_t X) { return 32 - Clz(X) - 1; }
+
+inline size_t PageSize() { return 4096; }
+inline uint8_t *RoundUpByPage(uint8_t *P) {
+ uintptr_t X = reinterpret_cast<uintptr_t>(P);
+ size_t Mask = PageSize() - 1;
+ X = (X + Mask) & ~Mask;
+ return reinterpret_cast<uint8_t *>(X);
+}
+inline uint8_t *RoundDownByPage(uint8_t *P) {
+ uintptr_t X = reinterpret_cast<uintptr_t>(P);
+ size_t Mask = PageSize() - 1;
+ X = X & ~Mask;
+ return reinterpret_cast<uint8_t *>(X);
+}
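+// For example, with the assumed 4096-byte page, RoundUpByPage maps 0x1001 to
+// 0x2000 and RoundDownByPage maps 0x1001 to 0x1000.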
+
+} // namespace fuzzer
+
+#endif // LLVM_FUZZER_UTIL_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerUtil.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerUtilDarwin.cpp
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerUtilDarwin.cpp (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerUtilDarwin.cpp (revision 351984)
@@ -0,0 +1,161 @@
+//===- FuzzerUtilDarwin.cpp - Misc utils ----------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// Misc utils for Darwin.
+//===----------------------------------------------------------------------===//
+#include "FuzzerDefs.h"
+#if LIBFUZZER_APPLE
+#include "FuzzerCommand.h"
+#include "FuzzerIO.h"
+#include <mutex>
+#include <signal.h>
+#include <spawn.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/wait.h>
+
+// There is no header for this on macOS, so declare it here.
+extern "C" char **environ;
+
+namespace fuzzer {
+
+static std::mutex SignalMutex;
+// Global variables used to keep track of how signal handling should be
+// restored. They should **not** be accessed without holding `SignalMutex`.
+static int ActiveThreadCount = 0;
+static struct sigaction OldSigIntAction;
+static struct sigaction OldSigQuitAction;
+static sigset_t OldBlockedSignalsSet;
+
+// This is a reimplementation of Libc's `system()`. On Darwin the Libc
+// implementation contains a mutex which prevents it from being used
+// concurrently. This implementation **can** be used concurrently. It sets the
+// signal handlers when the first thread enters and restores them when the last
+// thread finishes execution of the function and ensures this is not racey by
+// using a mutex.
+int ExecuteCommand(const Command &Cmd) {
+ std::string CmdLine = Cmd.toString();
+ posix_spawnattr_t SpawnAttributes;
+ if (posix_spawnattr_init(&SpawnAttributes))
+ return -1;
+ // Block and ignore signals of the current process when the first thread
+ // enters.
+ {
+ std::lock_guard<std::mutex> Lock(SignalMutex);
+ if (ActiveThreadCount == 0) {
+ static struct sigaction IgnoreSignalAction;
+ sigset_t BlockedSignalsSet;
+ memset(&IgnoreSignalAction, 0, sizeof(IgnoreSignalAction));
+ IgnoreSignalAction.sa_handler = SIG_IGN;
+
+ if (sigaction(SIGINT, &IgnoreSignalAction, &OldSigIntAction) == -1) {
+ Printf("Failed to ignore SIGINT\n");
+ (void)posix_spawnattr_destroy(&SpawnAttributes);
+ return -1;
+ }
+ if (sigaction(SIGQUIT, &IgnoreSignalAction, &OldSigQuitAction) == -1) {
+ Printf("Failed to ignore SIGQUIT\n");
+ // Try our best to restore the signal handlers.
+ (void)sigaction(SIGINT, &OldSigIntAction, NULL);
+ (void)posix_spawnattr_destroy(&SpawnAttributes);
+ return -1;
+ }
+
+ (void)sigemptyset(&BlockedSignalsSet);
+ (void)sigaddset(&BlockedSignalsSet, SIGCHLD);
+ if (sigprocmask(SIG_BLOCK, &BlockedSignalsSet, &OldBlockedSignalsSet) ==
+ -1) {
+ Printf("Failed to block SIGCHLD\n");
+ // Try our best to restore the signal handlers.
+ (void)sigaction(SIGQUIT, &OldSigQuitAction, NULL);
+ (void)sigaction(SIGINT, &OldSigIntAction, NULL);
+ (void)posix_spawnattr_destroy(&SpawnAttributes);
+ return -1;
+ }
+ }
+ ++ActiveThreadCount;
+ }
+
+ // NOTE: Do not introduce any new `return` statements past this
+ // point. It is important that `ActiveThreadCount` always be decremented
+ // when leaving this function.
+
+ // Make sure the child process uses the default handlers for the
+ // following signals rather than inheriting what the parent has.
+ sigset_t DefaultSigSet;
+ (void)sigemptyset(&DefaultSigSet);
+ (void)sigaddset(&DefaultSigSet, SIGQUIT);
+ (void)sigaddset(&DefaultSigSet, SIGINT);
+ (void)posix_spawnattr_setsigdefault(&SpawnAttributes, &DefaultSigSet);
+ // Make sure the child process doesn't block SIGCHLD
+ (void)posix_spawnattr_setsigmask(&SpawnAttributes, &OldBlockedSignalsSet);
+ short SpawnFlags = POSIX_SPAWN_SETSIGDEF | POSIX_SPAWN_SETSIGMASK;
+ (void)posix_spawnattr_setflags(&SpawnAttributes, SpawnFlags);
+
+ pid_t Pid;
+ char **Environ = environ; // Read from global
+ const char *CommandCStr = CmdLine.c_str();
+ char *const Argv[] = {
+ strdup("sh"),
+ strdup("-c"),
+ strdup(CommandCStr),
+ NULL
+ };
+ int ErrorCode = 0, ProcessStatus = 0;
+ // FIXME: We probably shouldn't hardcode the shell path.
+ ErrorCode = posix_spawn(&Pid, "/bin/sh", NULL, &SpawnAttributes,
+ Argv, Environ);
+ (void)posix_spawnattr_destroy(&SpawnAttributes);
+ if (!ErrorCode) {
+ pid_t SavedPid = Pid;
+ do {
+ // Repeat until call completes uninterrupted.
+ Pid = waitpid(SavedPid, &ProcessStatus, /*options=*/0);
+ } while (Pid == -1 && errno == EINTR);
+ if (Pid == -1) {
+      // waitpid failed for some other reason.
+ ProcessStatus = -1;
+ }
+ } else if (ErrorCode == ENOMEM || ErrorCode == EAGAIN) {
+ // Fork failure.
+ ProcessStatus = -1;
+ } else {
+ // Shell execution failure.
+ ProcessStatus = W_EXITCODE(127, 0);
+ }
+ for (unsigned i = 0, n = sizeof(Argv) / sizeof(Argv[0]); i < n; ++i)
+ free(Argv[i]);
+
+ // Restore the signal handlers of the current process when the last thread
+ // using this function finishes.
+ {
+ std::lock_guard<std::mutex> Lock(SignalMutex);
+ --ActiveThreadCount;
+ if (ActiveThreadCount == 0) {
+ bool FailedRestore = false;
+ if (sigaction(SIGINT, &OldSigIntAction, NULL) == -1) {
+ Printf("Failed to restore SIGINT handling\n");
+ FailedRestore = true;
+ }
+ if (sigaction(SIGQUIT, &OldSigQuitAction, NULL) == -1) {
+ Printf("Failed to restore SIGQUIT handling\n");
+ FailedRestore = true;
+ }
+      if (sigprocmask(SIG_SETMASK, &OldBlockedSignalsSet, NULL) == -1) {
+ Printf("Failed to unblock SIGCHLD\n");
+ FailedRestore = true;
+ }
+ if (FailedRestore)
+ ProcessStatus = -1;
+ }
+ }
+ return ProcessStatus;
+}
+
+} // namespace fuzzer
+
+#endif // LIBFUZZER_APPLE
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerUtilDarwin.cpp
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerUtilFuchsia.cpp
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerUtilFuchsia.cpp (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerUtilFuchsia.cpp (revision 351984)
@@ -0,0 +1,494 @@
+//===- FuzzerUtilFuchsia.cpp - Misc utils for Fuchsia. --------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// Misc utils implementation using Fuchsia/Zircon APIs.
+//===----------------------------------------------------------------------===//
+#include "FuzzerDefs.h"
+
+#if LIBFUZZER_FUCHSIA
+
+#include "FuzzerInternal.h"
+#include "FuzzerUtil.h"
+#include <cassert>
+#include <cerrno>
+#include <cinttypes>
+#include <cstdint>
+#include <fcntl.h>
+#include <lib/fdio/spawn.h>
+#include <string>
+#include <sys/select.h>
+#include <thread>
+#include <unistd.h>
+#include <zircon/errors.h>
+#include <zircon/process.h>
+#include <zircon/sanitizer.h>
+#include <zircon/status.h>
+#include <zircon/syscalls.h>
+#include <zircon/syscalls/debug.h>
+#include <zircon/syscalls/exception.h>
+#include <zircon/syscalls/object.h>
+#include <zircon/types.h>
+
+namespace fuzzer {
+
+// Given that Fuchsia doesn't have the POSIX signals that libFuzzer was written
+// around, the general approach is to spin up dedicated threads to watch for
+// each requested condition (alarm, interrupt, crash). Of these, the crash
+// handler is the most involved, as it requires resuming the crashed thread in
+// order to invoke the sanitizers to get the needed state.
+
+// Forward declaration of assembly trampoline needed to resume crashed threads.
+// This appears to have external linkage to C++, which is why it's not in the
+// anonymous namespace. The assembly definition inside MakeTrampoline()
+// actually defines the symbol with internal linkage only.
+void CrashTrampolineAsm() __asm__("CrashTrampolineAsm");
+
+namespace {
+
+// Helper function to handle Zircon syscall failures.
+void ExitOnErr(zx_status_t Status, const char *Syscall) {
+ if (Status != ZX_OK) {
+ Printf("libFuzzer: %s failed: %s\n", Syscall,
+ _zx_status_get_string(Status));
+ exit(1);
+ }
+}
+
+void AlarmHandler(int Seconds) {
+ while (true) {
+ SleepSeconds(Seconds);
+ Fuzzer::StaticAlarmCallback();
+ }
+}
+
+void InterruptHandler() {
+ fd_set readfds;
+ // Ctrl-C sends ETX in Zircon.
+ do {
+ FD_ZERO(&readfds);
+ FD_SET(STDIN_FILENO, &readfds);
+ select(STDIN_FILENO + 1, &readfds, nullptr, nullptr, nullptr);
+  } while (!FD_ISSET(STDIN_FILENO, &readfds) || getchar() != 0x03);
+ Fuzzer::StaticInterruptCallback();
+}
+
+// For the crash handler, we need to call Fuzzer::StaticCrashSignalCallback
+// without POSIX signal handlers. To achieve this, we use an assembly function
+// to add the necessary CFI unwinding information and a C function to bridge
+// from that back into C++.
+
+// FIXME: This works as a short-term solution, but this code really shouldn't be
+// architecture dependent. A better long term solution is to implement remote
+// unwinding and expose the necessary APIs through sanitizer_common and/or ASAN
+// to allow the exception handling thread to gather the crash state directly.
+//
+// Alternatively, Fuchsia may in the future implement basic signal
+// handling for the machine trap signals.
+#if defined(__x86_64__)
+#define FOREACH_REGISTER(OP_REG, OP_NUM) \
+ OP_REG(rax) \
+ OP_REG(rbx) \
+ OP_REG(rcx) \
+ OP_REG(rdx) \
+ OP_REG(rsi) \
+ OP_REG(rdi) \
+ OP_REG(rbp) \
+ OP_REG(rsp) \
+ OP_REG(r8) \
+ OP_REG(r9) \
+ OP_REG(r10) \
+ OP_REG(r11) \
+ OP_REG(r12) \
+ OP_REG(r13) \
+ OP_REG(r14) \
+ OP_REG(r15) \
+ OP_REG(rip)
+
+#elif defined(__aarch64__)
+#define FOREACH_REGISTER(OP_REG, OP_NUM) \
+ OP_NUM(0) \
+ OP_NUM(1) \
+ OP_NUM(2) \
+ OP_NUM(3) \
+ OP_NUM(4) \
+ OP_NUM(5) \
+ OP_NUM(6) \
+ OP_NUM(7) \
+ OP_NUM(8) \
+ OP_NUM(9) \
+ OP_NUM(10) \
+ OP_NUM(11) \
+ OP_NUM(12) \
+ OP_NUM(13) \
+ OP_NUM(14) \
+ OP_NUM(15) \
+ OP_NUM(16) \
+ OP_NUM(17) \
+ OP_NUM(18) \
+ OP_NUM(19) \
+ OP_NUM(20) \
+ OP_NUM(21) \
+ OP_NUM(22) \
+ OP_NUM(23) \
+ OP_NUM(24) \
+ OP_NUM(25) \
+ OP_NUM(26) \
+ OP_NUM(27) \
+ OP_NUM(28) \
+ OP_NUM(29) \
+ OP_NUM(30) \
+ OP_REG(sp)
+
+#else
+#error "Unsupported architecture for fuzzing on Fuchsia"
+#endif
+
+// Produces a CFI directive for the named or numbered register.
+#define CFI_OFFSET_REG(reg) ".cfi_offset " #reg ", %c[" #reg "]\n"
+#define CFI_OFFSET_NUM(num) CFI_OFFSET_REG(r##num)
+
+// Produces an assembler input operand for the named or numbered register.
+#define ASM_OPERAND_REG(reg) \
+ [reg] "i"(offsetof(zx_thread_state_general_regs_t, reg)),
+#define ASM_OPERAND_NUM(num) \
+ [r##num] "i"(offsetof(zx_thread_state_general_regs_t, r[num])),
+
+// Trampoline to bridge from the assembly below to the static C++ crash
+// callback.
+__attribute__((noreturn))
+static void StaticCrashHandler() {
+ Fuzzer::StaticCrashSignalCallback();
+ for (;;) {
+ _Exit(1);
+ }
+}
+
+// Creates the trampoline with the necessary CFI information to unwind through
+// to the crashing call stack. The attribute is necessary because the function
+// is never called; it's just a container around the assembly to allow it to
+// use operands for compile-time computed constants.
+__attribute__((used))
+void MakeTrampoline() {
+ __asm__(".cfi_endproc\n"
+ ".pushsection .text.CrashTrampolineAsm\n"
+ ".type CrashTrampolineAsm,STT_FUNC\n"
+"CrashTrampolineAsm:\n"
+ ".cfi_startproc simple\n"
+ ".cfi_signal_frame\n"
+#if defined(__x86_64__)
+ ".cfi_return_column rip\n"
+ ".cfi_def_cfa rsp, 0\n"
+ FOREACH_REGISTER(CFI_OFFSET_REG, CFI_OFFSET_NUM)
+ "call %c[StaticCrashHandler]\n"
+ "ud2\n"
+#elif defined(__aarch64__)
+ ".cfi_return_column 33\n"
+ ".cfi_def_cfa sp, 0\n"
+ ".cfi_offset 33, %c[pc]\n"
+ FOREACH_REGISTER(CFI_OFFSET_REG, CFI_OFFSET_NUM)
+ "bl %[StaticCrashHandler]\n"
+#else
+#error "Unsupported architecture for fuzzing on Fuchsia"
+#endif
+ ".cfi_endproc\n"
+ ".size CrashTrampolineAsm, . - CrashTrampolineAsm\n"
+ ".popsection\n"
+ ".cfi_startproc\n"
+ : // No outputs
+ : FOREACH_REGISTER(ASM_OPERAND_REG, ASM_OPERAND_NUM)
+#if defined(__aarch64__)
+ ASM_OPERAND_REG(pc)
+#endif
+ [StaticCrashHandler] "i" (StaticCrashHandler));
+}
+
+void CrashHandler(zx_handle_t *Event) {
+ // This structure is used to ensure we close handles to objects we create in
+ // this handler.
+ struct ScopedHandle {
+ ~ScopedHandle() { _zx_handle_close(Handle); }
+ zx_handle_t Handle = ZX_HANDLE_INVALID;
+ };
+
+ // Create the exception channel. We need to claim to be a "debugger" so the
+ // kernel will allow us to modify and resume dying threads (see below). Once
+ // the channel is set, we can signal the main thread to continue and wait
+ // for the exception to arrive.
+ ScopedHandle Channel;
+ zx_handle_t Self = _zx_process_self();
+ ExitOnErr(_zx_task_create_exception_channel(
+ Self, ZX_EXCEPTION_CHANNEL_DEBUGGER, &Channel.Handle),
+ "_zx_task_create_exception_channel");
+
+ ExitOnErr(_zx_object_signal(*Event, 0, ZX_USER_SIGNAL_0),
+ "_zx_object_signal");
+
+ // This thread lives as long as the process in order to keep handling
+ // crashes. In practice, the first crashed thread to reach the end of the
+ // StaticCrashHandler will end the process.
+ while (true) {
+ ExitOnErr(_zx_object_wait_one(Channel.Handle, ZX_CHANNEL_READABLE,
+ ZX_TIME_INFINITE, nullptr),
+ "_zx_object_wait_one");
+
+ zx_exception_info_t ExceptionInfo;
+ ScopedHandle Exception;
+ ExitOnErr(_zx_channel_read(Channel.Handle, 0, &ExceptionInfo,
+ &Exception.Handle, sizeof(ExceptionInfo), 1,
+ nullptr, nullptr),
+ "_zx_channel_read");
+
+ // Ignore informational synthetic exceptions.
+ if (ZX_EXCP_THREAD_STARTING == ExceptionInfo.type ||
+ ZX_EXCP_THREAD_EXITING == ExceptionInfo.type ||
+ ZX_EXCP_PROCESS_STARTING == ExceptionInfo.type) {
+ continue;
+ }
+
+ // At this point, we want to get the state of the crashing thread, but
+ // libFuzzer and the sanitizers assume this will happen from that same
+ // thread via a POSIX signal handler. "Resurrecting" the thread in the
+ // middle of the appropriate callback is as simple as forcibly setting the
+ // instruction pointer/program counter, provided we NEVER EVER return from
+ // that function (since otherwise our stack will not be valid).
+ ScopedHandle Thread;
+ ExitOnErr(_zx_exception_get_thread(Exception.Handle, &Thread.Handle),
+ "_zx_exception_get_thread");
+
+ zx_thread_state_general_regs_t GeneralRegisters;
+ ExitOnErr(_zx_thread_read_state(Thread.Handle, ZX_THREAD_STATE_GENERAL_REGS,
+ &GeneralRegisters,
+ sizeof(GeneralRegisters)),
+ "_zx_thread_read_state");
+
+ // To unwind properly, we need to push the crashing thread's register state
+ // onto the stack and jump into a trampoline with CFI instructions on how
+ // to restore it.
+#if defined(__x86_64__)
+ uintptr_t StackPtr =
+ (GeneralRegisters.rsp - (128 + sizeof(GeneralRegisters))) &
+ -(uintptr_t)16;
+ __unsanitized_memcpy(reinterpret_cast<void *>(StackPtr), &GeneralRegisters,
+ sizeof(GeneralRegisters));
+ GeneralRegisters.rsp = StackPtr;
+ GeneralRegisters.rip = reinterpret_cast<zx_vaddr_t>(CrashTrampolineAsm);
+
+#elif defined(__aarch64__)
+ uintptr_t StackPtr =
+ (GeneralRegisters.sp - sizeof(GeneralRegisters)) & -(uintptr_t)16;
+ __unsanitized_memcpy(reinterpret_cast<void *>(StackPtr), &GeneralRegisters,
+ sizeof(GeneralRegisters));
+ GeneralRegisters.sp = StackPtr;
+ GeneralRegisters.pc = reinterpret_cast<zx_vaddr_t>(CrashTrampolineAsm);
+
+#else
+#error "Unsupported architecture for fuzzing on Fuchsia"
+#endif
+
+ // Now force the crashing thread's state.
+ ExitOnErr(
+ _zx_thread_write_state(Thread.Handle, ZX_THREAD_STATE_GENERAL_REGS,
+ &GeneralRegisters, sizeof(GeneralRegisters)),
+ "_zx_thread_write_state");
+
+ // Set the exception to HANDLED so it resumes the thread on close.
+ uint32_t ExceptionState = ZX_EXCEPTION_STATE_HANDLED;
+ ExitOnErr(_zx_object_set_property(Exception.Handle, ZX_PROP_EXCEPTION_STATE,
+ &ExceptionState, sizeof(ExceptionState)),
+ "zx_object_set_property");
+ }
+}
+
+} // namespace
+
+bool Mprotect(void *Ptr, size_t Size, bool AllowReadWrite) {
+ return false; // UNIMPLEMENTED
+}
+
+// Platform specific functions.
+void SetSignalHandler(const FuzzingOptions &Options) {
+ // Set up alarm handler if needed.
+ if (Options.UnitTimeoutSec > 0) {
+ std::thread T(AlarmHandler, Options.UnitTimeoutSec / 2 + 1);
+ T.detach();
+ }
+
+ // Set up interrupt handler if needed.
+ if (Options.HandleInt || Options.HandleTerm) {
+ std::thread T(InterruptHandler);
+ T.detach();
+ }
+
+ // Early exit if no crash handler needed.
+ if (!Options.HandleSegv && !Options.HandleBus && !Options.HandleIll &&
+ !Options.HandleFpe && !Options.HandleAbrt)
+ return;
+
+ // Set up the crash handler and wait until it is ready before proceeding.
+ zx_handle_t Event;
+ ExitOnErr(_zx_event_create(0, &Event), "_zx_event_create");
+
+ std::thread T(CrashHandler, &Event);
+ zx_status_t Status =
+ _zx_object_wait_one(Event, ZX_USER_SIGNAL_0, ZX_TIME_INFINITE, nullptr);
+ _zx_handle_close(Event);
+ ExitOnErr(Status, "_zx_object_wait_one");
+
+ T.detach();
+}
+
+void SleepSeconds(int Seconds) {
+ _zx_nanosleep(_zx_deadline_after(ZX_SEC(Seconds)));
+}
+
+unsigned long GetPid() {
+ zx_status_t rc;
+ zx_info_handle_basic_t Info;
+ if ((rc = _zx_object_get_info(_zx_process_self(), ZX_INFO_HANDLE_BASIC, &Info,
+ sizeof(Info), NULL, NULL)) != ZX_OK) {
+ Printf("libFuzzer: unable to get info about self: %s\n",
+ _zx_status_get_string(rc));
+ exit(1);
+ }
+ return Info.koid;
+}
+
+size_t GetPeakRSSMb() {
+ zx_status_t rc;
+ zx_info_task_stats_t Info;
+ if ((rc = _zx_object_get_info(_zx_process_self(), ZX_INFO_TASK_STATS, &Info,
+ sizeof(Info), NULL, NULL)) != ZX_OK) {
+ Printf("libFuzzer: unable to get info about self: %s\n",
+ _zx_status_get_string(rc));
+ exit(1);
+ }
+ return (Info.mem_private_bytes + Info.mem_shared_bytes) >> 20;
+}
+
+template <typename Fn>
+class RunOnDestruction {
+ public:
+ explicit RunOnDestruction(Fn fn) : fn_(fn) {}
+ ~RunOnDestruction() { fn_(); }
+
+ private:
+ Fn fn_;
+};
+
+template <typename Fn>
+RunOnDestruction<Fn> at_scope_exit(Fn fn) {
+ return RunOnDestruction<Fn>(fn);
+}
+
+int ExecuteCommand(const Command &Cmd) {
+ zx_status_t rc;
+
+ // Convert arguments to C array
+ auto Args = Cmd.getArguments();
+ size_t Argc = Args.size();
+ assert(Argc != 0);
+ std::unique_ptr<const char *[]> Argv(new const char *[Argc + 1]);
+ for (size_t i = 0; i < Argc; ++i)
+ Argv[i] = Args[i].c_str();
+ Argv[Argc] = nullptr;
+
+ // Determine output. On Fuchsia, the fuzzer is typically run as a component
+ // that lacks a mutable working directory. Fortunately, when this is the case
+ // a mutable output directory must be specified using "-artifact_prefix=...",
+ // so write the log file(s) there.
+ int FdOut = STDOUT_FILENO;
+ if (Cmd.hasOutputFile()) {
+ std::string Path;
+ if (Cmd.hasFlag("artifact_prefix"))
+ Path = Cmd.getFlagValue("artifact_prefix") + "/" + Cmd.getOutputFile();
+ else
+ Path = Cmd.getOutputFile();
+ FdOut = open(Path.c_str(), O_WRONLY | O_CREAT | O_TRUNC, 0);
+ if (FdOut == -1) {
+ Printf("libFuzzer: failed to open %s: %s\n", Path.c_str(),
+ strerror(errno));
+ return ZX_ERR_IO;
+ }
+ }
+ auto CloseFdOut = at_scope_exit([FdOut]() {
+ if (FdOut != STDOUT_FILENO)
+ close(FdOut);
+ });
+
+ // Determine stderr
+ int FdErr = STDERR_FILENO;
+ if (Cmd.isOutAndErrCombined())
+ FdErr = FdOut;
+
+ // Clone the file descriptors into the new process
+ fdio_spawn_action_t SpawnAction[] = {
+ {
+ .action = FDIO_SPAWN_ACTION_CLONE_FD,
+ .fd =
+ {
+ .local_fd = STDIN_FILENO,
+ .target_fd = STDIN_FILENO,
+ },
+ },
+ {
+ .action = FDIO_SPAWN_ACTION_CLONE_FD,
+ .fd =
+ {
+ .local_fd = FdOut,
+ .target_fd = STDOUT_FILENO,
+ },
+ },
+ {
+ .action = FDIO_SPAWN_ACTION_CLONE_FD,
+ .fd =
+ {
+ .local_fd = FdErr,
+ .target_fd = STDERR_FILENO,
+ },
+ },
+ };
+
+ // Start the process.
+ char ErrorMsg[FDIO_SPAWN_ERR_MSG_MAX_LENGTH];
+ zx_handle_t ProcessHandle = ZX_HANDLE_INVALID;
+ rc = fdio_spawn_etc(
+ ZX_HANDLE_INVALID, FDIO_SPAWN_CLONE_ALL & (~FDIO_SPAWN_CLONE_STDIO),
+ Argv[0], Argv.get(), nullptr, 3, SpawnAction, &ProcessHandle, ErrorMsg);
+ if (rc != ZX_OK) {
+ Printf("libFuzzer: failed to launch '%s': %s, %s\n", Argv[0], ErrorMsg,
+ _zx_status_get_string(rc));
+ return rc;
+ }
+ auto CloseHandle = at_scope_exit([&]() { _zx_handle_close(ProcessHandle); });
+
+ // Now join the process and return the exit status.
+ if ((rc = _zx_object_wait_one(ProcessHandle, ZX_PROCESS_TERMINATED,
+ ZX_TIME_INFINITE, nullptr)) != ZX_OK) {
+ Printf("libFuzzer: failed to join '%s': %s\n", Argv[0],
+ _zx_status_get_string(rc));
+ return rc;
+ }
+
+ zx_info_process_t Info;
+ if ((rc = _zx_object_get_info(ProcessHandle, ZX_INFO_PROCESS, &Info,
+ sizeof(Info), nullptr, nullptr)) != ZX_OK) {
+ Printf("libFuzzer: unable to get return code from '%s': %s\n", Argv[0],
+ _zx_status_get_string(rc));
+ return rc;
+ }
+
+ return Info.return_code;
+}
+
+const void *SearchMemory(const void *Data, size_t DataLen, const void *Patt,
+ size_t PattLen) {
+ return memmem(Data, DataLen, Patt, PattLen);
+}
+
+} // namespace fuzzer
+
+#endif // LIBFUZZER_FUCHSIA
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerUtilFuchsia.cpp
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
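
The Fuchsia file above defines a small RunOnDestruction/at_scope_exit scope guard so that ExecuteCommand releases its output descriptor and process handle on every return path. A minimal, self-contained sketch of the same pattern (the printed strings are illustrative only):

// Sketch of the scope-guard pattern used in FuzzerUtilFuchsia.cpp above.
// The lambda passed to at_scope_exit() runs when the guard leaves scope,
// covering early returns as well as the fall-through path.
#include <cstdio>

template <typename Fn> class RunOnDestruction {
public:
  explicit RunOnDestruction(Fn fn) : fn_(fn) {}
  ~RunOnDestruction() { fn_(); }
private:
  Fn fn_;
};

template <typename Fn> RunOnDestruction<Fn> at_scope_exit(Fn fn) {
  return RunOnDestruction<Fn>(fn);
}

int main() {
  auto Cleanup = at_scope_exit([]() { std::puts("cleanup runs last"); });
  std::puts("work happens first");
  return 0; // Cleanup's lambda fires as the guard is destroyed here.
}
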
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerUtilLinux.cpp
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerUtilLinux.cpp (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerUtilLinux.cpp (revision 351984)
@@ -0,0 +1,32 @@
+//===- FuzzerUtilLinux.cpp - Misc utils for Linux. ------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// Misc utils for Linux.
+//===----------------------------------------------------------------------===//
+#include "FuzzerDefs.h"
+#if LIBFUZZER_LINUX || LIBFUZZER_NETBSD || LIBFUZZER_FREEBSD || \
+ LIBFUZZER_OPENBSD
+#include "FuzzerCommand.h"
+
+#include <stdlib.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+
+
+namespace fuzzer {
+
+int ExecuteCommand(const Command &Cmd) {
+ std::string CmdLine = Cmd.toString();
+ int exit_code = system(CmdLine.c_str());
+ if (WIFEXITED(exit_code))
+ return WEXITSTATUS(exit_code);
+ return exit_code;
+}
+
+} // namespace fuzzer
+
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerUtilLinux.cpp
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
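
ExecuteCommand above runs the child via system(), which returns a wait status rather than a plain exit code; WIFEXITED/WEXITSTATUS recover the code for a normal exit. A hedged standalone sketch of that decoding (the command "true" is just an example):

// Sketch only: mirrors the WIFEXITED/WEXITSTATUS handling in
// FuzzerUtilLinux.cpp above. POSIX-only.
#include <cstdio>
#include <cstdlib>
#include <sys/wait.h>

int RunAndDecode(const char *CmdLine) {
  int Status = std::system(CmdLine); // Wait status, not an exit code.
  if (WIFEXITED(Status))
    return WEXITSTATUS(Status);      // Child exited normally.
  return Status;                     // Killed by signal etc.: pass through raw.
}

int main() {
  std::printf("exit code: %d\n", RunAndDecode("true"));
  return 0;
}
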
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerUtilPosix.cpp
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerUtilPosix.cpp (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerUtilPosix.cpp (revision 351984)
@@ -0,0 +1,172 @@
+//===- FuzzerUtilPosix.cpp - Misc utils for Posix. ------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// Misc utils implementation using Posix API.
+//===----------------------------------------------------------------------===//
+#include "FuzzerDefs.h"
+#if LIBFUZZER_POSIX
+#include "FuzzerIO.h"
+#include "FuzzerInternal.h"
+#include "FuzzerTracePC.h"
+#include <cassert>
+#include <chrono>
+#include <cstring>
+#include <errno.h>
+#include <iomanip>
+#include <signal.h>
+#include <stdio.h>
+#include <sys/mman.h>
+#include <sys/resource.h>
+#include <sys/syscall.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <thread>
+#include <unistd.h>
+
+namespace fuzzer {
+
+static void AlarmHandler(int, siginfo_t *, void *) {
+ Fuzzer::StaticAlarmCallback();
+}
+
+static void (*upstream_segv_handler)(int, siginfo_t *, void *);
+
+static void SegvHandler(int sig, siginfo_t *si, void *ucontext) {
+ assert(si->si_signo == SIGSEGV);
+ if (TPC.UnprotectLazyCounters(si->si_addr)) return;
+ if (upstream_segv_handler)
+ return upstream_segv_handler(sig, si, ucontext);
+ Fuzzer::StaticCrashSignalCallback();
+}
+
+static void CrashHandler(int, siginfo_t *, void *) {
+ Fuzzer::StaticCrashSignalCallback();
+}
+
+static void InterruptHandler(int, siginfo_t *, void *) {
+ Fuzzer::StaticInterruptCallback();
+}
+
+static void GracefulExitHandler(int, siginfo_t *, void *) {
+ Fuzzer::StaticGracefulExitCallback();
+}
+
+static void FileSizeExceedHandler(int, siginfo_t *, void *) {
+ Fuzzer::StaticFileSizeExceedCallback();
+}
+
+static void SetSigaction(int signum,
+ void (*callback)(int, siginfo_t *, void *)) {
+ struct sigaction sigact = {};
+ if (sigaction(signum, nullptr, &sigact)) {
+ Printf("libFuzzer: sigaction failed with %d\n", errno);
+ exit(1);
+ }
+ if (sigact.sa_flags & SA_SIGINFO) {
+ if (sigact.sa_sigaction) {
+ if (signum != SIGSEGV)
+ return;
+ upstream_segv_handler = sigact.sa_sigaction;
+ }
+ } else {
+ if (sigact.sa_handler != SIG_DFL && sigact.sa_handler != SIG_IGN &&
+ sigact.sa_handler != SIG_ERR)
+ return;
+ }
+
+ sigact = {};
+ sigact.sa_flags = SA_SIGINFO;
+ sigact.sa_sigaction = callback;
+ if (sigaction(signum, &sigact, 0)) {
+ Printf("libFuzzer: sigaction failed with %d\n", errno);
+ exit(1);
+ }
+}
+
+void SetTimer(int Seconds) {
+  struct itimerval T = {{Seconds, 0}, {Seconds, 0}};
+ if (setitimer(ITIMER_REAL, &T, nullptr)) {
+ Printf("libFuzzer: setitimer failed with %d\n", errno);
+ exit(1);
+ }
+ SetSigaction(SIGALRM, AlarmHandler);
+}
+
+bool Mprotect(void *Ptr, size_t Size, bool AllowReadWrite) {
+ return 0 == mprotect(Ptr, Size,
+ AllowReadWrite ? (PROT_READ | PROT_WRITE) : PROT_NONE);
+}
+
+void SetSignalHandler(const FuzzingOptions& Options) {
+ if (Options.UnitTimeoutSec > 0)
+ SetTimer(Options.UnitTimeoutSec / 2 + 1);
+ if (Options.HandleInt)
+ SetSigaction(SIGINT, InterruptHandler);
+ if (Options.HandleTerm)
+ SetSigaction(SIGTERM, InterruptHandler);
+ if (Options.HandleSegv)
+ SetSigaction(SIGSEGV, SegvHandler);
+ if (Options.HandleBus)
+ SetSigaction(SIGBUS, CrashHandler);
+ if (Options.HandleAbrt)
+ SetSigaction(SIGABRT, CrashHandler);
+ if (Options.HandleIll)
+ SetSigaction(SIGILL, CrashHandler);
+ if (Options.HandleFpe)
+ SetSigaction(SIGFPE, CrashHandler);
+ if (Options.HandleXfsz)
+ SetSigaction(SIGXFSZ, FileSizeExceedHandler);
+ if (Options.HandleUsr1)
+ SetSigaction(SIGUSR1, GracefulExitHandler);
+ if (Options.HandleUsr2)
+ SetSigaction(SIGUSR2, GracefulExitHandler);
+}
+
+void SleepSeconds(int Seconds) {
+ sleep(Seconds); // Use C API to avoid coverage from instrumented libc++.
+}
+
+unsigned long GetPid() { return (unsigned long)getpid(); }
+
+size_t GetPeakRSSMb() {
+ struct rusage usage;
+ if (getrusage(RUSAGE_SELF, &usage))
+ return 0;
+ if (LIBFUZZER_LINUX || LIBFUZZER_FREEBSD || LIBFUZZER_NETBSD ||
+ LIBFUZZER_OPENBSD) {
+ // ru_maxrss is in KiB
+ return usage.ru_maxrss >> 10;
+ } else if (LIBFUZZER_APPLE) {
+ // ru_maxrss is in bytes
+ return usage.ru_maxrss >> 20;
+ }
+ assert(0 && "GetPeakRSSMb() is not implemented for your platform");
+ return 0;
+}
+
+FILE *OpenProcessPipe(const char *Command, const char *Mode) {
+ return popen(Command, Mode);
+}
+
+const void *SearchMemory(const void *Data, size_t DataLen, const void *Patt,
+ size_t PattLen) {
+ return memmem(Data, DataLen, Patt, PattLen);
+}
+
+std::string DisassembleCmd(const std::string &FileName) {
+ return "objdump -d " + FileName;
+}
+
+std::string SearchRegexCmd(const std::string &Regex) {
+ return "grep '" + Regex + "'";
+}
+
+} // namespace fuzzer
+
+#endif // LIBFUZZER_POSIX
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerUtilPosix.cpp
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
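
SetSigaction above installs handlers with sigaction() and SA_SIGINFO rather than signal(), because the three-argument handler form receives a siginfo_t whose si_addr field carries the faulting address, which SegvHandler needs for UnprotectLazyCounters. A self-contained sketch of that installation; the handler body is illustrative:

// Sketch of SA_SIGINFO-style installation, as in SetSigaction() above.
#include <cstdio>
#include <cstdlib>
#include <signal.h>

static void Handler(int Sig, siginfo_t *SI, void *) {
  // si_addr is only meaningful for faults like SIGSEGV; printed as a demo.
  std::fprintf(stderr, "signal %d at address %p\n", Sig, SI->si_addr);
  _Exit(1);
}

int main() {
  struct sigaction SA = {};
  SA.sa_flags = SA_SIGINFO; // Ask for the three-argument handler form.
  SA.sa_sigaction = Handler;
  if (sigaction(SIGSEGV, &SA, nullptr)) {
    std::perror("sigaction");
    return 1;
  }
  return 0;
}
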
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerUtilWindows.cpp
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerUtilWindows.cpp (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerUtilWindows.cpp (revision 351984)
@@ -0,0 +1,199 @@
+//===- FuzzerUtilWindows.cpp - Misc utils for Windows. --------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// Misc utils implementation for Windows.
+//===----------------------------------------------------------------------===//
+#include "FuzzerDefs.h"
+#if LIBFUZZER_WINDOWS
+#include "FuzzerCommand.h"
+#include "FuzzerIO.h"
+#include "FuzzerInternal.h"
+#include <cassert>
+#include <chrono>
+#include <cstring>
+#include <errno.h>
+#include <iomanip>
+#include <signal.h>
+#include <stdio.h>
+#include <sys/types.h>
+#include <windows.h>
+
+// This must be included after windows.h.
+#include <psapi.h>
+
+namespace fuzzer {
+
+static const FuzzingOptions* HandlerOpt = nullptr;
+
+static LONG CALLBACK ExceptionHandler(PEXCEPTION_POINTERS ExceptionInfo) {
+ switch (ExceptionInfo->ExceptionRecord->ExceptionCode) {
+ case EXCEPTION_ACCESS_VIOLATION:
+ case EXCEPTION_ARRAY_BOUNDS_EXCEEDED:
+ case EXCEPTION_STACK_OVERFLOW:
+ if (HandlerOpt->HandleSegv)
+ Fuzzer::StaticCrashSignalCallback();
+ break;
+ case EXCEPTION_DATATYPE_MISALIGNMENT:
+ case EXCEPTION_IN_PAGE_ERROR:
+ if (HandlerOpt->HandleBus)
+ Fuzzer::StaticCrashSignalCallback();
+ break;
+ case EXCEPTION_ILLEGAL_INSTRUCTION:
+ case EXCEPTION_PRIV_INSTRUCTION:
+ if (HandlerOpt->HandleIll)
+ Fuzzer::StaticCrashSignalCallback();
+ break;
+ case EXCEPTION_FLT_DENORMAL_OPERAND:
+ case EXCEPTION_FLT_DIVIDE_BY_ZERO:
+ case EXCEPTION_FLT_INEXACT_RESULT:
+ case EXCEPTION_FLT_INVALID_OPERATION:
+ case EXCEPTION_FLT_OVERFLOW:
+ case EXCEPTION_FLT_STACK_CHECK:
+ case EXCEPTION_FLT_UNDERFLOW:
+ case EXCEPTION_INT_DIVIDE_BY_ZERO:
+ case EXCEPTION_INT_OVERFLOW:
+ if (HandlerOpt->HandleFpe)
+ Fuzzer::StaticCrashSignalCallback();
+ break;
+ // TODO: handle (Options.HandleXfsz)
+ }
+ return EXCEPTION_CONTINUE_SEARCH;
+}
+
+BOOL WINAPI CtrlHandler(DWORD dwCtrlType) {
+ switch (dwCtrlType) {
+ case CTRL_C_EVENT:
+ if (HandlerOpt->HandleInt)
+ Fuzzer::StaticInterruptCallback();
+ return TRUE;
+ case CTRL_BREAK_EVENT:
+ if (HandlerOpt->HandleTerm)
+ Fuzzer::StaticInterruptCallback();
+ return TRUE;
+ }
+ return FALSE;
+}
+
+void CALLBACK AlarmHandler(PVOID, BOOLEAN) {
+ Fuzzer::StaticAlarmCallback();
+}
+
+class TimerQ {
+ HANDLE TimerQueue;
+ public:
+ TimerQ() : TimerQueue(NULL) {}
+ ~TimerQ() {
+ if (TimerQueue)
+ DeleteTimerQueueEx(TimerQueue, NULL);
+ }
+ void SetTimer(int Seconds) {
+ if (!TimerQueue) {
+ TimerQueue = CreateTimerQueue();
+ if (!TimerQueue) {
+ Printf("libFuzzer: CreateTimerQueue failed.\n");
+ exit(1);
+ }
+ }
+ HANDLE Timer;
+ if (!CreateTimerQueueTimer(&Timer, TimerQueue, AlarmHandler, NULL,
+ Seconds*1000, Seconds*1000, 0)) {
+ Printf("libFuzzer: CreateTimerQueueTimer failed.\n");
+ exit(1);
+ }
+ }
+};
+
+static TimerQ Timer;
+
+static void CrashHandler(int) { Fuzzer::StaticCrashSignalCallback(); }
+
+bool Mprotect(void *Ptr, size_t Size, bool AllowReadWrite) {
+ return false; // UNIMPLEMENTED
+}
+
+void SetSignalHandler(const FuzzingOptions& Options) {
+ HandlerOpt = &Options;
+
+ if (Options.UnitTimeoutSec > 0)
+ Timer.SetTimer(Options.UnitTimeoutSec / 2 + 1);
+
+ if (Options.HandleInt || Options.HandleTerm)
+ if (!SetConsoleCtrlHandler(CtrlHandler, TRUE)) {
+ DWORD LastError = GetLastError();
+ Printf("libFuzzer: SetConsoleCtrlHandler failed (Error code: %lu).\n",
+ LastError);
+ exit(1);
+ }
+
+ if (Options.HandleSegv || Options.HandleBus || Options.HandleIll ||
+ Options.HandleFpe)
+ SetUnhandledExceptionFilter(ExceptionHandler);
+
+ if (Options.HandleAbrt)
+ if (SIG_ERR == signal(SIGABRT, CrashHandler)) {
+ Printf("libFuzzer: signal failed with %d\n", errno);
+ exit(1);
+ }
+}
+
+void SleepSeconds(int Seconds) { Sleep(Seconds * 1000); }
+
+unsigned long GetPid() { return GetCurrentProcessId(); }
+
+size_t GetPeakRSSMb() {
+ PROCESS_MEMORY_COUNTERS info;
+ if (!GetProcessMemoryInfo(GetCurrentProcess(), &info, sizeof(info)))
+ return 0;
+ return info.PeakWorkingSetSize >> 20;
+}
+
+FILE *OpenProcessPipe(const char *Command, const char *Mode) {
+ return _popen(Command, Mode);
+}
+
+int ExecuteCommand(const Command &Cmd) {
+ std::string CmdLine = Cmd.toString();
+ return system(CmdLine.c_str());
+}
+
+const void *SearchMemory(const void *Data, size_t DataLen, const void *Patt,
+ size_t PattLen) {
+ // TODO: make this implementation more efficient.
+ const char *Cdata = (const char *)Data;
+ const char *Cpatt = (const char *)Patt;
+
+ if (!Data || !Patt || DataLen == 0 || PattLen == 0 || DataLen < PattLen)
+ return NULL;
+
+ if (PattLen == 1)
+ return memchr(Data, *Cpatt, DataLen);
+
+ const char *End = Cdata + DataLen - PattLen + 1;
+
+ for (const char *It = Cdata; It < End; ++It)
+ if (It[0] == Cpatt[0] && memcmp(It, Cpatt, PattLen) == 0)
+ return It;
+
+ return NULL;
+}
+
+std::string DisassembleCmd(const std::string &FileName) {
+ Vector<std::string> command_vector;
+ command_vector.push_back("dumpbin /summary > nul");
+ if (ExecuteCommand(Command(command_vector)) == 0)
+ return "dumpbin /disasm " + FileName;
+ Printf("libFuzzer: couldn't find tool to disassemble (dumpbin)\n");
+ exit(1);
+}
+
+std::string SearchRegexCmd(const std::string &Regex) {
+ return "findstr /r \"" + Regex + "\"";
+}
+
+} // namespace fuzzer
+
+#endif // LIBFUZZER_WINDOWS
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerUtilWindows.cpp
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
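
Windows lacks memmem(), so SearchMemory above falls back to a naive scan with a first-byte fast check. A standalone copy of that scan with a tiny self-test (the haystack data is illustrative):

// Standalone copy of the naive scan used by the Windows SearchMemory above.
#include <cassert>
#include <cstring>

const void *NaiveSearch(const void *Data, size_t DataLen, const void *Patt,
                        size_t PattLen) {
  const char *Cdata = (const char *)Data;
  const char *Cpatt = (const char *)Patt;
  if (!Data || !Patt || PattLen == 0 || DataLen < PattLen)
    return nullptr;
  const char *End = Cdata + DataLen - PattLen + 1;
  for (const char *It = Cdata; It < End; ++It)
    if (It[0] == Cpatt[0] && std::memcmp(It, Cpatt, PattLen) == 0)
      return It; // First-byte check skips most memcmp calls.
  return nullptr;
}

int main() {
  const char Hay[] = "deadbeef";
  assert(NaiveSearch(Hay, 8, "beef", 4) == Hay + 4);
  assert(NaiveSearch(Hay, 8, "feed", 4) == nullptr);
  return 0;
}
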
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerValueBitMap.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerValueBitMap.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerValueBitMap.h (revision 351984)
@@ -0,0 +1,72 @@
+//===- FuzzerValueBitMap.h - INTERNAL - Bit map -----------------*- C++ -* ===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// ValueBitMap.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_FUZZER_VALUE_BIT_MAP_H
+#define LLVM_FUZZER_VALUE_BIT_MAP_H
+
+#include "FuzzerDefs.h"
+
+namespace fuzzer {
+
+// A bit map containing kMapSizeInBits bits, stored as kMapSizeInWords words.
+struct ValueBitMap {
+ static const size_t kMapSizeInBits = 1 << 16;
+  static const size_t kMapPrimeMod = 65371; // Largest prime < kMapSizeInBits.
+ static const size_t kBitsInWord = (sizeof(uintptr_t) * 8);
+ static const size_t kMapSizeInWords = kMapSizeInBits / kBitsInWord;
+ public:
+
+ // Clears all bits.
+ void Reset() { memset(Map, 0, sizeof(Map)); }
+
+ // Computes a hash function of Value and sets the corresponding bit.
+ // Returns true if the bit was changed from 0 to 1.
+ ATTRIBUTE_NO_SANITIZE_ALL
+ inline bool AddValue(uintptr_t Value) {
+ uintptr_t Idx = Value % kMapSizeInBits;
+ uintptr_t WordIdx = Idx / kBitsInWord;
+ uintptr_t BitIdx = Idx % kBitsInWord;
+ uintptr_t Old = Map[WordIdx];
+ uintptr_t New = Old | (1ULL << BitIdx);
+ Map[WordIdx] = New;
+ return New != Old;
+ }
+
+ ATTRIBUTE_NO_SANITIZE_ALL
+ inline bool AddValueModPrime(uintptr_t Value) {
+ return AddValue(Value % kMapPrimeMod);
+ }
+
+ inline bool Get(uintptr_t Idx) {
+ assert(Idx < kMapSizeInBits);
+ uintptr_t WordIdx = Idx / kBitsInWord;
+ uintptr_t BitIdx = Idx % kBitsInWord;
+ return Map[WordIdx] & (1ULL << BitIdx);
+ }
+
+ size_t SizeInBits() const { return kMapSizeInBits; }
+
+ template <class Callback>
+ ATTRIBUTE_NO_SANITIZE_ALL
+ void ForEach(Callback CB) const {
+ for (size_t i = 0; i < kMapSizeInWords; i++)
+ if (uintptr_t M = Map[i])
+ for (size_t j = 0; j < sizeof(M) * 8; j++)
+ if (M & ((uintptr_t)1 << j))
+ CB(i * sizeof(M) * 8 + j);
+ }
+
+ private:
+ ATTRIBUTE_ALIGNED(512) uintptr_t Map[kMapSizeInWords];
+};
+
+} // namespace fuzzer
+
+#endif // LLVM_FUZZER_VALUE_BIT_MAP_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/FuzzerValueBitMap.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
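
A hedged usage sketch for the ValueBitMap above: AddValue() hashes by modulo, sets the corresponding bit, and reports whether the bit flipped from 0 to 1. The miniature below reuses the header's word/bit indexing at a smaller size so it stays self-contained; the demo size kBits is an assumption, not the header's constant:

// Self-contained miniature of the ValueBitMap word/bit indexing above.
#include <cassert>
#include <cstddef>
#include <cstdint>

constexpr size_t kBits = 1 << 8; // Small demo size (header uses 1 << 16).
constexpr size_t kBitsInWord = sizeof(uintptr_t) * 8;
uintptr_t Map[kBits / kBitsInWord] = {};

bool AddValue(uintptr_t Value) {
  uintptr_t Idx = Value % kBits; // Hash: simple modulo.
  uintptr_t Old = Map[Idx / kBitsInWord];
  uintptr_t New = Old | ((uintptr_t)1 << (Idx % kBitsInWord));
  Map[Idx / kBitsInWord] = New;
  return New != Old; // True only on a 0 -> 1 flip.
}

int main() {
  assert(AddValue(42));                  // First insertion flips a bit.
  assert(!AddValue(42));                 // Re-inserting the same value does not.
  assert(AddValue(42 + kBits) == false); // Collides under modulo hashing.
  return 0;
}
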
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/utils/FuzzedDataProvider.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/utils/FuzzedDataProvider.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/utils/FuzzedDataProvider.h (revision 351984)
@@ -0,0 +1,245 @@
+//===- FuzzedDataProvider.h - Utility header for fuzz targets ---*- C++ -* ===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// A single-header library providing a utility class to break up an array of
+// bytes. Whenever run on the same input, it provides the same output, as long
+// as its methods are called in the same order, with the same arguments.
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_FUZZER_FUZZED_DATA_PROVIDER_H_
+#define LLVM_FUZZER_FUZZED_DATA_PROVIDER_H_
+
+#include <limits.h>
+#include <stddef.h>
+#include <stdint.h>
+
+#include <algorithm>
+#include <cstring>
+#include <initializer_list>
+#include <string>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+class FuzzedDataProvider {
+public:
+ // |data| is an array of length |size| that the FuzzedDataProvider wraps to
+ // provide more granular access. |data| must outlive the FuzzedDataProvider.
+ FuzzedDataProvider(const uint8_t *data, size_t size)
+ : data_ptr_(data), remaining_bytes_(size) {}
+ ~FuzzedDataProvider() = default;
+
+ // Returns a std::vector containing |num_bytes| of input data. If fewer than
+ // |num_bytes| of data remain, returns a shorter std::vector containing all
+  // of the data that's left. Can be used with any byte-sized type, such as
+ // char, unsigned char, uint8_t, etc.
+ template <typename T> std::vector<T> ConsumeBytes(size_t num_bytes) {
+ num_bytes = std::min(num_bytes, remaining_bytes_);
+ return ConsumeBytes<T>(num_bytes, num_bytes);
+ }
+
+  // Similar to |ConsumeBytes|, but also appends the terminator value at the end
+  // of the resulting vector. Useful when a mutable null-terminated C string is
+  // needed, for example. That is a rare case, though; when possible, prefer the
+  // |ConsumeBytes| or |ConsumeBytesAsString| methods.
+ template <typename T>
+ std::vector<T> ConsumeBytesWithTerminator(size_t num_bytes,
+ T terminator = 0) {
+ num_bytes = std::min(num_bytes, remaining_bytes_);
+ std::vector<T> result = ConsumeBytes<T>(num_bytes + 1, num_bytes);
+ result.back() = terminator;
+ return result;
+ }
+
+ // Returns a std::string containing |num_bytes| of input data. Using this and
+ // |.c_str()| on the resulting string is the best way to get an immutable
+ // null-terminated C string. If fewer than |num_bytes| of data remain, returns
+ // a shorter std::string containing all of the data that's left.
+ std::string ConsumeBytesAsString(size_t num_bytes) {
+ static_assert(sizeof(std::string::value_type) == sizeof(uint8_t),
+ "ConsumeBytesAsString cannot convert the data to a string.");
+
+ num_bytes = std::min(num_bytes, remaining_bytes_);
+ std::string result(
+ reinterpret_cast<const std::string::value_type *>(data_ptr_),
+ num_bytes);
+ Advance(num_bytes);
+ return result;
+ }
+
+ // Returns a number in the range [min, max] by consuming bytes from the
+ // input data. The value might not be uniformly distributed in the given
+ // range. If there's no input data left, always returns |min|. |min| must
+ // be less than or equal to |max|.
+ template <typename T> T ConsumeIntegralInRange(T min, T max) {
+ static_assert(std::is_integral<T>::value, "An integral type is required.");
+ static_assert(sizeof(T) <= sizeof(uint64_t), "Unsupported integral type.");
+
+ if (min > max)
+ abort();
+
+ // Use the biggest type possible to hold the range and the result.
+ uint64_t range = static_cast<uint64_t>(max) - min;
+ uint64_t result = 0;
+ size_t offset = 0;
+
+ while (offset < sizeof(T) * CHAR_BIT && (range >> offset) > 0 &&
+ remaining_bytes_ != 0) {
+ // Pull bytes off the end of the seed data. Experimentally, this seems to
+ // allow the fuzzer to more easily explore the input space. This makes
+ // sense, since it works by modifying inputs that caused new code to run,
+ // and this data is often used to encode length of data read by
+      // |ConsumeBytes|. Separating out read lengths makes it easier to modify
+ // contents of the data that is actually read.
+ --remaining_bytes_;
+ result = (result << CHAR_BIT) | data_ptr_[remaining_bytes_];
+ offset += CHAR_BIT;
+ }
+
+ // Avoid division by 0, in case |range + 1| results in overflow.
+ if (range != std::numeric_limits<decltype(range)>::max())
+ result = result % (range + 1);
+
+ return static_cast<T>(min + result);
+ }
+
+ // Returns a std::string of length from 0 to |max_length|. When it runs out of
+ // input data, returns what remains of the input. Designed to be more stable
+ // with respect to a fuzzer inserting characters than just picking a random
+ // length and then consuming that many bytes with |ConsumeBytes|.
+ std::string ConsumeRandomLengthString(size_t max_length) {
+ // Reads bytes from the start of |data_ptr_|. Maps "\\" to "\", and maps "\"
+ // followed by anything else to the end of the string. As a result of this
+ // logic, a fuzzer can insert characters into the string, and the string
+ // will be lengthened to include those new characters, resulting in a more
+ // stable fuzzer than picking the length of a string independently from
+ // picking its contents.
+ std::string result;
+
+    // Reserve the anticipated capacity to prevent several reallocations.
+ result.reserve(std::min(max_length, remaining_bytes_));
+ for (size_t i = 0; i < max_length && remaining_bytes_ != 0; ++i) {
+ char next = ConvertUnsignedToSigned<char>(data_ptr_[0]);
+ Advance(1);
+ if (next == '\\' && remaining_bytes_ != 0) {
+ next = ConvertUnsignedToSigned<char>(data_ptr_[0]);
+ Advance(1);
+ if (next != '\\')
+ break;
+ }
+ result += next;
+ }
+
+ result.shrink_to_fit();
+ return result;
+ }
+
+ // Returns a std::vector containing all remaining bytes of the input data.
+ template <typename T> std::vector<T> ConsumeRemainingBytes() {
+ return ConsumeBytes<T>(remaining_bytes_);
+ }
+
+  // Returns a std::string containing all remaining bytes of the input data.
+  // Prefer using |ConsumeRemainingBytes| unless you actually need a std::string
+  // object.
+ std::string ConsumeRemainingBytesAsString() {
+ return ConsumeBytesAsString(remaining_bytes_);
+ }
+
+ // Returns a number in the range [Type's min, Type's max]. The value might
+ // not be uniformly distributed in the given range. If there's no input data
+ // left, always returns |min|.
+ template <typename T> T ConsumeIntegral() {
+ return ConsumeIntegralInRange(std::numeric_limits<T>::min(),
+ std::numeric_limits<T>::max());
+ }
+
+ // Reads one byte and returns a bool, or false when no data remains.
+ bool ConsumeBool() { return 1 & ConsumeIntegral<uint8_t>(); }
+
+ // Returns a copy of a value selected from a fixed-size |array|.
+ template <typename T, size_t size>
+ T PickValueInArray(const T (&array)[size]) {
+    static_assert(size > 0, "The array must be non-empty.");
+ return array[ConsumeIntegralInRange<size_t>(0, size - 1)];
+ }
+
+ template <typename T>
+ T PickValueInArray(std::initializer_list<const T> list) {
+ // static_assert(list.size() > 0, "The array must be non empty.");
+ return *(list.begin() + ConsumeIntegralInRange<size_t>(0, list.size() - 1));
+ }
+
+ // Return an enum value. The enum must start at 0 and be contiguous. It must
+ // also contain |kMaxValue| aliased to its largest (inclusive) value. Such as:
+ // enum class Foo { SomeValue, OtherValue, kMaxValue = OtherValue };
+ template <typename T> T ConsumeEnum() {
+ static_assert(std::is_enum<T>::value, "|T| must be an enum type.");
+ return static_cast<T>(ConsumeIntegralInRange<uint32_t>(
+ 0, static_cast<uint32_t>(T::kMaxValue)));
+ }
+
+ // Reports the remaining bytes available for fuzzed input.
+ size_t remaining_bytes() { return remaining_bytes_; }
+
+private:
+ FuzzedDataProvider(const FuzzedDataProvider &) = delete;
+ FuzzedDataProvider &operator=(const FuzzedDataProvider &) = delete;
+
+ void Advance(size_t num_bytes) {
+ if (num_bytes > remaining_bytes_)
+ abort();
+
+ data_ptr_ += num_bytes;
+ remaining_bytes_ -= num_bytes;
+ }
+
+ template <typename T>
+ std::vector<T> ConsumeBytes(size_t size, size_t num_bytes_to_consume) {
+ static_assert(sizeof(T) == sizeof(uint8_t), "Incompatible data type.");
+
+ // The point of using the size-based constructor below is to increase the
+ // odds of having a vector object with capacity being equal to the length.
+ // That part is always implementation specific, but at least both libc++ and
+ // libstdc++ allocate the requested number of bytes in that constructor,
+ // which seems to be a natural choice for other implementations as well.
+ // To increase the odds even more, we also call |shrink_to_fit| below.
+ std::vector<T> result(size);
+ std::memcpy(result.data(), data_ptr_, num_bytes_to_consume);
+ Advance(num_bytes_to_consume);
+
+ // Even though |shrink_to_fit| is also implementation specific, we expect it
+ // to provide an additional assurance in case vector's constructor allocated
+ // a buffer which is larger than the actual amount of data we put inside it.
+ result.shrink_to_fit();
+ return result;
+ }
+
+ template <typename TS, typename TU> TS ConvertUnsignedToSigned(TU value) {
+ static_assert(sizeof(TS) == sizeof(TU), "Incompatible data types.");
+ static_assert(!std::numeric_limits<TU>::is_signed,
+ "Source type must be unsigned.");
+
+ // TODO(Dor1s): change to `if constexpr` once C++17 becomes mainstream.
+ if (std::numeric_limits<TS>::is_modulo)
+ return static_cast<TS>(value);
+
+    // Avoid using implementation-defined unsigned-to-signed conversions.
+ // To learn more, see https://stackoverflow.com/questions/13150449.
+ if (value <= std::numeric_limits<TS>::max())
+ return static_cast<TS>(value);
+ else {
+ constexpr auto TS_min = std::numeric_limits<TS>::min();
+      // Cast through TS (not char) so that wider signed types convert
+      // correctly.
+      return TS_min + static_cast<TS>(value - TS_min);
+ }
+ }
+
+ const uint8_t *data_ptr_;
+ size_t remaining_bytes_;
+};
+
+#endif // LLVM_FUZZER_FUZZED_DATA_PROVIDER_H_
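
The intended consumer of FuzzedDataProvider.h above is a libFuzzer target. A sketch under the assumption that the header is on the include path; the parsed fields (Strict, Port, Host, Body) are purely illustrative:

// Sketch of a libFuzzer target using FuzzedDataProvider; build with
// -fsanitize=fuzzer. The parsed fields are hypothetical examples.
#include <cstddef>
#include <cstdint>
#include <string>
#include "FuzzedDataProvider.h"

extern "C" int LLVMFuzzerTestOneInput(const uint8_t *Data, size_t Size) {
  FuzzedDataProvider Provider(Data, Size);
  // Integral values come off the end of the input, contents off the front,
  // so the fuzzer can mutate one without perturbing the other.
  bool Strict = Provider.ConsumeBool();
  int Port = Provider.ConsumeIntegralInRange<int>(1, 65535);
  std::string Host = Provider.ConsumeRandomLengthString(64);
  std::string Body = Provider.ConsumeRemainingBytesAsString();
  (void)Strict; (void)Port; (void)Host; (void)Body; // Feed the API under test.
  return 0;
}
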
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/README.txt
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/README.txt (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/README.txt (revision 351984)
@@ -0,0 +1 @@
+See http://llvm.org/docs/LibFuzzer.html
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/fuzzer/README.txt
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/gwp_asan/definitions.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/gwp_asan/definitions.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/gwp_asan/definitions.h (revision 351984)
@@ -0,0 +1,29 @@
+//===-- gwp_asan_definitions.h ----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef GWP_ASAN_DEFINITIONS_H_
+#define GWP_ASAN_DEFINITIONS_H_
+
+#define TLS_INITIAL_EXEC __thread __attribute__((tls_model("initial-exec")))
+
+#ifdef LIKELY
+# undef LIKELY
+#endif // defined(LIKELY)
+#define LIKELY(X) __builtin_expect(!!(X), 1)
+
+#ifdef UNLIKELY
+# undef UNLIKELY
+#endif // defined(UNLIKELY)
+#define UNLIKELY(X) __builtin_expect(!!(X), 0)
+
+#ifdef ALWAYS_INLINE
+# undef ALWAYS_INLINE
+#endif // defined(ALWAYS_INLINE)
+#define ALWAYS_INLINE inline __attribute__((always_inline))
+
+#endif // GWP_ASAN_DEFINITIONS_H_
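
The LIKELY/UNLIKELY macros above wrap __builtin_expect (GCC/Clang only). A one-function sketch of how a cold error branch is annotated; CheckedDiv is a hypothetical example:

// Sketch: branch hints via the macros defined above.
#define LIKELY(X) __builtin_expect(!!(X), 1)
#define UNLIKELY(X) __builtin_expect(!!(X), 0)

int CheckedDiv(int A, int B) {
  if (UNLIKELY(B == 0)) // Hint: the error branch is cold.
    return 0;
  return A / B;         // The hot path stays on the fall-through.
}
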
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/gwp_asan/guarded_pool_allocator.cpp
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/gwp_asan/guarded_pool_allocator.cpp (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/gwp_asan/guarded_pool_allocator.cpp (revision 351984)
@@ -0,0 +1,510 @@
+//===-- guarded_pool_allocator.cpp ------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "gwp_asan/guarded_pool_allocator.h"
+
+#include "gwp_asan/options.h"
+
+// RHEL creates the PRIu64 format macro (for printing uint64_t's) only when this
+// macro is defined before including <inttypes.h>.
+#ifndef __STDC_FORMAT_MACROS
+ #define __STDC_FORMAT_MACROS 1
+#endif
+
+#include <assert.h>
+#include <inttypes.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+
+using AllocationMetadata = gwp_asan::GuardedPoolAllocator::AllocationMetadata;
+using Error = gwp_asan::GuardedPoolAllocator::Error;
+
+namespace gwp_asan {
+namespace {
+// The pointer to the singleton instance of this class. Set during
+// initialisation, it allows the signal handler to find the allocator in order
+// to deduce the root cause of failures. Must not be referenced by users
+// outside this translation unit, to avoid the init-order fiasco.
+GuardedPoolAllocator *SingletonPtr = nullptr;
+
+class ScopedBoolean {
+public:
+ ScopedBoolean(bool &B) : Bool(B) { Bool = true; }
+ ~ScopedBoolean() { Bool = false; }
+
+private:
+ bool &Bool;
+};
+
+void defaultPrintStackTrace(uintptr_t *Trace, options::Printf_t Printf) {
+ if (Trace[0] == 0)
+ Printf(" <unknown (does your allocator support backtracing?)>\n");
+
+ for (size_t i = 0; Trace[i] != 0; ++i) {
+ Printf(" #%zu 0x%zx in <unknown>\n", i, Trace[i]);
+ }
+ Printf("\n");
+}
+} // anonymous namespace
+
+// Gets the singleton implementation of this class. Thread-compatible until
+// init() is called, thread-safe afterwards.
+GuardedPoolAllocator *getSingleton() { return SingletonPtr; }
+
+void GuardedPoolAllocator::AllocationMetadata::RecordAllocation(
+ uintptr_t AllocAddr, size_t AllocSize, options::Backtrace_t Backtrace) {
+ Addr = AllocAddr;
+ Size = AllocSize;
+ IsDeallocated = false;
+
+ // TODO(hctim): Ask the caller to provide the thread ID, so we don't waste
+ // other thread's time getting the thread ID under lock.
+ AllocationTrace.ThreadID = getThreadID();
+ DeallocationTrace.ThreadID = kInvalidThreadID;
+ if (Backtrace)
+ Backtrace(AllocationTrace.Trace, kMaximumStackFrames);
+ else
+ AllocationTrace.Trace[0] = 0;
+ DeallocationTrace.Trace[0] = 0;
+}
+
+void GuardedPoolAllocator::AllocationMetadata::RecordDeallocation(
+ options::Backtrace_t Backtrace) {
+ IsDeallocated = true;
+ // Ensure that the unwinder is not called if the recursive flag is set,
+ // otherwise non-reentrant unwinders may deadlock.
+ if (Backtrace && !ThreadLocals.RecursiveGuard) {
+ ScopedBoolean B(ThreadLocals.RecursiveGuard);
+ Backtrace(DeallocationTrace.Trace, kMaximumStackFrames);
+ } else {
+ DeallocationTrace.Trace[0] = 0;
+ }
+ DeallocationTrace.ThreadID = getThreadID();
+}
+
+void GuardedPoolAllocator::init(const options::Options &Opts) {
+ // Note: We return from the constructor here if GWP-ASan is not available.
+ // This will stop heap-allocation of class members, as well as mmap() of the
+ // guarded slots.
+ if (!Opts.Enabled || Opts.SampleRate == 0 ||
+ Opts.MaxSimultaneousAllocations == 0)
+ return;
+
+ // TODO(hctim): Add a death unit test for this.
+ if (SingletonPtr) {
+ (*SingletonPtr->Printf)(
+ "GWP-ASan Error: init() has already been called.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (Opts.SampleRate < 0) {
+ Opts.Printf("GWP-ASan Error: SampleRate is < 0.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (Opts.SampleRate > INT32_MAX) {
+ Opts.Printf("GWP-ASan Error: SampleRate is > 2^31.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (Opts.MaxSimultaneousAllocations < 0) {
+ Opts.Printf("GWP-ASan Error: MaxSimultaneousAllocations is < 0.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ SingletonPtr = this;
+
+ MaxSimultaneousAllocations = Opts.MaxSimultaneousAllocations;
+
+ PageSize = getPlatformPageSize();
+
+ PerfectlyRightAlign = Opts.PerfectlyRightAlign;
+ Printf = Opts.Printf;
+ Backtrace = Opts.Backtrace;
+ if (Opts.PrintBacktrace)
+ PrintBacktrace = Opts.PrintBacktrace;
+ else
+ PrintBacktrace = defaultPrintStackTrace;
+
+ size_t PoolBytesRequired =
+ PageSize * (1 + MaxSimultaneousAllocations) +
+ MaxSimultaneousAllocations * maximumAllocationSize();
+ void *GuardedPoolMemory = mapMemory(PoolBytesRequired);
+
+ size_t BytesRequired = MaxSimultaneousAllocations * sizeof(*Metadata);
+ Metadata = reinterpret_cast<AllocationMetadata *>(mapMemory(BytesRequired));
+ markReadWrite(Metadata, BytesRequired);
+
+ // Allocate memory and set up the free pages queue.
+ BytesRequired = MaxSimultaneousAllocations * sizeof(*FreeSlots);
+ FreeSlots = reinterpret_cast<size_t *>(mapMemory(BytesRequired));
+ markReadWrite(FreeSlots, BytesRequired);
+
+ // Multiply the sample rate by 2 to give a good, fast approximation for (1 /
+ // SampleRate) chance of sampling.
+ if (Opts.SampleRate != 1)
+ AdjustedSampleRate = static_cast<uint32_t>(Opts.SampleRate) * 2;
+ else
+ AdjustedSampleRate = 1;
+
+ GuardedPagePool = reinterpret_cast<uintptr_t>(GuardedPoolMemory);
+ GuardedPagePoolEnd =
+ reinterpret_cast<uintptr_t>(GuardedPoolMemory) + PoolBytesRequired;
+
+ // Ensure that signal handlers are installed as late as possible, as the class
+ // is not thread-safe until init() is finished, and thus a SIGSEGV may cause a
+  // race on members if received during init().
+ if (Opts.InstallSignalHandlers)
+ installSignalHandlers();
+}
+
+void *GuardedPoolAllocator::allocate(size_t Size) {
+ // GuardedPagePoolEnd == 0 when GWP-ASan is disabled. If we are disabled, fall
+ // back to the supporting allocator.
+ if (GuardedPagePoolEnd == 0)
+ return nullptr;
+
+  // Protect against recursion.
+ if (ThreadLocals.RecursiveGuard)
+ return nullptr;
+ ScopedBoolean SB(ThreadLocals.RecursiveGuard);
+
+ if (Size == 0 || Size > maximumAllocationSize())
+ return nullptr;
+
+ size_t Index;
+ {
+ ScopedLock L(PoolMutex);
+ Index = reserveSlot();
+ }
+
+ if (Index == kInvalidSlotID)
+ return nullptr;
+
+ uintptr_t Ptr = slotToAddr(Index);
+ Ptr += allocationSlotOffset(Size);
+ AllocationMetadata *Meta = addrToMetadata(Ptr);
+
+ // If a slot is multiple pages in size, and the allocation takes up a single
+  // page, we can improve overflow detection by leaving the unused pages
+  // unmapped.
+ markReadWrite(reinterpret_cast<void *>(getPageAddr(Ptr)), Size);
+
+ Meta->RecordAllocation(Ptr, Size, Backtrace);
+
+ return reinterpret_cast<void *>(Ptr);
+}
+
+void GuardedPoolAllocator::deallocate(void *Ptr) {
+ assert(pointerIsMine(Ptr) && "Pointer is not mine!");
+ uintptr_t UPtr = reinterpret_cast<uintptr_t>(Ptr);
+ uintptr_t SlotStart = slotToAddr(addrToSlot(UPtr));
+ AllocationMetadata *Meta = addrToMetadata(UPtr);
+ if (Meta->Addr != UPtr) {
+ reportError(UPtr, Error::INVALID_FREE);
+ exit(EXIT_FAILURE);
+ }
+
+ // Intentionally scope the mutex here, so that other threads can access the
+ // pool during the expensive markInaccessible() call.
+ {
+ ScopedLock L(PoolMutex);
+ if (Meta->IsDeallocated) {
+ reportError(UPtr, Error::DOUBLE_FREE);
+ exit(EXIT_FAILURE);
+ }
+
+ // Ensure that the deallocation is recorded before marking the page as
+ // inaccessible. Otherwise, a racy use-after-free will have inconsistent
+ // metadata.
+ Meta->RecordDeallocation(Backtrace);
+ }
+
+ markInaccessible(reinterpret_cast<void *>(SlotStart),
+ maximumAllocationSize());
+
+ // And finally, lock again to release the slot back into the pool.
+ ScopedLock L(PoolMutex);
+ freeSlot(addrToSlot(UPtr));
+}
+
+size_t GuardedPoolAllocator::getSize(const void *Ptr) {
+ assert(pointerIsMine(Ptr));
+ ScopedLock L(PoolMutex);
+ AllocationMetadata *Meta = addrToMetadata(reinterpret_cast<uintptr_t>(Ptr));
+ assert(Meta->Addr == reinterpret_cast<uintptr_t>(Ptr));
+ return Meta->Size;
+}
+
+size_t GuardedPoolAllocator::maximumAllocationSize() const { return PageSize; }
+
+AllocationMetadata *GuardedPoolAllocator::addrToMetadata(uintptr_t Ptr) const {
+ return &Metadata[addrToSlot(Ptr)];
+}
+
+size_t GuardedPoolAllocator::addrToSlot(uintptr_t Ptr) const {
+ assert(pointerIsMine(reinterpret_cast<void *>(Ptr)));
+ size_t ByteOffsetFromPoolStart = Ptr - GuardedPagePool;
+ return ByteOffsetFromPoolStart / (maximumAllocationSize() + PageSize);
+}
+
+uintptr_t GuardedPoolAllocator::slotToAddr(size_t N) const {
+ return GuardedPagePool + (PageSize * (1 + N)) + (maximumAllocationSize() * N);
+}
+
+uintptr_t GuardedPoolAllocator::getPageAddr(uintptr_t Ptr) const {
+ assert(pointerIsMine(reinterpret_cast<void *>(Ptr)));
+ return Ptr & ~(static_cast<uintptr_t>(PageSize) - 1);
+}
+
+bool GuardedPoolAllocator::isGuardPage(uintptr_t Ptr) const {
+ assert(pointerIsMine(reinterpret_cast<void *>(Ptr)));
+ size_t PageOffsetFromPoolStart = (Ptr - GuardedPagePool) / PageSize;
+ size_t PagesPerSlot = maximumAllocationSize() / PageSize;
+ return (PageOffsetFromPoolStart % (PagesPerSlot + 1)) == 0;
+}
+
+size_t GuardedPoolAllocator::reserveSlot() {
+ // Avoid potential reuse of a slot before we have made at least a single
+ // allocation in each slot. Helps with our use-after-free detection.
+ if (NumSampledAllocations < MaxSimultaneousAllocations)
+ return NumSampledAllocations++;
+
+ if (FreeSlotsLength == 0)
+ return kInvalidSlotID;
+
+ size_t ReservedIndex = getRandomUnsigned32() % FreeSlotsLength;
+ size_t SlotIndex = FreeSlots[ReservedIndex];
+ FreeSlots[ReservedIndex] = FreeSlots[--FreeSlotsLength];
+ return SlotIndex;
+}
+
+void GuardedPoolAllocator::freeSlot(size_t SlotIndex) {
+ assert(FreeSlotsLength < MaxSimultaneousAllocations);
+ FreeSlots[FreeSlotsLength++] = SlotIndex;
+}
+
+uintptr_t GuardedPoolAllocator::allocationSlotOffset(size_t Size) const {
+ assert(Size > 0);
+
+ bool ShouldRightAlign = getRandomUnsigned32() % 2 == 0;
+ if (!ShouldRightAlign)
+ return 0;
+
+ uintptr_t Offset = maximumAllocationSize();
+ if (!PerfectlyRightAlign) {
+ if (Size == 3)
+ Size = 4;
+ else if (Size > 4 && Size <= 8)
+ Size = 8;
+ else if (Size > 8 && (Size % 16) != 0)
+ Size += 16 - (Size % 16);
+ }
+ Offset -= Size;
+ return Offset;
+}
+
+void GuardedPoolAllocator::reportError(uintptr_t AccessPtr, Error E) {
+ if (SingletonPtr)
+ SingletonPtr->reportErrorInternal(AccessPtr, E);
+}
+
+size_t GuardedPoolAllocator::getNearestSlot(uintptr_t Ptr) const {
+ if (Ptr <= GuardedPagePool + PageSize)
+ return 0;
+ if (Ptr > GuardedPagePoolEnd - PageSize)
+ return MaxSimultaneousAllocations - 1;
+
+ if (!isGuardPage(Ptr))
+ return addrToSlot(Ptr);
+
+ if (Ptr % PageSize <= PageSize / 2)
+ return addrToSlot(Ptr - PageSize); // Round down.
+ return addrToSlot(Ptr + PageSize); // Round up.
+}
+
+Error GuardedPoolAllocator::diagnoseUnknownError(uintptr_t AccessPtr,
+ AllocationMetadata **Meta) {
+ // Let's try and figure out what the source of this error is.
+ if (isGuardPage(AccessPtr)) {
+ size_t Slot = getNearestSlot(AccessPtr);
+ AllocationMetadata *SlotMeta = addrToMetadata(slotToAddr(Slot));
+
+ // Ensure that this slot was allocated once upon a time.
+ if (!SlotMeta->Addr)
+ return Error::UNKNOWN;
+ *Meta = SlotMeta;
+
+ if (SlotMeta->Addr < AccessPtr)
+ return Error::BUFFER_OVERFLOW;
+ return Error::BUFFER_UNDERFLOW;
+ }
+
+ // Access wasn't a guard page, check for use-after-free.
+ AllocationMetadata *SlotMeta = addrToMetadata(AccessPtr);
+ if (SlotMeta->IsDeallocated) {
+ *Meta = SlotMeta;
+ return Error::USE_AFTER_FREE;
+ }
+
+ // If we have reached here, the error is still unknown. There is no metadata
+ // available.
+ *Meta = nullptr;
+ return Error::UNKNOWN;
+}
+
+namespace {
+// Prints the provided error and metadata information.
+void printErrorType(Error E, uintptr_t AccessPtr, AllocationMetadata *Meta,
+ options::Printf_t Printf, uint64_t ThreadID) {
+  // Print using intermediate strings. Platforms like Android don't like it
+  // when you print multiple times to the same line, as there may be a newline
+ // appended to a log file automatically per Printf() call.
+ const char *ErrorString;
+ switch (E) {
+ case Error::UNKNOWN:
+ ErrorString = "GWP-ASan couldn't automatically determine the source of "
+ "the memory error. It was likely caused by a wild memory "
+ "access into the GWP-ASan pool. The error occured";
+ break;
+ case Error::USE_AFTER_FREE:
+ ErrorString = "Use after free";
+ break;
+ case Error::DOUBLE_FREE:
+ ErrorString = "Double free";
+ break;
+ case Error::INVALID_FREE:
+ ErrorString = "Invalid (wild) free";
+ break;
+ case Error::BUFFER_OVERFLOW:
+ ErrorString = "Buffer overflow";
+ break;
+ case Error::BUFFER_UNDERFLOW:
+ ErrorString = "Buffer underflow";
+ break;
+ }
+
+ constexpr size_t kDescriptionBufferLen = 128;
+ char DescriptionBuffer[kDescriptionBufferLen];
+ if (Meta) {
+ if (E == Error::USE_AFTER_FREE) {
+ snprintf(DescriptionBuffer, kDescriptionBufferLen,
+ "(%zu byte%s into a %zu-byte allocation at 0x%zx)",
+ AccessPtr - Meta->Addr, (AccessPtr - Meta->Addr == 1) ? "" : "s",
+ Meta->Size, Meta->Addr);
+ } else if (AccessPtr < Meta->Addr) {
+ snprintf(DescriptionBuffer, kDescriptionBufferLen,
+ "(%zu byte%s to the left of a %zu-byte allocation at 0x%zx)",
+ Meta->Addr - AccessPtr, (Meta->Addr - AccessPtr == 1) ? "" : "s",
+ Meta->Size, Meta->Addr);
+ } else if (AccessPtr > Meta->Addr) {
+ snprintf(DescriptionBuffer, kDescriptionBufferLen,
+ "(%zu byte%s to the right of a %zu-byte allocation at 0x%zx)",
+ AccessPtr - Meta->Addr, (AccessPtr - Meta->Addr == 1) ? "" : "s",
+ Meta->Size, Meta->Addr);
+ } else {
+ snprintf(DescriptionBuffer, kDescriptionBufferLen,
+ "(a %zu-byte allocation)", Meta->Size);
+ }
+ }
+
+ // Possible number of digits of a 64-bit number: ceil(log10(2^64)) == 20. Add
+ // a null terminator, and round to the nearest 8-byte boundary.
+ constexpr size_t kThreadBufferLen = 24;
+ char ThreadBuffer[kThreadBufferLen];
+ if (ThreadID == GuardedPoolAllocator::kInvalidThreadID)
+ snprintf(ThreadBuffer, kThreadBufferLen, "<unknown>");
+ else
+ snprintf(ThreadBuffer, kThreadBufferLen, "%" PRIu64, ThreadID);
+
+ Printf("%s at 0x%zx %s by thread %s here:\n", ErrorString, AccessPtr,
+ DescriptionBuffer, ThreadBuffer);
+}
+
+void printAllocDeallocTraces(uintptr_t AccessPtr, AllocationMetadata *Meta,
+ options::Printf_t Printf,
+ options::PrintBacktrace_t PrintBacktrace) {
+ assert(Meta != nullptr && "Metadata is non-null for printAllocDeallocTraces");
+
+ if (Meta->IsDeallocated) {
+ if (Meta->DeallocationTrace.ThreadID ==
+ GuardedPoolAllocator::kInvalidThreadID)
+ Printf("0x%zx was deallocated by thread <unknown> here:\n", AccessPtr);
+ else
+ Printf("0x%zx was deallocated by thread %zu here:\n", AccessPtr,
+ Meta->DeallocationTrace.ThreadID);
+
+ PrintBacktrace(Meta->DeallocationTrace.Trace, Printf);
+ }
+
+ if (Meta->AllocationTrace.ThreadID == GuardedPoolAllocator::kInvalidThreadID)
+ Printf("0x%zx was allocated by thread <unknown> here:\n", Meta->Addr);
+ else
+ Printf("0x%zx was allocated by thread %zu here:\n", Meta->Addr,
+ Meta->AllocationTrace.ThreadID);
+
+ PrintBacktrace(Meta->AllocationTrace.Trace, Printf);
+}
+
+struct ScopedEndOfReportDecorator {
+ ScopedEndOfReportDecorator(options::Printf_t Printf) : Printf(Printf) {}
+ ~ScopedEndOfReportDecorator() { Printf("*** End GWP-ASan report ***\n"); }
+ options::Printf_t Printf;
+};
+} // anonymous namespace
+
+void GuardedPoolAllocator::reportErrorInternal(uintptr_t AccessPtr, Error E) {
+ if (!pointerIsMine(reinterpret_cast<void *>(AccessPtr))) {
+ return;
+ }
+
+ // Attempt to prevent races to re-use the same slot that triggered this error.
+ // This does not guarantee that there are no races, because another thread can
+ // take the locks during the time that the signal handler is being called.
+ PoolMutex.tryLock();
+ ThreadLocals.RecursiveGuard = true;
+
+ Printf("*** GWP-ASan detected a memory error ***\n");
+ ScopedEndOfReportDecorator Decorator(Printf);
+
+ AllocationMetadata *Meta = nullptr;
+
+ if (E == Error::UNKNOWN) {
+ E = diagnoseUnknownError(AccessPtr, &Meta);
+ } else {
+ size_t Slot = getNearestSlot(AccessPtr);
+ Meta = addrToMetadata(slotToAddr(Slot));
+ // Ensure that this slot has been previously allocated.
+ if (!Meta->Addr)
+ Meta = nullptr;
+ }
+
+ // Print the error information.
+ uint64_t ThreadID = getThreadID();
+ printErrorType(E, AccessPtr, Meta, Printf, ThreadID);
+ if (Backtrace) {
+ static constexpr unsigned kMaximumStackFramesForCrashTrace = 128;
+ uintptr_t Trace[kMaximumStackFramesForCrashTrace];
+ Backtrace(Trace, kMaximumStackFramesForCrashTrace);
+
+ PrintBacktrace(Trace, Printf);
+ } else {
+ Printf(" <unknown (does your allocator support backtracing?)>\n\n");
+ }
+
+ if (Meta)
+ printAllocDeallocTraces(AccessPtr, Meta, Printf, PrintBacktrace);
+}
+
+TLS_INITIAL_EXEC
+GuardedPoolAllocator::ThreadLocalPackedVariables
+ GuardedPoolAllocator::ThreadLocals;
+} // namespace gwp_asan
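
slotToAddr() and addrToSlot() above interleave one guard page before each slot, with a trailing guard after the last. A worked sketch of that arithmetic, assuming a 4 KiB page size, single-page slots, and a hypothetical pool base:

// Worked example of the guard-page layout behind slotToAddr()/addrToSlot()
// above. Assumes PageSize == maximumAllocationSize() == 4096 for illustration.
#include <cassert>
#include <cstddef>
#include <cstdint>

constexpr uintptr_t Pool = 0x10000000; // Hypothetical pool base (a guard page).
constexpr uintptr_t PageSize = 4096;
constexpr uintptr_t SlotSize = 4096;   // maximumAllocationSize()

uintptr_t SlotToAddr(size_t N) {
  return Pool + PageSize * (1 + N) + SlotSize * N; // Skip N+1 guards, N slots.
}

size_t AddrToSlot(uintptr_t Ptr) {
  return (Ptr - Pool) / (SlotSize + PageSize); // One guard+slot pair per slot.
}

int main() {
  assert(SlotToAddr(0) == Pool + 0x1000);     // Guard page, then slot 0.
  assert(SlotToAddr(1) == Pool + 0x3000);     // Guard, slot, guard, slot 1.
  assert(AddrToSlot(SlotToAddr(1) + 8) == 1); // Interior pointers map back.
  return 0;
}
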
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/gwp_asan/guarded_pool_allocator.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/gwp_asan/guarded_pool_allocator.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/gwp_asan/guarded_pool_allocator.h (revision 351984)
@@ -0,0 +1,265 @@
+//===-- guarded_pool_allocator.h --------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef GWP_ASAN_GUARDED_POOL_ALLOCATOR_H_
+#define GWP_ASAN_GUARDED_POOL_ALLOCATOR_H_
+
+#include "gwp_asan/definitions.h"
+#include "gwp_asan/mutex.h"
+#include "gwp_asan/options.h"
+#include "gwp_asan/random.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+namespace gwp_asan {
+// This class is the primary implementation of the allocator portion of GWP-
+// ASan. It is the sole owner of the pool of sequentially allocated guarded
+// slots. It should always be treated as a singleton.
+
+// Functions in the public interface of this class are thread-compatible until
+// init() is called, at which point they become thread-safe (unless specified
+// otherwise).
+class GuardedPoolAllocator {
+public:
+ static constexpr uint64_t kInvalidThreadID = UINT64_MAX;
+
+ enum class Error {
+ UNKNOWN,
+ USE_AFTER_FREE,
+ DOUBLE_FREE,
+ INVALID_FREE,
+ BUFFER_OVERFLOW,
+ BUFFER_UNDERFLOW
+ };
+
+ struct AllocationMetadata {
+ // Maximum number of stack trace frames to collect for allocations + frees.
+ // TODO(hctim): Implement stack frame compression, a-la Chromium.
+ static constexpr size_t kMaximumStackFrames = 64;
+
+ // Records the given allocation metadata into this struct.
+ void RecordAllocation(uintptr_t Addr, size_t Size,
+ options::Backtrace_t Backtrace);
+
+ // Record that this allocation is now deallocated.
+ void RecordDeallocation(options::Backtrace_t Backtrace);
+
+ struct CallSiteInfo {
+ // The backtrace to the allocation/deallocation. If the first value is
+ // zero, we did not collect a trace.
+ uintptr_t Trace[kMaximumStackFrames] = {};
+ // The thread ID for this trace, or kInvalidThreadID if not available.
+ uint64_t ThreadID = kInvalidThreadID;
+ };
+
+ // The address of this allocation.
+ uintptr_t Addr = 0;
+ // Represents the actual size of the allocation.
+ size_t Size = 0;
+
+ CallSiteInfo AllocationTrace;
+ CallSiteInfo DeallocationTrace;
+
+ // Whether this allocation has been deallocated yet.
+ bool IsDeallocated = false;
+ };
+
+ // During program startup, we must ensure that memory allocations do not land
+ // in this allocation pool if the allocator decides to runtime-disable
+ // GWP-ASan. The constructor value-initialises the class such that if no
+ // further initialisation takes place, calls to shouldSample() and
+ // pointerIsMine() will return false.
+  constexpr GuardedPoolAllocator() {}
+ GuardedPoolAllocator(const GuardedPoolAllocator &) = delete;
+ GuardedPoolAllocator &operator=(const GuardedPoolAllocator &) = delete;
+
+ // Note: This class is expected to be a singleton for the lifetime of the
+ // program. If this object is initialised, it will leak the guarded page pool
+ // and metadata allocations during destruction. We can't clean up these areas
+ // as this may cause a use-after-free on shutdown.
+ ~GuardedPoolAllocator() = default;
+
+ // Initialise the rest of the members of this class. Create the allocation
+ // pool using the provided options. See options.inc for runtime configuration
+ // options.
+ void init(const options::Options &Opts);
+
+ // Return whether the allocation should be randomly chosen for sampling.
+ ALWAYS_INLINE bool shouldSample() {
+ // NextSampleCounter == 0 means we "should regenerate the counter".
+ // == 1 means we "should sample this allocation".
+ if (UNLIKELY(ThreadLocals.NextSampleCounter == 0))
+ ThreadLocals.NextSampleCounter =
+ (getRandomUnsigned32() % AdjustedSampleRate) + 1;
+
+ return UNLIKELY(--ThreadLocals.NextSampleCounter == 0);
+ }
+
+ // Returns whether the provided pointer is a current sampled allocation that
+ // is owned by this pool.
+ ALWAYS_INLINE bool pointerIsMine(const void *Ptr) const {
+ uintptr_t P = reinterpret_cast<uintptr_t>(Ptr);
+ return GuardedPagePool <= P && P < GuardedPagePoolEnd;
+ }
+
+ // Allocate memory in a guarded slot, and return a pointer to the new
+ // allocation. Returns nullptr if the pool is empty, the requested size is too
+ // large for this pool to handle, or the requested size is zero.
+ void *allocate(size_t Size);
+
+ // Deallocate memory in a guarded slot. The provided pointer must have been
+ // allocated using this pool. This will set the guarded slot as inaccessible.
+ void deallocate(void *Ptr);
+
+ // Returns the size of the allocation at Ptr.
+ size_t getSize(const void *Ptr);
+
+ // Returns the largest allocation that is supported by this pool. Any
+ // allocations larger than this should go to the regular system allocator.
+ size_t maximumAllocationSize() const;
+
+ // Dumps an error report (including allocation and deallocation stack traces).
+ // An optional error may be provided if the caller knows what the error is
+ // ahead of time. This is primarily a helper function to locate the static
+ // singleton pointer and call the internal version of this function. This
+ // method is never thread safe, and should only be called when fatal errors
+ // occur.
+ static void reportError(uintptr_t AccessPtr, Error E = Error::UNKNOWN);
+
+ // Get the current thread ID, or kInvalidThreadID if failure. Note: This
+ // implementation is platform-specific.
+ static uint64_t getThreadID();
+
+private:
+ static constexpr size_t kInvalidSlotID = SIZE_MAX;
+
+ // These functions anonymously map memory or change the permissions of mapped
+ // memory into this process in a platform-specific way. Pointer and size
+ // arguments are expected to be page-aligned. These functions will never
+ // return on error, instead electing to kill the calling process on failure.
+  // Note that memory is initially mapped inaccessible. To obtain RW
+ // mappings, call mapMemory() followed by markReadWrite() on the returned
+ // pointer.
+ void *mapMemory(size_t Size) const;
+ void markReadWrite(void *Ptr, size_t Size) const;
+ void markInaccessible(void *Ptr, size_t Size) const;
+
+ // Get the page size from the platform-specific implementation. Only needs to
+ // be called once, and the result should be cached in PageSize in this class.
+ static size_t getPlatformPageSize();
+
+ // Install the SIGSEGV crash handler for printing use-after-free and heap-
+ // buffer-{under|over}flow exceptions. This is platform specific as even
+ // though POSIX and Windows both support registering handlers through
+ // signal(), we have to use platform-specific signal handlers to obtain the
+ // address that caused the SIGSEGV exception.
+ static void installSignalHandlers();
+
+ // Returns the index of the slot that this pointer resides in. If the pointer
+ // is not owned by this pool, the result is undefined.
+ size_t addrToSlot(uintptr_t Ptr) const;
+
+ // Returns the address of the N-th guarded slot.
+ uintptr_t slotToAddr(size_t N) const;
+
+ // Returns a pointer to the metadata for the owned pointer. If the pointer is
+ // not owned by this pool, the result is undefined.
+ AllocationMetadata *addrToMetadata(uintptr_t Ptr) const;
+
+ // Returns the address of the page that this pointer resides in.
+ uintptr_t getPageAddr(uintptr_t Ptr) const;
+
+ // Gets the nearest slot to the provided address.
+ size_t getNearestSlot(uintptr_t Ptr) const;
+
+ // Returns whether the provided pointer is a guard page or not. The pointer
+ // must be within memory owned by this pool, else the result is undefined.
+ bool isGuardPage(uintptr_t Ptr) const;
+
+ // Reserve a slot for a new guarded allocation. Returns kInvalidSlotID if no
+ // slot is available to be reserved.
+ size_t reserveSlot();
+
+ // Unreserve the guarded slot.
+ void freeSlot(size_t SlotIndex);
+
+ // Returns the offset (in bytes) between the start of a guarded slot and where
+ // the start of the allocation should take place. Determined using the size of
+ // the allocation and the options provided at init-time.
+ uintptr_t allocationSlotOffset(size_t AllocationSize) const;
+
+ // Returns the diagnosis for an unknown error. If the diagnosis is not
+ // Error::INVALID_FREE or Error::UNKNOWN, the metadata for the slot
+ // responsible for the error is placed in *Meta.
+ Error diagnoseUnknownError(uintptr_t AccessPtr, AllocationMetadata **Meta);
+
+ void reportErrorInternal(uintptr_t AccessPtr, Error E);
+
+ // Cached page size for this system in bytes.
+ size_t PageSize = 0;
+
+ // A mutex to protect the guarded slot and metadata pool for this class.
+ Mutex PoolMutex;
+ // The number of guarded slots that this pool holds.
+ size_t MaxSimultaneousAllocations = 0;
+ // Records the number of allocations that we've sampled. We store this count
+ // so that we don't randomly choose to recycle a slot that previously held an
+ // allocation before all the slots have been utilised.
+ size_t NumSampledAllocations = 0;
+ // Pointer to the pool of guarded slots. Note that this points to the start
+ // of the pool (which is a guard page), not to the first guarded page.
+ uintptr_t GuardedPagePool = UINTPTR_MAX;
+ uintptr_t GuardedPagePoolEnd = 0;
+ // Pointer to the allocation metadata (allocation/deallocation stack traces),
+ // if any.
+ AllocationMetadata *Metadata = nullptr;
+
+ // Pointer to an array of free slot indexes.
+ size_t *FreeSlots = nullptr;
+ // The current length of the list of free slots.
+ size_t FreeSlotsLength = 0;
+
+ // See options.{h, inc} for more information.
+ bool PerfectlyRightAlign = false;
+
+ // Printf function supplied by the implementing allocator. We can't (in
+ // general) use printf() from the cstdlib as it may malloc(), causing infinite
+ // recursion.
+ options::Printf_t Printf = nullptr;
+ options::Backtrace_t Backtrace = nullptr;
+ options::PrintBacktrace_t PrintBacktrace = nullptr;
+
+ // The adjusted sample rate for allocation sampling. Default *must* be
+ // nonzero, as dynamic initialisation may call malloc (e.g. from libstdc++)
+ // before GPA::init() is called. This would cause an error in shouldSample(),
+ // where we would calculate modulo zero. This value is set to UINT32_MAX, as
+ // when GWP-ASan is disabled, we never want to waste cycles recalculating
+ // the sample rate.
+ uint32_t AdjustedSampleRate = UINT32_MAX;
+
+ // Pack the thread local variables into a struct to ensure that they're in
+ // the same cache line for performance reasons. These are the most touched
+ // variables in GWP-ASan.
+ struct alignas(8) ThreadLocalPackedVariables {
+ constexpr ThreadLocalPackedVariables() {}
+ // Thread-local decrementing counter that indicates that a given allocation
+ // should be sampled when it reaches zero.
+ uint32_t NextSampleCounter = 0;
+ // Guard against recursion. Unwinders often contain complex behaviour that
+ // may not be safe for the allocator (i.e. the unwinder calls dlopen(),
+ // which calls malloc()). When recursive behaviour is detected, we will
+ // automatically fall back to the supporting allocator to supply the
+ // allocation.
+ bool RecursiveGuard = false;
+ };
+ static TLS_INITIAL_EXEC ThreadLocalPackedVariables ThreadLocals;
+};
+} // namespace gwp_asan
+
+#endif // GWP_ASAN_GUARDED_POOL_ALLOCATOR_H_
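The thread-local NextSampleCounter and the AdjustedSampleRate field above drive the sampling decision. A minimal sketch of the decrementing-counter pattern they describe (the helper name is hypothetical; the imported shouldSample() may differ in detail):

#include <cstdint>

namespace gwp_asan {
uint32_t getRandomUnsigned32(); // Declared in gwp_asan/random.h (see below).

// Illustrative re-arm value; the class defaults to UINT32_MAX so that a
// malloc() arriving before init() never computes modulo zero.
static uint32_t AdjustedSampleRate = 5000;
static thread_local uint32_t NextSampleCounter = 0;

bool shouldSampleSketch() {
  if (NextSampleCounter == 0) {
    // Arm the countdown with a random point in [1, AdjustedSampleRate] so
    // sampling is not strictly periodic.
    NextSampleCounter = (getRandomUnsigned32() % AdjustedSampleRate) + 1;
  }
  // Sample exactly when the countdown reaches zero; the next call re-arms it.
  return --NextSampleCounter == 0;
}
} // namespace gwp_asan

Since the armed cycle length is uniform in [1, N], this samples at a rate of roughly 2/(N+1) rather than 1/N, which is presumably why the class stores an adjusted rate instead of the raw SampleRate option.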
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/gwp_asan/mutex.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/gwp_asan/mutex.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/gwp_asan/mutex.h (revision 351984)
@@ -0,0 +1,50 @@
+//===-- mutex.h -------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef GWP_ASAN_MUTEX_H_
+#define GWP_ASAN_MUTEX_H_
+
+#ifdef __unix__
+#include <pthread.h>
+#else
+#error "GWP-ASan is not supported on this platform."
+#endif
+
+namespace gwp_asan {
+class Mutex {
+public:
+ constexpr Mutex() = default;
+ ~Mutex() = default;
+ Mutex(const Mutex &) = delete;
+ Mutex &operator=(const Mutex &) = delete;
+ // Lock the mutex.
+ void lock();
+ // Nonblocking trylock of the mutex. Returns true if the lock was acquired.
+ bool tryLock();
+ // Unlock the mutex.
+ void unlock();
+
+private:
+#ifdef __unix__
+ pthread_mutex_t Mu = PTHREAD_MUTEX_INITIALIZER;
+#endif // defined(__unix__)
+};
+
+class ScopedLock {
+public:
+ explicit ScopedLock(Mutex &Mx) : Mu(Mx) { Mu.lock(); }
+ ~ScopedLock() { Mu.unlock(); }
+ ScopedLock(const ScopedLock &) = delete;
+ ScopedLock &operator=(const ScopedLock &) = delete;
+
+private:
+ Mutex &Mu;
+};
+} // namespace gwp_asan
+
+#endif // GWP_ASAN_MUTEX_H_
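ScopedLock above is a minimal RAII guard over Mutex. A usage sketch (the function name is hypothetical):

#include "gwp_asan/mutex.h"

gwp_asan::Mutex PoolMutex;

void updatePoolStateSketch() {
  // PoolMutex is acquired here and released automatically when L goes out of
  // scope, including on early return.
  gwp_asan::ScopedLock L(PoolMutex);
  // ... mutate state protected by PoolMutex ...
}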
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/gwp_asan/optional/backtrace.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/gwp_asan/optional/backtrace.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/gwp_asan/optional/backtrace.h (revision 351984)
@@ -0,0 +1,23 @@
+//===-- backtrace.h ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef GWP_ASAN_OPTIONAL_BACKTRACE_H_
+#define GWP_ASAN_OPTIONAL_BACKTRACE_H_
+
+#include "gwp_asan/options.h"
+
+namespace gwp_asan {
+namespace options {
+// Functions to get the platform-specific and implementation-specific backtrace
+// and backtrace printing functions.
+Backtrace_t getBacktraceFunction();
+PrintBacktrace_t getPrintBacktraceFunction();
+} // namespace options
+} // namespace gwp_asan
+
+#endif // GWP_ASAN_OPTIONAL_BACKTRACE_H_
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/gwp_asan/optional/backtrace_linux_libc.cpp
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/gwp_asan/optional/backtrace_linux_libc.cpp (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/gwp_asan/optional/backtrace_linux_libc.cpp (revision 351984)
@@ -0,0 +1,64 @@
+//===-- backtrace_linux_libc.cpp --------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include <assert.h>
+#include <execinfo.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "gwp_asan/optional/backtrace.h"
+#include "gwp_asan/options.h"
+
+namespace {
+void Backtrace(uintptr_t *TraceBuffer, size_t Size) {
+ // Grab one more frame than we need: TraceBuffer must be null-terminated, and
+ // we wish to remove the frame of this function call itself.
+ static_assert(sizeof(uintptr_t) == sizeof(void *), "uintptr_t is not void*");
+ int NumTraces = backtrace(reinterpret_cast<void **>(TraceBuffer), Size);
+
+ // Now shift the entire trace one place to the left and null-terminate. Only
+ // NumTraces - 1 entries are meaningful once this function's own frame is
+ // dropped; the slot after them receives the terminator.
+ memmove(TraceBuffer, TraceBuffer + 1, (NumTraces - 1) * sizeof(void *));
+ TraceBuffer[NumTraces - 1] = 0;
+}
+
+static void PrintBacktrace(uintptr_t *Trace,
+ gwp_asan::options::Printf_t Printf) {
+ size_t NumTraces = 0;
+ for (; Trace[NumTraces] != 0; ++NumTraces) {
+ }
+
+ if (NumTraces == 0) {
+ Printf(" <not found (does your allocator support backtracing?)>\n\n");
+ return;
+ }
+
+ char **BacktraceSymbols =
+ backtrace_symbols(reinterpret_cast<void **>(Trace), NumTraces);
+
+ for (size_t i = 0; i < NumTraces; ++i) {
+ if (!BacktraceSymbols)
+ Printf(" #%zu %p\n", i, Trace[i]);
+ else
+ Printf(" #%zu %s\n", i, BacktraceSymbols[i]);
+ }
+
+ Printf("\n");
+ if (BacktraceSymbols)
+ free(BacktraceSymbols);
+}
+} // anonymous namespace
+
+namespace gwp_asan {
+namespace options {
+Backtrace_t getBacktraceFunction() { return Backtrace; }
+PrintBacktrace_t getPrintBacktraceFunction() { return PrintBacktrace; }
+} // namespace options
+} // namespace gwp_asan
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/gwp_asan/optional/backtrace_sanitizer_common.cpp
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/gwp_asan/optional/backtrace_sanitizer_common.cpp (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/gwp_asan/optional/backtrace_sanitizer_common.cpp (revision 351984)
@@ -0,0 +1,69 @@
+//===-- backtrace_sanitizer_common.cpp --------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include <assert.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+
+#include "gwp_asan/optional/backtrace.h"
+#include "gwp_asan/options.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+
+void __sanitizer::BufferedStackTrace::UnwindImpl(uptr pc, uptr bp,
+ void *context,
+ bool request_fast,
+ u32 max_depth) {
+ if (!StackTrace::WillUseFastUnwind(request_fast)) {
+ return Unwind(max_depth, pc, bp, context, 0, 0, request_fast);
+ }
+ Unwind(max_depth, pc, 0, context, 0, 0, false);
+}
+
+namespace {
+void Backtrace(uintptr_t *TraceBuffer, size_t Size) {
+ __sanitizer::BufferedStackTrace Trace;
+ Trace.Reset();
+ if (Size > __sanitizer::kStackTraceMax)
+ Size = __sanitizer::kStackTraceMax;
+
+ Trace.Unwind((__sanitizer::uptr)__builtin_return_address(0),
+ (__sanitizer::uptr)__builtin_frame_address(0),
+ /* ucontext */ nullptr,
+ /* fast unwind */ true, Size - 1);
+
+ memcpy(TraceBuffer, Trace.trace, Trace.size * sizeof(uintptr_t));
+ TraceBuffer[Trace.size] = 0;
+}
+
+static void PrintBacktrace(uintptr_t *Trace,
+ gwp_asan::options::Printf_t Printf) {
+ __sanitizer::StackTrace StackTrace;
+ StackTrace.trace = reinterpret_cast<__sanitizer::uptr *>(Trace);
+
+ for (StackTrace.size = 0; StackTrace.size < __sanitizer::kStackTraceMax;
+ ++StackTrace.size) {
+ if (Trace[StackTrace.size] == 0)
+ break;
+ }
+
+ if (StackTrace.size == 0) {
+ Printf(" <unknown (does your allocator support backtracing?)>\n\n");
+ return;
+ }
+
+ StackTrace.Print();
+}
+} // anonymous namespace
+
+namespace gwp_asan {
+namespace options {
+Backtrace_t getBacktraceFunction() { return Backtrace; }
+PrintBacktrace_t getPrintBacktraceFunction() { return PrintBacktrace; }
+} // namespace options
+} // namespace gwp_asan
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/gwp_asan/optional/options_parser.cpp
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/gwp_asan/optional/options_parser.cpp (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/gwp_asan/optional/options_parser.cpp (revision 351984)
@@ -0,0 +1,93 @@
+//===-- options_parser.cpp --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "gwp_asan/optional/options_parser.h"
+
+#include <stdarg.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "gwp_asan/options.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_flag_parser.h"
+#include "sanitizer_common/sanitizer_flags.h"
+
+namespace gwp_asan {
+namespace options {
+namespace {
+void registerGwpAsanFlags(__sanitizer::FlagParser *parser, Options *o) {
+#define GWP_ASAN_OPTION(Type, Name, DefaultValue, Description) \
+ RegisterFlag(parser, #Name, Description, &o->Name);
+#include "gwp_asan/options.inc"
+#undef GWP_ASAN_OPTION
+}
+
+const char *getCompileDefinitionGwpAsanDefaultOptions() {
+#ifdef GWP_ASAN_DEFAULT_OPTIONS
+ return SANITIZER_STRINGIFY(GWP_ASAN_DEFAULT_OPTIONS);
+#else
+ return "";
+#endif
+}
+
+const char *getGwpAsanDefaultOptions() {
+ return (__gwp_asan_default_options) ? __gwp_asan_default_options() : "";
+}
+
+Options *getOptionsInternal() {
+ static Options GwpAsanFlags;
+ return &GwpAsanFlags;
+}
+} // anonymous namespace
+
+void initOptions() {
+ __sanitizer::SetCommonFlagsDefaults();
+
+ Options *o = getOptionsInternal();
+ o->setDefaults();
+
+ __sanitizer::FlagParser Parser;
+ registerGwpAsanFlags(&Parser, o);
+
+ // Override from compile definition.
+ Parser.ParseString(getCompileDefinitionGwpAsanDefaultOptions());
+
+ // Override from user-specified string.
+ Parser.ParseString(getGwpAsanDefaultOptions());
+
+ // Override from environment.
+ Parser.ParseString(__sanitizer::GetEnv("GWP_ASAN_OPTIONS"));
+
+ __sanitizer::InitializeCommonFlags();
+ if (__sanitizer::Verbosity())
+ __sanitizer::ReportUnrecognizedFlags();
+
+ if (!o->Enabled)
+ return;
+
+ // Sanity checks for the parameters.
+ if (o->MaxSimultaneousAllocations <= 0) {
+ __sanitizer::Printf("GWP-ASan ERROR: MaxSimultaneousAllocations must be > "
+ "0 when GWP-ASan is enabled.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ if (o->SampleRate < 1) {
+ __sanitizer::Printf(
+ "GWP-ASan ERROR: SampleRate must be > 0 when GWP-ASan is enabled.\n");
+ exit(EXIT_FAILURE);
+ }
+
+ o->Printf = __sanitizer::Printf;
+}
+
+Options &getOptions() { return *getOptionsInternal(); }
+
+} // namespace options
+} // namespace gwp_asan
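initOptions() above applies settings in increasing precedence: the GWP_ASAN_DEFAULT_OPTIONS compile definition first, then the weak __gwp_asan_default_options() hook (declared in options_parser.h below), then the GWP_ASAN_OPTIONS environment variable, which parses last and therefore wins. A sketch of the hook an embedding program could provide (the option values are illustrative, not recommendations):

extern "C" const char *__gwp_asan_default_options() {
  // Option names come from gwp_asan/options.inc; ':' separates flags, as with
  // the other sanitizers' option strings.
  return "SampleRate=1000:MaxSimultaneousAllocations=32";
}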
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/gwp_asan/optional/options_parser.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/gwp_asan/optional/options_parser.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/gwp_asan/optional/options_parser.h (revision 351984)
@@ -0,0 +1,31 @@
+//===-- options_parser.h ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef GWP_ASAN_OPTIONAL_OPTIONS_PARSER_H_
+#define GWP_ASAN_OPTIONAL_OPTIONS_PARSER_H_
+
+#include "gwp_asan/optional/backtrace.h"
+#include "gwp_asan/options.h"
+#include "sanitizer_common/sanitizer_common.h"
+
+namespace gwp_asan {
+namespace options {
+// Parse the options from the GWP_ASAN_OPTIONS environment variable.
+void initOptions();
+// Returns the initialised options. Call initOptions() prior to calling this
+// function.
+Options &getOptions();
+} // namespace options
+} // namespace gwp_asan
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE const char *
+__gwp_asan_default_options();
+}
+
+#endif // GWP_ASAN_OPTIONAL_OPTIONS_PARSER_H_
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/gwp_asan/options.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/gwp_asan/options.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/gwp_asan/options.h (revision 351984)
@@ -0,0 +1,59 @@
+//===-- options.h -----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef GWP_ASAN_OPTIONS_H_
+#define GWP_ASAN_OPTIONS_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+namespace gwp_asan {
+namespace options {
+// The function pointer type for printf(). Follows the standard format from the
+// sanitizers library. If the supporting allocator exposes printing via a
+// different function signature, please provide a wrapper which has this
+// printf() signature, and pass the wrapper instead.
+typedef void (*Printf_t)(const char *Format, ...);
+
+// The function pointer type for backtrace information. Required to be
+// implemented by the supporting allocator. The callee should elide itself and
+// all frames below itself from TraceBuffer, i.e. the caller's frame should be
+// in TraceBuffer[0], and subsequent frames 1..n into TraceBuffer[1..n], where a
+// maximum of `MaximumDepth - 1` frames are stored. TraceBuffer should be
+// nullptr-terminated (i.e. if there are 5 frames; TraceBuffer[5] == nullptr).
+// If the allocator cannot supply backtrace information, it should set
+// TraceBuffer[0] == nullptr.
+typedef void (*Backtrace_t)(uintptr_t *TraceBuffer, size_t Size);
+typedef void (*PrintBacktrace_t)(uintptr_t *TraceBuffer, Printf_t Print);
+
+struct Options {
+ Printf_t Printf = nullptr;
+ Backtrace_t Backtrace = nullptr;
+ PrintBacktrace_t PrintBacktrace = nullptr;
+
+ // Read the options from the included definitions file.
+#define GWP_ASAN_OPTION(Type, Name, DefaultValue, Description) \
+ Type Name = DefaultValue;
+#include "gwp_asan/options.inc"
+#undef GWP_ASAN_OPTION
+
+ void setDefaults() {
+#define GWP_ASAN_OPTION(Type, Name, DefaultValue, Description) \
+ Name = DefaultValue;
+#include "gwp_asan/options.inc"
+#undef GWP_ASAN_OPTION
+
+ Printf = nullptr;
+ Backtrace = nullptr;
+ PrintBacktrace = nullptr;
+ }
+};
+} // namespace options
+} // namespace gwp_asan
+
+#endif // GWP_ASAN_OPTIONS_H_
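The GWP_ASAN_OPTION X-macro above is defined differently at each expansion site. For example, the SampleRate entry in options.inc (below) expands, under the member-declaration definition inside struct Options, to:

// GWP_ASAN_OPTION(int, SampleRate, 5000, "...") becomes:
int SampleRate = 5000;
// and inside setDefaults() the same entry becomes:
SampleRate = 5000;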
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/gwp_asan/options.inc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/gwp_asan/options.inc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/gwp_asan/options.inc (revision 351984)
@@ -0,0 +1,41 @@
+//===-- options.inc ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef GWP_ASAN_OPTION
+#error "Define GWP_ASAN_OPTION prior to including this file!"
+#endif
+
+GWP_ASAN_OPTION(bool, Enabled, true, "Is GWP-ASan enabled? Defaults to true.")
+
+GWP_ASAN_OPTION(
+ bool, PerfectlyRightAlign, false,
+ "When allocations are right-aligned, should we perfectly align them up to "
+ "the page boundary? By default (false), we round up allocation size to the "
+ "nearest power of two (1, 2, 4, 8, 16) up to a maximum of 16-byte "
+ "alignment for performance reasons. Setting this to true can find single "
+ "byte buffer-overflows for multibyte allocations at the cost of "
+ "performance, and may be incompatible with some architectures.")
+
+GWP_ASAN_OPTION(
+ int, MaxSimultaneousAllocations, 16,
+ "Number of usable guarded slots in the allocation pool. Defaults to 16.")
+
+GWP_ASAN_OPTION(int, SampleRate, 5000,
+ "The probability (1 / SampleRate) that an allocation is "
+ "selected for GWP-ASan sampling. Default is 5000. Sample rates "
+ "up to (2^31 - 1) are supported.")
+
+GWP_ASAN_OPTION(
+ bool, InstallSignalHandlers, true,
+ "Install GWP-ASan signal handlers for SIGSEGV during dynamic loading. This "
+ "allows better error reports by providing stack traces for allocation and "
+ "deallocation when reporting a memory error. GWP-ASan's signal handler "
+ "will forward the signal to any previously-installed handler, and user "
+ "programs that install further signal handlers should make sure they do "
+ "the same. Note, if the previously installed SIGSEGV handler is SIG_IGN, "
+ "we terminate the process after dumping the error report.")
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/gwp_asan/platform_specific/guarded_pool_allocator_posix.cpp
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/gwp_asan/platform_specific/guarded_pool_allocator_posix.cpp (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/gwp_asan/platform_specific/guarded_pool_allocator_posix.cpp (revision 351984)
@@ -0,0 +1,96 @@
+//===-- guarded_pool_allocator_posix.cpp ------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "gwp_asan/guarded_pool_allocator.h"
+
+#include <stdlib.h>
+#include <errno.h>
+#include <signal.h>
+#include <sys/mman.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+namespace gwp_asan {
+
+void *GuardedPoolAllocator::mapMemory(size_t Size) const {
+ void *Ptr =
+ mmap(nullptr, Size, PROT_NONE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+
+ if (Ptr == MAP_FAILED) {
+ Printf("Failed to map guarded pool allocator memory, errno: %d\n", errno);
+ Printf(" mmap(nullptr, %zu, ...) failed.\n", Size);
+ exit(EXIT_FAILURE);
+ }
+ return Ptr;
+}
+
+void GuardedPoolAllocator::markReadWrite(void *Ptr, size_t Size) const {
+ if (mprotect(Ptr, Size, PROT_READ | PROT_WRITE) != 0) {
+ Printf("Failed to set guarded pool allocator memory at as RW, errno: %d\n",
+ errno);
+ Printf(" mprotect(%p, %zu, RW) failed.\n", Ptr, Size);
+ exit(EXIT_FAILURE);
+ }
+}
+
+void GuardedPoolAllocator::markInaccessible(void *Ptr, size_t Size) const {
+ // mmap() a PROT_NONE page over the address to release it to the system. If
+ // we used mprotect() here, the system would count pages in the quarantine
+ // against the RSS.
+ if (mmap(Ptr, Size, PROT_NONE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1,
+ 0) == MAP_FAILED) {
+ Printf("Failed to set guarded pool allocator memory as inaccessible, "
+ "errno: %d\n",
+ errno);
+ Printf(" mmap(%p, %zu, NONE, ...) failed.\n", Ptr, Size);
+ exit(EXIT_FAILURE);
+ }
+}
+
+size_t GuardedPoolAllocator::getPlatformPageSize() {
+ return sysconf(_SC_PAGESIZE);
+}
+
+struct sigaction PreviousHandler;
+
+static void sigSegvHandler(int sig, siginfo_t *info, void *ucontext) {
+ gwp_asan::GuardedPoolAllocator::reportError(
+ reinterpret_cast<uintptr_t>(info->si_addr));
+
+ // Process any previous handlers.
+ if (PreviousHandler.sa_flags & SA_SIGINFO) {
+ PreviousHandler.sa_sigaction(sig, info, ucontext);
+ } else if (PreviousHandler.sa_handler == SIG_IGN ||
+ PreviousHandler.sa_handler == SIG_DFL) {
+ // If the previous handler was the default handler, or was ignoring this
+ // signal, install the default handler and re-raise the signal in order to
+ // get a core dump and terminate this process.
+ signal(SIGSEGV, SIG_DFL);
+ raise(SIGSEGV);
+ } else {
+ PreviousHandler.sa_handler(sig);
+ }
+}
+
+void GuardedPoolAllocator::installSignalHandlers() {
+ struct sigaction Action = {}; // Zero-initialise sa_mask and the other fields.
+ Action.sa_sigaction = sigSegvHandler;
+ Action.sa_flags = SA_SIGINFO;
+ sigaction(SIGSEGV, &Action, &PreviousHandler);
+}
+
+uint64_t GuardedPoolAllocator::getThreadID() {
+#ifdef SYS_gettid
+ return syscall(SYS_gettid);
+#else
+ return kInvalidThreadID;
+#endif
+}
+
+} // namespace gwp_asan
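The three functions above implement a reserve-then-commit pattern: the whole pool is reserved PROT_NONE, individual slots are flipped to read-write when an allocation lands in them, and a fresh PROT_NONE mapping is placed over a slot when it is freed. A standalone sketch of the same POSIX calls (error handling elided; sizes illustrative):

#include <stddef.h>
#include <sys/mman.h>
#include <unistd.h>

int main() {
  size_t PageSize = sysconf(_SC_PAGESIZE);
  size_t PoolSize = 4 * PageSize;

  // Reserve the pool inaccessible: any stray access faults immediately.
  void *Pool = mmap(nullptr, PoolSize, PROT_NONE,
                    MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);

  // Commit a single slot for an allocation.
  char *Slot = static_cast<char *>(Pool) + PageSize;
  mprotect(Slot, PageSize, PROT_READ | PROT_WRITE);

  // On deallocation, map a fresh PROT_NONE page over the slot so the
  // quarantined page stops counting against RSS (see markInaccessible above).
  mmap(Slot, PageSize, PROT_NONE,
       MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
  munmap(Pool, PoolSize);
  return 0;
}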
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/gwp_asan/platform_specific/mutex_posix.cpp
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/gwp_asan/platform_specific/mutex_posix.cpp (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/gwp_asan/platform_specific/mutex_posix.cpp (revision 351984)
@@ -0,0 +1,30 @@
+//===-- mutex_posix.cpp -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "gwp_asan/mutex.h"
+
+#include <assert.h>
+#include <pthread.h>
+
+namespace gwp_asan {
+void Mutex::lock() {
+ int Status = pthread_mutex_lock(&Mu);
+ assert(Status == 0);
+ // Remove warning for non-debug builds.
+ (void)Status;
+}
+
+bool Mutex::tryLock() { return pthread_mutex_trylock(&Mu) == 0; }
+
+void Mutex::unlock() {
+ int Status = pthread_mutex_unlock(&Mu);
+ assert(Status == 0);
+ // Remove warning for non-debug builds.
+ (void)Status;
+}
+} // namespace gwp_asan
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/gwp_asan/random.cpp
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/gwp_asan/random.cpp (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/gwp_asan/random.cpp (revision 351984)
@@ -0,0 +1,23 @@
+//===-- random.cpp ----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "gwp_asan/random.h"
+#include "gwp_asan/guarded_pool_allocator.h"
+
+#include <time.h>
+
+namespace gwp_asan {
+uint32_t getRandomUnsigned32() {
+ thread_local uint32_t RandomState =
+ time(nullptr) + GuardedPoolAllocator::getThreadID();
+ RandomState ^= RandomState << 13;
+ RandomState ^= RandomState >> 17;
+ RandomState ^= RandomState << 5;
+ return RandomState;
+}
+} // namespace gwp_asan
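The shift triple (13, 17, 5) above is Marsaglia's classic xorshift32. One property worth noting: a state of zero is a fixed point of the recurrence, so the seed must be nonzero; the walltime-plus-thread-ID seed makes a zero state vanishingly unlikely. A usage sketch (the helper name and modulo draw are illustrative, not the allocator's exact call site):

#include <stddef.h>
#include "gwp_asan/random.h"

// Sketch: draw a bounded value, e.g. to pick one of NumFreeSlots at random.
// NumFreeSlots is assumed to be nonzero.
size_t pickSlotSketch(size_t NumFreeSlots) {
  return gwp_asan::getRandomUnsigned32() % NumFreeSlots;
}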
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/gwp_asan/random.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/gwp_asan/random.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/gwp_asan/random.h (revision 351984)
@@ -0,0 +1,20 @@
+//===-- random.h ------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef GWP_ASAN_RANDOM_H_
+#define GWP_ASAN_RANDOM_H_
+
+#include <stdint.h>
+
+namespace gwp_asan {
+// xorshift (32-bit output): an extremely fast PRNG that uses only arithmetic
+// operations. Seeded using wall-clock time.
+uint32_t getRandomUnsigned32();
+} // namespace gwp_asan
+
+#endif // GWP_ASAN_RANDOM_H_
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan.cpp
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan.cpp (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan.cpp (revision 351984)
@@ -0,0 +1,499 @@
+//===-- hwasan.cpp --------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of HWAddressSanitizer.
+//
+// HWAddressSanitizer runtime.
+//===----------------------------------------------------------------------===//
+
+#include "hwasan.h"
+#include "hwasan_checks.h"
+#include "hwasan_dynamic_shadow.h"
+#include "hwasan_poisoning.h"
+#include "hwasan_report.h"
+#include "hwasan_thread.h"
+#include "hwasan_thread_list.h"
+#include "sanitizer_common/sanitizer_atomic.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_flag_parser.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_procmaps.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "sanitizer_common/sanitizer_symbolizer.h"
+#include "ubsan/ubsan_flags.h"
+#include "ubsan/ubsan_init.h"
+
+// ATTENTION! No system header includes in this file.
+
+using namespace __sanitizer;
+
+namespace __hwasan {
+
+void EnterSymbolizer() {
+ Thread *t = GetCurrentThread();
+ CHECK(t);
+ t->EnterSymbolizer();
+}
+void ExitSymbolizer() {
+ Thread *t = GetCurrentThread();
+ CHECK(t);
+ t->LeaveSymbolizer();
+}
+bool IsInSymbolizer() {
+ Thread *t = GetCurrentThread();
+ return t && t->InSymbolizer();
+}
+
+static Flags hwasan_flags;
+
+Flags *flags() {
+ return &hwasan_flags;
+}
+
+int hwasan_inited = 0;
+int hwasan_instrumentation_inited = 0;
+bool hwasan_init_is_running;
+
+int hwasan_report_count = 0;
+
+void Flags::SetDefaults() {
+#define HWASAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
+#include "hwasan_flags.inc"
+#undef HWASAN_FLAG
+}
+
+static void RegisterHwasanFlags(FlagParser *parser, Flags *f) {
+#define HWASAN_FLAG(Type, Name, DefaultValue, Description) \
+ RegisterFlag(parser, #Name, Description, &f->Name);
+#include "hwasan_flags.inc"
+#undef HWASAN_FLAG
+}
+
+static void InitializeFlags() {
+ SetCommonFlagsDefaults();
+ {
+ CommonFlags cf;
+ cf.CopyFrom(*common_flags());
+ cf.external_symbolizer_path = GetEnv("HWASAN_SYMBOLIZER_PATH");
+ cf.malloc_context_size = 20;
+ cf.handle_ioctl = true;
+ // FIXME: test and enable.
+ cf.check_printf = false;
+ cf.intercept_tls_get_addr = true;
+ cf.exitcode = 99;
+ // 8 shadow pages ~512kB, small enough to cover common stack sizes.
+ cf.clear_shadow_mmap_threshold = 4096 * (SANITIZER_ANDROID ? 2 : 8);
+ // Sigtrap is used in error reporting.
+ cf.handle_sigtrap = kHandleSignalExclusive;
+
+#if SANITIZER_ANDROID
+ // Let the platform handle other signals. It is better at reporting them
+ // than we are.
+ cf.handle_segv = kHandleSignalNo;
+ cf.handle_sigbus = kHandleSignalNo;
+ cf.handle_abort = kHandleSignalNo;
+ cf.handle_sigill = kHandleSignalNo;
+ cf.handle_sigfpe = kHandleSignalNo;
+#endif
+ OverrideCommonFlags(cf);
+ }
+
+ Flags *f = flags();
+ f->SetDefaults();
+
+ FlagParser parser;
+ RegisterHwasanFlags(&parser, f);
+ RegisterCommonFlags(&parser);
+
+#if HWASAN_CONTAINS_UBSAN
+ __ubsan::Flags *uf = __ubsan::flags();
+ uf->SetDefaults();
+
+ FlagParser ubsan_parser;
+ __ubsan::RegisterUbsanFlags(&ubsan_parser, uf);
+ RegisterCommonFlags(&ubsan_parser);
+#endif
+
+ // Override from user-specified string.
+ if (__hwasan_default_options)
+ parser.ParseString(__hwasan_default_options());
+#if HWASAN_CONTAINS_UBSAN
+ const char *ubsan_default_options = __ubsan::MaybeCallUbsanDefaultOptions();
+ ubsan_parser.ParseString(ubsan_default_options);
+#endif
+
+ parser.ParseStringFromEnv("HWASAN_OPTIONS");
+#if HWASAN_CONTAINS_UBSAN
+ ubsan_parser.ParseStringFromEnv("UBSAN_OPTIONS");
+#endif
+
+ InitializeCommonFlags();
+
+ if (Verbosity()) ReportUnrecognizedFlags();
+
+ if (common_flags()->help) parser.PrintFlagDescriptions();
+}
+
+static void HWAsanCheckFailed(const char *file, int line, const char *cond,
+ u64 v1, u64 v2) {
+ Report("HWAddressSanitizer CHECK failed: %s:%d \"%s\" (0x%zx, 0x%zx)\n", file,
+ line, cond, (uptr)v1, (uptr)v2);
+ PRINT_CURRENT_STACK_CHECK();
+ Die();
+}
+
+static constexpr uptr kMemoryUsageBufferSize = 4096;
+
+static void HwasanFormatMemoryUsage(InternalScopedString &s) {
+ HwasanThreadList &thread_list = hwasanThreadList();
+ auto thread_stats = thread_list.GetThreadStats();
+ auto *sds = StackDepotGetStats();
+ AllocatorStatCounters asc;
+ GetAllocatorStats(asc);
+ s.append(
+ "HWASAN pid: %d rss: %zd threads: %zd stacks: %zd"
+ " thr_aux: %zd stack_depot: %zd uniq_stacks: %zd"
+ " heap: %zd",
+ internal_getpid(), GetRSS(), thread_stats.n_live_threads,
+ thread_stats.total_stack_size,
+ thread_stats.n_live_threads * thread_list.MemoryUsedPerThread(),
+ sds->allocated, sds->n_uniq_ids, asc[AllocatorStatMapped]);
+}
+
+#if SANITIZER_ANDROID
+static char *memory_usage_buffer = nullptr;
+
+static void InitMemoryUsage() {
+ memory_usage_buffer =
+ (char *)MmapOrDie(kMemoryUsageBufferSize, "memory usage string");
+ CHECK(memory_usage_buffer);
+ memory_usage_buffer[0] = '\0';
+ DecorateMapping((uptr)memory_usage_buffer, kMemoryUsageBufferSize,
+ memory_usage_buffer);
+}
+
+void UpdateMemoryUsage() {
+ if (!flags()->export_memory_stats)
+ return;
+ if (!memory_usage_buffer)
+ InitMemoryUsage();
+ InternalScopedString s(kMemoryUsageBufferSize);
+ HwasanFormatMemoryUsage(s);
+ internal_strncpy(memory_usage_buffer, s.data(), kMemoryUsageBufferSize - 1);
+ memory_usage_buffer[kMemoryUsageBufferSize - 1] = '\0';
+}
+#else
+void UpdateMemoryUsage() {}
+#endif
+
+// Prepare to run instrumented code on the main thread.
+void InitInstrumentation() {
+ if (hwasan_instrumentation_inited) return;
+
+ if (!InitShadow()) {
+ Printf("FATAL: HWAddressSanitizer cannot mmap the shadow memory.\n");
+ DumpProcessMap();
+ Die();
+ }
+
+ InitThreads();
+ hwasanThreadList().CreateCurrentThread();
+
+ hwasan_instrumentation_inited = 1;
+}
+
+} // namespace __hwasan
+
+void __sanitizer::BufferedStackTrace::UnwindImpl(
+ uptr pc, uptr bp, void *context, bool request_fast, u32 max_depth) {
+ using namespace __hwasan;
+ Thread *t = GetCurrentThread();
+ if (!t) {
+ // The thread is still being created.
+ size = 0;
+ return;
+ }
+ if (!StackTrace::WillUseFastUnwind(request_fast)) {
+ // Block reports from our interceptors during _Unwind_Backtrace.
+ SymbolizerScope sym_scope;
+ return Unwind(max_depth, pc, bp, context, 0, 0, request_fast);
+ }
+ // The fast unwinder is guaranteed to be used at this point, so walk frame
+ // pointers within the current thread's stack bounds.
+ Unwind(max_depth, pc, bp, nullptr, t->stack_top(), t->stack_bottom(), true);
+}
+
+// Interface.
+
+using namespace __hwasan;
+
+uptr __hwasan_shadow_memory_dynamic_address; // Global interface symbol.
+
+// This function was used by the old frame descriptor mechanism. We keep it
+// around to avoid breaking ABI.
+void __hwasan_init_frames(uptr beg, uptr end) {}
+
+void __hwasan_init_static() {
+ InitShadowGOT();
+ InitInstrumentation();
+}
+
+void __hwasan_init() {
+ CHECK(!hwasan_init_is_running);
+ if (hwasan_inited) return;
+ hwasan_init_is_running = 1;
+ SanitizerToolName = "HWAddressSanitizer";
+
+ InitTlsSize();
+
+ CacheBinaryName();
+ InitializeFlags();
+
+ // Install tool-specific callbacks in sanitizer_common.
+ SetCheckFailedCallback(HWAsanCheckFailed);
+
+ __sanitizer_set_report_path(common_flags()->log_path);
+
+ AndroidTestTlsSlot();
+
+ DisableCoreDumperIfNecessary();
+
+ InitInstrumentation();
+
+ // Needs to be called here because flags()->random_tags might not have been
+ // initialized when InitInstrumentation() was called.
+ GetCurrentThread()->InitRandomState();
+
+ MadviseShadow();
+
+ SetPrintfAndReportCallback(AppendToErrorMessageBuffer);
+ // This may call libc -> needs initialized shadow.
+ AndroidLogInit();
+
+ InitializeInterceptors();
+ InstallDeadlySignalHandlers(HwasanOnDeadlySignal);
+ InstallAtExitHandler(); // Needs __cxa_atexit interceptor.
+
+ Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer);
+
+ InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir);
+
+ HwasanTSDInit();
+ HwasanTSDThreadInit();
+
+ HwasanAllocatorInit();
+
+#if HWASAN_CONTAINS_UBSAN
+ __ubsan::InitAsPlugin();
+#endif
+
+ VPrintf(1, "HWAddressSanitizer init done\n");
+
+ hwasan_init_is_running = 0;
+ hwasan_inited = 1;
+}
+
+void __hwasan_print_shadow(const void *p, uptr sz) {
+ uptr ptr_raw = UntagAddr(reinterpret_cast<uptr>(p));
+ uptr shadow_first = MemToShadow(ptr_raw);
+ uptr shadow_last = MemToShadow(ptr_raw + sz - 1);
+ Printf("HWASan shadow map for %zx .. %zx (pointer tag %x)\n", ptr_raw,
+ ptr_raw + sz, GetTagFromPointer((uptr)p));
+ for (uptr s = shadow_first; s <= shadow_last; ++s)
+ Printf(" %zx: %x\n", ShadowToMem(s), *(tag_t *)s);
+}
+
+sptr __hwasan_test_shadow(const void *p, uptr sz) {
+ if (sz == 0)
+ return -1;
+ tag_t ptr_tag = GetTagFromPointer((uptr)p);
+ uptr ptr_raw = UntagAddr(reinterpret_cast<uptr>(p));
+ uptr shadow_first = MemToShadow(ptr_raw);
+ uptr shadow_last = MemToShadow(ptr_raw + sz - 1);
+ for (uptr s = shadow_first; s <= shadow_last; ++s)
+ if (*(tag_t *)s != ptr_tag) {
+ sptr offset = ShadowToMem(s) - ptr_raw;
+ return offset < 0 ? 0 : offset;
+ }
+ return -1;
+}
+
+u16 __sanitizer_unaligned_load16(const uu16 *p) {
+ return *p;
+}
+u32 __sanitizer_unaligned_load32(const uu32 *p) {
+ return *p;
+}
+u64 __sanitizer_unaligned_load64(const uu64 *p) {
+ return *p;
+}
+void __sanitizer_unaligned_store16(uu16 *p, u16 x) {
+ *p = x;
+}
+void __sanitizer_unaligned_store32(uu32 *p, u32 x) {
+ *p = x;
+}
+void __sanitizer_unaligned_store64(uu64 *p, u64 x) {
+ *p = x;
+}
+
+void __hwasan_loadN(uptr p, uptr sz) {
+ CheckAddressSized<ErrorAction::Abort, AccessType::Load>(p, sz);
+}
+void __hwasan_load1(uptr p) {
+ CheckAddress<ErrorAction::Abort, AccessType::Load, 0>(p);
+}
+void __hwasan_load2(uptr p) {
+ CheckAddress<ErrorAction::Abort, AccessType::Load, 1>(p);
+}
+void __hwasan_load4(uptr p) {
+ CheckAddress<ErrorAction::Abort, AccessType::Load, 2>(p);
+}
+void __hwasan_load8(uptr p) {
+ CheckAddress<ErrorAction::Abort, AccessType::Load, 3>(p);
+}
+void __hwasan_load16(uptr p) {
+ CheckAddress<ErrorAction::Abort, AccessType::Load, 4>(p);
+}
+
+void __hwasan_loadN_noabort(uptr p, uptr sz) {
+ CheckAddressSized<ErrorAction::Recover, AccessType::Load>(p, sz);
+}
+void __hwasan_load1_noabort(uptr p) {
+ CheckAddress<ErrorAction::Recover, AccessType::Load, 0>(p);
+}
+void __hwasan_load2_noabort(uptr p) {
+ CheckAddress<ErrorAction::Recover, AccessType::Load, 1>(p);
+}
+void __hwasan_load4_noabort(uptr p) {
+ CheckAddress<ErrorAction::Recover, AccessType::Load, 2>(p);
+}
+void __hwasan_load8_noabort(uptr p) {
+ CheckAddress<ErrorAction::Recover, AccessType::Load, 3>(p);
+}
+void __hwasan_load16_noabort(uptr p) {
+ CheckAddress<ErrorAction::Recover, AccessType::Load, 4>(p);
+}
+
+void __hwasan_storeN(uptr p, uptr sz) {
+ CheckAddressSized<ErrorAction::Abort, AccessType::Store>(p, sz);
+}
+void __hwasan_store1(uptr p) {
+ CheckAddress<ErrorAction::Abort, AccessType::Store, 0>(p);
+}
+void __hwasan_store2(uptr p) {
+ CheckAddress<ErrorAction::Abort, AccessType::Store, 1>(p);
+}
+void __hwasan_store4(uptr p) {
+ CheckAddress<ErrorAction::Abort, AccessType::Store, 2>(p);
+}
+void __hwasan_store8(uptr p) {
+ CheckAddress<ErrorAction::Abort, AccessType::Store, 3>(p);
+}
+void __hwasan_store16(uptr p) {
+ CheckAddress<ErrorAction::Abort, AccessType::Store, 4>(p);
+}
+
+void __hwasan_storeN_noabort(uptr p, uptr sz) {
+ CheckAddressSized<ErrorAction::Recover, AccessType::Store>(p, sz);
+}
+void __hwasan_store1_noabort(uptr p) {
+ CheckAddress<ErrorAction::Recover, AccessType::Store, 0>(p);
+}
+void __hwasan_store2_noabort(uptr p) {
+ CheckAddress<ErrorAction::Recover, AccessType::Store, 1>(p);
+}
+void __hwasan_store4_noabort(uptr p) {
+ CheckAddress<ErrorAction::Recover, AccessType::Store, 2>(p);
+}
+void __hwasan_store8_noabort(uptr p) {
+ CheckAddress<ErrorAction::Recover, AccessType::Store, 3>(p);
+}
+void __hwasan_store16_noabort(uptr p) {
+ CheckAddress<ErrorAction::Recover, AccessType::Store, 4>(p);
+}
+
+void __hwasan_tag_memory(uptr p, u8 tag, uptr sz) {
+ TagMemoryAligned(p, sz, tag);
+}
+
+uptr __hwasan_tag_pointer(uptr p, u8 tag) {
+ return AddTagToPointer(p, tag);
+}
+
+void __hwasan_handle_longjmp(const void *sp_dst) {
+ uptr dst = (uptr)sp_dst;
+ // HWASan does not support tagged SP.
+ CHECK(GetTagFromPointer(dst) == 0);
+
+ uptr sp = (uptr)__builtin_frame_address(0);
+ static const uptr kMaxExpectedCleanupSize = 64 << 20; // 64M
+ if (dst < sp || dst - sp > kMaxExpectedCleanupSize) {
+ Report(
+ "WARNING: HWASan is ignoring requested __hwasan_handle_longjmp: "
+ "stack top: %p; target %p; distance: %p (%zd)\n"
+ "False positive error reports may follow\n",
+ (void *)sp, (void *)dst, dst - sp);
+ return;
+ }
+ TagMemory(sp, dst - sp, 0);
+}
+
+void __hwasan_handle_vfork(const void *sp_dst) {
+ uptr sp = (uptr)sp_dst;
+ Thread *t = GetCurrentThread();
+ CHECK(t);
+ uptr top = t->stack_top();
+ uptr bottom = t->stack_bottom();
+ if (top == 0 || bottom == 0 || sp < bottom || sp >= top) {
+ Report(
+ "WARNING: HWASan is ignoring requested __hwasan_handle_vfork: "
+ "stack top: %zx; current %zx; bottom: %zx \n"
+ "False positive error reports may follow\n",
+ top, sp, bottom);
+ return;
+ }
+ TagMemory(bottom, sp - bottom, 0);
+}
+
+extern "C" void *__hwasan_extra_spill_area() {
+ Thread *t = GetCurrentThread();
+ return &t->vfork_spill();
+}
+
+void __hwasan_print_memory_usage() {
+ InternalScopedString s(kMemoryUsageBufferSize);
+ HwasanFormatMemoryUsage(s);
+ Printf("%s\n", s.data());
+}
+
+static const u8 kFallbackTag = 0xBB;
+
+u8 __hwasan_generate_tag() {
+ Thread *t = GetCurrentThread();
+ if (!t) return kFallbackTag;
+ return t->GenerateRandomTag();
+}
+
+#if !SANITIZER_SUPPORTS_WEAK_HOOKS
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+const char* __hwasan_default_options() { return ""; }
+} // extern "C"
+#endif
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_print_stack_trace() {
+ GET_FATAL_STACK_TRACE_PC_BP(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME());
+ stack.Print();
+}
+} // extern "C"
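The __hwasan_load*/__hwasan_store* entry points above are called by compiler-inserted instrumentation before each checked memory access; the last template argument of CheckAddress is log2 of the access size. Conceptually (a sketch, not literal compiler output):

// For a 4-byte load through a tagged pointer p, instrumented code behaves as:
//   __hwasan_load4(reinterpret_cast<uptr>(p)); // dies on tag mismatch
//   int v = *p;                                // the access itself
// The _noabort variants recover instead of dying, matching
// -fsanitize-recover=hwaddress.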
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan.h (revision 351984)
@@ -0,0 +1,175 @@
+//===-- hwasan.h ------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of HWAddressSanitizer.
+//
+// Private Hwasan header.
+//===----------------------------------------------------------------------===//
+
+#ifndef HWASAN_H
+#define HWASAN_H
+
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "hwasan_interface_internal.h"
+#include "hwasan_flags.h"
+#include "ubsan/ubsan_platform.h"
+
+#ifndef HWASAN_REPLACE_OPERATORS_NEW_AND_DELETE
+# define HWASAN_REPLACE_OPERATORS_NEW_AND_DELETE 1
+#endif
+
+#ifndef HWASAN_CONTAINS_UBSAN
+# define HWASAN_CONTAINS_UBSAN CAN_SANITIZE_UB
+#endif
+
+#ifndef HWASAN_WITH_INTERCEPTORS
+#define HWASAN_WITH_INTERCEPTORS 0
+#endif
+
+typedef u8 tag_t;
+
+// TBI (Top Byte Ignore) feature of AArch64: bits [63:56] are ignored in address
+// translation and can be used to store a tag.
+const unsigned kAddressTagShift = 56;
+const uptr kAddressTagMask = 0xFFUL << kAddressTagShift;
+
+// Minimal alignment of the shadow base address. Determines the space available
+// for threads and stack histories. This is an ABI constant.
+const unsigned kShadowBaseAlignment = 32;
+
+const unsigned kRecordAddrBaseTagShift = 3;
+const unsigned kRecordFPShift = 48;
+const unsigned kRecordFPLShift = 4;
+const unsigned kRecordFPModulus = 1 << (64 - kRecordFPShift + kRecordFPLShift);
+
+static inline tag_t GetTagFromPointer(uptr p) {
+ return p >> kAddressTagShift;
+}
+
+static inline uptr UntagAddr(uptr tagged_addr) {
+ return tagged_addr & ~kAddressTagMask;
+}
+
+static inline void *UntagPtr(const void *tagged_ptr) {
+ return reinterpret_cast<void *>(
+ UntagAddr(reinterpret_cast<uptr>(tagged_ptr)));
+}
+
+static inline uptr AddTagToPointer(uptr p, tag_t tag) {
+ return (p & ~kAddressTagMask) | ((uptr)tag << kAddressTagShift);
+}
+
+namespace __hwasan {
+
+extern int hwasan_inited;
+extern bool hwasan_init_is_running;
+extern int hwasan_report_count;
+
+bool ProtectRange(uptr beg, uptr end);
+bool InitShadow();
+void InitThreads();
+void InitInstrumentation();
+void MadviseShadow();
+char *GetProcSelfMaps();
+void InitializeInterceptors();
+
+void HwasanAllocatorInit();
+void HwasanAllocatorThreadFinish();
+
+void *hwasan_malloc(uptr size, StackTrace *stack);
+void *hwasan_calloc(uptr nmemb, uptr size, StackTrace *stack);
+void *hwasan_realloc(void *ptr, uptr size, StackTrace *stack);
+void *hwasan_reallocarray(void *ptr, uptr nmemb, uptr size, StackTrace *stack);
+void *hwasan_valloc(uptr size, StackTrace *stack);
+void *hwasan_pvalloc(uptr size, StackTrace *stack);
+void *hwasan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack);
+void *hwasan_memalign(uptr alignment, uptr size, StackTrace *stack);
+int hwasan_posix_memalign(void **memptr, uptr alignment, uptr size,
+ StackTrace *stack);
+void hwasan_free(void *ptr, StackTrace *stack);
+
+void InstallTrapHandler();
+void InstallAtExitHandler();
+
+void EnterSymbolizer();
+void ExitSymbolizer();
+bool IsInSymbolizer();
+
+struct SymbolizerScope {
+ SymbolizerScope() { EnterSymbolizer(); }
+ ~SymbolizerScope() { ExitSymbolizer(); }
+};
+
+// Returns a "chained" origin id, pointing to the given stack trace followed by
+// the previous origin id.
+u32 ChainOrigin(u32 id, StackTrace *stack);
+
+const int STACK_TRACE_TAG_POISON = StackTrace::TAG_CUSTOM + 1;
+
+#define GET_MALLOC_STACK_TRACE \
+ BufferedStackTrace stack; \
+ if (hwasan_inited) \
+ stack.Unwind(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME(), \
+ nullptr, common_flags()->fast_unwind_on_malloc, \
+ common_flags()->malloc_context_size)
+
+#define GET_FATAL_STACK_TRACE_PC_BP(pc, bp) \
+ BufferedStackTrace stack; \
+ if (hwasan_inited) \
+ stack.Unwind(pc, bp, nullptr, common_flags()->fast_unwind_on_fatal)
+
+#define GET_FATAL_STACK_TRACE_HERE \
+ GET_FATAL_STACK_TRACE_PC_BP(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME())
+
+#define PRINT_CURRENT_STACK_CHECK() \
+ { \
+ GET_FATAL_STACK_TRACE_HERE; \
+ stack.Print(); \
+ }
+
+class ScopedThreadLocalStateBackup {
+ public:
+ ScopedThreadLocalStateBackup() { Backup(); }
+ ~ScopedThreadLocalStateBackup() { Restore(); }
+ void Backup();
+ void Restore();
+ private:
+ u64 va_arg_overflow_size_tls;
+};
+
+void HwasanTSDInit();
+void HwasanTSDThreadInit();
+
+void HwasanOnDeadlySignal(int signo, void *info, void *context);
+
+void UpdateMemoryUsage();
+
+void AppendToErrorMessageBuffer(const char *buffer);
+
+void AndroidTestTlsSlot();
+
+} // namespace __hwasan
+
+#define HWASAN_MALLOC_HOOK(ptr, size) \
+ do { \
+ if (&__sanitizer_malloc_hook) { \
+ __sanitizer_malloc_hook(ptr, size); \
+ } \
+ RunMallocHooks(ptr, size); \
+ } while (false)
+#define HWASAN_FREE_HOOK(ptr) \
+ do { \
+ if (&__sanitizer_free_hook) { \
+ __sanitizer_free_hook(ptr); \
+ } \
+ RunFreeHooks(ptr); \
+ } while (false)
+
+#endif // HWASAN_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
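A worked example of the tag arithmetic defined in hwasan.h above, with kAddressTagShift = 56 (the address value is illustrative):

// uptr p      = 0x0000ffffd0001234;        // untagged address, top byte 0
// uptr tagged = AddTagToPointer(p, 0xAB);  // == 0xab00ffffd0001234
// GetTagFromPointer(tagged)                // == 0xAB (the top byte)
// UntagAddr(tagged)                        // == 0x0000ffffd0001234 again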
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_allocator.cpp
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_allocator.cpp (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_allocator.cpp (revision 351984)
@@ -0,0 +1,435 @@
+//===-- hwasan_allocator.cpp ----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of HWAddressSanitizer.
+//
+// HWAddressSanitizer allocator.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_atomic.h"
+#include "sanitizer_common/sanitizer_errno.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "hwasan.h"
+#include "hwasan_allocator.h"
+#include "hwasan_checks.h"
+#include "hwasan_mapping.h"
+#include "hwasan_malloc_bisect.h"
+#include "hwasan_thread.h"
+#include "hwasan_report.h"
+
+#if HWASAN_WITH_INTERCEPTORS
+DEFINE_REAL(void *, realloc, void *ptr, uptr size)
+DEFINE_REAL(void, free, void *ptr)
+#endif
+
+namespace __hwasan {
+
+static Allocator allocator;
+static AllocatorCache fallback_allocator_cache;
+static SpinMutex fallback_mutex;
+static atomic_uint8_t hwasan_allocator_tagging_enabled;
+
+static const tag_t kFallbackAllocTag = 0xBB;
+static const tag_t kFallbackFreeTag = 0xBC;
+
+enum RightAlignMode {
+ kRightAlignNever,
+ kRightAlignSometimes,
+ kRightAlignAlways
+};
+
+// Initialized in HwasanAllocatorInit, and never changed.
+static ALIGNED(16) u8 tail_magic[kShadowAlignment - 1];
+
+bool HwasanChunkView::IsAllocated() const {
+ return metadata_ && metadata_->alloc_context_id && metadata_->requested_size;
+}
+
+// Aligns the 'addr' right to the granule boundary.
+static uptr AlignRight(uptr addr, uptr requested_size) {
+ uptr tail_size = requested_size % kShadowAlignment;
+ if (!tail_size) return addr;
+ return addr + kShadowAlignment - tail_size;
+}
+
+uptr HwasanChunkView::Beg() const {
+ if (metadata_ && metadata_->right_aligned)
+ return AlignRight(block_, metadata_->requested_size);
+ return block_;
+}
+uptr HwasanChunkView::End() const {
+ return Beg() + UsedSize();
+}
+uptr HwasanChunkView::UsedSize() const {
+ return metadata_->requested_size;
+}
+u32 HwasanChunkView::GetAllocStackId() const {
+ return metadata_->alloc_context_id;
+}
+
+uptr HwasanChunkView::ActualSize() const {
+ return allocator.GetActuallyAllocatedSize(reinterpret_cast<void *>(block_));
+}
+
+bool HwasanChunkView::FromSmallHeap() const {
+ return allocator.FromPrimary(reinterpret_cast<void *>(block_));
+}
+
+void GetAllocatorStats(AllocatorStatCounters s) {
+ allocator.GetStats(s);
+}
+
+void HwasanAllocatorInit() {
+ atomic_store_relaxed(&hwasan_allocator_tagging_enabled,
+ !flags()->disable_allocator_tagging);
+ SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
+ allocator.Init(common_flags()->allocator_release_to_os_interval_ms);
+ for (uptr i = 0; i < sizeof(tail_magic); i++)
+ tail_magic[i] = GetCurrentThread()->GenerateRandomTag();
+}
+
+void AllocatorSwallowThreadLocalCache(AllocatorCache *cache) {
+ allocator.SwallowCache(cache);
+}
+
+static uptr TaggedSize(uptr size) {
+ if (!size) size = 1;
+ uptr new_size = RoundUpTo(size, kShadowAlignment);
+ CHECK_GE(new_size, size);
+ return new_size;
+}
+
+static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
+ bool zeroise) {
+ if (orig_size > kMaxAllowedMallocSize) {
+ if (AllocatorMayReturnNull()) {
+ Report("WARNING: HWAddressSanitizer failed to allocate 0x%zx bytes\n",
+ orig_size);
+ return nullptr;
+ }
+ ReportAllocationSizeTooBig(orig_size, kMaxAllowedMallocSize, stack);
+ }
+
+ alignment = Max(alignment, kShadowAlignment);
+ uptr size = TaggedSize(orig_size);
+ Thread *t = GetCurrentThread();
+ void *allocated;
+ if (t) {
+ allocated = allocator.Allocate(t->allocator_cache(), size, alignment);
+ } else {
+ SpinMutexLock l(&fallback_mutex);
+ AllocatorCache *cache = &fallback_allocator_cache;
+ allocated = allocator.Allocate(cache, size, alignment);
+ }
+ if (UNLIKELY(!allocated)) {
+ SetAllocatorOutOfMemory();
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportOutOfMemory(size, stack);
+ }
+ Metadata *meta =
+ reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
+ meta->requested_size = static_cast<u32>(orig_size);
+ meta->alloc_context_id = StackDepotPut(*stack);
+ meta->right_aligned = false;
+ if (zeroise) {
+ internal_memset(allocated, 0, size);
+ } else if (flags()->max_malloc_fill_size > 0) {
+ uptr fill_size = Min(size, (uptr)flags()->max_malloc_fill_size);
+ internal_memset(allocated, flags()->malloc_fill_byte, fill_size);
+ }
+ if (size != orig_size) {
+ internal_memcpy(reinterpret_cast<u8 *>(allocated) + orig_size, tail_magic,
+ size - orig_size - 1);
+ }
+
+ void *user_ptr = allocated;
+ // Tagging can only be skipped when both tag_in_malloc and tag_in_free are
+ // false. When tag_in_malloc = false and tag_in_free = true malloc needs to
+ // retag to 0.
+ if ((flags()->tag_in_malloc || flags()->tag_in_free) &&
+ atomic_load_relaxed(&hwasan_allocator_tagging_enabled)) {
+ if (flags()->tag_in_malloc && malloc_bisect(stack, orig_size)) {
+ tag_t tag = t ? t->GenerateRandomTag() : kFallbackAllocTag;
+ uptr tag_size = orig_size ? orig_size : 1;
+ uptr full_granule_size = RoundDownTo(tag_size, kShadowAlignment);
+ user_ptr =
+ (void *)TagMemoryAligned((uptr)user_ptr, full_granule_size, tag);
+ if (full_granule_size != tag_size) {
+ u8 *short_granule =
+ reinterpret_cast<u8 *>(allocated) + full_granule_size;
+ TagMemoryAligned((uptr)short_granule, kShadowAlignment,
+ tag_size % kShadowAlignment);
+ short_granule[kShadowAlignment - 1] = tag;
+ }
+ } else {
+ user_ptr = (void *)TagMemoryAligned((uptr)user_ptr, size, 0);
+ }
+ }
+
+ HWASAN_MALLOC_HOOK(user_ptr, size);
+ return user_ptr;
+}
+
+static bool PointerAndMemoryTagsMatch(void *tagged_ptr) {
+ CHECK(tagged_ptr);
+ uptr tagged_uptr = reinterpret_cast<uptr>(tagged_ptr);
+ tag_t mem_tag = *reinterpret_cast<tag_t *>(
+ MemToShadow(reinterpret_cast<uptr>(UntagPtr(tagged_ptr))));
+ return PossiblyShortTagMatches(mem_tag, tagged_uptr, 1);
+}
+
+static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
+ CHECK(tagged_ptr);
+ HWASAN_FREE_HOOK(tagged_ptr);
+
+ if (!PointerAndMemoryTagsMatch(tagged_ptr))
+ ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr));
+
+ void *untagged_ptr = UntagPtr(tagged_ptr);
+ void *aligned_ptr = reinterpret_cast<void *>(
+ RoundDownTo(reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment));
+ Metadata *meta =
+ reinterpret_cast<Metadata *>(allocator.GetMetaData(aligned_ptr));
+ uptr orig_size = meta->requested_size;
+ u32 free_context_id = StackDepotPut(*stack);
+ u32 alloc_context_id = meta->alloc_context_id;
+
+ // Check tail magic.
+ uptr tagged_size = TaggedSize(orig_size);
+ if (flags()->free_checks_tail_magic && orig_size &&
+ tagged_size != orig_size) {
+ uptr tail_size = tagged_size - orig_size - 1;
+ CHECK_LT(tail_size, kShadowAlignment);
+ void *tail_beg = reinterpret_cast<void *>(
+ reinterpret_cast<uptr>(aligned_ptr) + orig_size);
+ if (tail_size && internal_memcmp(tail_beg, tail_magic, tail_size))
+ ReportTailOverwritten(stack, reinterpret_cast<uptr>(tagged_ptr),
+ orig_size, tail_magic);
+ }
+
+ meta->requested_size = 0;
+ meta->alloc_context_id = 0;
+ // This memory will not be reused by anyone else, so we are free to keep it
+ // poisoned.
+ Thread *t = GetCurrentThread();
+ if (flags()->max_free_fill_size > 0) {
+ uptr fill_size =
+ Min(TaggedSize(orig_size), (uptr)flags()->max_free_fill_size);
+ internal_memset(aligned_ptr, flags()->free_fill_byte, fill_size);
+ }
+ if (flags()->tag_in_free && malloc_bisect(stack, 0) &&
+ atomic_load_relaxed(&hwasan_allocator_tagging_enabled))
+ TagMemoryAligned(reinterpret_cast<uptr>(aligned_ptr), TaggedSize(orig_size),
+ t ? t->GenerateRandomTag() : kFallbackFreeTag);
+ if (t) {
+ allocator.Deallocate(t->allocator_cache(), aligned_ptr);
+ if (auto *ha = t->heap_allocations())
+ ha->push({reinterpret_cast<uptr>(tagged_ptr), alloc_context_id,
+ free_context_id, static_cast<u32>(orig_size)});
+ } else {
+ SpinMutexLock l(&fallback_mutex);
+ AllocatorCache *cache = &fallback_allocator_cache;
+ allocator.Deallocate(cache, aligned_ptr);
+ }
+}
+
+static void *HwasanReallocate(StackTrace *stack, void *tagged_ptr_old,
+ uptr new_size, uptr alignment) {
+ if (!PointerAndMemoryTagsMatch(tagged_ptr_old))
+ ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr_old));
+
+ void *tagged_ptr_new =
+ HwasanAllocate(stack, new_size, alignment, false /*zeroise*/);
+ if (tagged_ptr_old && tagged_ptr_new) {
+ void *untagged_ptr_old = UntagPtr(tagged_ptr_old);
+ Metadata *meta =
+ reinterpret_cast<Metadata *>(allocator.GetMetaData(untagged_ptr_old));
+ internal_memcpy(UntagPtr(tagged_ptr_new), untagged_ptr_old,
+ Min(new_size, static_cast<uptr>(meta->requested_size)));
+ HwasanDeallocate(stack, tagged_ptr_old);
+ }
+ return tagged_ptr_new;
+}
+
+static void *HwasanCalloc(StackTrace *stack, uptr nmemb, uptr size) {
+ if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportCallocOverflow(nmemb, size, stack);
+ }
+ return HwasanAllocate(stack, nmemb * size, sizeof(u64), true);
+}
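+
+// Editorial sketch of what CheckForCallocOverflow guards against: the
+// product nmemb * size must not wrap around uptr. Conceptually:
+//
+//   bool calloc_overflows(uptr nmemb, uptr size) {
+//     return size != 0 && nmemb > ~(uptr)0 / size;
+//   }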
+
+HwasanChunkView FindHeapChunkByAddress(uptr address) {
+ void *block = allocator.GetBlockBegin(reinterpret_cast<void*>(address));
+ if (!block)
+ return HwasanChunkView();
+ Metadata *metadata =
+ reinterpret_cast<Metadata*>(allocator.GetMetaData(block));
+ return HwasanChunkView(reinterpret_cast<uptr>(block), metadata);
+}
+
+static uptr AllocationSize(const void *tagged_ptr) {
+ const void *untagged_ptr = UntagPtr(tagged_ptr);
+ if (!untagged_ptr) return 0;
+ const void *beg = allocator.GetBlockBegin(untagged_ptr);
+ Metadata *b = (Metadata *)allocator.GetMetaData(untagged_ptr);
+ if (b->right_aligned) {
+ if (beg != reinterpret_cast<void *>(RoundDownTo(
+ reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment)))
+ return 0;
+ } else {
+ if (beg != untagged_ptr) return 0;
+ }
+ return b->requested_size;
+}
+
+void *hwasan_malloc(uptr size, StackTrace *stack) {
+ return SetErrnoOnNull(HwasanAllocate(stack, size, sizeof(u64), false));
+}
+
+void *hwasan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
+ return SetErrnoOnNull(HwasanCalloc(stack, nmemb, size));
+}
+
+void *hwasan_realloc(void *ptr, uptr size, StackTrace *stack) {
+ if (!ptr)
+ return SetErrnoOnNull(HwasanAllocate(stack, size, sizeof(u64), false));
+
+#if HWASAN_WITH_INTERCEPTORS
+ // A tag of 0 means that this is a system allocator allocation, so we must use
+ // the system allocator to realloc it.
+ if (!flags()->disable_allocator_tagging && GetTagFromPointer((uptr)ptr) == 0)
+ return REAL(realloc)(ptr, size);
+#endif
+
+ if (size == 0) {
+ HwasanDeallocate(stack, ptr);
+ return nullptr;
+ }
+ return SetErrnoOnNull(HwasanReallocate(stack, ptr, size, sizeof(u64)));
+}
+
+void *hwasan_reallocarray(void *ptr, uptr nmemb, uptr size, StackTrace *stack) {
+ if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
+ errno = errno_ENOMEM;
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportReallocArrayOverflow(nmemb, size, stack);
+ }
+ return hwasan_realloc(ptr, nmemb * size, stack);
+}
+
+void *hwasan_valloc(uptr size, StackTrace *stack) {
+ return SetErrnoOnNull(
+ HwasanAllocate(stack, size, GetPageSizeCached(), false));
+}
+
+void *hwasan_pvalloc(uptr size, StackTrace *stack) {
+ uptr PageSize = GetPageSizeCached();
+ if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
+ errno = errno_ENOMEM;
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportPvallocOverflow(size, stack);
+ }
+ // pvalloc(0) should allocate one page.
+ size = size ? RoundUpTo(size, PageSize) : PageSize;
+ return SetErrnoOnNull(HwasanAllocate(stack, size, PageSize, false));
+}
+
+void *hwasan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack) {
+ if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
+ errno = errno_EINVAL;
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportInvalidAlignedAllocAlignment(size, alignment, stack);
+ }
+ return SetErrnoOnNull(HwasanAllocate(stack, size, alignment, false));
+}
+
+void *hwasan_memalign(uptr alignment, uptr size, StackTrace *stack) {
+ if (UNLIKELY(!IsPowerOfTwo(alignment))) {
+ errno = errno_EINVAL;
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportInvalidAllocationAlignment(alignment, stack);
+ }
+ return SetErrnoOnNull(HwasanAllocate(stack, size, alignment, false));
+}
+
+int hwasan_posix_memalign(void **memptr, uptr alignment, uptr size,
+ StackTrace *stack) {
+ if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
+ if (AllocatorMayReturnNull())
+ return errno_EINVAL;
+ ReportInvalidPosixMemalignAlignment(alignment, stack);
+ }
+ void *ptr = HwasanAllocate(stack, size, alignment, false);
+ if (UNLIKELY(!ptr))
+ // OOM error is already taken care of by HwasanAllocate.
+ return errno_ENOMEM;
+ CHECK(IsAligned((uptr)ptr, alignment));
+ *memptr = ptr;
+ return 0;
+}
+
+void hwasan_free(void *ptr, StackTrace *stack) {
+#if HWASAN_WITH_INTERCEPTORS
+ // A tag of 0 means that this is a system allocator allocation, so we must use
+ // the system allocator to free it.
+ if (!flags()->disable_allocator_tagging && GetTagFromPointer((uptr)ptr) == 0)
+ return REAL(free)(ptr);
+#endif
+
+ return HwasanDeallocate(stack, ptr);
+}
+
+} // namespace __hwasan
+
+using namespace __hwasan;
+
+void __hwasan_enable_allocator_tagging() {
+ atomic_store_relaxed(&hwasan_allocator_tagging_enabled, 1);
+}
+
+void __hwasan_disable_allocator_tagging() {
+#if HWASAN_WITH_INTERCEPTORS
+ // Allocator tagging must be enabled for the system allocator fallback to work
+ // correctly. This means that we can't disable it at runtime if it was enabled
+ // at startup since that might result in our deallocations going to the system
+ // allocator. If tagging was disabled at startup we avoid this problem by
+ // disabling the fallback altogether.
+ CHECK(flags()->disable_allocator_tagging);
+#endif
+
+ atomic_store_relaxed(&hwasan_allocator_tagging_enabled, 0);
+}
+
+uptr __sanitizer_get_current_allocated_bytes() {
+ uptr stats[AllocatorStatCount];
+ allocator.GetStats(stats);
+ return stats[AllocatorStatAllocated];
+}
+
+uptr __sanitizer_get_heap_size() {
+ uptr stats[AllocatorStatCount];
+ allocator.GetStats(stats);
+ return stats[AllocatorStatMapped];
+}
+
+uptr __sanitizer_get_free_bytes() { return 1; }
+
+uptr __sanitizer_get_unmapped_bytes() { return 1; }
+
+uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }
+
+int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }
+
+uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_allocator.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_allocator.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_allocator.h (revision 351984)
@@ -0,0 +1,105 @@
+//===-- hwasan_allocator.h --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of HWAddressSanitizer.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef HWASAN_ALLOCATOR_H
+#define HWASAN_ALLOCATOR_H
+
+#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_allocator_checks.h"
+#include "sanitizer_common/sanitizer_allocator_interface.h"
+#include "sanitizer_common/sanitizer_allocator_report.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_ring_buffer.h"
+#include "hwasan_poisoning.h"
+
+#if !defined(__aarch64__) && !defined(__x86_64__)
+#error Unsupported platform
+#endif
+
+#if HWASAN_WITH_INTERCEPTORS
+DECLARE_REAL(void *, realloc, void *ptr, uptr size)
+DECLARE_REAL(void, free, void *ptr)
+#endif
+
+namespace __hwasan {
+
+struct Metadata {
+ u32 requested_size : 31; // sizes are < 2G.
+ u32 right_aligned : 1;
+ u32 alloc_context_id;
+};
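+
+// Editorial note: the two bitfields above pack into a single u32, so the
+// whole record is 8 bytes -- the size AP64::kMetadataSize below reserves per
+// chunk. A sketch of the invariant (not present upstream):
+//
+//   static_assert(sizeof(Metadata) == 8, "allocator metadata must stay 8B");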
+
+struct HwasanMapUnmapCallback {
+ void OnMap(uptr p, uptr size) const { UpdateMemoryUsage(); }
+ void OnUnmap(uptr p, uptr size) const {
+ // We are about to unmap a chunk of user memory.
+ // It can later be returned to the user by mmap() or become another
+ // thread's stack.
+ // Make it accessible with zero-tagged pointer.
+ TagMemory(p, size, 0);
+ }
+};
+
+static const uptr kMaxAllowedMallocSize = 2UL << 30; // 2G
+
+struct AP64 {
+ static const uptr kSpaceBeg = ~0ULL;
+ static const uptr kSpaceSize = 0x2000000000ULL;
+ static const uptr kMetadataSize = sizeof(Metadata);
+ typedef __sanitizer::VeryDenseSizeClassMap SizeClassMap;
+ using AddressSpaceView = LocalAddressSpaceView;
+ typedef HwasanMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+};
+typedef SizeClassAllocator64<AP64> PrimaryAllocator;
+typedef CombinedAllocator<PrimaryAllocator> Allocator;
+typedef Allocator::AllocatorCache AllocatorCache;
+
+void AllocatorSwallowThreadLocalCache(AllocatorCache *cache);
+
+class HwasanChunkView {
+ public:
+ HwasanChunkView() : block_(0), metadata_(nullptr) {}
+ HwasanChunkView(uptr block, Metadata *metadata)
+ : block_(block), metadata_(metadata) {}
+ bool IsAllocated() const; // Checks if the memory is currently allocated
+ uptr Beg() const; // First byte of user memory
+ uptr End() const; // Last byte of user memory
+ uptr UsedSize() const; // Size requested by the user
+ uptr ActualSize() const; // Size allocated by the allocator.
+ u32 GetAllocStackId() const;
+ bool FromSmallHeap() const;
+ private:
+ uptr block_;
+ Metadata *const metadata_;
+};
+
+HwasanChunkView FindHeapChunkByAddress(uptr address);
+
+// Information about one (de)allocation that happened in the past.
+// These are recorded in a thread-local ring buffer.
+// TODO: this is currently 24 bytes (20 bytes + alignment).
+// Compress it to 16 bytes or extend it to be more useful.
+struct HeapAllocationRecord {
+ uptr tagged_addr;
+ u32 alloc_context_id;
+ u32 free_context_id;
+ u32 requested_size;
+};
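+
+// Editorial note on the TODO above: on LP64, tagged_addr (8 bytes) plus
+// three u32 fields (12 bytes) is 20 bytes of payload, padded to 24 by the
+// record's 8-byte alignment. A sketch of the check (not upstream code):
+//
+//   static_assert(sizeof(HeapAllocationRecord) == 24, "see TODO above");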
+
+typedef RingBuffer<HeapAllocationRecord> HeapAllocationsRingBuffer;
+
+void GetAllocatorStats(AllocatorStatCounters s);
+
+} // namespace __hwasan
+
+#endif // HWASAN_ALLOCATOR_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_allocator.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_checks.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_checks.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_checks.h (revision 351984)
@@ -0,0 +1,124 @@
+//===-- hwasan_checks.h -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of HWAddressSanitizer.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef HWASAN_CHECKS_H
+#define HWASAN_CHECKS_H
+
+#include "hwasan_mapping.h"
+#include "sanitizer_common/sanitizer_common.h"
+
+namespace __hwasan {
+template <unsigned X>
+__attribute__((always_inline)) static void SigTrap(uptr p) {
+#if defined(__aarch64__)
+ (void)p;
+ // 0x900 is added so as not to interfere with the kernel's use of lower
+ // brk immediate values.
+ register uptr x0 asm("x0") = p;
+ asm("brk %1\n\t" ::"r"(x0), "n"(0x900 + X));
+#elif defined(__x86_64__)
+ // INT3 + NOP DWORD ptr [EAX + X] to pass X to our signal handler, 5 bytes
+ // total. The pointer is passed via rdi.
+ // 0x40 is added as a safeguard, to help distinguish our trap from others
+ // and to avoid a 0 offset in the instruction (otherwise it would be
+ // encoded as a different, three-byte NOP).
+ asm volatile(
+ "int3\n"
+ "nopl %c0(%%rax)\n" ::"n"(0x40 + X),
+ "D"(p));
+#else
+ // FIXME: not always sigill.
+ __builtin_trap();
+#endif
+ // __builtin_unreachable();
+}
+
+// Version for access sizes that are not a power of 2.
+template <unsigned X>
+__attribute__((always_inline)) static void SigTrap(uptr p, uptr size) {
+#if defined(__aarch64__)
+ register uptr x0 asm("x0") = p;
+ register uptr x1 asm("x1") = size;
+ asm("brk %2\n\t" ::"r"(x0), "r"(x1), "n"(0x900 + X));
+#elif defined(__x86_64__)
+ // Size is stored in rsi.
+ asm volatile(
+ "int3\n"
+ "nopl %c0(%%rax)\n" ::"n"(0x40 + X),
+ "D"(p), "S"(size));
+#else
+ __builtin_trap();
+#endif
+ // __builtin_unreachable();
+}
+
+__attribute__((always_inline, nodebug)) static bool PossiblyShortTagMatches(
+ tag_t mem_tag, uptr ptr, uptr sz) {
+ tag_t ptr_tag = GetTagFromPointer(ptr);
+ if (ptr_tag == mem_tag)
+ return true;
+ if (mem_tag >= kShadowAlignment)
+ return false;
+ if ((ptr & (kShadowAlignment - 1)) + sz > mem_tag)
+ return false;
+#ifndef __aarch64__
+ ptr = UntagAddr(ptr);
+#endif
+ return *(u8 *)(ptr | (kShadowAlignment - 1)) == ptr_tag;
+}
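+
+// Editorial sketch of the short-granule encoding decoded above: a shadow
+// value >= kShadowAlignment is an ordinary tag; a smaller value N means only
+// the first N bytes of the granule are addressable, with the true tag stored
+// in the granule's last byte. For a 13-byte object:
+//
+//   shadow byte     == 13           // bytes 0..12 are in bounds
+//   granule byte 15 == pointer tag  // read by the final load above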
+
+enum class ErrorAction { Abort, Recover };
+enum class AccessType { Load, Store };
+
+template <ErrorAction EA, AccessType AT, unsigned LogSize>
+__attribute__((always_inline, nodebug)) static void CheckAddress(uptr p) {
+ uptr ptr_raw = p & ~kAddressTagMask;
+ tag_t mem_tag = *(tag_t *)MemToShadow(ptr_raw);
+ if (UNLIKELY(!PossiblyShortTagMatches(mem_tag, p, 1 << LogSize))) {
+ SigTrap<0x20 * (EA == ErrorAction::Recover) +
+ 0x10 * (AT == AccessType::Store) + LogSize>(p);
+ if (EA == ErrorAction::Abort)
+ __builtin_unreachable();
+ }
+}
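+
+// Editorial worked example of the SigTrap<> immediate above: 0x20 encodes a
+// recoverable access, 0x10 a store, and the low nibble log2(size). So a
+// recoverable 8-byte store traps with 0x20 + 0x10 + 3 == 0x33, which
+// GetAccessInfo() in hwasan_linux.cpp decodes back with the same masks.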
+
+template <ErrorAction EA, AccessType AT>
+__attribute__((always_inline, nodebug)) static void CheckAddressSized(uptr p,
+ uptr sz) {
+ if (sz == 0)
+ return;
+ tag_t ptr_tag = GetTagFromPointer(p);
+ uptr ptr_raw = p & ~kAddressTagMask;
+ tag_t *shadow_first = (tag_t *)MemToShadow(ptr_raw);
+ tag_t *shadow_last = (tag_t *)MemToShadow(ptr_raw + sz);
+ for (tag_t *t = shadow_first; t < shadow_last; ++t)
+ if (UNLIKELY(ptr_tag != *t)) {
+ SigTrap<0x20 * (EA == ErrorAction::Recover) +
+ 0x10 * (AT == AccessType::Store) + 0xf>(p, sz);
+ if (EA == ErrorAction::Abort)
+ __builtin_unreachable();
+ }
+ uptr end = p + sz;
+ uptr tail_sz = end & 0xf;
+ if (UNLIKELY(tail_sz != 0 &&
+ !PossiblyShortTagMatches(
+ *shadow_last, end & ~(kShadowAlignment - 1), tail_sz))) {
+ SigTrap<0x20 * (EA == ErrorAction::Recover) +
+ 0x10 * (AT == AccessType::Store) + 0xf>(p, sz);
+ if (EA == ErrorAction::Abort)
+ __builtin_unreachable();
+ }
+}
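+
+// Editorial note, assuming p is granule-aligned: the loop above compares one
+// shadow byte per full 16-byte granule, and any trailing partial granule
+// falls through to the PossiblyShortTagMatches() check -- e.g. a 35-byte
+// access covers two full granules plus a 3-byte tail.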
+
+} // end namespace __hwasan
+
+#endif // HWASAN_CHECKS_H
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_dynamic_shadow.cpp
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_dynamic_shadow.cpp (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_dynamic_shadow.cpp (revision 351984)
@@ -0,0 +1,164 @@
+//===-- hwasan_dynamic_shadow.cpp -------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file is a part of HWAddressSanitizer. It reserves dynamic shadow memory
+/// region and handles ifunc resolver case, when necessary.
+///
+//===----------------------------------------------------------------------===//
+
+#include "hwasan.h"
+#include "hwasan_dynamic_shadow.h"
+#include "hwasan_mapping.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_posix.h"
+
+#include <elf.h>
+#include <link.h>
+
+// The code in this file needs to run in an unrelocated binary. It should not
+// access any external symbol, including its own non-hidden globals.
+
+namespace __hwasan {
+
+static void UnmapFromTo(uptr from, uptr to) {
+ if (to == from)
+ return;
+ CHECK(to >= from);
+ uptr res = internal_munmap(reinterpret_cast<void *>(from), to - from);
+ if (UNLIKELY(internal_iserror(res))) {
+ Report("ERROR: %s failed to unmap 0x%zx (%zd) bytes at address %p\n",
+ SanitizerToolName, to - from, to - from, from);
+ CHECK("unable to unmap" && 0);
+ }
+}
+
+// Returns an address aligned to kShadowBaseAlignment, such that
+// 2**kShadowBaseAlignment bytes on the left and shadow_size_bytes bytes on
+// the right of it are mapped no access.
+static uptr MapDynamicShadow(uptr shadow_size_bytes) {
+ const uptr granularity = GetMmapGranularity();
+ const uptr min_alignment = granularity << kShadowScale;
+ const uptr alignment = 1ULL << kShadowBaseAlignment;
+ CHECK_GE(alignment, min_alignment);
+
+ const uptr left_padding = 1ULL << kShadowBaseAlignment;
+ const uptr shadow_size =
+ RoundUpTo(shadow_size_bytes, granularity);
+ const uptr map_size = shadow_size + left_padding + alignment;
+
+ const uptr map_start = (uptr)MmapNoAccess(map_size);
+ CHECK_NE(map_start, ~(uptr)0);
+
+ const uptr shadow_start = RoundUpTo(map_start + left_padding, alignment);
+
+ UnmapFromTo(map_start, shadow_start - left_padding);
+ UnmapFromTo(shadow_start + shadow_size, map_start + map_size);
+
+ return shadow_start;
+}
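+
+// Editorial sketch of the over-map-then-trim idiom above: reserving
+// shadow_size + left_padding + alignment guarantees that some
+// alignment-aligned shadow_start exists inside the reservation with
+// left_padding bytes still available to its left; the two UnmapFromTo()
+// calls then release the unused head (before shadow_start - left_padding)
+// and tail (after shadow_start + shadow_size).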
+
+} // namespace __hwasan
+
+#if SANITIZER_ANDROID
+extern "C" {
+
+INTERFACE_ATTRIBUTE void __hwasan_shadow();
+decltype(__hwasan_shadow)* __hwasan_premap_shadow();
+
+} // extern "C"
+
+namespace __hwasan {
+
+// Conservative upper limit.
+static uptr PremapShadowSize() {
+ return RoundUpTo(GetMaxVirtualAddress() >> kShadowScale,
+ GetMmapGranularity());
+}
+
+static uptr PremapShadow() {
+ return MapDynamicShadow(PremapShadowSize());
+}
+
+static bool IsPremapShadowAvailable() {
+ const uptr shadow = reinterpret_cast<uptr>(&__hwasan_shadow);
+ const uptr resolver = reinterpret_cast<uptr>(&__hwasan_premap_shadow);
+ // shadow == resolver is how Android KitKat and older handle ifuncs.
+ // shadow == 0 just in case.
+ return shadow != 0 && shadow != resolver;
+}
+
+static uptr FindPremappedShadowStart(uptr shadow_size_bytes) {
+ const uptr granularity = GetMmapGranularity();
+ const uptr shadow_start = reinterpret_cast<uptr>(&__hwasan_shadow);
+ const uptr premap_shadow_size = PremapShadowSize();
+ const uptr shadow_size = RoundUpTo(shadow_size_bytes, granularity);
+
+ // We may have mapped too much. Release extra memory.
+ UnmapFromTo(shadow_start + shadow_size, shadow_start + premap_shadow_size);
+ return shadow_start;
+}
+
+} // namespace __hwasan
+
+extern "C" {
+
+decltype(__hwasan_shadow)* __hwasan_premap_shadow() {
+ // The resolver might be called multiple times. Map the shadow just once.
+ static __sanitizer::uptr shadow = 0;
+ if (!shadow)
+ shadow = __hwasan::PremapShadow();
+ return reinterpret_cast<decltype(__hwasan_shadow)*>(shadow);
+}
+
+// __hwasan_shadow is a "function" that has the same address as the first byte
+// of the shadow mapping.
+INTERFACE_ATTRIBUTE __attribute__((ifunc("__hwasan_premap_shadow")))
+void __hwasan_shadow();
+
+extern __attribute((weak, visibility("hidden"))) ElfW(Rela) __rela_iplt_start[],
+ __rela_iplt_end[];
+
+} // extern "C"
+
+namespace __hwasan {
+
+void InitShadowGOT() {
+ // Call the ifunc resolver for __hwasan_shadow and fill in its GOT entry. This
+ // needs to be done before other ifunc resolvers (which are handled by libc)
+ // because a resolver might read __hwasan_shadow.
+ typedef ElfW(Addr) (*ifunc_resolver_t)(void);
+ for (ElfW(Rela) *r = __rela_iplt_start; r != __rela_iplt_end; ++r) {
+ ElfW(Addr)* offset = reinterpret_cast<ElfW(Addr)*>(r->r_offset);
+ ElfW(Addr) resolver = r->r_addend;
+ if (resolver == reinterpret_cast<ElfW(Addr)>(&__hwasan_premap_shadow)) {
+ *offset = reinterpret_cast<ifunc_resolver_t>(resolver)();
+ break;
+ }
+ }
+}
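+
+// Editorial note: each entry in [__rela_iplt_start, __rela_iplt_end) is an
+// IRELATIVE relocation whose r_offset names the GOT slot to patch and whose
+// r_addend holds the resolver's address; running only our own resolver here
+// (and breaking out of the loop) is safe before libc relocates the rest.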
+
+uptr FindDynamicShadowStart(uptr shadow_size_bytes) {
+ if (IsPremapShadowAvailable())
+ return FindPremappedShadowStart(shadow_size_bytes);
+ return MapDynamicShadow(shadow_size_bytes);
+}
+
+} // namespace __hwasan
+#else
+namespace __hwasan {
+
+void InitShadowGOT() {}
+
+uptr FindDynamicShadowStart(uptr shadow_size_bytes) {
+ return MapDynamicShadow(shadow_size_bytes);
+}
+
+} // namespace __hwasan
+
+#endif // SANITIZER_ANDROID
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_dynamic_shadow.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_dynamic_shadow.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_dynamic_shadow.h (revision 351984)
@@ -0,0 +1,27 @@
+//===-- hwasan_dynamic_shadow.h ---------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file is a part of HWAddressSanitizer. It reserves dynamic shadow memory
+/// region.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef HWASAN_PREMAP_SHADOW_H
+#define HWASAN_PREMAP_SHADOW_H
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+
+namespace __hwasan {
+
+uptr FindDynamicShadowStart(uptr shadow_size_bytes);
+void InitShadowGOT();
+
+} // namespace __hwasan
+
+#endif // HWASAN_PREMAP_SHADOW_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_dynamic_shadow.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_flags.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_flags.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_flags.h (revision 351984)
@@ -0,0 +1,29 @@
+//===-- hwasan_flags.h ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of HWAddressSanitizer.
+//
+//===----------------------------------------------------------------------===//
+#ifndef HWASAN_FLAGS_H
+#define HWASAN_FLAGS_H
+
+namespace __hwasan {
+
+struct Flags {
+#define HWASAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
+#include "hwasan_flags.inc"
+#undef HWASAN_FLAG
+
+ void SetDefaults();
+};
+
+Flags *flags();
+
+} // namespace __hwasan
+
+#endif // HWASAN_FLAGS_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_flags.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_flags.inc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_flags.inc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_flags.inc (revision 351984)
@@ -0,0 +1,74 @@
+//===-- hwasan_flags.inc ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Hwasan runtime flags.
+//
+//===----------------------------------------------------------------------===//
+#ifndef HWASAN_FLAG
+# error "Define HWASAN_FLAG prior to including this file!"
+#endif
+
+// HWASAN_FLAG(Type, Name, DefaultValue, Description)
+// See COMMON_FLAG in sanitizer_flags.inc for more details.
+
+HWASAN_FLAG(bool, verbose_threads, false,
+ "inform on thread creation/destruction")
+HWASAN_FLAG(bool, tag_in_malloc, true, "")
+HWASAN_FLAG(bool, tag_in_free, true, "")
+HWASAN_FLAG(bool, print_stats, false, "")
+HWASAN_FLAG(bool, halt_on_error, true, "")
+HWASAN_FLAG(bool, atexit, false, "")
+
+// Test-only flag to disable malloc/realloc/free memory tagging on startup.
+// Tagging can be re-enabled with __hwasan_enable_allocator_tagging().
+HWASAN_FLAG(bool, disable_allocator_tagging, false, "")
+
+// If false, use simple increment of a thread local counter to generate new
+// tags.
+HWASAN_FLAG(bool, random_tags, true, "")
+
+HWASAN_FLAG(
+ int, max_malloc_fill_size, 0x1000, // By default, fill only the first 4K.
+ "HWASan allocator flag. max_malloc_fill_size is the maximal amount of "
+ "bytes that will be filled with malloc_fill_byte on malloc.")
+
+HWASAN_FLAG(bool, free_checks_tail_magic, 1,
+ "If set, free() will check the magic values "
+ "to the right of the allocated object "
+ "if the allocation size is not a multiple of the granule size")
+HWASAN_FLAG(
+ int, max_free_fill_size, 0,
+ "HWASan allocator flag. max_free_fill_size is the maximal amount of "
+ "bytes that will be filled with free_fill_byte during free.")
+HWASAN_FLAG(int, malloc_fill_byte, 0xbe,
+ "Value used to fill the newly allocated memory.")
+HWASAN_FLAG(int, free_fill_byte, 0x55,
+ "Value used to fill deallocated memory.")
+HWASAN_FLAG(int, heap_history_size, 1023,
+ "The number of heap (de)allocations remembered per thread. "
+ "Affects the quality of heap-related reports, but not the ability "
+ "to find bugs.")
+HWASAN_FLAG(bool, export_memory_stats, true,
+ "Export up-to-date memory stats through /proc")
+HWASAN_FLAG(int, stack_history_size, 1024,
+ "The number of stack frames remembered per thread. "
+ "Affects the quality of stack-related reports, but not the ability "
+ "to find bugs.")
+
+// Malloc / free bisection. Only tag malloc and free calls when a hash of
+// allocation size and stack trace is between malloc_bisect_left and
+// malloc_bisect_right (both inclusive). [0, 0] range is special and disables
+// bisection (i.e. everything is tagged). Once the range is narrowed down
+// enough, use malloc_bisect_dump to see interesting allocations.
+HWASAN_FLAG(uptr, malloc_bisect_left, 0,
+ "Left bound of malloc bisection, inclusive.")
+HWASAN_FLAG(uptr, malloc_bisect_right, 0,
+ "Right bound of malloc bisection, inclusive.")
+HWASAN_FLAG(bool, malloc_bisect_dump, false,
+ "Print all allocations within the [malloc_bisect_left, "
+ "malloc_bisect_right] range.")
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_flags.inc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_interceptors.cpp
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_interceptors.cpp (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_interceptors.cpp (revision 351984)
@@ -0,0 +1,269 @@
+//===-- hwasan_interceptors.cpp -------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of HWAddressSanitizer.
+//
+// Interceptors for standard library functions.
+//
+// FIXME: move as many interceptors as possible into
+// sanitizer_common/sanitizer_common_interceptors.h
+//===----------------------------------------------------------------------===//
+
+#include "interception/interception.h"
+#include "hwasan.h"
+#include "hwasan_allocator.h"
+#include "hwasan_mapping.h"
+#include "hwasan_thread.h"
+#include "hwasan_poisoning.h"
+#include "hwasan_report.h"
+#include "sanitizer_common/sanitizer_platform_limits_posix.h"
+#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_allocator_interface.h"
+#include "sanitizer_common/sanitizer_allocator_internal.h"
+#include "sanitizer_common/sanitizer_atomic.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_errno.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_linux.h"
+#include "sanitizer_common/sanitizer_tls_get_addr.h"
+
+#include <stdarg.h>
+// WARNING! No other system header includes in this file.
+// Ideally, we should get rid of stdarg.h as well.
+
+using namespace __hwasan;
+
+using __sanitizer::memory_order;
+using __sanitizer::atomic_load;
+using __sanitizer::atomic_store;
+using __sanitizer::atomic_uintptr_t;
+
+static uptr allocated_for_dlsym;
+static const uptr kDlsymAllocPoolSize = 1024;
+static uptr alloc_memory_for_dlsym[kDlsymAllocPoolSize];
+
+static bool IsInDlsymAllocPool(const void *ptr) {
+ uptr off = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
+ return off < sizeof(alloc_memory_for_dlsym);
+}
+
+static void *AllocateFromLocalPool(uptr size_in_bytes) {
+ uptr size_in_words = RoundUpTo(size_in_bytes, kWordSize) / kWordSize;
+ void *mem = (void *)&alloc_memory_for_dlsym[allocated_for_dlsym];
+ allocated_for_dlsym += size_in_words;
+ CHECK_LT(allocated_for_dlsym, kDlsymAllocPoolSize);
+ return mem;
+}
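+
+// Editorial note: this bump-pointer pool exists only because dlsym() itself
+// calls calloc/malloc before the REAL(...) pointers are resolved. With
+// kDlsymAllocPoolSize == 1024 words it is an 8 KiB arena on LP64, and
+// nothing allocated from it is ever freed -- see the IsInDlsymAllocPool()
+// early-returns in __sanitizer_free/__sanitizer_realloc below.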
+
+#define ENSURE_HWASAN_INITED() do { \
+ CHECK(!hwasan_init_is_running); \
+ if (!hwasan_inited) { \
+ __hwasan_init(); \
+ } \
+} while (0)
+
+
+int __sanitizer_posix_memalign(void **memptr, uptr alignment, uptr size) {
+ GET_MALLOC_STACK_TRACE;
+ CHECK_NE(memptr, 0);
+ int res = hwasan_posix_memalign(memptr, alignment, size, &stack);
+ return res;
+}
+
+void * __sanitizer_memalign(uptr alignment, uptr size) {
+ GET_MALLOC_STACK_TRACE;
+ return hwasan_memalign(alignment, size, &stack);
+}
+
+void * __sanitizer_aligned_alloc(uptr alignment, uptr size) {
+ GET_MALLOC_STACK_TRACE;
+ return hwasan_aligned_alloc(alignment, size, &stack);
+}
+
+void * __sanitizer___libc_memalign(uptr alignment, uptr size) {
+ GET_MALLOC_STACK_TRACE;
+ void *ptr = hwasan_memalign(alignment, size, &stack);
+ if (ptr)
+ DTLS_on_libc_memalign(ptr, size);
+ return ptr;
+}
+
+void * __sanitizer_valloc(uptr size) {
+ GET_MALLOC_STACK_TRACE;
+ return hwasan_valloc(size, &stack);
+}
+
+void * __sanitizer_pvalloc(uptr size) {
+ GET_MALLOC_STACK_TRACE;
+ return hwasan_pvalloc(size, &stack);
+}
+
+void __sanitizer_free(void *ptr) {
+ GET_MALLOC_STACK_TRACE;
+ if (!ptr || UNLIKELY(IsInDlsymAllocPool(ptr))) return;
+ hwasan_free(ptr, &stack);
+}
+
+void __sanitizer_cfree(void *ptr) {
+ GET_MALLOC_STACK_TRACE;
+ if (!ptr || UNLIKELY(IsInDlsymAllocPool(ptr))) return;
+ hwasan_free(ptr, &stack);
+}
+
+uptr __sanitizer_malloc_usable_size(const void *ptr) {
+ return __sanitizer_get_allocated_size(ptr);
+}
+
+struct __sanitizer_struct_mallinfo __sanitizer_mallinfo() {
+ __sanitizer_struct_mallinfo sret;
+ internal_memset(&sret, 0, sizeof(sret));
+ return sret;
+}
+
+int __sanitizer_mallopt(int cmd, int value) {
+ return 0;
+}
+
+void __sanitizer_malloc_stats(void) {
+ // FIXME: implement, but don't call REAL(malloc_stats)!
+}
+
+void * __sanitizer_calloc(uptr nmemb, uptr size) {
+ GET_MALLOC_STACK_TRACE;
+ if (UNLIKELY(!hwasan_inited))
+ // Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
+ return AllocateFromLocalPool(nmemb * size);
+ return hwasan_calloc(nmemb, size, &stack);
+}
+
+void * __sanitizer_realloc(void *ptr, uptr size) {
+ GET_MALLOC_STACK_TRACE;
+ if (UNLIKELY(IsInDlsymAllocPool(ptr))) {
+ uptr offset = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
+ uptr copy_size = Min(size, kDlsymAllocPoolSize - offset);
+ void *new_ptr;
+ if (UNLIKELY(!hwasan_inited)) {
+ new_ptr = AllocateFromLocalPool(copy_size);
+ } else {
+ copy_size = size;
+ new_ptr = hwasan_malloc(copy_size, &stack);
+ }
+ internal_memcpy(new_ptr, ptr, copy_size);
+ return new_ptr;
+ }
+ return hwasan_realloc(ptr, size, &stack);
+}
+
+void * __sanitizer_reallocarray(void *ptr, uptr nmemb, uptr size) {
+ GET_MALLOC_STACK_TRACE;
+ return hwasan_reallocarray(ptr, nmemb, size, &stack);
+}
+
+void * __sanitizer_malloc(uptr size) {
+ GET_MALLOC_STACK_TRACE;
+ if (UNLIKELY(!hwasan_init_is_running))
+ ENSURE_HWASAN_INITED();
+ if (UNLIKELY(!hwasan_inited))
+ // Hack: dlsym calls malloc before REAL(malloc) is retrieved from dlsym.
+ return AllocateFromLocalPool(size);
+ return hwasan_malloc(size, &stack);
+}
+
+#if HWASAN_WITH_INTERCEPTORS
+#define INTERCEPTOR_ALIAS(RET, FN, ARGS...) \
+ extern "C" SANITIZER_INTERFACE_ATTRIBUTE RET WRAP(FN)(ARGS) \
+ ALIAS("__sanitizer_" #FN); \
+ extern "C" SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE RET FN( \
+ ARGS) ALIAS("__sanitizer_" #FN)
+
+INTERCEPTOR_ALIAS(int, posix_memalign, void **memptr, SIZE_T alignment,
+ SIZE_T size);
+INTERCEPTOR_ALIAS(void *, aligned_alloc, SIZE_T alignment, SIZE_T size);
+INTERCEPTOR_ALIAS(void *, __libc_memalign, SIZE_T alignment, SIZE_T size);
+INTERCEPTOR_ALIAS(void *, valloc, SIZE_T size);
+INTERCEPTOR_ALIAS(void, free, void *ptr);
+INTERCEPTOR_ALIAS(uptr, malloc_usable_size, const void *ptr);
+INTERCEPTOR_ALIAS(void *, calloc, SIZE_T nmemb, SIZE_T size);
+INTERCEPTOR_ALIAS(void *, realloc, void *ptr, SIZE_T size);
+INTERCEPTOR_ALIAS(void *, reallocarray, void *ptr, SIZE_T nmemb, SIZE_T size);
+INTERCEPTOR_ALIAS(void *, malloc, SIZE_T size);
+
+#if !SANITIZER_FREEBSD && !SANITIZER_NETBSD
+INTERCEPTOR_ALIAS(void *, memalign, SIZE_T alignment, SIZE_T size);
+INTERCEPTOR_ALIAS(void *, pvalloc, SIZE_T size);
+INTERCEPTOR_ALIAS(void, cfree, void *ptr);
+INTERCEPTOR_ALIAS(__sanitizer_struct_mallinfo, mallinfo);
+INTERCEPTOR_ALIAS(int, mallopt, int cmd, int value);
+INTERCEPTOR_ALIAS(void, malloc_stats, void);
+#endif
+#endif // HWASAN_WITH_INTERCEPTORS
+
+
+#if HWASAN_WITH_INTERCEPTORS && !defined(__aarch64__)
+INTERCEPTOR(int, pthread_create, void *th, void *attr,
+ void *(*callback)(void *), void *param) {
+ ScopedTaggingDisabler disabler;
+ int res = REAL(pthread_create)(UntagPtr(th), UntagPtr(attr),
+ callback, param);
+ return res;
+}
+#endif
+
+#if HWASAN_WITH_INTERCEPTORS
+DEFINE_REAL(int, vfork)
+DECLARE_EXTERN_INTERCEPTOR_AND_WRAPPER(int, vfork)
+#endif
+
+static void BeforeFork() {
+ StackDepotLockAll();
+}
+
+static void AfterFork() {
+ StackDepotUnlockAll();
+}
+
+INTERCEPTOR(int, fork, void) {
+ ENSURE_HWASAN_INITED();
+ BeforeFork();
+ int pid = REAL(fork)();
+ AfterFork();
+ return pid;
+}
+
+namespace __hwasan {
+
+int OnExit() {
+ // FIXME: ask frontend whether we need to return failure.
+ return 0;
+}
+
+} // namespace __hwasan
+
+namespace __hwasan {
+
+void InitializeInterceptors() {
+ static int inited = 0;
+ CHECK_EQ(inited, 0);
+
+ INTERCEPT_FUNCTION(fork);
+
+#if HWASAN_WITH_INTERCEPTORS
+#if defined(__linux__)
+ INTERCEPT_FUNCTION(vfork);
+#endif // __linux__
+#if !defined(__aarch64__)
+ INTERCEPT_FUNCTION(pthread_create);
+#endif // __aarch64__
+ INTERCEPT_FUNCTION(realloc);
+ INTERCEPT_FUNCTION(free);
+#endif
+
+ inited = 1;
+}
+} // namespace __hwasan
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_interceptors_vfork.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_interceptors_vfork.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_interceptors_vfork.S (revision 351984)
@@ -0,0 +1,10 @@
+#include "sanitizer_common/sanitizer_asm.h"
+
+#if defined(__linux__) && HWASAN_WITH_INTERCEPTORS
+#define COMMON_INTERCEPTOR_SPILL_AREA __hwasan_extra_spill_area
+#define COMMON_INTERCEPTOR_HANDLE_VFORK __hwasan_handle_vfork
+#include "sanitizer_common/sanitizer_common_interceptors_vfork_aarch64.inc.S"
+#include "sanitizer_common/sanitizer_common_interceptors_vfork_x86_64.inc.S"
+#endif
+
+NO_EXEC_STACK_DIRECTIVE
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_interface_internal.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_interface_internal.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_interface_internal.h (revision 351984)
@@ -0,0 +1,214 @@
+//===-- hwasan_interface_internal.h -----------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of HWAddressSanitizer.
+//
+// Private Hwasan interface header.
+//===----------------------------------------------------------------------===//
+
+#ifndef HWASAN_INTERFACE_INTERNAL_H
+#define HWASAN_INTERFACE_INTERNAL_H
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_platform_limits_posix.h"
+
+extern "C" {
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_init_static();
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_init();
+
+using __sanitizer::uptr;
+using __sanitizer::sptr;
+using __sanitizer::uu64;
+using __sanitizer::uu32;
+using __sanitizer::uu16;
+using __sanitizer::u64;
+using __sanitizer::u32;
+using __sanitizer::u16;
+using __sanitizer::u8;
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_init_frames(uptr, uptr);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+extern uptr __hwasan_shadow_memory_dynamic_address;
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_loadN(uptr, uptr);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_load1(uptr);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_load2(uptr);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_load4(uptr);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_load8(uptr);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_load16(uptr);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_loadN_noabort(uptr, uptr);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_load1_noabort(uptr);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_load2_noabort(uptr);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_load4_noabort(uptr);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_load8_noabort(uptr);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_load16_noabort(uptr);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_storeN(uptr, uptr);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_store1(uptr);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_store2(uptr);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_store4(uptr);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_store8(uptr);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_store16(uptr);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_storeN_noabort(uptr, uptr);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_store1_noabort(uptr);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_store2_noabort(uptr);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_store4_noabort(uptr);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_store8_noabort(uptr);
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_store16_noabort(uptr);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_tag_memory(uptr p, u8 tag, uptr sz);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __hwasan_tag_pointer(uptr p, u8 tag);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_tag_mismatch(uptr addr, u8 ts);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+u8 __hwasan_generate_tag();
+
+// Returns the offset of the first tag mismatch or -1 if the whole range is
+// good.
+SANITIZER_INTERFACE_ATTRIBUTE
+sptr __hwasan_test_shadow(const void *x, uptr size);
+
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+/* OPTIONAL */ const char* __hwasan_default_options();
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_print_shadow(const void *x, uptr size);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_handle_longjmp(const void *sp_dst);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_handle_vfork(const void *sp_dst);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+u16 __sanitizer_unaligned_load16(const uu16 *p);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+u32 __sanitizer_unaligned_load32(const uu32 *p);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+u64 __sanitizer_unaligned_load64(const uu64 *p);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_unaligned_store16(uu16 *p, u16 x);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_unaligned_store32(uu32 *p, u32 x);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_unaligned_store64(uu64 *p, u64 x);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_enable_allocator_tagging();
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_disable_allocator_tagging();
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_thread_enter();
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_thread_exit();
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __hwasan_print_memory_usage();
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __sanitizer_posix_memalign(void **memptr, uptr alignment, uptr size);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void * __sanitizer_memalign(uptr alignment, uptr size);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void * __sanitizer_aligned_alloc(uptr alignment, uptr size);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void * __sanitizer___libc_memalign(uptr alignment, uptr size);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void * __sanitizer_valloc(uptr size);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void * __sanitizer_pvalloc(uptr size);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_free(void *ptr);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_cfree(void *ptr);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_malloc_usable_size(const void *ptr);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+__hwasan::__sanitizer_struct_mallinfo __sanitizer_mallinfo();
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __sanitizer_mallopt(int cmd, int value);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_malloc_stats(void);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void * __sanitizer_calloc(uptr nmemb, uptr size);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void * __sanitizer_realloc(void *ptr, uptr size);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void * __sanitizer_reallocarray(void *ptr, uptr nmemb, uptr size);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void * __sanitizer_malloc(uptr size);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__hwasan_memcpy(void *dst, const void *src, uptr size);
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__hwasan_memset(void *s, int c, uptr n);
+SANITIZER_INTERFACE_ATTRIBUTE
+void *__hwasan_memmove(void *dest, const void *src, uptr n);
+} // extern "C"
+
+#endif // HWASAN_INTERFACE_INTERNAL_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_interface_internal.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_linux.cpp
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_linux.cpp (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_linux.cpp (revision 351984)
@@ -0,0 +1,458 @@
+//===-- hwasan_linux.cpp ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file is a part of HWAddressSanitizer and contains Linux-, NetBSD- and
+/// FreeBSD-specific code.
+///
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD
+
+#include "hwasan.h"
+#include "hwasan_dynamic_shadow.h"
+#include "hwasan_interface_internal.h"
+#include "hwasan_mapping.h"
+#include "hwasan_report.h"
+#include "hwasan_thread.h"
+#include "hwasan_thread_list.h"
+
+#include <dlfcn.h>
+#include <elf.h>
+#include <link.h>
+#include <pthread.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/resource.h>
+#include <sys/time.h>
+#include <unistd.h>
+#include <unwind.h>
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_procmaps.h"
+
+// Configurations of HWASAN_WITH_INTERCEPTORS and SANITIZER_ANDROID.
+//
+// HWASAN_WITH_INTERCEPTORS=OFF, SANITIZER_ANDROID=OFF
+// Not currently tested.
+// HWASAN_WITH_INTERCEPTORS=OFF, SANITIZER_ANDROID=ON
+// Integration tests downstream exist.
+// HWASAN_WITH_INTERCEPTORS=ON, SANITIZER_ANDROID=OFF
+// Tested with check-hwasan on x86_64-linux.
+// HWASAN_WITH_INTERCEPTORS=ON, SANITIZER_ANDROID=ON
+// Tested with check-hwasan on aarch64-linux-android.
+#if !SANITIZER_ANDROID
+SANITIZER_INTERFACE_ATTRIBUTE
+THREADLOCAL uptr __hwasan_tls;
+#endif
+
+namespace __hwasan {
+
+static void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name) {
+ CHECK_EQ((beg % GetMmapGranularity()), 0);
+ CHECK_EQ(((end + 1) % GetMmapGranularity()), 0);
+ uptr size = end - beg + 1;
+ DecreaseTotalMmap(size); // Don't count the shadow against mmap_limit_mb.
+ if (!MmapFixedNoReserve(beg, size, name)) {
+ Report(
+ "ReserveShadowMemoryRange failed while trying to map 0x%zx bytes. "
+ "Perhaps you're using ulimit -v\n",
+ size);
+ Abort();
+ }
+}
+
+static void ProtectGap(uptr addr, uptr size) {
+ if (!size)
+ return;
+ void *res = MmapFixedNoAccess(addr, size, "shadow gap");
+ if (addr == (uptr)res)
+ return;
+ // A few pages at the start of the address space cannot be protected, but
+ // we want to protect as much as possible, to prevent this memory from
+ // being returned as a result of a non-FIXED mmap().
+ if (addr == 0) {
+ uptr step = GetMmapGranularity();
+ while (size > step) {
+ addr += step;
+ size -= step;
+ void *res = MmapFixedNoAccess(addr, size, "shadow gap");
+ if (addr == (uptr)res)
+ return;
+ }
+ }
+
+ Report(
+ "ERROR: Failed to protect shadow gap [%p, %p]. "
+ "HWASan cannot proceed correctly. ABORTING.\n", (void *)addr,
+ (void *)(addr + size));
+ DumpProcessMap();
+ Die();
+}
+
+static uptr kLowMemStart;
+static uptr kLowMemEnd;
+static uptr kLowShadowEnd;
+static uptr kLowShadowStart;
+static uptr kHighShadowStart;
+static uptr kHighShadowEnd;
+static uptr kHighMemStart;
+static uptr kHighMemEnd;
+
+static void PrintRange(uptr start, uptr end, const char *name) {
+ Printf("|| [%p, %p] || %.*s ||\n", (void *)start, (void *)end, 10, name);
+}
+
+static void PrintAddressSpaceLayout() {
+ PrintRange(kHighMemStart, kHighMemEnd, "HighMem");
+ if (kHighShadowEnd + 1 < kHighMemStart)
+ PrintRange(kHighShadowEnd + 1, kHighMemStart - 1, "ShadowGap");
+ else
+ CHECK_EQ(kHighShadowEnd + 1, kHighMemStart);
+ PrintRange(kHighShadowStart, kHighShadowEnd, "HighShadow");
+ if (kLowShadowEnd + 1 < kHighShadowStart)
+ PrintRange(kLowShadowEnd + 1, kHighShadowStart - 1, "ShadowGap");
+ else
+ CHECK_EQ(kLowShadowEnd + 1, kHighShadowStart);
+ PrintRange(kLowShadowStart, kLowShadowEnd, "LowShadow");
+ if (kLowMemEnd + 1 < kLowShadowStart)
+ PrintRange(kLowMemEnd + 1, kLowShadowStart - 1, "ShadowGap");
+ else
+ CHECK_EQ(kLowMemEnd + 1, kLowShadowStart);
+ PrintRange(kLowMemStart, kLowMemEnd, "LowMem");
+ CHECK_EQ(0, kLowMemStart);
+}
+
+static uptr GetHighMemEnd() {
+ // HighMem covers the upper part of the address space.
+ uptr max_address = GetMaxUserVirtualAddress();
+ // Adjust max address to make sure that kHighMemEnd and kHighMemStart are
+ // properly aligned:
+ max_address |= (GetMmapGranularity() << kShadowScale) - 1;
+ return max_address;
+}
+
+static void InitializeShadowBaseAddress(uptr shadow_size_bytes) {
+ __hwasan_shadow_memory_dynamic_address =
+ FindDynamicShadowStart(shadow_size_bytes);
+}
+
+bool InitShadow() {
+ // Define the entire memory range.
+ kHighMemEnd = GetHighMemEnd();
+
+ // Determine shadow memory base offset.
+ InitializeShadowBaseAddress(MemToShadowSize(kHighMemEnd));
+
+ // Place the low memory first.
+ kLowMemEnd = __hwasan_shadow_memory_dynamic_address - 1;
+ kLowMemStart = 0;
+
+ // Define the low shadow based on the already placed low memory.
+ kLowShadowEnd = MemToShadow(kLowMemEnd);
+ kLowShadowStart = __hwasan_shadow_memory_dynamic_address;
+
+ // High shadow takes whatever memory is left up there (making sure it is not
+ // interfering with low memory in the fixed case).
+ kHighShadowEnd = MemToShadow(kHighMemEnd);
+ kHighShadowStart = Max(kLowMemEnd, MemToShadow(kHighShadowEnd)) + 1;
+
+ // High memory starts where allocated shadow allows.
+ kHighMemStart = ShadowToMem(kHighShadowStart);
+
+ // Check the sanity of the defined memory ranges (there might be gaps).
+ CHECK_EQ(kHighMemStart % GetMmapGranularity(), 0);
+ CHECK_GT(kHighMemStart, kHighShadowEnd);
+ CHECK_GT(kHighShadowEnd, kHighShadowStart);
+ CHECK_GT(kHighShadowStart, kLowMemEnd);
+ CHECK_GT(kLowMemEnd, kLowMemStart);
+ CHECK_GT(kLowShadowEnd, kLowShadowStart);
+ CHECK_GT(kLowShadowStart, kLowMemEnd);
+
+ if (Verbosity())
+ PrintAddressSpaceLayout();
+
+ // Reserve shadow memory.
+ ReserveShadowMemoryRange(kLowShadowStart, kLowShadowEnd, "low shadow");
+ ReserveShadowMemoryRange(kHighShadowStart, kHighShadowEnd, "high shadow");
+
+ // Protect all the gaps.
+ ProtectGap(0, Min(kLowMemStart, kLowShadowStart));
+ if (kLowMemEnd + 1 < kLowShadowStart)
+ ProtectGap(kLowMemEnd + 1, kLowShadowStart - kLowMemEnd - 1);
+ if (kLowShadowEnd + 1 < kHighShadowStart)
+ ProtectGap(kLowShadowEnd + 1, kHighShadowStart - kLowShadowEnd - 1);
+ if (kHighShadowEnd + 1 < kHighMemStart)
+ ProtectGap(kHighShadowEnd + 1, kHighMemStart - kHighShadowEnd - 1);
+
+ return true;
+}
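+
+// Editorial sketch of the resulting layout (assuming the default
+// kShadowScale of 4, i.e. one shadow byte per 16 bytes of application
+// memory), lowest address first:
+//
+//   [kLowMemStart,     kLowMemEnd]      LowMem     (application)
+//   [kLowShadowStart,  kLowShadowEnd]   LowShadow  (shadow of LowMem)
+//   ...ShadowGap...                                (protected, no access)
+//   [kHighShadowStart, kHighShadowEnd]  HighShadow (shadow of HighMem)
+//   [kHighMemStart,    kHighMemEnd]     HighMem    (application)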
+
+void InitThreads() {
+ CHECK(__hwasan_shadow_memory_dynamic_address);
+ uptr guard_page_size = GetMmapGranularity();
+ uptr thread_space_start =
+ __hwasan_shadow_memory_dynamic_address - (1ULL << kShadowBaseAlignment);
+ uptr thread_space_end =
+ __hwasan_shadow_memory_dynamic_address - guard_page_size;
+ ReserveShadowMemoryRange(thread_space_start, thread_space_end - 1,
+ "hwasan threads");
+ ProtectGap(thread_space_end,
+ __hwasan_shadow_memory_dynamic_address - thread_space_end);
+ InitThreadList(thread_space_start, thread_space_end - thread_space_start);
+}
+
+static void MadviseShadowRegion(uptr beg, uptr end) {
+ uptr size = end - beg + 1;
+ if (common_flags()->no_huge_pages_for_shadow)
+ NoHugePagesInRegion(beg, size);
+ if (common_flags()->use_madv_dontdump)
+ DontDumpShadowMemory(beg, size);
+}
+
+void MadviseShadow() {
+ MadviseShadowRegion(kLowShadowStart, kLowShadowEnd);
+ MadviseShadowRegion(kHighShadowStart, kHighShadowEnd);
+}
+
+bool MemIsApp(uptr p) {
+ CHECK(GetTagFromPointer(p) == 0);
+ return p >= kHighMemStart || (p >= kLowMemStart && p <= kLowMemEnd);
+}
+
+static void HwasanAtExit(void) {
+ if (common_flags()->print_module_map)
+ DumpProcessMap();
+ if (flags()->print_stats && (flags()->atexit || hwasan_report_count > 0))
+ ReportStats();
+ if (hwasan_report_count > 0) {
+ // ReportAtExitStatistics();
+ if (common_flags()->exitcode)
+ internal__exit(common_flags()->exitcode);
+ }
+}
+
+void InstallAtExitHandler() {
+ atexit(HwasanAtExit);
+}
+
+// ---------------------- TSD ---------------- {{{1
+
+extern "C" void __hwasan_thread_enter() {
+ hwasanThreadList().CreateCurrentThread()->InitRandomState();
+}
+
+extern "C" void __hwasan_thread_exit() {
+ Thread *t = GetCurrentThread();
+ // Make sure that a signal handler cannot see a stale current-thread pointer.
+ atomic_signal_fence(memory_order_seq_cst);
+ if (t)
+ hwasanThreadList().ReleaseThread(t);
+}
+
+#if HWASAN_WITH_INTERCEPTORS
+static pthread_key_t tsd_key;
+static bool tsd_key_inited = false;
+
+void HwasanTSDThreadInit() {
+ if (tsd_key_inited)
+ CHECK_EQ(0, pthread_setspecific(tsd_key,
+ (void *)GetPthreadDestructorIterations()));
+}
+
+void HwasanTSDDtor(void *tsd) {
+ uptr iterations = (uptr)tsd;
+ if (iterations > 1) {
+ CHECK_EQ(0, pthread_setspecific(tsd_key, (void *)(iterations - 1)));
+ return;
+ }
+ __hwasan_thread_exit();
+}
+
+void HwasanTSDInit() {
+ CHECK(!tsd_key_inited);
+ tsd_key_inited = true;
+ CHECK_EQ(0, pthread_key_create(&tsd_key, HwasanTSDDtor));
+}
+#else
+void HwasanTSDInit() {}
+void HwasanTSDThreadInit() {}
+#endif
+
+#if SANITIZER_ANDROID
+uptr *GetCurrentThreadLongPtr() {
+ return (uptr *)get_android_tls_ptr();
+}
+#else
+uptr *GetCurrentThreadLongPtr() {
+ return &__hwasan_tls;
+}
+#endif
+
+#if SANITIZER_ANDROID
+void AndroidTestTlsSlot() {
+ uptr kMagicValue = 0x010203040A0B0C0D;
+ uptr *tls_ptr = GetCurrentThreadLongPtr();
+ uptr old_value = *tls_ptr;
+ *tls_ptr = kMagicValue;
+ dlerror();
+ if (*(uptr *)get_android_tls_ptr() != kMagicValue) {
+ Printf(
+ "ERROR: Incompatible version of Android: TLS_SLOT_SANITIZER(6) is used "
+ "for dlerror().\n");
+ Die();
+ }
+ *tls_ptr = old_value;
+}
+#else
+void AndroidTestTlsSlot() {}
+#endif
+
+Thread *GetCurrentThread() {
+ uptr *ThreadLong = GetCurrentThreadLongPtr();
+#if HWASAN_WITH_INTERCEPTORS
+ if (!*ThreadLong)
+ __hwasan_thread_enter();
+#endif
+ auto *R = (StackAllocationsRingBuffer *)ThreadLong;
+ return hwasanThreadList().GetThreadByBufferAddress((uptr)(R->Next()));
+}
+
+struct AccessInfo {
+ uptr addr;
+ uptr size;
+ bool is_store;
+ bool is_load;
+ bool recover;
+};
+
+static AccessInfo GetAccessInfo(siginfo_t *info, ucontext_t *uc) {
+ // Access type is passed in a platform dependent way (see below) and encoded
+ // as 0xXY, where X&1 is 1 for store, 0 for load, and X&2 is 1 if the error is
+ // recoverable. Valid values of Y are 0 to 4, which are interpreted as
+ // log2(access_size), and 0xF, which means that access size is passed via
+ // platform dependent register (see below).
+#if defined(__aarch64__)
+ // Access type is encoded in BRK immediate as 0x900 + 0xXY. For Y == 0xF,
+ // access size is stored in X1 register. Access address is always in X0
+ // register.
+ uptr pc = (uptr)info->si_addr;
+ const unsigned code = ((*(u32 *)pc) >> 5) & 0xffff;
+ if ((code & 0xff00) != 0x900)
+ return AccessInfo{}; // Not ours.
+
+ const bool is_store = code & 0x10;
+ const bool recover = code & 0x20;
+ const uptr addr = uc->uc_mcontext.regs[0];
+ const unsigned size_log = code & 0xf;
+ if (size_log > 4 && size_log != 0xf)
+ return AccessInfo{}; // Not ours.
+ const uptr size = size_log == 0xf ? uc->uc_mcontext.regs[1] : 1U << size_log;
+
+#elif defined(__x86_64__)
+ // Access type is encoded in the instruction following INT3 as
+ // NOP DWORD ptr [EAX + 0x40 + 0xXY]. For Y == 0xF, access size is stored in
+ // RSI register. Access address is always in RDI register.
+ uptr pc = (uptr)uc->uc_mcontext.gregs[REG_RIP];
+ uint8_t *nop = (uint8_t*)pc;
+ if (*nop != 0x0f || *(nop + 1) != 0x1f || *(nop + 2) != 0x40 ||
+ *(nop + 3) < 0x40)
+ return AccessInfo{}; // Not ours.
+ const unsigned code = *(nop + 3);
+
+ const bool is_store = code & 0x10;
+ const bool recover = code & 0x20;
+ const uptr addr = uc->uc_mcontext.gregs[REG_RDI];
+ const unsigned size_log = code & 0xf;
+ if (size_log > 4 && size_log != 0xf)
+ return AccessInfo{}; // Not ours.
+ const uptr size =
+ size_log == 0xf ? uc->uc_mcontext.gregs[REG_RSI] : 1U << size_log;
+
+#else
+# error Unsupported architecture
+#endif
+
+ return AccessInfo{addr, size, is_store, !is_store, recover};
+}
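+
+// Editorial worked example for the x86_64 decoder above: a recoverable
+// 4-byte load is emitted as "int3; nopl 0x62(%rax)" because 0x40 (safeguard)
+// + 0x20 (recover) + 0x00 (load) + 0x02 (log2(4)) == 0x62; the masks above
+// simply ignore the 0x40 bit when reading the byte back.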
+
+static void HandleTagMismatch(AccessInfo ai, uptr pc, uptr frame,
+ ucontext_t *uc, uptr *registers_frame = nullptr) {
+ InternalMmapVector<BufferedStackTrace> stack_buffer(1);
+ BufferedStackTrace *stack = stack_buffer.data();
+ stack->Reset();
+ stack->Unwind(pc, frame, uc, common_flags()->fast_unwind_on_fatal);
+
+ // The second stack frame contains the failure __hwasan_check function, as
+ // we have a stack frame for the registers saved in __hwasan_tag_mismatch that
+ // we wish to ignore. This (currently) only occurs on AArch64, as x64
+ // implementations use SIGTRAP to implement the failure, and thus do not go
+ // through the stack saver.
+ if (registers_frame && stack->trace && stack->size > 0) {
+ stack->trace++;
+ stack->size--;
+ }
+
+ bool fatal = flags()->halt_on_error || !ai.recover;
+ ReportTagMismatch(stack, ai.addr, ai.size, ai.is_store, fatal,
+ registers_frame);
+}
+
+static bool HwasanOnSIGTRAP(int signo, siginfo_t *info, ucontext_t *uc) {
+ AccessInfo ai = GetAccessInfo(info, uc);
+ if (!ai.is_store && !ai.is_load)
+ return false;
+
+ SignalContext sig{info, uc};
+ HandleTagMismatch(ai, StackTrace::GetNextInstructionPc(sig.pc), sig.bp, uc);
+
+#if defined(__aarch64__)
+ uc->uc_mcontext.pc += 4;
+#elif defined(__x86_64__)
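+ // No PC adjustment is needed here: unlike BRK on AArch64, the x86_64 INT3
+ // trap reports a PC that already points past the INT3 instruction.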
+#else
+# error Unsupported architecture
+#endif
+ return true;
+}
+
+// Entry point stub for interoperability between __hwasan_tag_mismatch (ASM) and
+// the rest of the mismatch handling code (C++).
+extern "C" void __hwasan_tag_mismatch_stub(uptr addr, uptr access_info,
+ uptr *registers_frame) {
+ AccessInfo ai;
+ ai.is_store = access_info & 0x10;
+ ai.recover = false;
+ ai.addr = addr;
+ ai.size = 1 << (access_info & 0xf);
+
+ HandleTagMismatch(ai, (uptr)__builtin_return_address(0),
+ (uptr)__builtin_frame_address(0), nullptr, registers_frame);
+ __builtin_unreachable();
+}
+
+static void OnStackUnwind(const SignalContext &sig, const void *,
+ BufferedStackTrace *stack) {
+ stack->Unwind(StackTrace::GetNextInstructionPc(sig.pc), sig.bp, sig.context,
+ common_flags()->fast_unwind_on_fatal);
+}
+
+void HwasanOnDeadlySignal(int signo, void *info, void *context) {
+ // Probably a tag mismatch.
+ if (signo == SIGTRAP)
+ if (HwasanOnSIGTRAP(signo, (siginfo_t *)info, (ucontext_t*)context))
+ return;
+
+ HandleDeadlySignal(info, context, GetTid(), &OnStackUnwind, nullptr);
+}
+
+
+} // namespace __hwasan
+
+#endif // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_malloc_bisect.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_malloc_bisect.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_malloc_bisect.h (revision 351984)
@@ -0,0 +1,50 @@
+//===-- hwasan_malloc_bisect.h ----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of HWAddressSanitizer.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_hash.h"
+#include "hwasan.h"
+
+namespace __hwasan {
+
+static u32 malloc_hash(StackTrace *stack, uptr orig_size) {
+ uptr len = Min(stack->size, (unsigned)7);
+ MurMur2HashBuilder H(len);
+ H.add(orig_size);
+ // Start with frame #1 to skip __sanitizer_malloc frame, which is
+ // (a) almost always the same (well, could be operator new or new[])
+ // (b) can change hashes when compiler-rt is rebuilt, invalidating previous
+ // bisection results.
+ // Because of ASLR, use only the offset inside the page.
+ for (uptr i = 1; i < len; ++i) H.add(((u32)stack->trace[i]) & 0xFFF);
+ return H.get();
+}
+
+static INLINE bool malloc_bisect(StackTrace *stack, uptr orig_size) {
+ uptr left = flags()->malloc_bisect_left;
+ uptr right = flags()->malloc_bisect_right;
+ if (LIKELY(left == 0 && right == 0))
+ return true;
+ if (!stack)
+ return true;
+ // Allow malloc_bisect_right > (u32)(-1) to avoid spelling the latter in
+ // decimal.
+ uptr h = (uptr)malloc_hash(stack, orig_size);
+ if (h < left || h > right)
+ return false;
+ if (flags()->malloc_bisect_dump) {
+ Printf("[alloc] %u %zu\n", h, orig_size);
+ stack->Print();
+ }
+ return true;
+}
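+
+// Illustrative use of the flags above (a sketch; it assumes only the
+// malloc_bisect_* flags referenced in this file): to isolate a misbehaving
+// allocation, bisect on the hash range, e.g.
+//   HWASAN_OPTIONS=malloc_bisect_left=0,malloc_bisect_right=4294967295 ./app
+// then repeatedly halve the [left, right] interval that still reproduces the
+// problem; adding malloc_bisect_dump=1 prints the hash and stack of every
+// allocation that falls inside the interval.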
+
+} // namespace __hwasan
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_mapping.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_mapping.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_mapping.h (revision 351984)
@@ -0,0 +1,57 @@
+//===-- hwasan_mapping.h ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file is a part of HWAddressSanitizer and defines memory mapping.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef HWASAN_MAPPING_H
+#define HWASAN_MAPPING_H
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "hwasan_interface_internal.h"
+
+// Typical mapping on Linux/x86_64:
+// with dynamic shadow mapped at [0x770d59f40000, 0x7f0d59f40000]:
+// || [0x7f0d59f40000, 0x7fffffffffff] || HighMem ||
+// || [0x7efe2f934000, 0x7f0d59f3ffff] || HighShadow ||
+// || [0x7e7e2f934000, 0x7efe2f933fff] || ShadowGap ||
+// || [0x770d59f40000, 0x7e7e2f933fff] || LowShadow ||
+// || [0x000000000000, 0x770d59f3ffff] || LowMem ||
+
+// Typical mapping on Android/AArch64
+// with dynamic shadow mapped: [0x007477480000, 0x007c77480000]:
+// || [0x007c77480000, 0x007fffffffff] || HighMem ||
+// || [0x007c3ebc8000, 0x007c7747ffff] || HighShadow ||
+// || [0x007bbebc8000, 0x007c3ebc7fff] || ShadowGap ||
+// || [0x007477480000, 0x007bbebc7fff] || LowShadow ||
+// || [0x000000000000, 0x00747747ffff] || LowMem ||
+
+// Reasonable values are 4 (for 1/16th shadow) and 6 (for 1/64th).
+constexpr uptr kShadowScale = 4;
+constexpr uptr kShadowAlignment = 1ULL << kShadowScale;
+
+namespace __hwasan {
+
+inline uptr MemToShadow(uptr untagged_addr) {
+ return (untagged_addr >> kShadowScale) +
+ __hwasan_shadow_memory_dynamic_address;
+}
+inline uptr ShadowToMem(uptr shadow_addr) {
+ return (shadow_addr - __hwasan_shadow_memory_dynamic_address) << kShadowScale;
+}
+inline uptr MemToShadowSize(uptr size) {
+ return size >> kShadowScale;
+}
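+
+// A worked example (illustrative): with kShadowScale == 4 one shadow byte
+// covers a 16-byte granule, so for untagged_addr == 0x12345 the tag byte
+// lives at __hwasan_shadow_memory_dynamic_address + 0x1234, and
+// ShadowToMem() of that shadow address yields 0x12340, the granule start.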
+
+bool MemIsApp(uptr p);
+
+} // namespace __hwasan
+
+#endif // HWASAN_MAPPING_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_mapping.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_memintrinsics.cpp
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_memintrinsics.cpp (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_memintrinsics.cpp (revision 351984)
@@ -0,0 +1,44 @@
+//===-- hwasan_memintrinsics.cpp --------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file is a part of HWAddressSanitizer and contains HWASAN versions of
+/// memset, memcpy and memmove
+///
+//===----------------------------------------------------------------------===//
+
+#include <string.h>
+#include "hwasan.h"
+#include "hwasan_checks.h"
+#include "hwasan_flags.h"
+#include "hwasan_interface_internal.h"
+#include "sanitizer_common/sanitizer_libc.h"
+
+using namespace __hwasan;
+
+void *__hwasan_memset(void *block, int c, uptr size) {
+ CheckAddressSized<ErrorAction::Recover, AccessType::Store>(
+ reinterpret_cast<uptr>(block), size);
+ return memset(UntagPtr(block), c, size);
+}
+
+void *__hwasan_memcpy(void *to, const void *from, uptr size) {
+ CheckAddressSized<ErrorAction::Recover, AccessType::Store>(
+ reinterpret_cast<uptr>(to), size);
+ CheckAddressSized<ErrorAction::Recover, AccessType::Load>(
+ reinterpret_cast<uptr>(from), size);
+ return memcpy(UntagPtr(to), UntagPtr(from), size);
+}
+
+void *__hwasan_memmove(void *to, const void *from, uptr size) {
+ CheckAddressSized<ErrorAction::Recover, AccessType::Store>(
+ reinterpret_cast<uptr>(to), size);
+ CheckAddressSized<ErrorAction::Recover, AccessType::Load>(
+ reinterpret_cast<uptr>(from), size);
+ return memmove(UntagPtr(to), UntagPtr(from), size);
+}
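+
+// Illustrative example (a sketch, not part of the interface contract): the
+// store range is checked before the copy runs, so an out-of-bounds
+// destination is reported even though the underlying libc memcpy might
+// appear to succeed:
+//   char *p = (char *)malloc(8);  // 8 live bytes in a 16-byte granule
+//   __hwasan_memcpy(p, src, 16);  // tag mismatch reported at p + 8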
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_new_delete.cpp
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_new_delete.cpp (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_new_delete.cpp (revision 351984)
@@ -0,0 +1,66 @@
+//===-- hwasan_new_delete.cpp ---------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of HWAddressSanitizer.
+//
+// Interceptors for operators new and delete.
+//===----------------------------------------------------------------------===//
+
+#include "hwasan.h"
+#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_allocator_report.h"
+
+#if HWASAN_REPLACE_OPERATORS_NEW_AND_DELETE
+
+#include <stddef.h>
+
+using namespace __hwasan; // NOLINT
+
+// Fake std::nothrow_t to avoid including <new>.
+namespace std {
+ struct nothrow_t {};
+} // namespace std
+
+
+// TODO(alekseys): throw std::bad_alloc instead of dying on OOM.
+#define OPERATOR_NEW_BODY(nothrow) \
+ GET_MALLOC_STACK_TRACE; \
+ void *res = hwasan_malloc(size, &stack);\
+ if (!nothrow && UNLIKELY(!res)) ReportOutOfMemory(size, &stack);\
+ return res
+
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void *operator new(size_t size) { OPERATOR_NEW_BODY(false /*nothrow*/); }
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void *operator new[](size_t size) { OPERATOR_NEW_BODY(false /*nothrow*/); }
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void *operator new(size_t size, std::nothrow_t const&) {
+ OPERATOR_NEW_BODY(true /*nothrow*/);
+}
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void *operator new[](size_t size, std::nothrow_t const&) {
+ OPERATOR_NEW_BODY(true /*nothrow*/);
+}
+
+#define OPERATOR_DELETE_BODY \
+ GET_MALLOC_STACK_TRACE; \
+ if (ptr) hwasan_free(ptr, &stack)
+
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void operator delete(void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void operator delete[](void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void operator delete(void *ptr, std::nothrow_t const&) { OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void operator delete[](void *ptr, std::nothrow_t const&) {
+ OPERATOR_DELETE_BODY;
+}
+
+#endif // HWASAN_REPLACE_OPERATORS_NEW_AND_DELETE
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_poisoning.cpp
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_poisoning.cpp (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_poisoning.cpp (revision 351984)
@@ -0,0 +1,52 @@
+//===-- hwasan_poisoning.cpp ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of HWAddressSanitizer.
+//
+//===----------------------------------------------------------------------===//
+
+#include "hwasan_poisoning.h"
+
+#include "hwasan_mapping.h"
+#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_linux.h"
+
+namespace __hwasan {
+
+uptr TagMemoryAligned(uptr p, uptr size, tag_t tag) {
+ CHECK(IsAligned(p, kShadowAlignment));
+ CHECK(IsAligned(size, kShadowAlignment));
+ uptr shadow_start = MemToShadow(p);
+ uptr shadow_size = MemToShadowSize(size);
+
+ uptr page_size = GetPageSizeCached();
+ uptr page_start = RoundUpTo(shadow_start, page_size);
+ uptr page_end = RoundDownTo(shadow_start + shadow_size, page_size);
+ uptr threshold = common_flags()->clear_shadow_mmap_threshold;
+ if (SANITIZER_LINUX &&
+ UNLIKELY(page_end >= page_start + threshold && tag == 0)) {
+ internal_memset((void *)shadow_start, tag, page_start - shadow_start);
+ internal_memset((void *)page_end, tag,
+ shadow_start + shadow_size - page_end);
+ // For an anonymous private mapping MADV_DONTNEED will return a zero page on
+ // Linux.
+ ReleaseMemoryPagesToOSAndZeroFill(page_start, page_end);
+ } else {
+ internal_memset((void *)shadow_start, tag, shadow_size);
+ }
+ return AddTagToPointer(p, tag);
+}
+
+uptr TagMemory(uptr p, uptr size, tag_t tag) {
+ uptr start = RoundDownTo(p, kShadowAlignment);
+ uptr end = RoundUpTo(p + size, kShadowAlignment);
+ return TagMemoryAligned(start, end - start, tag);
+}
+
+} // namespace __hwasan
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_poisoning.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_poisoning.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_poisoning.h (revision 351984)
@@ -0,0 +1,24 @@
+//===-- hwasan_poisoning.h --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of HWAddressSanitizer.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef HWASAN_POISONING_H
+#define HWASAN_POISONING_H
+
+#include "hwasan.h"
+
+namespace __hwasan {
+uptr TagMemory(uptr p, uptr size, tag_t tag);
+uptr TagMemoryAligned(uptr p, uptr size, tag_t tag);
+
+} // namespace __hwasan
+
+#endif // HWASAN_POISONING_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_poisoning.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_report.cpp
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_report.cpp (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_report.cpp (revision 351984)
@@ -0,0 +1,547 @@
+//===-- hwasan_report.cpp -------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of HWAddressSanitizer.
+//
+// Error reporting.
+//===----------------------------------------------------------------------===//
+
+#include "hwasan.h"
+#include "hwasan_allocator.h"
+#include "hwasan_mapping.h"
+#include "hwasan_report.h"
+#include "hwasan_thread.h"
+#include "hwasan_thread_list.h"
+#include "sanitizer_common/sanitizer_allocator_internal.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_mutex.h"
+#include "sanitizer_common/sanitizer_report_decorator.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "sanitizer_common/sanitizer_stacktrace_printer.h"
+#include "sanitizer_common/sanitizer_symbolizer.h"
+
+using namespace __sanitizer;
+
+namespace __hwasan {
+
+class ScopedReport {
+ public:
+ ScopedReport(bool fatal = false) : error_message_(1), fatal(fatal) {
+ BlockingMutexLock lock(&error_message_lock_);
+ error_message_ptr_ = fatal ? &error_message_ : nullptr;
+ ++hwasan_report_count;
+ }
+
+ ~ScopedReport() {
+ {
+ BlockingMutexLock lock(&error_message_lock_);
+ if (fatal)
+ SetAbortMessage(error_message_.data());
+ error_message_ptr_ = nullptr;
+ }
+ if (common_flags()->print_module_map >= 2 ||
+ (fatal && common_flags()->print_module_map))
+ DumpProcessMap();
+ if (fatal)
+ Die();
+ }
+
+ static void MaybeAppendToErrorMessage(const char *msg) {
+ BlockingMutexLock lock(&error_message_lock_);
+ if (!error_message_ptr_)
+ return;
+ uptr len = internal_strlen(msg);
+ uptr old_size = error_message_ptr_->size();
+ error_message_ptr_->resize(old_size + len);
+ // Overwrite the old trailing '\0'; keep the new trailing '\0' untouched.
+ internal_memcpy(&(*error_message_ptr_)[old_size - 1], msg, len);
+ }
+ private:
+ ScopedErrorReportLock error_report_lock_;
+ InternalMmapVector<char> error_message_;
+ bool fatal;
+
+ static InternalMmapVector<char> *error_message_ptr_;
+ static BlockingMutex error_message_lock_;
+};
+
+InternalMmapVector<char> *ScopedReport::error_message_ptr_;
+BlockingMutex ScopedReport::error_message_lock_;
+
+// If there is an active ScopedReport, append to its error message.
+void AppendToErrorMessageBuffer(const char *buffer) {
+ ScopedReport::MaybeAppendToErrorMessage(buffer);
+}
+
+static StackTrace GetStackTraceFromId(u32 id) {
+ CHECK(id);
+ StackTrace res = StackDepotGet(id);
+ CHECK(res.trace);
+ return res;
+}
+
+// A RAII object that holds a copy of the current thread stack ring buffer.
+// The actual stack buffer may change while we are iterating over it (for
+// example, Printf may call syslog() which can itself be built with hwasan).
+class SavedStackAllocations {
+ public:
+ SavedStackAllocations(StackAllocationsRingBuffer *rb) {
+ uptr size = rb->size() * sizeof(uptr);
+ void *storage =
+ MmapAlignedOrDieOnFatalError(size, size * 2, "saved stack allocations");
+ new (&rb_) StackAllocationsRingBuffer(*rb, storage);
+ }
+
+ ~SavedStackAllocations() {
+ StackAllocationsRingBuffer *rb = get();
+ UnmapOrDie(rb->StartOfStorage(), rb->size() * sizeof(uptr));
+ }
+
+ StackAllocationsRingBuffer *get() {
+ return (StackAllocationsRingBuffer *)&rb_;
+ }
+
+ private:
+ uptr rb_;
+};
+
+class Decorator: public __sanitizer::SanitizerCommonDecorator {
+ public:
+ Decorator() : SanitizerCommonDecorator() { }
+ const char *Access() { return Blue(); }
+ const char *Allocation() const { return Magenta(); }
+ const char *Origin() const { return Magenta(); }
+ const char *Name() const { return Green(); }
+ const char *Location() { return Green(); }
+ const char *Thread() { return Green(); }
+};
+
+// Returns the index of the rb element that matches tagged_addr (plus one),
+// or zero if nothing was found.
+uptr FindHeapAllocation(HeapAllocationsRingBuffer *rb,
+ uptr tagged_addr,
+ HeapAllocationRecord *har) {
+ if (!rb) return 0;
+ for (uptr i = 0, size = rb->size(); i < size; i++) {
+ auto h = (*rb)[i];
+ if (h.tagged_addr <= tagged_addr &&
+ h.tagged_addr + h.requested_size > tagged_addr) {
+ *har = h;
+ return i + 1;
+ }
+ }
+ return 0;
+}
+
+static void PrintStackAllocations(StackAllocationsRingBuffer *sa,
+ tag_t addr_tag, uptr untagged_addr) {
+ uptr frames = Min((uptr)flags()->stack_history_size, sa->size());
+ bool found_local = false;
+ for (uptr i = 0; i < frames; i++) {
+ const uptr *record_addr = &(*sa)[i];
+ uptr record = *record_addr;
+ if (!record)
+ break;
+ tag_t base_tag =
+ reinterpret_cast<uptr>(record_addr) >> kRecordAddrBaseTagShift;
+ uptr fp = (record >> kRecordFPShift) << kRecordFPLShift;
+ uptr pc_mask = (1ULL << kRecordFPShift) - 1;
+ uptr pc = record & pc_mask;
+ FrameInfo frame;
+ if (Symbolizer::GetOrInit()->SymbolizeFrame(pc, &frame)) {
+ for (LocalInfo &local : frame.locals) {
+ if (!local.has_frame_offset || !local.has_size || !local.has_tag_offset)
+ continue;
+ tag_t obj_tag = base_tag ^ local.tag_offset;
+ if (obj_tag != addr_tag)
+ continue;
+ // Calculate the offset from the object address to the faulting
+ // address. Because we only store bits 4-19 of FP (bits 0-3 are
+ // guaranteed to be zero), the calculation is performed mod 2^20 and may
+ // harmlessly underflow if the address mod 2^20 is below the object
+ // address.
+ uptr obj_offset =
+ (untagged_addr - fp - local.frame_offset) & (kRecordFPModulus - 1);
+ if (obj_offset >= local.size)
+ continue;
+ if (!found_local) {
+ Printf("Potentially referenced stack objects:\n");
+ found_local = true;
+ }
+ Printf(" %s in %s %s:%d\n", local.name, local.function_name,
+ local.decl_file, local.decl_line);
+ }
+ frame.Clear();
+ }
+ }
+
+ if (found_local)
+ return;
+
+ // We didn't find any locals. Most likely we don't have symbols, so dump
+ // the information that we have for offline analysis.
+ InternalScopedString frame_desc(GetPageSizeCached() * 2);
+ Printf("Previously allocated frames:\n");
+ for (uptr i = 0; i < frames; i++) {
+ const uptr *record_addr = &(*sa)[i];
+ uptr record = *record_addr;
+ if (!record)
+ break;
+ uptr pc_mask = (1ULL << 48) - 1;
+ uptr pc = record & pc_mask;
+ frame_desc.append(" record_addr:0x%zx record:0x%zx",
+ reinterpret_cast<uptr>(record_addr), record);
+ if (SymbolizedStack *frame = Symbolizer::GetOrInit()->SymbolizePC(pc)) {
+ RenderFrame(&frame_desc, " %F %L\n", 0, frame->info,
+ common_flags()->symbolize_vs_style,
+ common_flags()->strip_path_prefix);
+ frame->ClearAll();
+ }
+ Printf("%s", frame_desc.data());
+ frame_desc.clear();
+ }
+}
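+
+// Sketch of the record layout consumed above (constants are defined in
+// hwasan.h; this assumes kRecordFPShift == 48 and kRecordFPLShift == 4, as
+// the masks here imply): each ring-buffer entry packs a frame as
+//   record = ((fp >> kRecordFPLShift) << kRecordFPShift) | pc
+// so the low 48 bits hold the PC and the top 16 bits hold FP bits 4-19,
+// which is why the FP reconstruction is only meaningful modulo 2^20.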
+
+// Returns true if tag == *tag_ptr, reading tags from short granules if
+// necessary. This may return a false positive if tags 1-15 are used as a
+// regular tag rather than a short granule marker.
+static bool TagsEqual(tag_t tag, tag_t *tag_ptr) {
+ if (tag == *tag_ptr)
+ return true;
+ if (*tag_ptr == 0 || *tag_ptr > kShadowAlignment - 1)
+ return false;
+ uptr mem = ShadowToMem(reinterpret_cast<uptr>(tag_ptr));
+ tag_t inline_tag = *reinterpret_cast<tag_t *>(mem + kShadowAlignment - 1);
+ return tag == inline_tag;
+}
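+
+// A worked example (illustrative): for an 8-byte object carved out of a
+// 16-byte granule, the shadow byte holds 8 (the number of valid bytes) and
+// the granule's real tag is stored in the last byte of the granule itself;
+// TagsEqual() above therefore falls back to comparing against that in-memory
+// byte whenever the shadow value is in [1, 15].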
+
+void PrintAddressDescription(
+ uptr tagged_addr, uptr access_size,
+ StackAllocationsRingBuffer *current_stack_allocations) {
+ Decorator d;
+ int num_descriptions_printed = 0;
+ uptr untagged_addr = UntagAddr(tagged_addr);
+
+ // Print some very basic information about the address if it is a heap address.
+ HwasanChunkView chunk = FindHeapChunkByAddress(untagged_addr);
+ if (uptr beg = chunk.Beg()) {
+ uptr size = chunk.ActualSize();
+ Printf("%s[%p,%p) is a %s %s heap chunk; "
+ "size: %zd offset: %zd\n%s",
+ d.Location(),
+ beg, beg + size,
+ chunk.FromSmallHeap() ? "small" : "large",
+ chunk.IsAllocated() ? "allocated" : "unallocated",
+ size, untagged_addr - beg,
+ d.Default());
+ }
+
+ // Check if this looks like a heap buffer overflow by scanning
+ // the shadow left and right and looking for the first adjacent
+ // object with a different memory tag. If that tag matches addr_tag,
+ // check the allocator if it has a live chunk there.
+ tag_t addr_tag = GetTagFromPointer(tagged_addr);
+ tag_t *tag_ptr = reinterpret_cast<tag_t*>(MemToShadow(untagged_addr));
+ tag_t *candidate = nullptr, *left = tag_ptr, *right = tag_ptr;
+ for (int i = 0; i < 1000; i++) {
+ if (TagsEqual(addr_tag, left)) {
+ candidate = left;
+ break;
+ }
+ --left;
+ if (TagsEqual(addr_tag, right)) {
+ candidate = right;
+ break;
+ }
+ ++right;
+ }
+
+ if (candidate) {
+ uptr mem = ShadowToMem(reinterpret_cast<uptr>(candidate));
+ HwasanChunkView chunk = FindHeapChunkByAddress(mem);
+ if (chunk.IsAllocated()) {
+ Printf("%s", d.Location());
+ Printf("%p is located %zd bytes to the %s of %zd-byte region [%p,%p)\n",
+ untagged_addr,
+ candidate == left ? untagged_addr - chunk.End()
+ : chunk.Beg() - untagged_addr,
+ candidate == left ? "right" : "left", chunk.UsedSize(),
+ chunk.Beg(), chunk.End());
+ Printf("%s", d.Allocation());
+ Printf("allocated here:\n");
+ Printf("%s", d.Default());
+ GetStackTraceFromId(chunk.GetAllocStackId()).Print();
+ num_descriptions_printed++;
+ }
+ }
+
+ hwasanThreadList().VisitAllLiveThreads([&](Thread *t) {
+ // Scan all threads' ring buffers to see if this is a heap-use-after-free.
+ HeapAllocationRecord har;
+ if (uptr D = FindHeapAllocation(t->heap_allocations(), tagged_addr, &har)) {
+ Printf("%s", d.Location());
+ Printf("%p is located %zd bytes inside of %zd-byte region [%p,%p)\n",
+ untagged_addr, untagged_addr - UntagAddr(har.tagged_addr),
+ har.requested_size, UntagAddr(har.tagged_addr),
+ UntagAddr(har.tagged_addr) + har.requested_size);
+ Printf("%s", d.Allocation());
+ Printf("freed by thread T%zd here:\n", t->unique_id());
+ Printf("%s", d.Default());
+ GetStackTraceFromId(har.free_context_id).Print();
+
+ Printf("%s", d.Allocation());
+ Printf("previously allocated here:\n", t);
+ Printf("%s", d.Default());
+ GetStackTraceFromId(har.alloc_context_id).Print();
+
+ // Print a developer note: the index of this heap object
+ // in the thread's deallocation ring buffer.
+ Printf("hwasan_dev_note_heap_rb_distance: %zd %zd\n", D,
+ flags()->heap_history_size);
+
+ t->Announce();
+ num_descriptions_printed++;
+ }
+
+ // Very basic check for stack memory.
+ if (t->AddrIsInStack(untagged_addr)) {
+ Printf("%s", d.Location());
+ Printf("Address %p is located in stack of thread T%zd\n", untagged_addr,
+ t->unique_id());
+ Printf("%s", d.Default());
+ t->Announce();
+
+ auto *sa = (t == GetCurrentThread() && current_stack_allocations)
+ ? current_stack_allocations
+ : t->stack_allocations();
+ PrintStackAllocations(sa, addr_tag, untagged_addr);
+ num_descriptions_printed++;
+ }
+ });
+
+ // Print the remaining threads as extra information, one line per thread.
+ hwasanThreadList().VisitAllLiveThreads([&](Thread *t) { t->Announce(); });
+
+ if (!num_descriptions_printed)
+ // We exhausted our possibilities. Bail out.
+ Printf("HWAddressSanitizer can not describe address in more detail.\n");
+}
+
+void ReportStats() {}
+
+static void PrintTagInfoAroundAddr(tag_t *tag_ptr, uptr num_rows,
+ void (*print_tag)(InternalScopedString &s,
+ tag_t *tag)) {
+ const uptr row_len = 16; // Must be a power of two.
+ tag_t *center_row_beg = reinterpret_cast<tag_t *>(
+ RoundDownTo(reinterpret_cast<uptr>(tag_ptr), row_len));
+ tag_t *beg_row = center_row_beg - row_len * (num_rows / 2);
+ tag_t *end_row = center_row_beg + row_len * ((num_rows + 1) / 2);
+ InternalScopedString s(GetPageSizeCached() * 8);
+ for (tag_t *row = beg_row; row < end_row; row += row_len) {
+ s.append("%s", row == center_row_beg ? "=>" : " ");
+ for (uptr i = 0; i < row_len; i++) {
+ s.append("%s", row + i == tag_ptr ? "[" : " ");
+ print_tag(s, &row[i]);
+ s.append("%s", row + i == tag_ptr ? "]" : " ");
+ }
+ s.append("%s\n", row == center_row_beg ? "<=" : " ");
+ }
+ Printf("%s", s.data());
+}
+
+static void PrintTagsAroundAddr(tag_t *tag_ptr) {
+ Printf(
+ "Memory tags around the buggy address (one tag corresponds to %zd "
+ "bytes):\n", kShadowAlignment);
+ PrintTagInfoAroundAddr(tag_ptr, 17, [](InternalScopedString &s, tag_t *tag) {
+ s.append("%02x", *tag);
+ });
+
+ Printf(
+ "Tags for short granules around the buggy address (one tag corresponds "
+ "to %zd bytes):\n",
+ kShadowAlignment);
+ PrintTagInfoAroundAddr(tag_ptr, 3, [](InternalScopedString &s, tag_t *tag) {
+ if (*tag >= 1 && *tag <= kShadowAlignment) {
+ uptr granule_addr = ShadowToMem(reinterpret_cast<uptr>(tag));
+ s.append("%02x",
+ *reinterpret_cast<u8 *>(granule_addr + kShadowAlignment - 1));
+ } else {
+ s.append("..");
+ }
+ });
+ Printf(
+ "See "
+ "https://clang.llvm.org/docs/"
+ "HardwareAssistedAddressSanitizerDesign.html#short-granules for a "
+ "description of short granule tags\n");
+}
+
+void ReportInvalidFree(StackTrace *stack, uptr tagged_addr) {
+ ScopedReport R(flags()->halt_on_error);
+
+ uptr untagged_addr = UntagAddr(tagged_addr);
+ tag_t ptr_tag = GetTagFromPointer(tagged_addr);
+ tag_t *tag_ptr = reinterpret_cast<tag_t*>(MemToShadow(untagged_addr));
+ tag_t mem_tag = *tag_ptr;
+ Decorator d;
+ Printf("%s", d.Error());
+ uptr pc = stack->size ? stack->trace[0] : 0;
+ const char *bug_type = "invalid-free";
+ Report("ERROR: %s: %s on address %p at pc %p\n", SanitizerToolName, bug_type,
+ untagged_addr, pc);
+ Printf("%s", d.Access());
+ Printf("tags: %02x/%02x (ptr/mem)\n", ptr_tag, mem_tag);
+ Printf("%s", d.Default());
+
+ stack->Print();
+
+ PrintAddressDescription(tagged_addr, 0, nullptr);
+
+ PrintTagsAroundAddr(tag_ptr);
+
+ ReportErrorSummary(bug_type, stack);
+}
+
+void ReportTailOverwritten(StackTrace *stack, uptr tagged_addr, uptr orig_size,
+ const u8 *expected) {
+ uptr tail_size = kShadowAlignment - (orig_size % kShadowAlignment);
+ ScopedReport R(flags()->halt_on_error);
+ Decorator d;
+ uptr untagged_addr = UntagAddr(tagged_addr);
+ Printf("%s", d.Error());
+ const char *bug_type = "allocation-tail-overwritten";
+ Report("ERROR: %s: %s; heap object [%p,%p) of size %zd\n", SanitizerToolName,
+ bug_type, untagged_addr, untagged_addr + orig_size, orig_size);
+ Printf("\n%s", d.Default());
+ stack->Print();
+ HwasanChunkView chunk = FindHeapChunkByAddress(untagged_addr);
+ if (chunk.Beg()) {
+ Printf("%s", d.Allocation());
+ Printf("allocated here:\n");
+ Printf("%s", d.Default());
+ GetStackTraceFromId(chunk.GetAllocStackId()).Print();
+ }
+
+ InternalScopedString s(GetPageSizeCached() * 8);
+ CHECK_GT(tail_size, 0U);
+ CHECK_LT(tail_size, kShadowAlignment);
+ u8 *tail = reinterpret_cast<u8*>(untagged_addr + orig_size);
+ s.append("Tail contains: ");
+ for (uptr i = 0; i < kShadowAlignment - tail_size; i++)
+ s.append(".. ");
+ for (uptr i = 0; i < tail_size; i++)
+ s.append("%02x ", tail[i]);
+ s.append("\n");
+ s.append("Expected: ");
+ for (uptr i = 0; i < kShadowAlignment - tail_size; i++)
+ s.append(".. ");
+ for (uptr i = 0; i < tail_size; i++)
+ s.append("%02x ", expected[i]);
+ s.append("\n");
+ s.append(" ");
+ for (uptr i = 0; i < kShadowAlignment - tail_size; i++)
+ s.append(" ");
+ for (uptr i = 0; i < tail_size; i++)
+ s.append("%s ", expected[i] != tail[i] ? "^^" : " ");
+
+ s.append("\nThis error occurs when a buffer overflow overwrites memory\n"
+ "to the right of a heap object, but within the %zd-byte granule, e.g.\n"
+ " char *x = new char[20];\n"
+ " x[25] = 42;\n"
+ "%s does not detect such bugs in uninstrumented code at the time of write,"
+ "\nbut can detect them at the time of free/delete.\n"
+ "To disable this feature set HWASAN_OPTIONS=free_checks_tail_magic=0\n",
+ kShadowAlignment, SanitizerToolName);
+ Printf("%s", s.data());
+ GetCurrentThread()->Announce();
+
+ tag_t *tag_ptr = reinterpret_cast<tag_t*>(MemToShadow(untagged_addr));
+ PrintTagsAroundAddr(tag_ptr);
+
+ ReportErrorSummary(bug_type, stack);
+}
+
+void ReportTagMismatch(StackTrace *stack, uptr tagged_addr, uptr access_size,
+ bool is_store, bool fatal, uptr *registers_frame) {
+ ScopedReport R(fatal);
+ SavedStackAllocations current_stack_allocations(
+ GetCurrentThread()->stack_allocations());
+
+ Decorator d;
+ Printf("%s", d.Error());
+ uptr untagged_addr = UntagAddr(tagged_addr);
+ // TODO: when possible, try to print heap-use-after-free, etc.
+ const char *bug_type = "tag-mismatch";
+ uptr pc = stack->size ? stack->trace[0] : 0;
+ Report("ERROR: %s: %s on address %p at pc %p\n", SanitizerToolName, bug_type,
+ untagged_addr, pc);
+
+ Thread *t = GetCurrentThread();
+
+ sptr offset =
+ __hwasan_test_shadow(reinterpret_cast<void *>(tagged_addr), access_size);
+ CHECK(offset >= 0 && offset < static_cast<sptr>(access_size));
+ tag_t ptr_tag = GetTagFromPointer(tagged_addr);
+ tag_t *tag_ptr =
+ reinterpret_cast<tag_t *>(MemToShadow(untagged_addr + offset));
+ tag_t mem_tag = *tag_ptr;
+
+ Printf("%s", d.Access());
+ Printf("%s of size %zu at %p tags: %02x/%02x (ptr/mem) in thread T%zd\n",
+ is_store ? "WRITE" : "READ", access_size, untagged_addr, ptr_tag,
+ mem_tag, t->unique_id());
+ if (offset != 0)
+ Printf("Invalid access starting at offset [%zu, %zu)\n", offset,
+ Min(access_size, static_cast<uptr>(offset) + (1 << kShadowScale)));
+ Printf("%s", d.Default());
+
+ stack->Print();
+
+ PrintAddressDescription(tagged_addr, access_size,
+ current_stack_allocations.get());
+ t->Announce();
+
+ PrintTagsAroundAddr(tag_ptr);
+
+ if (registers_frame)
+ ReportRegisters(registers_frame, pc);
+
+ ReportErrorSummary(bug_type, stack);
+}
+
+// See the frame breakdown defined in __hwasan_tag_mismatch (from
+// hwasan_tag_mismatch_aarch64.S).
+void ReportRegisters(uptr *frame, uptr pc) {
+ Printf("Registers where the failure occurred (pc %p):\n", pc);
+
+ // We explicitly print a single line (4 registers/line) each iteration to
+ // reduce the number of logcat error messages printed. Each Printf() will
+ // result in a new logcat line, irrespective of whether a newline is present,
+ // and so we wish to reduce the number of Printf() calls we have to make.
+ Printf(" x0 %016llx x1 %016llx x2 %016llx x3 %016llx\n",
+ frame[0], frame[1], frame[2], frame[3]);
+ Printf(" x4 %016llx x5 %016llx x6 %016llx x7 %016llx\n",
+ frame[4], frame[5], frame[6], frame[7]);
+ Printf(" x8 %016llx x9 %016llx x10 %016llx x11 %016llx\n",
+ frame[8], frame[9], frame[10], frame[11]);
+ Printf(" x12 %016llx x13 %016llx x14 %016llx x15 %016llx\n",
+ frame[12], frame[13], frame[14], frame[15]);
+ Printf(" x16 %016llx x17 %016llx x18 %016llx x19 %016llx\n",
+ frame[16], frame[17], frame[18], frame[19]);
+ Printf(" x20 %016llx x21 %016llx x22 %016llx x23 %016llx\n",
+ frame[20], frame[21], frame[22], frame[23]);
+ Printf(" x24 %016llx x25 %016llx x26 %016llx x27 %016llx\n",
+ frame[24], frame[25], frame[26], frame[27]);
+ Printf(" x28 %016llx x29 %016llx x30 %016llx\n",
+ frame[28], frame[29], frame[30]);
+}
+
+} // namespace __hwasan
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_report.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_report.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_report.h (revision 351984)
@@ -0,0 +1,35 @@
+//===-- hwasan_report.h -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file is a part of HWAddressSanitizer. HWASan-private header for error
+/// reporting functions.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef HWASAN_REPORT_H
+#define HWASAN_REPORT_H
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+
+namespace __hwasan {
+
+void ReportStats();
+void ReportTagMismatch(StackTrace *stack, uptr addr, uptr access_size,
+ bool is_store, bool fatal, uptr *registers_frame);
+void ReportInvalidFree(StackTrace *stack, uptr addr);
+void ReportTailOverwritten(StackTrace *stack, uptr addr, uptr orig_size,
+ const u8 *expected);
+void ReportRegisters(uptr *registers_frame, uptr pc);
+void ReportAtExitStatistics();
+
+
+} // namespace __hwasan
+
+#endif // HWASAN_REPORT_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_report.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_tag_mismatch_aarch64.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_tag_mismatch_aarch64.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_tag_mismatch_aarch64.S (revision 351984)
@@ -0,0 +1,106 @@
+#include "sanitizer_common/sanitizer_asm.h"
+
+// The content of this file is AArch64-only:
+#if defined(__aarch64__)
+
+// The responsibility of the HWASan entry point in compiler-rt is primarily to
+// readjust the stack from the callee and save the current register values to
+// the stack.
+// This entry point function should be called from a __hwasan_check_* symbol.
+// These are generated during a lowering pass in the backend, and are found in
+// AArch64AsmPrinter::EmitHwasanMemaccessSymbols(). Please look there for
+// further information.
+// The __hwasan_check_* caller of this function should have expanded the stack
+// and saved the previous values of x0, x1, x29, and x30. This function will
+// "consume" these saved values and treats it as part of its own stack frame.
+// In this sense, the __hwasan_check_* callee and this function "share" a stack
+// frame. This allows us to omit having unwinding information (.cfi_*) present
+// in every __hwasan_check_* function, therefore reducing binary size. This is
+// particularly important as hwasan_check_* instances are duplicated in every
+// translation unit where HWASan is enabled.
+// This function calls HwasanTagMismatch to step back into the C++ code that
+// completes the stack unwinding and error printing. This function is not
+// permitted to return.
+
+
+// Frame from __hwasan_check_:
+// | ... |
+// | ... |
+// | Previous stack frames... |
+// +=================================+
+// | Unused 8-bytes for maintaining |
+// | 16-byte SP alignment. |
+// +---------------------------------+
+// | Return address (x30) for caller |
+// | of __hwasan_check_*. |
+// +---------------------------------+
+// | Frame address (x29) for caller |
+// | of __hwasan_check_* |
+// +---------------------------------+ <-- [SP + 232]
+// | ... |
+// | |
+// | Stack frame space for x2 - x28. |
+// | |
+// | ... |
+// +---------------------------------+ <-- [SP + 16]
+// | |
+// | Saved x1, as __hwasan_check_* |
+// | clobbers it. |
+// +---------------------------------+
+// | Saved x0, likewise above. |
+// +---------------------------------+ <-- [x30 / SP]
+
+// This function takes two arguments:
+// * x0: The address whose access failed the HWASan check.
+// * x1: The encoded access info for the failing access (this is what
+//       __hwasan_tag_mismatch_stub decodes).
+
+.section .text
+.file "hwasan_tag_mismatch_aarch64.S"
+.global __hwasan_tag_mismatch
+.type __hwasan_tag_mismatch, %function
+__hwasan_tag_mismatch:
+ CFI_STARTPROC
+
+ // Set the CFA to be the return address for caller of __hwasan_check_*. Note
+ // that we do not emit CFI predicates to describe the contents of this stack
+ // frame, as this proxy entry point should never be debugged. The contents
+ // are static and are handled by the unwinder after calling
+ // __hwasan_tag_mismatch. The frame pointer is already correctly setup
+ // by __hwasan_check_*.
+ add x29, sp, #232
+ CFI_DEF_CFA(w29, 24)
+ CFI_OFFSET(w30, -16)
+ CFI_OFFSET(w29, -24)
+
+ // Save the rest of the registers into the preallocated space left by
+ // __hwasan_check.
+ str x28, [sp, #224]
+ stp x26, x27, [sp, #208]
+ stp x24, x25, [sp, #192]
+ stp x22, x23, [sp, #176]
+ stp x20, x21, [sp, #160]
+ stp x18, x19, [sp, #144]
+ stp x16, x17, [sp, #128]
+ stp x14, x15, [sp, #112]
+ stp x12, x13, [sp, #96]
+ stp x10, x11, [sp, #80]
+ stp x8, x9, [sp, #64]
+ stp x6, x7, [sp, #48]
+ stp x4, x5, [sp, #32]
+ stp x2, x3, [sp, #16]
+
+ // Pass the address of the frame to __hwasan_tag_mismatch_stub, so that it can
+ // extract the saved registers from this frame without having to worry about
+ // finding this frame.
+ mov x2, sp
+
+ bl __hwasan_tag_mismatch_stub
+ CFI_ENDPROC
+
+.Lfunc_end0:
+ .size __hwasan_tag_mismatch, .Lfunc_end0-__hwasan_tag_mismatch
+
+#endif // defined(__aarch64__)
+
+// We do not need executable stack.
+NO_EXEC_STACK_DIRECTIVE
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_thread.cpp
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_thread.cpp (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_thread.cpp (revision 351984)
@@ -0,0 +1,127 @@
+
+#include "hwasan.h"
+#include "hwasan_mapping.h"
+#include "hwasan_thread.h"
+#include "hwasan_poisoning.h"
+#include "hwasan_interface_internal.h"
+
+#include "sanitizer_common/sanitizer_file.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_tls_get_addr.h"
+
+
+namespace __hwasan {
+
+static u32 RandomSeed() {
+ u32 seed;
+ do {
+ if (UNLIKELY(!GetRandom(reinterpret_cast<void *>(&seed), sizeof(seed),
+ /*blocking=*/false))) {
+ seed = static_cast<u32>(
+ (NanoTime() >> 12) ^
+ (reinterpret_cast<uptr>(__builtin_frame_address(0)) >> 4));
+ }
+ } while (!seed);
+ return seed;
+}
+
+void Thread::InitRandomState() {
+ random_state_ = flags()->random_tags ? RandomSeed() : unique_id_;
+
+ // Push a random number of zeros onto the ring buffer so that the first stack
+ // tag base will be random.
+ for (tag_t i = 0, e = GenerateRandomTag(); i != e; ++i)
+ stack_allocations_->push(0);
+}
+
+void Thread::Init(uptr stack_buffer_start, uptr stack_buffer_size) {
+ static u64 unique_id;
+ unique_id_ = unique_id++;
+ if (auto sz = flags()->heap_history_size)
+ heap_allocations_ = HeapAllocationsRingBuffer::New(sz);
+
+ HwasanTSDThreadInit(); // Only needed with interceptors.
+ uptr *ThreadLong = GetCurrentThreadLongPtr();
+ // The following implicitly sets (this) as the current thread.
+ stack_allocations_ = new (ThreadLong)
+ StackAllocationsRingBuffer((void *)stack_buffer_start, stack_buffer_size);
+ // Check that it worked.
+ CHECK_EQ(GetCurrentThread(), this);
+
+ // ScopedTaggingDisable needs GetCurrentThread to be set up.
+ ScopedTaggingDisabler disabler;
+
+ uptr tls_size;
+ uptr stack_size;
+ GetThreadStackAndTls(IsMainThread(), &stack_bottom_, &stack_size, &tls_begin_,
+ &tls_size);
+ stack_top_ = stack_bottom_ + stack_size;
+ tls_end_ = tls_begin_ + tls_size;
+
+ if (stack_bottom_) {
+ int local;
+ CHECK(AddrIsInStack((uptr)&local));
+ CHECK(MemIsApp(stack_bottom_));
+ CHECK(MemIsApp(stack_top_ - 1));
+ }
+
+ if (flags()->verbose_threads) {
+ if (IsMainThread()) {
+ Printf("sizeof(Thread): %zd sizeof(HeapRB): %zd sizeof(StackRB): %zd\n",
+ sizeof(Thread), heap_allocations_->SizeInBytes(),
+ stack_allocations_->size() * sizeof(uptr));
+ }
+ Print("Creating : ");
+ }
+}
+
+void Thread::ClearShadowForThreadStackAndTLS() {
+ if (stack_top_ != stack_bottom_)
+ TagMemory(stack_bottom_, stack_top_ - stack_bottom_, 0);
+ if (tls_begin_ != tls_end_)
+ TagMemory(tls_begin_, tls_end_ - tls_begin_, 0);
+}
+
+void Thread::Destroy() {
+ if (flags()->verbose_threads)
+ Print("Destroying: ");
+ AllocatorSwallowThreadLocalCache(allocator_cache());
+ ClearShadowForThreadStackAndTLS();
+ if (heap_allocations_)
+ heap_allocations_->Delete();
+ DTLS_Destroy();
+}
+
+void Thread::Print(const char *Prefix) {
+ Printf("%sT%zd %p stack: [%p,%p) sz: %zd tls: [%p,%p)\n", Prefix,
+ unique_id_, this, stack_bottom(), stack_top(),
+ stack_top() - stack_bottom(),
+ tls_begin(), tls_end());
+}
+
+static u32 xorshift(u32 state) {
+ state ^= state << 13;
+ state ^= state >> 17;
+ state ^= state << 5;
+ return state;
+}
+
+// Generate a (pseudo-)random non-zero tag.
+tag_t Thread::GenerateRandomTag() {
+ if (tagging_disabled_) return 0;
+ tag_t tag;
+ do {
+ if (flags()->random_tags) {
+ if (!random_buffer_)
+ random_buffer_ = random_state_ = xorshift(random_state_);
+ CHECK(random_buffer_);
+ tag = random_buffer_ & 0xFF;
+ random_buffer_ >>= 8;
+ } else {
+ tag = random_state_ = (random_state_ + 1) & 0xFF;
+ }
+ } while (!tag);
+ return tag;
+}
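+
+// Note (illustrative): in random_tags mode each xorshift() output supplies up
+// to four 8-bit tags, consumed a byte at a time from random_buffer_; zero
+// tags are rejected by the surrounding loop, since zero is the tag of
+// untagged memory.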
+
+} // namespace __hwasan
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_thread.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_thread.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_thread.h (revision 351984)
@@ -0,0 +1,112 @@
+//===-- hwasan_thread.h -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of HWAddressSanitizer.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef HWASAN_THREAD_H
+#define HWASAN_THREAD_H
+
+#include "hwasan_allocator.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_ring_buffer.h"
+
+namespace __hwasan {
+
+typedef __sanitizer::CompactRingBuffer<uptr> StackAllocationsRingBuffer;
+
+class Thread {
+ public:
+ void Init(uptr stack_buffer_start, uptr stack_buffer_size); // Must be called from the thread itself.
+ void InitRandomState();
+ void Destroy();
+
+ uptr stack_top() { return stack_top_; }
+ uptr stack_bottom() { return stack_bottom_; }
+ uptr stack_size() { return stack_top() - stack_bottom(); }
+ uptr tls_begin() { return tls_begin_; }
+ uptr tls_end() { return tls_end_; }
+ bool IsMainThread() { return unique_id_ == 0; }
+
+ bool AddrIsInStack(uptr addr) {
+ return addr >= stack_bottom_ && addr < stack_top_;
+ }
+
+ bool InSignalHandler() { return in_signal_handler_; }
+ void EnterSignalHandler() { in_signal_handler_++; }
+ void LeaveSignalHandler() { in_signal_handler_--; }
+
+ bool InSymbolizer() { return in_symbolizer_; }
+ void EnterSymbolizer() { in_symbolizer_++; }
+ void LeaveSymbolizer() { in_symbolizer_--; }
+
+ AllocatorCache *allocator_cache() { return &allocator_cache_; }
+ HeapAllocationsRingBuffer *heap_allocations() { return heap_allocations_; }
+ StackAllocationsRingBuffer *stack_allocations() { return stack_allocations_; }
+
+ tag_t GenerateRandomTag();
+
+ void DisableTagging() { tagging_disabled_++; }
+ void EnableTagging() { tagging_disabled_--; }
+ bool TaggingIsDisabled() const { return tagging_disabled_; }
+
+ u64 unique_id() const { return unique_id_; }
+ void Announce() {
+ if (announced_) return;
+ announced_ = true;
+ Print("Thread: ");
+ }
+
+ uptr &vfork_spill() { return vfork_spill_; }
+
+ private:
+ // NOTE: There is no Thread constructor. It is allocated
+ // via mmap() and *must* be valid in zero-initialized state.
+ void ClearShadowForThreadStackAndTLS();
+ void Print(const char *prefix);
+ uptr vfork_spill_;
+ uptr stack_top_;
+ uptr stack_bottom_;
+ uptr tls_begin_;
+ uptr tls_end_;
+
+ unsigned in_signal_handler_;
+ unsigned in_symbolizer_;
+
+ u32 random_state_;
+ u32 random_buffer_;
+
+ AllocatorCache allocator_cache_;
+ HeapAllocationsRingBuffer *heap_allocations_;
+ StackAllocationsRingBuffer *stack_allocations_;
+
+ static void InsertIntoThreadList(Thread *t);
+ static void RemoveFromThreadList(Thread *t);
+ Thread *next_; // All live threads form a linked list.
+
+ u64 unique_id_; // counting from zero.
+
+ u32 tagging_disabled_; // if non-zero, malloc uses zero tag in this thread.
+
+ bool announced_;
+
+ friend struct ThreadListHead;
+};
+
+Thread *GetCurrentThread();
+uptr *GetCurrentThreadLongPtr();
+
+struct ScopedTaggingDisabler {
+ ScopedTaggingDisabler() { GetCurrentThread()->DisableTagging(); }
+ ~ScopedTaggingDisabler() { GetCurrentThread()->EnableTagging(); }
+};
+
+} // namespace __hwasan
+
+#endif // HWASAN_THREAD_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_thread.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_thread_list.cpp
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_thread_list.cpp (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_thread_list.cpp (revision 351984)
@@ -0,0 +1,15 @@
+#include "hwasan_thread_list.h"
+
+namespace __hwasan {
+static ALIGNED(16) char thread_list_placeholder[sizeof(HwasanThreadList)];
+static HwasanThreadList *hwasan_thread_list;
+
+HwasanThreadList &hwasanThreadList() { return *hwasan_thread_list; }
+
+void InitThreadList(uptr storage, uptr size) {
+ CHECK(hwasan_thread_list == nullptr);
+ hwasan_thread_list =
+ new (thread_list_placeholder) HwasanThreadList(storage, size);
+}
+
+} // namespace __hwasan
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_thread_list.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_thread_list.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_thread_list.h (revision 351984)
@@ -0,0 +1,215 @@
+//===-- hwasan_thread_list.h ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of HWAddressSanitizer.
+//
+//===----------------------------------------------------------------------===//
+
+// HwasanThreadList is a registry for live threads, as well as an allocator for
+// HwasanThread objects and their stack history ring buffers. There are
+// constraints on memory layout of the shadow region and CompactRingBuffer that
+// are part of the ABI contract between compiler-rt and llvm.
+//
+// * Start of the shadow memory region is aligned to 2**kShadowBaseAlignment.
+// * All stack ring buffers are located within (2**kShadowBaseAlignment)
+// sized region below and adjacent to the shadow region.
+// * Each ring buffer has a size of (2**N)*4096 where N is in [0, 8), and is
+// aligned to twice its size. The value of N can be different for each buffer.
+//
+// These constraints guarantee that, given an address A of any element of the
+// ring buffer,
+// A_next = (A + sizeof(uptr)) & ~((1 << (N + 13)) - 1)
+// is the address of the next element of that ring buffer (with wrap-around).
+// And, with K = kShadowBaseAlignment,
+// S = (A | ((1 << K) - 1)) + 1
+// (align up to kShadowBaseAlignment) is the start of the shadow region.
+//
+// These calculations are used in compiler instrumentation to update the ring
+// buffer and obtain the base address of shadow using only two inputs: address
+// of the current element of the ring buffer, and N (i.e. size of the ring
+// buffer). Since the value of N is very limited, we pack both inputs into a
+// single thread-local word as
+// (1 << (N + 56)) | A
+// See the implementation of class CompactRingBuffer, which is what is stored in
+// said thread-local word.
+//
+// Note the unusual way of aligning up the address of the shadow:
+// (A | ((1 << K) - 1)) + 1
+// It is only correct if A is not already equal to the shadow base address, but
+// it saves 2 instructions on AArch64.
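+//
+// A worked example under these constraints (illustrative): with N == 1 each
+// ring buffer occupies 2*4096 == 8192 bytes and each allocation slot is
+// aligned to 16384 (twice the buffer size). A slot holds the ring buffer at
+// offset 0 and the Thread object immediately after it, which is why
+// GetThreadByBufferAddress() below can recover the Thread from any buffer
+// address by rounding down to the slot alignment and adding ring_buffer_size_.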
+
+#include "hwasan.h"
+#include "hwasan_allocator.h"
+#include "hwasan_flags.h"
+#include "hwasan_thread.h"
+
+#include "sanitizer_common/sanitizer_placement_new.h"
+
+namespace __hwasan {
+
+static uptr RingBufferSize() {
+ uptr desired_bytes = flags()->stack_history_size * sizeof(uptr);
+ // FIXME: increase the limit to 8 once this bug is fixed:
+ // https://bugs.llvm.org/show_bug.cgi?id=39030
+ for (int shift = 1; shift < 7; ++shift) {
+ uptr size = 4096 * (1ULL << shift);
+ if (size >= desired_bytes)
+ return size;
+ }
+ Printf("stack history size too large: %d\n", flags()->stack_history_size);
+ CHECK(0);
+ return 0;
+}
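+
+// For example (illustrative): with stack_history_size == 1000, desired_bytes
+// is 8000 and the loop above returns the smallest qualifying size,
+// 4096 * (1 << 1) == 8192.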
+
+struct ThreadListHead {
+ Thread *list_;
+
+ ThreadListHead() : list_(nullptr) {}
+
+ void Push(Thread *t) {
+ t->next_ = list_;
+ list_ = t;
+ }
+
+ Thread *Pop() {
+ Thread *t = list_;
+ if (t)
+ list_ = t->next_;
+ return t;
+ }
+
+ void Remove(Thread *t) {
+ Thread **cur = &list_;
+ while (*cur != t) cur = &(*cur)->next_;
+ CHECK(*cur && "thread not found");
+ *cur = (*cur)->next_;
+ }
+
+ template <class CB>
+ void ForEach(CB cb) {
+ Thread *t = list_;
+ while (t) {
+ cb(t);
+ t = t->next_;
+ }
+ }
+};
+
+struct ThreadStats {
+ uptr n_live_threads;
+ uptr total_stack_size;
+};
+
+class HwasanThreadList {
+ public:
+ HwasanThreadList(uptr storage, uptr size)
+ : free_space_(storage), free_space_end_(storage + size) {
+ // [storage, storage + size) is used as a vector of
+ // thread_alloc_size_-sized, ring_buffer_size_*2-aligned elements.
+ // Each element contains
+ // * a ring buffer at offset 0,
+ // * a Thread object at offset ring_buffer_size_.
+ ring_buffer_size_ = RingBufferSize();
+ thread_alloc_size_ =
+ RoundUpTo(ring_buffer_size_ + sizeof(Thread), ring_buffer_size_ * 2);
+ }
+
+ Thread *CreateCurrentThread() {
+ Thread *t;
+ {
+ SpinMutexLock l(&list_mutex_);
+ t = free_list_.Pop();
+ if (t) {
+ uptr start = (uptr)t - ring_buffer_size_;
+ internal_memset((void *)start, 0, ring_buffer_size_ + sizeof(Thread));
+ } else {
+ t = AllocThread();
+ }
+ live_list_.Push(t);
+ }
+ t->Init((uptr)t - ring_buffer_size_, ring_buffer_size_);
+ AddThreadStats(t);
+ return t;
+ }
+
+ void DontNeedThread(Thread *t) {
+ uptr start = (uptr)t - ring_buffer_size_;
+ ReleaseMemoryPagesToOS(start, start + thread_alloc_size_);
+ }
+
+ void ReleaseThread(Thread *t) {
+ RemoveThreadStats(t);
+ t->Destroy();
+ SpinMutexLock l(&list_mutex_);
+ live_list_.Remove(t);
+ free_list_.Push(t);
+ DontNeedThread(t);
+ }
+
+ Thread *GetThreadByBufferAddress(uptr p) {
+ return (Thread *)(RoundDownTo(p, ring_buffer_size_ * 2) +
+ ring_buffer_size_);
+ }
+
+ uptr MemoryUsedPerThread() {
+ uptr res = sizeof(Thread) + ring_buffer_size_;
+ if (auto sz = flags()->heap_history_size)
+ res += HeapAllocationsRingBuffer::SizeInBytes(sz);
+ return res;
+ }
+
+ template <class CB>
+ void VisitAllLiveThreads(CB cb) {
+ SpinMutexLock l(&list_mutex_);
+ live_list_.ForEach(cb);
+ }
+
+ void AddThreadStats(Thread *t) {
+ SpinMutexLock l(&stats_mutex_);
+ stats_.n_live_threads++;
+ stats_.total_stack_size += t->stack_size();
+ }
+
+ void RemoveThreadStats(Thread *t) {
+ SpinMutexLock l(&stats_mutex_);
+ stats_.n_live_threads--;
+ stats_.total_stack_size -= t->stack_size();
+ }
+
+ ThreadStats GetThreadStats() {
+ SpinMutexLock l(&stats_mutex_);
+ return stats_;
+ }
+
+ private:
+ Thread *AllocThread() {
+ uptr align = ring_buffer_size_ * 2;
+ CHECK(IsAligned(free_space_, align));
+ Thread *t = (Thread *)(free_space_ + ring_buffer_size_);
+ free_space_ += thread_alloc_size_;
+ CHECK(free_space_ <= free_space_end_ && "out of thread memory");
+ return t;
+ }
+
+ uptr free_space_;
+ uptr free_space_end_;
+ uptr ring_buffer_size_;
+ uptr thread_alloc_size_;
+
+ ThreadListHead free_list_;
+ ThreadListHead live_list_;
+ SpinMutex list_mutex_;
+
+ ThreadStats stats_;
+ SpinMutex stats_mutex_;
+};
+
+void InitThreadList(uptr storage, uptr size);
+HwasanThreadList &hwasanThreadList();
+
+} // namespace __hwasan
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_blacklist.txt
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_blacklist.txt (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_blacklist.txt (revision 351984)
@@ -0,0 +1,7 @@
+# Blacklist for HWAddressSanitizer. Turns off instrumentation of particular
+# functions or sources. Use with care. You may set the location of the
+# blacklist at compile time using the -fsanitize-blacklist=<path> flag.
+
+# Example usage:
+# fun:*bad_function_name*
+# src:file_with_tricky_code.cc
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan_blacklist.txt
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan.syms.extra
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan.syms.extra (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/hwasan/hwasan.syms.extra (revision 351984)
@@ -0,0 +1,2 @@
+__hwasan_*
+__ubsan_*
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/lsan/lsan.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/lsan/lsan.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/lsan/lsan.cc (revision 351984)
@@ -0,0 +1,135 @@
+//=-- lsan.cc -------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// Standalone LSan RTL.
+//
+//===----------------------------------------------------------------------===//
+
+#include "lsan.h"
+
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_flag_parser.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "lsan_allocator.h"
+#include "lsan_common.h"
+#include "lsan_thread.h"
+
+bool lsan_inited;
+bool lsan_init_is_running;
+
+namespace __lsan {
+
+///// Interface to the common LSan module. /////
+bool WordIsPoisoned(uptr addr) {
+ return false;
+}
+
+} // namespace __lsan
+
+void __sanitizer::BufferedStackTrace::UnwindImpl(
+ uptr pc, uptr bp, void *context, bool request_fast, u32 max_depth) {
+ using namespace __lsan;
+ uptr stack_top = 0, stack_bottom = 0;
+ ThreadContext *t;
+ if (StackTrace::WillUseFastUnwind(request_fast) &&
+ (t = CurrentThreadContext())) {
+ stack_top = t->stack_end();
+ stack_bottom = t->stack_begin();
+ }
+ if (!SANITIZER_MIPS || IsValidFrame(bp, stack_top, stack_bottom)) {
+ if (StackTrace::WillUseFastUnwind(request_fast))
+ Unwind(max_depth, pc, bp, nullptr, stack_top, stack_bottom, true);
+ else
+ Unwind(max_depth, pc, 0, context, 0, 0, false);
+ }
+}
+
+using namespace __lsan; // NOLINT
+
+static void InitializeFlags() {
+ // Set all the default values.
+ SetCommonFlagsDefaults();
+ {
+ CommonFlags cf;
+ cf.CopyFrom(*common_flags());
+ cf.external_symbolizer_path = GetEnv("LSAN_SYMBOLIZER_PATH");
+ cf.malloc_context_size = 30;
+ cf.intercept_tls_get_addr = true;
+ cf.detect_leaks = true;
+ cf.exitcode = 23;
+ OverrideCommonFlags(cf);
+ }
+
+ Flags *f = flags();
+ f->SetDefaults();
+
+ FlagParser parser;
+ RegisterLsanFlags(&parser, f);
+ RegisterCommonFlags(&parser);
+
+ // Override from user-specified string.
+ const char *lsan_default_options = MaybeCallLsanDefaultOptions();
+ parser.ParseString(lsan_default_options);
+ parser.ParseStringFromEnv("LSAN_OPTIONS");
+
+ SetVerbosity(common_flags()->verbosity);
+
+ if (Verbosity()) ReportUnrecognizedFlags();
+
+ if (common_flags()->help) parser.PrintFlagDescriptions();
+
+ __sanitizer_set_report_path(common_flags()->log_path);
+}
+
+static void OnStackUnwind(const SignalContext &sig, const void *,
+ BufferedStackTrace *stack) {
+ stack->Unwind(sig.pc, sig.bp, sig.context,
+ common_flags()->fast_unwind_on_fatal);
+}
+
+static void LsanOnDeadlySignal(int signo, void *siginfo, void *context) {
+ HandleDeadlySignal(siginfo, context, GetCurrentThread(), &OnStackUnwind,
+ nullptr);
+}
+
+extern "C" void __lsan_init() {
+ CHECK(!lsan_init_is_running);
+ if (lsan_inited)
+ return;
+ lsan_init_is_running = true;
+ SanitizerToolName = "LeakSanitizer";
+ CacheBinaryName();
+ AvoidCVE_2016_2143();
+ InitializeFlags();
+ InitCommonLsan();
+ InitializeAllocator();
+ ReplaceSystemMalloc();
+ InitTlsSize();
+ InitializeInterceptors();
+ InitializeThreadRegistry();
+ InstallDeadlySignalHandlers(LsanOnDeadlySignal);
+ u32 tid = ThreadCreate(0, 0, true);
+ CHECK_EQ(tid, 0);
+ ThreadStart(tid, GetTid());
+ SetCurrentThread(tid);
+
+ if (common_flags()->detect_leaks && common_flags()->leak_check_at_exit)
+ Atexit(DoLeakCheck);
+
+ InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir);
+
+ lsan_inited = true;
+ lsan_init_is_running = false;
+}
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_print_stack_trace() {
+ GET_STACK_TRACE_FATAL;
+ stack.Print();
+}
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/lsan/lsan.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/lsan/lsan.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/lsan/lsan.h (revision 351984)
@@ -0,0 +1,48 @@
+//=-- lsan.h --------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// Private header for standalone LSan RTL.
+//
+//===----------------------------------------------------------------------===//
+
+#include "lsan_thread.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+
+#define GET_STACK_TRACE(max_size, fast) \
+ __sanitizer::BufferedStackTrace stack; \
+ stack.Unwind(StackTrace::GetCurrentPc(), \
+ GET_CURRENT_FRAME(), nullptr, fast, max_size);
+
+#define GET_STACK_TRACE_FATAL \
+ GET_STACK_TRACE(kStackTraceMax, common_flags()->fast_unwind_on_fatal)
+
+#define GET_STACK_TRACE_MALLOC \
+ GET_STACK_TRACE(__sanitizer::common_flags()->malloc_context_size, \
+ common_flags()->fast_unwind_on_malloc)
+
+#define GET_STACK_TRACE_THREAD GET_STACK_TRACE(kStackTraceMax, true)
+
+namespace __lsan {
+
+void InitializeInterceptors();
+void ReplaceSystemMalloc();
+
+#define ENSURE_LSAN_INITED do { \
+ CHECK(!lsan_init_is_running); \
+ if (!lsan_inited) \
+ __lsan_init(); \
+} while (0)
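[Editor's note: a minimal sketch of how these pieces combine at an interceptor call site. The interceptor name is hypothetical; ENSURE_LSAN_INITED and GET_STACK_TRACE_MALLOC are the macros above, and lsan_malloc is the entry point declared in lsan_allocator.h later in this patch:]

// Hypothetical interceptor body: ENSURE_LSAN_INITED guards lazy init, and
// GET_STACK_TRACE_MALLOC declares and fills a local `stack` that the
// allocation entry point consumes.
void *my_malloc_interceptor(uptr size) {
  ENSURE_LSAN_INITED;
  GET_STACK_TRACE_MALLOC;
  return lsan_malloc(size, stack);
}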
+
+} // namespace __lsan
+
+extern bool lsan_inited;
+extern bool lsan_init_is_running;
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __lsan_init();
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/lsan/lsan_allocator.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/lsan/lsan_allocator.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/lsan/lsan_allocator.cc (revision 351984)
@@ -0,0 +1,353 @@
+//=-- lsan_allocator.cc ---------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// See lsan_allocator.h for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "lsan_allocator.h"
+
+#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_allocator_checks.h"
+#include "sanitizer_common/sanitizer_allocator_interface.h"
+#include "sanitizer_common/sanitizer_allocator_report.h"
+#include "sanitizer_common/sanitizer_errno.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "lsan_common.h"
+
+extern "C" void *memset(void *ptr, int value, uptr num);
+
+namespace __lsan {
+#if defined(__i386__) || defined(__arm__)
+static const uptr kMaxAllowedMallocSize = 1UL << 30;
+#elif defined(__mips64) || defined(__aarch64__)
+static const uptr kMaxAllowedMallocSize = 4UL << 30;
+#else
+static const uptr kMaxAllowedMallocSize = 8UL << 30;
+#endif
+
+static Allocator allocator;
+
+void InitializeAllocator() {
+ SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
+ allocator.InitLinkerInitialized(
+ common_flags()->allocator_release_to_os_interval_ms);
+}
+
+void AllocatorThreadFinish() {
+ allocator.SwallowCache(GetAllocatorCache());
+}
+
+static ChunkMetadata *Metadata(const void *p) {
+ return reinterpret_cast<ChunkMetadata *>(allocator.GetMetaData(p));
+}
+
+static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
+ if (!p) return;
+ ChunkMetadata *m = Metadata(p);
+ CHECK(m);
+ m->tag = DisabledInThisThread() ? kIgnored : kDirectlyLeaked;
+ m->stack_trace_id = StackDepotPut(stack);
+ m->requested_size = size;
+ atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 1, memory_order_relaxed);
+}
+
+static void RegisterDeallocation(void *p) {
+ if (!p) return;
+ ChunkMetadata *m = Metadata(p);
+ CHECK(m);
+ atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 0, memory_order_relaxed);
+}
+
+static void *ReportAllocationSizeTooBig(uptr size, const StackTrace &stack) {
+ if (AllocatorMayReturnNull()) {
+ Report("WARNING: LeakSanitizer failed to allocate 0x%zx bytes\n", size);
+ return nullptr;
+ }
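+  // The three-argument overload (sanitizer_allocator_report.h) is NORETURN,
+  // so control never falls off the end of this function.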
+ ReportAllocationSizeTooBig(size, kMaxAllowedMallocSize, &stack);
+}
+
+void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
+ bool cleared) {
+ if (size == 0)
+ size = 1;
+ if (size > kMaxAllowedMallocSize)
+ return ReportAllocationSizeTooBig(size, stack);
+ void *p = allocator.Allocate(GetAllocatorCache(), size, alignment);
+ if (UNLIKELY(!p)) {
+ SetAllocatorOutOfMemory();
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportOutOfMemory(size, &stack);
+ }
+ // Do not rely on the allocator to clear the memory (it's slow).
+ if (cleared && allocator.FromPrimary(p))
+ memset(p, 0, size);
+ RegisterAllocation(stack, p, size);
+ if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(p, size);
+ RunMallocHooks(p, size);
+ return p;
+}
+
+static void *Calloc(uptr nmemb, uptr size, const StackTrace &stack) {
+ if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportCallocOverflow(nmemb, size, &stack);
+ }
+ size *= nmemb;
+ return Allocate(stack, size, 1, true);
+}
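[Editor's note: CheckForCallocOverflow guards the `size *= nmemb` multiplication above. A minimal sketch of such a check, illustrative only and not the sanitizer_common implementation:]

// Returns true when n * size would overflow the uptr multiplication.
#include <cstdint>
using uptr = uintptr_t;

static bool CallocWouldOverflow(uptr size, uptr n) {
  if (size == 0) return false;
  return n > static_cast<uptr>(-1) / size;
}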
+
+void Deallocate(void *p) {
+ if (&__sanitizer_free_hook) __sanitizer_free_hook(p);
+ RunFreeHooks(p);
+ RegisterDeallocation(p);
+ allocator.Deallocate(GetAllocatorCache(), p);
+}
+
+void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
+ uptr alignment) {
+ RegisterDeallocation(p);
+ if (new_size > kMaxAllowedMallocSize) {
+ allocator.Deallocate(GetAllocatorCache(), p);
+ return ReportAllocationSizeTooBig(new_size, stack);
+ }
+ p = allocator.Reallocate(GetAllocatorCache(), p, new_size, alignment);
+ RegisterAllocation(stack, p, new_size);
+ return p;
+}
+
+void GetAllocatorCacheRange(uptr *begin, uptr *end) {
+ *begin = (uptr)GetAllocatorCache();
+ *end = *begin + sizeof(AllocatorCache);
+}
+
+uptr GetMallocUsableSize(const void *p) {
+ ChunkMetadata *m = Metadata(p);
+ if (!m) return 0;
+ return m->requested_size;
+}
+
+int lsan_posix_memalign(void **memptr, uptr alignment, uptr size,
+ const StackTrace &stack) {
+ if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
+ if (AllocatorMayReturnNull())
+ return errno_EINVAL;
+ ReportInvalidPosixMemalignAlignment(alignment, &stack);
+ }
+ void *ptr = Allocate(stack, size, alignment, kAlwaysClearMemory);
+ if (UNLIKELY(!ptr))
+ // OOM error is already taken care of by Allocate.
+ return errno_ENOMEM;
+ CHECK(IsAligned((uptr)ptr, alignment));
+ *memptr = ptr;
+ return 0;
+}
+
+void *lsan_aligned_alloc(uptr alignment, uptr size, const StackTrace &stack) {
+ if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
+ errno = errno_EINVAL;
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportInvalidAlignedAllocAlignment(size, alignment, &stack);
+ }
+ return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory));
+}
+
+void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack) {
+ if (UNLIKELY(!IsPowerOfTwo(alignment))) {
+ errno = errno_EINVAL;
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportInvalidAllocationAlignment(alignment, &stack);
+ }
+ return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory));
+}
+
+void *lsan_malloc(uptr size, const StackTrace &stack) {
+ return SetErrnoOnNull(Allocate(stack, size, 1, kAlwaysClearMemory));
+}
+
+void lsan_free(void *p) {
+ Deallocate(p);
+}
+
+void *lsan_realloc(void *p, uptr size, const StackTrace &stack) {
+ return SetErrnoOnNull(Reallocate(stack, p, size, 1));
+}
+
+void *lsan_reallocarray(void *ptr, uptr nmemb, uptr size,
+ const StackTrace &stack) {
+ if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
+ errno = errno_ENOMEM;
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportReallocArrayOverflow(nmemb, size, &stack);
+ }
+ return lsan_realloc(ptr, nmemb * size, stack);
+}
+
+void *lsan_calloc(uptr nmemb, uptr size, const StackTrace &stack) {
+ return SetErrnoOnNull(Calloc(nmemb, size, stack));
+}
+
+void *lsan_valloc(uptr size, const StackTrace &stack) {
+ return SetErrnoOnNull(
+ Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory));
+}
+
+void *lsan_pvalloc(uptr size, const StackTrace &stack) {
+ uptr PageSize = GetPageSizeCached();
+ if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
+ errno = errno_ENOMEM;
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ ReportPvallocOverflow(size, &stack);
+ }
+ // pvalloc(0) should allocate one page.
+ size = size ? RoundUpTo(size, PageSize) : PageSize;
+ return SetErrnoOnNull(Allocate(stack, size, PageSize, kAlwaysClearMemory));
+}
+
+uptr lsan_mz_size(const void *p) {
+ return GetMallocUsableSize(p);
+}
+
+///// Interface to the common LSan module. /////
+
+void LockAllocator() {
+ allocator.ForceLock();
+}
+
+void UnlockAllocator() {
+ allocator.ForceUnlock();
+}
+
+void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
+ *begin = (uptr)&allocator;
+ *end = *begin + sizeof(allocator);
+}
+
+uptr PointsIntoChunk(void* p) {
+ uptr addr = reinterpret_cast<uptr>(p);
+ uptr chunk = reinterpret_cast<uptr>(allocator.GetBlockBeginFastLocked(p));
+ if (!chunk) return 0;
+ // LargeMmapAllocator considers pointers to the meta-region of a chunk to be
+ // valid, but we don't want that.
+ if (addr < chunk) return 0;
+ ChunkMetadata *m = Metadata(reinterpret_cast<void *>(chunk));
+ CHECK(m);
+ if (!m->allocated)
+ return 0;
+ if (addr < chunk + m->requested_size)
+ return chunk;
+ if (IsSpecialCaseOfOperatorNew0(chunk, m->requested_size, addr))
+ return chunk;
+ return 0;
+}
+
+uptr GetUserBegin(uptr chunk) {
+ return chunk;
+}
+
+LsanMetadata::LsanMetadata(uptr chunk) {
+ metadata_ = Metadata(reinterpret_cast<void *>(chunk));
+ CHECK(metadata_);
+}
+
+bool LsanMetadata::allocated() const {
+ return reinterpret_cast<ChunkMetadata *>(metadata_)->allocated;
+}
+
+ChunkTag LsanMetadata::tag() const {
+ return reinterpret_cast<ChunkMetadata *>(metadata_)->tag;
+}
+
+void LsanMetadata::set_tag(ChunkTag value) {
+ reinterpret_cast<ChunkMetadata *>(metadata_)->tag = value;
+}
+
+uptr LsanMetadata::requested_size() const {
+ return reinterpret_cast<ChunkMetadata *>(metadata_)->requested_size;
+}
+
+u32 LsanMetadata::stack_trace_id() const {
+ return reinterpret_cast<ChunkMetadata *>(metadata_)->stack_trace_id;
+}
+
+void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+ allocator.ForEachChunk(callback, arg);
+}
+
+IgnoreObjectResult IgnoreObjectLocked(const void *p) {
+ void *chunk = allocator.GetBlockBegin(p);
+ if (!chunk || p < chunk) return kIgnoreObjectInvalid;
+ ChunkMetadata *m = Metadata(chunk);
+ CHECK(m);
+ if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size) {
+ if (m->tag == kIgnored)
+ return kIgnoreObjectAlreadyIgnored;
+ m->tag = kIgnored;
+ return kIgnoreObjectSuccess;
+ } else {
+ return kIgnoreObjectInvalid;
+ }
+}
+} // namespace __lsan
+
+using namespace __lsan;
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_current_allocated_bytes() {
+ uptr stats[AllocatorStatCount];
+ allocator.GetStats(stats);
+ return stats[AllocatorStatAllocated];
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_heap_size() {
+ uptr stats[AllocatorStatCount];
+ allocator.GetStats(stats);
+ return stats[AllocatorStatMapped];
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_free_bytes() { return 0; }
+
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_unmapped_bytes() { return 0; }
+
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __sanitizer_get_ownership(const void *p) { return Metadata(p) != nullptr; }
+
+SANITIZER_INTERFACE_ATTRIBUTE
+uptr __sanitizer_get_allocated_size(const void *p) {
+ return GetMallocUsableSize(p);
+}
+
+#if !SANITIZER_SUPPORTS_WEAK_HOOKS
+// Provide default (no-op) implementation of malloc hooks.
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_malloc_hook(void *ptr, uptr size) {
+ (void)ptr;
+ (void)size;
+}
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+void __sanitizer_free_hook(void *ptr) {
+ (void)ptr;
+}
+#endif
+} // extern "C"
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/lsan/lsan_allocator.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/lsan/lsan_allocator.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/lsan/lsan_allocator.h (revision 351984)
@@ -0,0 +1,115 @@
+//=-- lsan_allocator.h ----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// Allocator for standalone LSan.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LSAN_ALLOCATOR_H
+#define LSAN_ALLOCATOR_H
+
+#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "lsan_common.h"
+
+namespace __lsan {
+
+void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
+ bool cleared);
+void Deallocate(void *p);
+void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
+ uptr alignment);
+uptr GetMallocUsableSize(const void *p);
+
+template<typename Callable>
+void ForEachChunk(const Callable &callback);
+
+void GetAllocatorCacheRange(uptr *begin, uptr *end);
+void AllocatorThreadFinish();
+void InitializeAllocator();
+
+const bool kAlwaysClearMemory = true;
+
+struct ChunkMetadata {
+ u8 allocated : 8; // Must be first.
+ ChunkTag tag : 2;
+#if SANITIZER_WORDSIZE == 64
+ uptr requested_size : 54;
+#else
+ uptr requested_size : 32;
+ uptr padding : 22;
+#endif
+ u32 stack_trace_id;
+};
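[Editor's note: RegisterAllocation()/RegisterDeallocation() flip the allocated flag through an atomic_uint8_t cast, which only works because the flag occupies the first byte of the struct (hence the "Must be first" note). An illustrative compile-time check, assuming a 64-bit target and the ChunkMetadata/SANITIZER_WORDSIZE definitions from this header; it is not part of the runtime:]

// Illustrative only: 8 + 2 + 54 bits of bitfields fill exactly one 64-bit
// word, and stack_trace_id plus tail padding brings the struct to two words.
#if SANITIZER_WORDSIZE == 64
static_assert(sizeof(ChunkMetadata) == 16,
              "ChunkMetadata expected to pack into two 8-byte words");
#endif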
+
+#if defined(__mips64) || defined(__aarch64__) || defined(__i386__) || \
+ defined(__arm__)
+template <typename AddressSpaceViewTy>
+struct AP32 {
+ static const uptr kSpaceBeg = 0;
+ static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
+ static const uptr kMetadataSize = sizeof(ChunkMetadata);
+ typedef __sanitizer::CompactSizeClassMap SizeClassMap;
+ static const uptr kRegionSizeLog = 20;
+ using AddressSpaceView = AddressSpaceViewTy;
+ typedef NoOpMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+};
+template <typename AddressSpaceView>
+using PrimaryAllocatorASVT = SizeClassAllocator32<AP32<AddressSpaceView>>;
+using PrimaryAllocator = PrimaryAllocatorASVT<LocalAddressSpaceView>;
+#elif defined(__x86_64__) || defined(__powerpc64__)
+# if defined(__powerpc64__)
+const uptr kAllocatorSpace = 0xa0000000000ULL;
+const uptr kAllocatorSize = 0x20000000000ULL; // 2T.
+# else
+const uptr kAllocatorSpace = 0x600000000000ULL;
+const uptr kAllocatorSize = 0x40000000000ULL; // 4T.
+# endif
+template <typename AddressSpaceViewTy>
+struct AP64 { // Allocator64 parameters. Deliberately using a short name.
+ static const uptr kSpaceBeg = kAllocatorSpace;
+ static const uptr kSpaceSize = kAllocatorSize;
+ static const uptr kMetadataSize = sizeof(ChunkMetadata);
+ typedef DefaultSizeClassMap SizeClassMap;
+ typedef NoOpMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags = 0;
+ using AddressSpaceView = AddressSpaceViewTy;
+};
+
+template <typename AddressSpaceView>
+using PrimaryAllocatorASVT = SizeClassAllocator64<AP64<AddressSpaceView>>;
+using PrimaryAllocator = PrimaryAllocatorASVT<LocalAddressSpaceView>;
+#endif
+
+template <typename AddressSpaceView>
+using AllocatorASVT = CombinedAllocator<PrimaryAllocatorASVT<AddressSpaceView>>;
+using Allocator = AllocatorASVT<LocalAddressSpaceView>;
+using AllocatorCache = Allocator::AllocatorCache;
+
+Allocator::AllocatorCache *GetAllocatorCache();
+
+int lsan_posix_memalign(void **memptr, uptr alignment, uptr size,
+ const StackTrace &stack);
+void *lsan_aligned_alloc(uptr alignment, uptr size, const StackTrace &stack);
+void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack);
+void *lsan_malloc(uptr size, const StackTrace &stack);
+void lsan_free(void *p);
+void *lsan_realloc(void *p, uptr size, const StackTrace &stack);
+void *lsan_reallocarray(void *p, uptr nmemb, uptr size,
+ const StackTrace &stack);
+void *lsan_calloc(uptr nmemb, uptr size, const StackTrace &stack);
+void *lsan_valloc(uptr size, const StackTrace &stack);
+void *lsan_pvalloc(uptr size, const StackTrace &stack);
+uptr lsan_mz_size(const void *p);
+
+} // namespace __lsan
+
+#endif // LSAN_ALLOCATOR_H
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/lsan/lsan_common.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/lsan/lsan_common.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/lsan/lsan_common.cc (revision 351984)
@@ -0,0 +1,904 @@
+//=-- lsan_common.cc ------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// Implementation of common leak checking functionality.
+//
+//===----------------------------------------------------------------------===//
+
+#include "lsan_common.h"
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_flag_parser.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_procmaps.h"
+#include "sanitizer_common/sanitizer_report_decorator.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "sanitizer_common/sanitizer_suppressions.h"
+#include "sanitizer_common/sanitizer_thread_registry.h"
+#include "sanitizer_common/sanitizer_tls_get_addr.h"
+
+#if CAN_SANITIZE_LEAKS
+namespace __lsan {
+
+// This mutex is used to prevent races between DoLeakCheck and IgnoreObject, and
+// also to protect the global list of root regions.
+BlockingMutex global_mutex(LINKER_INITIALIZED);
+
+Flags lsan_flags;
+
+void DisableCounterUnderflow() {
+ if (common_flags()->detect_leaks) {
+ Report("Unmatched call to __lsan_enable().\n");
+ Die();
+ }
+}
+
+void Flags::SetDefaults() {
+#define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
+#include "lsan_flags.inc"
+#undef LSAN_FLAG
+}
+
+void RegisterLsanFlags(FlagParser *parser, Flags *f) {
+#define LSAN_FLAG(Type, Name, DefaultValue, Description) \
+ RegisterFlag(parser, #Name, Description, &f->Name);
+#include "lsan_flags.inc"
+#undef LSAN_FLAG
+}
+
+#define LOG_POINTERS(...) \
+ do { \
+ if (flags()->log_pointers) Report(__VA_ARGS__); \
+ } while (0)
+
+#define LOG_THREADS(...) \
+ do { \
+ if (flags()->log_threads) Report(__VA_ARGS__); \
+ } while (0)
+
+ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)];
+static SuppressionContext *suppression_ctx = nullptr;
+static const char kSuppressionLeak[] = "leak";
+static const char *kSuppressionTypes[] = { kSuppressionLeak };
+static const char kStdSuppressions[] =
+#if SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
+ // For more details refer to the SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
+ // definition.
+ "leak:*pthread_exit*\n"
+#endif // SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
+#if SANITIZER_MAC
+ // For Darwin and os_log/os_trace: https://reviews.llvm.org/D35173
+ "leak:*_os_trace*\n"
+#endif
+ // TLS leak in some glibc versions, described in
+ // https://sourceware.org/bugzilla/show_bug.cgi?id=12650.
+ "leak:*tls_get_addr*\n";
+
+void InitializeSuppressions() {
+ CHECK_EQ(nullptr, suppression_ctx);
+ suppression_ctx = new (suppression_placeholder) // NOLINT
+ SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
+ suppression_ctx->ParseFromFile(flags()->suppressions);
+ if (&__lsan_default_suppressions)
+ suppression_ctx->Parse(__lsan_default_suppressions());
+ suppression_ctx->Parse(kStdSuppressions);
+}
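[Editor's note: the aligned placeholder plus placement new above is how the runtime constructs a global without running a static constructor at load time. A minimal standalone sketch of the idiom, with hypothetical names:]

// Construct a global lazily into static storage, with no static constructor.
#include <new>

struct Ctx {
  int value;
  Ctx() : value(42) {}
};

alignas(64) static char ctx_placeholder[sizeof(Ctx)];
static Ctx *ctx = nullptr;

void InitCtx() { ctx = new (ctx_placeholder) Ctx(); }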
+
+static SuppressionContext *GetSuppressionContext() {
+ CHECK(suppression_ctx);
+ return suppression_ctx;
+}
+
+static InternalMmapVector<RootRegion> *root_regions;
+
+InternalMmapVector<RootRegion> const *GetRootRegions() { return root_regions; }
+
+void InitializeRootRegions() {
+ CHECK(!root_regions);
+ ALIGNED(64) static char placeholder[sizeof(InternalMmapVector<RootRegion>)];
+ root_regions = new (placeholder) InternalMmapVector<RootRegion>(); // NOLINT
+}
+
+const char *MaybeCallLsanDefaultOptions() {
+ return (&__lsan_default_options) ? __lsan_default_options() : "";
+}
+
+void InitCommonLsan() {
+ InitializeRootRegions();
+ if (common_flags()->detect_leaks) {
+ // Initialization which can fail or print warnings should only be done if
+ // LSan is actually enabled.
+ InitializeSuppressions();
+ InitializePlatformSpecificModules();
+ }
+}
+
+class Decorator: public __sanitizer::SanitizerCommonDecorator {
+ public:
+ Decorator() : SanitizerCommonDecorator() { }
+ const char *Error() { return Red(); }
+ const char *Leak() { return Blue(); }
+};
+
+static inline bool CanBeAHeapPointer(uptr p) {
+ // Since our heap is located in mmap-ed memory, we can assume a sensible lower
+ // bound on heap addresses.
+ const uptr kMinAddress = 4 * 4096;
+ if (p < kMinAddress) return false;
+#if defined(__x86_64__)
+ // Accept only canonical form user-space addresses.
+ return ((p >> 47) == 0);
+#elif defined(__mips64)
+ return ((p >> 40) == 0);
+#elif defined(__aarch64__)
+ unsigned runtimeVMA =
+ (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
+ return ((p >> runtimeVMA) == 0);
+#else
+ return true;
+#endif
+}
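[Editor's note: on x86_64 the canonical-form test above accepts only pointers whose top 17 bits are zero, i.e. user-space addresses. A quick illustration with boundary values:]

// Boundary values for the x86_64 check `(p >> 47) == 0`.
#include <cassert>
#include <cstdint>

int main() {
  auto can_be_user_ptr = [](uint64_t p) { return (p >> 47) == 0; };
  assert(can_be_user_ptr(0x00007fffffffffffULL));   // top of user space
  assert(!can_be_user_ptr(0x0000800000000000ULL));  // first non-canonical
  assert(!can_be_user_ptr(0xffff800000000000ULL));  // kernel half
  return 0;
}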
+
+// Scans the memory range, looking for byte patterns that point into allocator
+// chunks. Marks those chunks with |tag| and adds them to |frontier|.
+// There are two usage modes for this function: finding reachable chunks
+// (|tag| = kReachable) and finding indirectly leaked chunks
+// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
+// so |frontier| = 0.
+void ScanRangeForPointers(uptr begin, uptr end,
+ Frontier *frontier,
+ const char *region_type, ChunkTag tag) {
+ CHECK(tag == kReachable || tag == kIndirectlyLeaked);
+ const uptr alignment = flags()->pointer_alignment();
+ LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, begin, end);
+ uptr pp = begin;
+ if (pp % alignment)
+ pp = pp + alignment - pp % alignment;
+ for (; pp + sizeof(void *) <= end; pp += alignment) { // NOLINT
+ void *p = *reinterpret_cast<void **>(pp);
+ if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue;
+ uptr chunk = PointsIntoChunk(p);
+ if (!chunk) continue;
+ // Pointers to self don't count. This matters when tag == kIndirectlyLeaked.
+ if (chunk == begin) continue;
+ LsanMetadata m(chunk);
+ if (m.tag() == kReachable || m.tag() == kIgnored) continue;
+
+ // Do this check relatively late so we can log only the interesting cases.
+ if (!flags()->use_poisoned && WordIsPoisoned(pp)) {
+ LOG_POINTERS(
+ "%p is poisoned: ignoring %p pointing into chunk %p-%p of size "
+ "%zu.\n",
+ pp, p, chunk, chunk + m.requested_size(), m.requested_size());
+ continue;
+ }
+
+ m.set_tag(tag);
+ LOG_POINTERS("%p: found %p pointing into chunk %p-%p of size %zu.\n", pp, p,
+ chunk, chunk + m.requested_size(), m.requested_size());
+ if (frontier)
+ frontier->push_back(chunk);
+ }
+}
+
+// Scans a global range for pointers.
+void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier) {
+ uptr allocator_begin = 0, allocator_end = 0;
+ GetAllocatorGlobalRange(&allocator_begin, &allocator_end);
+ if (begin <= allocator_begin && allocator_begin < end) {
+ CHECK_LE(allocator_begin, allocator_end);
+ CHECK_LE(allocator_end, end);
+ if (begin < allocator_begin)
+ ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL",
+ kReachable);
+ if (allocator_end < end)
+ ScanRangeForPointers(allocator_end, end, frontier, "GLOBAL", kReachable);
+ } else {
+ ScanRangeForPointers(begin, end, frontier, "GLOBAL", kReachable);
+ }
+}
+
+void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg) {
+ Frontier *frontier = reinterpret_cast<Frontier *>(arg);
+ ScanRangeForPointers(begin, end, frontier, "FAKE STACK", kReachable);
+}
+
+// Scans thread data (stacks and TLS) for heap pointers.
+static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
+ Frontier *frontier) {
+ InternalMmapVector<uptr> registers(suspended_threads.RegisterCount());
+ uptr registers_begin = reinterpret_cast<uptr>(registers.data());
+ uptr registers_end =
+ reinterpret_cast<uptr>(registers.data() + registers.size());
+ for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) {
+ tid_t os_id = static_cast<tid_t>(suspended_threads.GetThreadID(i));
+ LOG_THREADS("Processing thread %d.\n", os_id);
+ uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
+ DTLS *dtls;
+ bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end,
+ &tls_begin, &tls_end,
+ &cache_begin, &cache_end, &dtls);
+ if (!thread_found) {
+ // If a thread can't be found in the thread registry, it's probably in the
+ // process of destruction. Log this event and move on.
+ LOG_THREADS("Thread %d not found in registry.\n", os_id);
+ continue;
+ }
+ uptr sp;
+ PtraceRegistersStatus have_registers =
+ suspended_threads.GetRegistersAndSP(i, registers.data(), &sp);
+ if (have_registers != REGISTERS_AVAILABLE) {
+ Report("Unable to get registers from thread %d.\n", os_id);
+ // If unable to get SP, consider the entire stack to be reachable unless
+ // GetRegistersAndSP failed with ESRCH.
+ if (have_registers == REGISTERS_UNAVAILABLE_FATAL) continue;
+ sp = stack_begin;
+ }
+
+ if (flags()->use_registers && have_registers)
+ ScanRangeForPointers(registers_begin, registers_end, frontier,
+ "REGISTERS", kReachable);
+
+ if (flags()->use_stacks) {
+ LOG_THREADS("Stack at %p-%p (SP = %p).\n", stack_begin, stack_end, sp);
+ if (sp < stack_begin || sp >= stack_end) {
+ // SP is outside the recorded stack range (e.g. the thread is running a
+ // signal handler on alternate stack, or swapcontext was used).
+ // Again, consider the entire stack range to be reachable.
+ LOG_THREADS("WARNING: stack pointer not in stack range.\n");
+ uptr page_size = GetPageSizeCached();
+ int skipped = 0;
+ while (stack_begin < stack_end &&
+ !IsAccessibleMemoryRange(stack_begin, 1)) {
+ skipped++;
+ stack_begin += page_size;
+ }
+ LOG_THREADS("Skipped %d guard page(s) to obtain stack %p-%p.\n",
+ skipped, stack_begin, stack_end);
+ } else {
+ // Shrink the stack range to ignore out-of-scope values.
+ stack_begin = sp;
+ }
+ ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
+ kReachable);
+ ForEachExtraStackRange(os_id, ForEachExtraStackRangeCb, frontier);
+ }
+
+ if (flags()->use_tls) {
+ if (tls_begin) {
+ LOG_THREADS("TLS at %p-%p.\n", tls_begin, tls_end);
+      // If the TLS and cache ranges don't overlap, scan the full TLS range;
+      // otherwise, scan only the non-overlapping portions.
+ if (cache_begin == cache_end || tls_end < cache_begin ||
+ tls_begin > cache_end) {
+ ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
+ } else {
+ if (tls_begin < cache_begin)
+ ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
+ kReachable);
+ if (tls_end > cache_end)
+ ScanRangeForPointers(cache_end, tls_end, frontier, "TLS",
+ kReachable);
+ }
+ }
+ if (dtls && !DTLSInDestruction(dtls)) {
+ for (uptr j = 0; j < dtls->dtv_size; ++j) {
+ uptr dtls_beg = dtls->dtv[j].beg;
+ uptr dtls_end = dtls_beg + dtls->dtv[j].size;
+ if (dtls_beg < dtls_end) {
+ LOG_THREADS("DTLS %zu at %p-%p.\n", j, dtls_beg, dtls_end);
+ ScanRangeForPointers(dtls_beg, dtls_end, frontier, "DTLS",
+ kReachable);
+ }
+ }
+ } else {
+ // We are handling a thread with DTLS under destruction. Log about
+ // this and continue.
+ LOG_THREADS("Thread %d has DTLS under destruction.\n", os_id);
+ }
+ }
+ }
+}
+
+void ScanRootRegion(Frontier *frontier, const RootRegion &root_region,
+ uptr region_begin, uptr region_end, bool is_readable) {
+ uptr intersection_begin = Max(root_region.begin, region_begin);
+ uptr intersection_end = Min(region_end, root_region.begin + root_region.size);
+ if (intersection_begin >= intersection_end) return;
+ LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
+ root_region.begin, root_region.begin + root_region.size,
+ region_begin, region_end,
+ is_readable ? "readable" : "unreadable");
+ if (is_readable)
+ ScanRangeForPointers(intersection_begin, intersection_end, frontier, "ROOT",
+ kReachable);
+}
+
+static void ProcessRootRegion(Frontier *frontier,
+ const RootRegion &root_region) {
+ MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
+ MemoryMappedSegment segment;
+ while (proc_maps.Next(&segment)) {
+ ScanRootRegion(frontier, root_region, segment.start, segment.end,
+ segment.IsReadable());
+ }
+}
+
+// Scans root regions for heap pointers.
+static void ProcessRootRegions(Frontier *frontier) {
+ if (!flags()->use_root_regions) return;
+ CHECK(root_regions);
+ for (uptr i = 0; i < root_regions->size(); i++) {
+ ProcessRootRegion(frontier, (*root_regions)[i]);
+ }
+}
+
+static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
+ while (frontier->size()) {
+ uptr next_chunk = frontier->back();
+ frontier->pop_back();
+ LsanMetadata m(next_chunk);
+ ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
+ "HEAP", tag);
+ }
+}
+
+// ForEachChunk callback. If the chunk is marked as leaked, marks all chunks
+// which are reachable from it as indirectly leaked.
+static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
+ chunk = GetUserBegin(chunk);
+ LsanMetadata m(chunk);
+ if (m.allocated() && m.tag() != kReachable) {
+ ScanRangeForPointers(chunk, chunk + m.requested_size(),
+ /* frontier */ nullptr, "HEAP", kIndirectlyLeaked);
+ }
+}
+
+// ForEachChunk callback. If chunk is marked as ignored, adds its address to
+// frontier.
+static void CollectIgnoredCb(uptr chunk, void *arg) {
+ CHECK(arg);
+ chunk = GetUserBegin(chunk);
+ LsanMetadata m(chunk);
+ if (m.allocated() && m.tag() == kIgnored) {
+ LOG_POINTERS("Ignored: chunk %p-%p of size %zu.\n",
+ chunk, chunk + m.requested_size(), m.requested_size());
+ reinterpret_cast<Frontier *>(arg)->push_back(chunk);
+ }
+}
+
+static uptr GetCallerPC(u32 stack_id, StackDepotReverseMap *map) {
+ CHECK(stack_id);
+ StackTrace stack = map->Get(stack_id);
+ // The top frame is our malloc/calloc/etc. The next frame is the caller.
+ if (stack.size >= 2)
+ return stack.trace[1];
+ return 0;
+}
+
+struct InvalidPCParam {
+ Frontier *frontier;
+ StackDepotReverseMap *stack_depot_reverse_map;
+ bool skip_linker_allocations;
+};
+
+// ForEachChunk callback. If the caller pc is invalid or is within the linker,
+// mark as reachable. Called by ProcessPlatformSpecificAllocations.
+static void MarkInvalidPCCb(uptr chunk, void *arg) {
+ CHECK(arg);
+ InvalidPCParam *param = reinterpret_cast<InvalidPCParam *>(arg);
+ chunk = GetUserBegin(chunk);
+ LsanMetadata m(chunk);
+ if (m.allocated() && m.tag() != kReachable && m.tag() != kIgnored) {
+ u32 stack_id = m.stack_trace_id();
+ uptr caller_pc = 0;
+ if (stack_id > 0)
+ caller_pc = GetCallerPC(stack_id, param->stack_depot_reverse_map);
+ // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
+ // it as reachable, as we can't properly report its allocation stack anyway.
+ if (caller_pc == 0 || (param->skip_linker_allocations &&
+ GetLinker()->containsAddress(caller_pc))) {
+ m.set_tag(kReachable);
+ param->frontier->push_back(chunk);
+ }
+ }
+}
+
+// On Linux, treats all chunks allocated from ld-linux.so as reachable, which
+// covers dynamically allocated TLS blocks, the dynamic loader's internal
+// bookkeeping for loaded modules, etc.
+// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
+// They are allocated with a __libc_memalign() call in allocate_and_init()
+// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
+// blocks, but we can make sure they come from our own allocator by intercepting
+// __libc_memalign(). On top of that, there is no easy way to reach them. Their
+// addresses are stored in a dynamically allocated array (the DTV) which is
+// referenced from the static TLS. Unfortunately, we can't just rely on the DTV
+// being reachable from the static TLS, and the dynamic TLS being reachable from
+// the DTV. This is because the initial DTV is allocated before our interception
+// mechanism kicks in, and thus we don't recognize it as allocated memory. We
+// can't special-case it either, since we don't know its size.
+// Our solution is to include in the root set all allocations made from
+// ld-linux.so (which is where allocate_and_init() is implemented). This is
+// guaranteed to include all dynamic TLS blocks (and possibly other allocations
+// which we don't care about).
+// On all other platforms, this simply checks to ensure that the caller pc is
+// valid before reporting chunks as leaked.
+void ProcessPC(Frontier *frontier) {
+ StackDepotReverseMap stack_depot_reverse_map;
+ InvalidPCParam arg;
+ arg.frontier = frontier;
+ arg.stack_depot_reverse_map = &stack_depot_reverse_map;
+ arg.skip_linker_allocations =
+ flags()->use_tls && flags()->use_ld_allocations && GetLinker() != nullptr;
+ ForEachChunk(MarkInvalidPCCb, &arg);
+}
+
+// Sets the appropriate tag on each chunk.
+static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
+ // Holds the flood fill frontier.
+ Frontier frontier;
+
+ ForEachChunk(CollectIgnoredCb, &frontier);
+ ProcessGlobalRegions(&frontier);
+ ProcessThreads(suspended_threads, &frontier);
+ ProcessRootRegions(&frontier);
+ FloodFillTag(&frontier, kReachable);
+
+ CHECK_EQ(0, frontier.size());
+ ProcessPC(&frontier);
+
+ // The check here is relatively expensive, so we do this in a separate flood
+ // fill. That way we can skip the check for chunks that are reachable
+ // otherwise.
+ LOG_POINTERS("Processing platform-specific allocations.\n");
+ ProcessPlatformSpecificAllocations(&frontier);
+ FloodFillTag(&frontier, kReachable);
+
+ // Iterate over leaked chunks and mark those that are reachable from other
+ // leaked chunks.
+ LOG_POINTERS("Scanning leaked chunks.\n");
+ ForEachChunk(MarkIndirectlyLeakedCb, nullptr);
+}
+
+// ForEachChunk callback. Resets the tags to pre-leak-check state.
+static void ResetTagsCb(uptr chunk, void *arg) {
+ (void)arg;
+ chunk = GetUserBegin(chunk);
+ LsanMetadata m(chunk);
+ if (m.allocated() && m.tag() != kIgnored)
+ m.set_tag(kDirectlyLeaked);
+}
+
+static void PrintStackTraceById(u32 stack_trace_id) {
+ CHECK(stack_trace_id);
+ StackDepotGet(stack_trace_id).Print();
+}
+
+// ForEachChunk callback. Aggregates information about unreachable chunks into
+// a LeakReport.
+static void CollectLeaksCb(uptr chunk, void *arg) {
+ CHECK(arg);
+ LeakReport *leak_report = reinterpret_cast<LeakReport *>(arg);
+ chunk = GetUserBegin(chunk);
+ LsanMetadata m(chunk);
+ if (!m.allocated()) return;
+ if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
+ u32 resolution = flags()->resolution;
+ u32 stack_trace_id = 0;
+ if (resolution > 0) {
+ StackTrace stack = StackDepotGet(m.stack_trace_id());
+ stack.size = Min(stack.size, resolution);
+ stack_trace_id = StackDepotPut(stack);
+ } else {
+ stack_trace_id = m.stack_trace_id();
+ }
+ leak_report->AddLeakedChunk(chunk, stack_trace_id, m.requested_size(),
+ m.tag());
+ }
+}
+
+static void PrintMatchedSuppressions() {
+ InternalMmapVector<Suppression *> matched;
+ GetSuppressionContext()->GetMatched(&matched);
+ if (!matched.size())
+ return;
+ const char *line = "-----------------------------------------------------";
+ Printf("%s\n", line);
+ Printf("Suppressions used:\n");
+ Printf(" count bytes template\n");
+ for (uptr i = 0; i < matched.size(); i++)
+ Printf("%7zu %10zu %s\n", static_cast<uptr>(atomic_load_relaxed(
+ &matched[i]->hit_count)), matched[i]->weight, matched[i]->templ);
+ Printf("%s\n\n", line);
+}
+
+struct CheckForLeaksParam {
+ bool success;
+ LeakReport leak_report;
+};
+
+static void ReportIfNotSuspended(ThreadContextBase *tctx, void *arg) {
+ const InternalMmapVector<tid_t> &suspended_threads =
+ *(const InternalMmapVector<tid_t> *)arg;
+ if (tctx->status == ThreadStatusRunning) {
+ uptr i = InternalLowerBound(suspended_threads, 0, suspended_threads.size(),
+ tctx->os_id, CompareLess<int>());
+ if (i >= suspended_threads.size() || suspended_threads[i] != tctx->os_id)
+ Report("Running thread %d was not suspended. False leaks are possible.\n",
+ tctx->os_id);
+  }
+}
+
+static void ReportUnsuspendedThreads(
+ const SuspendedThreadsList &suspended_threads) {
+ InternalMmapVector<tid_t> threads(suspended_threads.ThreadCount());
+ for (uptr i = 0; i < suspended_threads.ThreadCount(); ++i)
+ threads[i] = suspended_threads.GetThreadID(i);
+
+ Sort(threads.data(), threads.size());
+
+ GetThreadRegistryLocked()->RunCallbackForEachThreadLocked(
+ &ReportIfNotSuspended, &threads);
+}
+
+static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
+ void *arg) {
+ CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg);
+ CHECK(param);
+ CHECK(!param->success);
+ ReportUnsuspendedThreads(suspended_threads);
+ ClassifyAllChunks(suspended_threads);
+ ForEachChunk(CollectLeaksCb, &param->leak_report);
+ // Clean up for subsequent leak checks. This assumes we did not overwrite any
+ // kIgnored tags.
+ ForEachChunk(ResetTagsCb, nullptr);
+ param->success = true;
+}
+
+static bool CheckForLeaks() {
+ if (&__lsan_is_turned_off && __lsan_is_turned_off())
+ return false;
+ EnsureMainThreadIDIsCorrect();
+ CheckForLeaksParam param;
+ param.success = false;
+ LockThreadRegistry();
+ LockAllocator();
+ DoStopTheWorld(CheckForLeaksCallback, &param);
+ UnlockAllocator();
+ UnlockThreadRegistry();
+
+ if (!param.success) {
+ Report("LeakSanitizer has encountered a fatal error.\n");
+ Report(
+ "HINT: For debugging, try setting environment variable "
+ "LSAN_OPTIONS=verbosity=1:log_threads=1\n");
+ Report(
+ "HINT: LeakSanitizer does not work under ptrace (strace, gdb, etc)\n");
+ Die();
+ }
+ param.leak_report.ApplySuppressions();
+ uptr unsuppressed_count = param.leak_report.UnsuppressedLeakCount();
+ if (unsuppressed_count > 0) {
+ Decorator d;
+ Printf("\n"
+ "================================================================="
+ "\n");
+ Printf("%s", d.Error());
+ Report("ERROR: LeakSanitizer: detected memory leaks\n");
+ Printf("%s", d.Default());
+ param.leak_report.ReportTopLeaks(flags()->max_leaks);
+ }
+ if (common_flags()->print_suppressions)
+ PrintMatchedSuppressions();
+ if (unsuppressed_count > 0) {
+ param.leak_report.PrintSummary();
+ return true;
+ }
+ return false;
+}
+
+static bool has_reported_leaks = false;
+bool HasReportedLeaks() { return has_reported_leaks; }
+
+void DoLeakCheck() {
+ BlockingMutexLock l(&global_mutex);
+ static bool already_done;
+ if (already_done) return;
+ already_done = true;
+ has_reported_leaks = CheckForLeaks();
+ if (has_reported_leaks) HandleLeaks();
+}
+
+static int DoRecoverableLeakCheck() {
+ BlockingMutexLock l(&global_mutex);
+ bool have_leaks = CheckForLeaks();
+ return have_leaks ? 1 : 0;
+}
+
+void DoRecoverableLeakCheckVoid() { DoRecoverableLeakCheck(); }
+
+static Suppression *GetSuppressionForAddr(uptr addr) {
+ Suppression *s = nullptr;
+
+ // Suppress by module name.
+ SuppressionContext *suppressions = GetSuppressionContext();
+ if (const char *module_name =
+ Symbolizer::GetOrInit()->GetModuleNameForPc(addr))
+ if (suppressions->Match(module_name, kSuppressionLeak, &s))
+ return s;
+
+ // Suppress by file or function name.
+ SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr);
+ for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
+ if (suppressions->Match(cur->info.function, kSuppressionLeak, &s) ||
+ suppressions->Match(cur->info.file, kSuppressionLeak, &s)) {
+ break;
+ }
+ }
+ frames->ClearAll();
+ return s;
+}
+
+static Suppression *GetSuppressionForStack(u32 stack_trace_id) {
+ StackTrace stack = StackDepotGet(stack_trace_id);
+ for (uptr i = 0; i < stack.size; i++) {
+ Suppression *s = GetSuppressionForAddr(
+ StackTrace::GetPreviousInstructionPc(stack.trace[i]));
+ if (s) return s;
+ }
+ return nullptr;
+}
+
+///// LeakReport implementation. /////
+
+// A hard limit on the number of distinct leaks, to avoid quadratic complexity
+// in LeakReport::AddLeakedChunk(). We don't expect to ever see this many leaks
+// in real-world applications.
+// FIXME: Get rid of this limit by changing the implementation of LeakReport to
+// use a hash table.
+const uptr kMaxLeaksConsidered = 5000;
+
+void LeakReport::AddLeakedChunk(uptr chunk, u32 stack_trace_id,
+ uptr leaked_size, ChunkTag tag) {
+ CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
+ bool is_directly_leaked = (tag == kDirectlyLeaked);
+ uptr i;
+ for (i = 0; i < leaks_.size(); i++) {
+ if (leaks_[i].stack_trace_id == stack_trace_id &&
+ leaks_[i].is_directly_leaked == is_directly_leaked) {
+ leaks_[i].hit_count++;
+ leaks_[i].total_size += leaked_size;
+ break;
+ }
+ }
+ if (i == leaks_.size()) {
+ if (leaks_.size() == kMaxLeaksConsidered) return;
+ Leak leak = { next_id_++, /* hit_count */ 1, leaked_size, stack_trace_id,
+ is_directly_leaked, /* is_suppressed */ false };
+ leaks_.push_back(leak);
+ }
+ if (flags()->report_objects) {
+ LeakedObject obj = {leaks_[i].id, chunk, leaked_size};
+ leaked_objects_.push_back(obj);
+ }
+}
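[Editor's note: the FIXME above kMaxLeaksConsidered suggests replacing the linear scan in AddLeakedChunk with a hash table. A hypothetical sketch of the keying; std::unordered_map is used purely for illustration, since the runtime avoids standard containers:]

// Hypothetical aggregation index keyed on (stack_trace_id, is_directly_leaked),
// making leak aggregation amortized O(1) per chunk.
#include <cstddef>
#include <cstdint>
#include <unordered_map>

struct LeakKey {
  uint32_t stack_trace_id;
  bool is_directly_leaked;
  bool operator==(const LeakKey &o) const {
    return stack_trace_id == o.stack_trace_id &&
           is_directly_leaked == o.is_directly_leaked;
  }
};

struct LeakKeyHash {
  size_t operator()(const LeakKey &k) const {
    return k.stack_trace_id * 2u + (k.is_directly_leaked ? 1u : 0u);
  }
};

// Maps a key to an index into leaks_.
using LeakIndex = std::unordered_map<LeakKey, size_t, LeakKeyHash>;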
+
+static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
+ if (leak1.is_directly_leaked == leak2.is_directly_leaked)
+ return leak1.total_size > leak2.total_size;
+ else
+ return leak1.is_directly_leaked;
+}
+
+void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
+ CHECK(leaks_.size() <= kMaxLeaksConsidered);
+ Printf("\n");
+ if (leaks_.size() == kMaxLeaksConsidered)
+ Printf("Too many leaks! Only the first %zu leaks encountered will be "
+ "reported.\n",
+ kMaxLeaksConsidered);
+
+ uptr unsuppressed_count = UnsuppressedLeakCount();
+ if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count)
+ Printf("The %zu top leak(s):\n", num_leaks_to_report);
+ Sort(leaks_.data(), leaks_.size(), &LeakComparator);
+ uptr leaks_reported = 0;
+ for (uptr i = 0; i < leaks_.size(); i++) {
+ if (leaks_[i].is_suppressed) continue;
+ PrintReportForLeak(i);
+ leaks_reported++;
+ if (leaks_reported == num_leaks_to_report) break;
+ }
+ if (leaks_reported < unsuppressed_count) {
+ uptr remaining = unsuppressed_count - leaks_reported;
+ Printf("Omitting %zu more leak(s).\n", remaining);
+ }
+}
+
+void LeakReport::PrintReportForLeak(uptr index) {
+ Decorator d;
+ Printf("%s", d.Leak());
+ Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
+ leaks_[index].is_directly_leaked ? "Direct" : "Indirect",
+ leaks_[index].total_size, leaks_[index].hit_count);
+ Printf("%s", d.Default());
+
+ PrintStackTraceById(leaks_[index].stack_trace_id);
+
+ if (flags()->report_objects) {
+ Printf("Objects leaked above:\n");
+ PrintLeakedObjectsForLeak(index);
+ Printf("\n");
+ }
+}
+
+void LeakReport::PrintLeakedObjectsForLeak(uptr index) {
+ u32 leak_id = leaks_[index].id;
+ for (uptr j = 0; j < leaked_objects_.size(); j++) {
+ if (leaked_objects_[j].leak_id == leak_id)
+ Printf("%p (%zu bytes)\n", leaked_objects_[j].addr,
+ leaked_objects_[j].size);
+ }
+}
+
+void LeakReport::PrintSummary() {
+ CHECK(leaks_.size() <= kMaxLeaksConsidered);
+ uptr bytes = 0, allocations = 0;
+ for (uptr i = 0; i < leaks_.size(); i++) {
+ if (leaks_[i].is_suppressed) continue;
+ bytes += leaks_[i].total_size;
+ allocations += leaks_[i].hit_count;
+ }
+ InternalScopedString summary(kMaxSummaryLength);
+ summary.append("%zu byte(s) leaked in %zu allocation(s).", bytes,
+ allocations);
+ ReportErrorSummary(summary.data());
+}
+
+void LeakReport::ApplySuppressions() {
+ for (uptr i = 0; i < leaks_.size(); i++) {
+ Suppression *s = GetSuppressionForStack(leaks_[i].stack_trace_id);
+ if (s) {
+ s->weight += leaks_[i].total_size;
+ atomic_store_relaxed(&s->hit_count, atomic_load_relaxed(&s->hit_count) +
+ leaks_[i].hit_count);
+ leaks_[i].is_suppressed = true;
+ }
+ }
+}
+
+uptr LeakReport::UnsuppressedLeakCount() {
+ uptr result = 0;
+ for (uptr i = 0; i < leaks_.size(); i++)
+ if (!leaks_[i].is_suppressed) result++;
+ return result;
+}
+
+} // namespace __lsan
+#else // CAN_SANITIZE_LEAKS
+namespace __lsan {
+void InitCommonLsan() { }
+void DoLeakCheck() { }
+void DoRecoverableLeakCheckVoid() { }
+void DisableInThisThread() { }
+void EnableInThisThread() { }
+}
+#endif // CAN_SANITIZE_LEAKS
+
+using namespace __lsan; // NOLINT
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+void __lsan_ignore_object(const void *p) {
+#if CAN_SANITIZE_LEAKS
+ if (!common_flags()->detect_leaks)
+ return;
+ // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is not
+ // locked.
+ BlockingMutexLock l(&global_mutex);
+ IgnoreObjectResult res = IgnoreObjectLocked(p);
+ if (res == kIgnoreObjectInvalid)
+ VReport(1, "__lsan_ignore_object(): no heap object found at %p", p);
+ if (res == kIgnoreObjectAlreadyIgnored)
+ VReport(1, "__lsan_ignore_object(): "
+ "heap object at %p is already being ignored\n", p);
+ if (res == kIgnoreObjectSuccess)
+ VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);
+#endif // CAN_SANITIZE_LEAKS
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __lsan_register_root_region(const void *begin, uptr size) {
+#if CAN_SANITIZE_LEAKS
+ BlockingMutexLock l(&global_mutex);
+ CHECK(root_regions);
+ RootRegion region = {reinterpret_cast<uptr>(begin), size};
+ root_regions->push_back(region);
+ VReport(1, "Registered root region at %p of size %llu\n", begin, size);
+#endif // CAN_SANITIZE_LEAKS
+}
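[Editor's note: typical client-side use of the interface above. This is a sketch; real code would include <sanitizer/lsan_interface.h> rather than redeclaring the function, and the arena is hypothetical:]

// Publish a custom arena as a root region so pointers stored in it keep
// their targets reachable.
#include <cstddef>

extern "C" void __lsan_register_root_region(const void *begin, size_t size);

static char g_arena[1 << 20];

void PublishArena() {
  __lsan_register_root_region(g_arena, sizeof(g_arena));
}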
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __lsan_unregister_root_region(const void *begin, uptr size) {
+#if CAN_SANITIZE_LEAKS
+ BlockingMutexLock l(&global_mutex);
+ CHECK(root_regions);
+ bool removed = false;
+ for (uptr i = 0; i < root_regions->size(); i++) {
+ RootRegion region = (*root_regions)[i];
+ if (region.begin == reinterpret_cast<uptr>(begin) && region.size == size) {
+ removed = true;
+ uptr last_index = root_regions->size() - 1;
+ (*root_regions)[i] = (*root_regions)[last_index];
+ root_regions->pop_back();
+ VReport(1, "Unregistered root region at %p of size %llu\n", begin, size);
+ break;
+ }
+ }
+ if (!removed) {
+ Report(
+ "__lsan_unregister_root_region(): region at %p of size %llu has not "
+ "been registered.\n",
+ begin, size);
+ Die();
+ }
+#endif // CAN_SANITIZE_LEAKS
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __lsan_disable() {
+#if CAN_SANITIZE_LEAKS
+ __lsan::DisableInThisThread();
+#endif
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __lsan_enable() {
+#if CAN_SANITIZE_LEAKS
+ __lsan::EnableInThisThread();
+#endif
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __lsan_do_leak_check() {
+#if CAN_SANITIZE_LEAKS
+ if (common_flags()->detect_leaks)
+ __lsan::DoLeakCheck();
+#endif // CAN_SANITIZE_LEAKS
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE
+int __lsan_do_recoverable_leak_check() {
+#if CAN_SANITIZE_LEAKS
+ if (common_flags()->detect_leaks)
+ return __lsan::DoRecoverableLeakCheck();
+#endif // CAN_SANITIZE_LEAKS
+ return 0;
+}
+
+#if !SANITIZER_SUPPORTS_WEAK_HOOKS
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+const char * __lsan_default_options() {
+ return "";
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+int __lsan_is_turned_off() {
+ return 0;
+}
+
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+const char *__lsan_default_suppressions() {
+ return "";
+}
+#endif
+} // extern "C"
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/lsan/lsan_common.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/lsan/lsan_common.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/lsan/lsan_common.h (revision 351984)
@@ -0,0 +1,266 @@
+//=-- lsan_common.h -------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// Private LSan header.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LSAN_COMMON_H
+#define LSAN_COMMON_H
+
+#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_platform.h"
+#include "sanitizer_common/sanitizer_stoptheworld.h"
+#include "sanitizer_common/sanitizer_symbolizer.h"
+
+// LeakSanitizer relies on some Glibc internals (e.g. TLS machinery) on Linux.
+// Also, LSan is a poor fit for 32-bit architectures, because their "small"
+// (4-byte) pointers lead to a high false-negative ratio on large leaks. But we
+// still want to have it for some 32-bit arches (e.g. x86), see
+// https://github.com/google/sanitizers/issues/403.
+// To enable LeakSanitizer on a new architecture, one needs to implement the
+// internal_clone function as well as (probably) adjust the TLS machinery for
+// the new architecture inside the sanitizer library.
+#if (SANITIZER_LINUX && !SANITIZER_ANDROID || SANITIZER_MAC) && \
+ (SANITIZER_WORDSIZE == 64) && \
+ (defined(__x86_64__) || defined(__mips64) || defined(__aarch64__) || \
+ defined(__powerpc64__))
+#define CAN_SANITIZE_LEAKS 1
+#elif defined(__i386__) && \
+ (SANITIZER_LINUX && !SANITIZER_ANDROID || SANITIZER_MAC)
+#define CAN_SANITIZE_LEAKS 1
+#elif defined(__arm__) && \
+ SANITIZER_LINUX && !SANITIZER_ANDROID
+#define CAN_SANITIZE_LEAKS 1
+#elif SANITIZER_NETBSD
+#define CAN_SANITIZE_LEAKS 1
+#else
+#define CAN_SANITIZE_LEAKS 0
+#endif
+
+namespace __sanitizer {
+class FlagParser;
+class ThreadRegistry;
+struct DTLS;
+}
+
+namespace __lsan {
+
+// Chunk tags.
+enum ChunkTag {
+ kDirectlyLeaked = 0, // default
+ kIndirectlyLeaked = 1,
+ kReachable = 2,
+ kIgnored = 3
+};
+
+const u32 kInvalidTid = (u32) -1;
+
+struct Flags {
+#define LSAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
+#include "lsan_flags.inc"
+#undef LSAN_FLAG
+
+ void SetDefaults();
+ uptr pointer_alignment() const {
+ return use_unaligned ? 1 : sizeof(uptr);
+ }
+};
+
+extern Flags lsan_flags;
+inline Flags *flags() { return &lsan_flags; }
+void RegisterLsanFlags(FlagParser *parser, Flags *f);
+
+struct Leak {
+ u32 id;
+ uptr hit_count;
+ uptr total_size;
+ u32 stack_trace_id;
+ bool is_directly_leaked;
+ bool is_suppressed;
+};
+
+struct LeakedObject {
+ u32 leak_id;
+ uptr addr;
+ uptr size;
+};
+
+// Aggregates leaks by stack trace prefix.
+class LeakReport {
+ public:
+ LeakReport() {}
+ void AddLeakedChunk(uptr chunk, u32 stack_trace_id, uptr leaked_size,
+ ChunkTag tag);
+ void ReportTopLeaks(uptr max_leaks);
+ void PrintSummary();
+ void ApplySuppressions();
+ uptr UnsuppressedLeakCount();
+
+ private:
+ void PrintReportForLeak(uptr index);
+ void PrintLeakedObjectsForLeak(uptr index);
+
+ u32 next_id_ = 0;
+ InternalMmapVector<Leak> leaks_;
+ InternalMmapVector<LeakedObject> leaked_objects_;
+};
+
+typedef InternalMmapVector<uptr> Frontier;
+
+// Platform-specific functions.
+void InitializePlatformSpecificModules();
+void ProcessGlobalRegions(Frontier *frontier);
+void ProcessPlatformSpecificAllocations(Frontier *frontier);
+
+struct RootRegion {
+ uptr begin;
+ uptr size;
+};
+
+InternalMmapVector<RootRegion> const *GetRootRegions();
+void ScanRootRegion(Frontier *frontier, RootRegion const &region,
+ uptr region_begin, uptr region_end, bool is_readable);
+// Run stoptheworld while holding any platform-specific locks.
+void DoStopTheWorld(StopTheWorldCallback callback, void* argument);
+
+void ScanRangeForPointers(uptr begin, uptr end,
+ Frontier *frontier,
+ const char *region_type, ChunkTag tag);
+void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier);
+
+enum IgnoreObjectResult {
+ kIgnoreObjectSuccess,
+ kIgnoreObjectAlreadyIgnored,
+ kIgnoreObjectInvalid
+};
+
+// Functions called from the parent tool.
+const char *MaybeCallLsanDefaultOptions();
+void InitCommonLsan();
+void DoLeakCheck();
+void DoRecoverableLeakCheckVoid();
+void DisableCounterUnderflow();
+bool DisabledInThisThread();
+
+// Used to implement __lsan::ScopedDisabler.
+void DisableInThisThread();
+void EnableInThisThread();
+// Can be used to ignore memory allocated by an intercepted
+// function.
+struct ScopedInterceptorDisabler {
+ ScopedInterceptorDisabler() { DisableInThisThread(); }
+ ~ScopedInterceptorDisabler() { EnableInThisThread(); }
+};
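+// A usage sketch (this mirrors the standalone pthread_create interceptor;
+// allocations made while the disabler is alive are marked as ignored):
+//   { ScopedInterceptorDisabler disabler;
+//     res = REAL(pthread_create)(th, attr, thread_start_func, &p); }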
+
+// According to the Itanium C++ ABI, an array cookie is a single word
+// containing the size of the allocated array.
+static inline bool IsItaniumABIArrayCookie(uptr chunk_beg, uptr chunk_size,
+ uptr addr) {
+ return chunk_size == sizeof(uptr) && chunk_beg + chunk_size == addr &&
+ *reinterpret_cast<uptr *>(chunk_beg) == 0;
+}
+
+// According to the ARM C++ ABI, an array cookie consists of two words:
+// struct array_cookie {
+// std::size_t element_size; // element_size != 0
+// std::size_t element_count;
+// };
+static inline bool IsARMABIArrayCookie(uptr chunk_beg, uptr chunk_size,
+ uptr addr) {
+ return chunk_size == 2 * sizeof(uptr) && chunk_beg + chunk_size == addr &&
+ *reinterpret_cast<uptr *>(chunk_beg + sizeof(uptr)) == 0;
+}
+
+// Special case for "new T[0]" where T is a type with DTOR.
+// new T[0] will allocate a cookie (one or two words) for the array size (0)
+// and store a pointer to the end of allocated chunk. The actual cookie layout
+// varies between platforms according to their C++ ABI implementation.
+inline bool IsSpecialCaseOfOperatorNew0(uptr chunk_beg, uptr chunk_size,
+ uptr addr) {
+#if defined(__arm__)
+ return IsARMABIArrayCookie(chunk_beg, chunk_size, addr);
+#else
+ return IsItaniumABIArrayCookie(chunk_beg, chunk_size, addr);
+#endif
+}
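+// Illustration (Itanium case): "new T[0]" allocates a sizeof(uptr) chunk
+// holding only the cookie word 0 and returns chunk_beg + sizeof(uptr), a
+// pointer one past the end of the chunk; that is exactly the pattern the
+// checks above accept.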
+
+// The following must be implemented in the parent tool.
+
+void ForEachChunk(ForEachChunkCallback callback, void *arg);
+// Returns the address range occupied by the global allocator object.
+void GetAllocatorGlobalRange(uptr *begin, uptr *end);
+// Wrappers for allocator's ForceLock()/ForceUnlock().
+void LockAllocator();
+void UnlockAllocator();
+// Returns true if [addr, addr + sizeof(void *)) is poisoned.
+bool WordIsPoisoned(uptr addr);
+// Wrappers for ThreadRegistry access.
+void LockThreadRegistry();
+void UnlockThreadRegistry();
+ThreadRegistry *GetThreadRegistryLocked();
+bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
+ uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
+ uptr *cache_end, DTLS **dtls);
+void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback,
+ void *arg);
+// If called from the main thread, updates the main thread's TID in the thread
+// registry. We need this to handle processes that fork() without a subsequent
+// exec(), which invalidates the recorded TID. To update it, we must call
+// gettid() from the main thread. Our solution is to call this function before
+// leak checking and also before every call to pthread_create() (to handle cases
+// where leak checking is initiated from a non-main thread).
+void EnsureMainThreadIDIsCorrect();
+// If p points into a chunk that has been allocated to the user, returns its
+// user-visible address. Otherwise, returns 0.
+uptr PointsIntoChunk(void *p);
+// Returns address of user-visible chunk contained in this allocator chunk.
+uptr GetUserBegin(uptr chunk);
+// Helper for __lsan_ignore_object().
+IgnoreObjectResult IgnoreObjectLocked(const void *p);
+
+// Return the linker module, if valid for the platform.
+LoadedModule *GetLinker();
+
+// Return true if LSan has finished leak checking and reported leaks.
+bool HasReportedLeaks();
+
+// Run platform-specific leak handlers.
+void HandleLeaks();
+
+// Wrapper for chunk metadata operations.
+class LsanMetadata {
+ public:
+ // Constructor accepts address of user-visible chunk.
+ explicit LsanMetadata(uptr chunk);
+ bool allocated() const;
+ ChunkTag tag() const;
+ void set_tag(ChunkTag value);
+ uptr requested_size() const;
+ u32 stack_trace_id() const;
+ private:
+ void *metadata_;
+};
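+// A sketch of how the common module consumes this wrapper from a
+// ForEachChunk() callback (hypothetical glue code, but the names match the
+// declarations above):
+//   uptr chunk = GetUserBegin(raw_chunk);
+//   LsanMetadata m(chunk);
+//   if (m.allocated() && m.tag() == kDirectlyLeaked)
+//     leak_report->AddLeakedChunk(chunk, m.stack_trace_id(),
+//                                 m.requested_size(), m.tag());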
+
+} // namespace __lsan
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+const char *__lsan_default_options();
+
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+int __lsan_is_turned_off();
+
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+const char *__lsan_default_suppressions();
+} // extern "C"
+
+#endif // LSAN_COMMON_H
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/lsan/lsan_common_linux.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/lsan/lsan_common_linux.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/lsan/lsan_common_linux.cc (revision 351984)
@@ -0,0 +1,140 @@
+//=-- lsan_common_linux.cc ------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// Implementation of common leak checking functionality. Linux/NetBSD-specific
+// code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#include "lsan_common.h"
+
+#if CAN_SANITIZE_LEAKS && (SANITIZER_LINUX || SANITIZER_NETBSD)
+#include <link.h>
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_getauxval.h"
+#include "sanitizer_common/sanitizer_linux.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
+
+namespace __lsan {
+
+static const char kLinkerName[] = "ld";
+
+static char linker_placeholder[sizeof(LoadedModule)] ALIGNED(64);
+static LoadedModule *linker = nullptr;
+
+static bool IsLinker(const LoadedModule& module) {
+#if SANITIZER_USE_GETAUXVAL
+ return module.base_address() == getauxval(AT_BASE);
+#else
+ return LibraryNameIs(module.full_name(), kLinkerName);
+#endif // SANITIZER_USE_GETAUXVAL
+}
+
+__attribute__((tls_model("initial-exec")))
+THREADLOCAL int disable_counter;
+bool DisabledInThisThread() { return disable_counter > 0; }
+void DisableInThisThread() { disable_counter++; }
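+// A zero counter here means unbalanced enable/disable calls; report the
+// underflow instead of silently wrapping around.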
+void EnableInThisThread() {
+ if (disable_counter == 0) {
+ DisableCounterUnderflow();
+ }
+ disable_counter--;
+}
+
+void InitializePlatformSpecificModules() {
+ ListOfModules modules;
+ modules.init();
+ for (LoadedModule &module : modules) {
+ if (!IsLinker(module))
+ continue;
+ if (linker == nullptr) {
+ linker = reinterpret_cast<LoadedModule *>(linker_placeholder);
+ *linker = module;
+ module = LoadedModule();
+ } else {
+ VReport(1, "LeakSanitizer: Multiple modules match \"%s\". "
+ "TLS and other allocations originating from linker might be "
+ "falsely reported as leaks.\n", kLinkerName);
+ linker->clear();
+ linker = nullptr;
+ return;
+ }
+ }
+ if (linker == nullptr) {
+ VReport(1, "LeakSanitizer: Dynamic linker not found. TLS and other "
+ "allocations originating from linker might be falsely reported "
+ "as leaks.\n");
+ }
+}
+
+static int ProcessGlobalRegionsCallback(struct dl_phdr_info *info, size_t size,
+ void *data) {
+ Frontier *frontier = reinterpret_cast<Frontier *>(data);
+ for (uptr j = 0; j < info->dlpi_phnum; j++) {
+ const ElfW(Phdr) *phdr = &(info->dlpi_phdr[j]);
+ // We're looking for .data and .bss sections, which reside in writable,
+ // loadable segments.
+ if (!(phdr->p_flags & PF_W) || (phdr->p_type != PT_LOAD) ||
+ (phdr->p_memsz == 0))
+ continue;
+ uptr begin = info->dlpi_addr + phdr->p_vaddr;
+ uptr end = begin + phdr->p_memsz;
+ ScanGlobalRange(begin, end, frontier);
+ }
+ return 0;
+}
+
+// Scans global variables for heap pointers.
+void ProcessGlobalRegions(Frontier *frontier) {
+ if (!flags()->use_globals) return;
+ dl_iterate_phdr(ProcessGlobalRegionsCallback, frontier);
+}
+
+LoadedModule *GetLinker() { return linker; }
+
+void ProcessPlatformSpecificAllocations(Frontier *frontier) {}
+
+struct DoStopTheWorldParam {
+ StopTheWorldCallback callback;
+ void *argument;
+};
+
+// While calling Die() here is undefined behavior and can potentially
+// cause race conditions, it isn't possible to intercept exit on Linux,
+// so we have no choice but to call Die() from the atexit handler.
+void HandleLeaks() {
+ if (common_flags()->exitcode) Die();
+}
+
+static int DoStopTheWorldCallback(struct dl_phdr_info *info, size_t size,
+ void *data) {
+ DoStopTheWorldParam *param = reinterpret_cast<DoStopTheWorldParam *>(data);
+ StopTheWorld(param->callback, param->argument);
+ return 1;
+}
+
+// LSan calls dl_iterate_phdr() from the tracer task. This may deadlock: if one
+// of the threads is frozen while holding the libdl lock, the tracer will hang
+// in dl_iterate_phdr() forever.
+// Luckily, (a) the lock is reentrant and (b) libc can't distinguish between the
+// tracer task and the thread that spawned it. Thus, if we run the tracer task
+// while holding the libdl lock in the parent thread, we can safely reenter it
+// in the tracer. The solution is to run stoptheworld from a dl_iterate_phdr()
+// callback in the parent thread.
+void DoStopTheWorld(StopTheWorldCallback callback, void *argument) {
+ DoStopTheWorldParam param = {callback, argument};
+ dl_iterate_phdr(DoStopTheWorldCallback, &param);
+}
+
+} // namespace __lsan
+
+#endif
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/lsan/lsan_common_mac.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/lsan/lsan_common_mac.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/lsan/lsan_common_mac.cc (revision 351984)
@@ -0,0 +1,202 @@
+//=-- lsan_common_mac.cc --------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// Implementation of common leak checking functionality. Darwin-specific code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "lsan_common.h"
+
+#if CAN_SANITIZE_LEAKS && SANITIZER_MAC
+
+#include "sanitizer_common/sanitizer_allocator_internal.h"
+#include "lsan_allocator.h"
+
+#include <pthread.h>
+
+#include <mach/mach.h>
+
+// Only introduced in Mac OS X 10.9.
+#ifdef VM_MEMORY_OS_ALLOC_ONCE
+static const int kSanitizerVmMemoryOsAllocOnce = VM_MEMORY_OS_ALLOC_ONCE;
+#else
+static const int kSanitizerVmMemoryOsAllocOnce = 73;
+#endif
+
+namespace __lsan {
+
+typedef struct {
+ int disable_counter;
+ u32 current_thread_id;
+ AllocatorCache cache;
+} thread_local_data_t;
+
+static pthread_key_t key;
+static pthread_once_t key_once = PTHREAD_ONCE_INIT;
+
+// The main thread destructor requires the current thread id, so we can't
+// destroy the per-thread data until the id has been used and reset to the
+// invalid tid.
+void restore_tid_data(void *ptr) {
+ thread_local_data_t *data = (thread_local_data_t *)ptr;
+ if (data->current_thread_id != kInvalidTid)
+ pthread_setspecific(key, data);
+}
+
+static void make_tls_key() {
+ CHECK_EQ(pthread_key_create(&key, restore_tid_data), 0);
+}
+
+static thread_local_data_t *get_tls_val(bool alloc) {
+ pthread_once(&key_once, make_tls_key);
+
+ thread_local_data_t *ptr = (thread_local_data_t *)pthread_getspecific(key);
+ if (ptr == NULL && alloc) {
+ ptr = (thread_local_data_t *)InternalAlloc(sizeof(*ptr));
+ ptr->disable_counter = 0;
+ ptr->current_thread_id = kInvalidTid;
+ ptr->cache = AllocatorCache();
+ pthread_setspecific(key, ptr);
+ }
+
+ return ptr;
+}
+
+bool DisabledInThisThread() {
+ thread_local_data_t *data = get_tls_val(false);
+ return data ? data->disable_counter > 0 : false;
+}
+
+void DisableInThisThread() { ++get_tls_val(true)->disable_counter; }
+
+void EnableInThisThread() {
+ int *disable_counter = &get_tls_val(true)->disable_counter;
+ if (*disable_counter == 0) {
+ DisableCounterUnderflow();
+ }
+ --*disable_counter;
+}
+
+u32 GetCurrentThread() {
+ thread_local_data_t *data = get_tls_val(false);
+ return data ? data->current_thread_id : kInvalidTid;
+}
+
+void SetCurrentThread(u32 tid) { get_tls_val(true)->current_thread_id = tid; }
+
+AllocatorCache *GetAllocatorCache() { return &get_tls_val(true)->cache; }
+
+LoadedModule *GetLinker() { return nullptr; }
+
+// Required on Linux for initialization of TLS behavior, but should not be
+// required on Darwin.
+void InitializePlatformSpecificModules() {}
+
+// Sections which can't contain global pointers. This list errs on the
+// side of caution to avoid false positives, at the expense of performance.
+//
+// Other potentially safe sections include:
+// __all_image_info, __crash_info, __const, __got, __interpose, __objc_msg_break
+//
+// Sections which definitely cannot be included here are:
+// __objc_data, __objc_const, __data, __bss, __common, __thread_data,
+// __thread_bss, __thread_vars, __objc_opt_rw, __objc_opt_ptrs
+static const char *kSkippedSecNames[] = {
+ "__cfstring", "__la_symbol_ptr", "__mod_init_func",
+ "__mod_term_func", "__nl_symbol_ptr", "__objc_classlist",
+ "__objc_classrefs", "__objc_imageinfo", "__objc_nlclslist",
+ "__objc_protolist", "__objc_selrefs", "__objc_superrefs"};
+
+// Scans global variables for heap pointers.
+void ProcessGlobalRegions(Frontier *frontier) {
+ for (auto name : kSkippedSecNames)
+ CHECK(internal_strnlen(name, kMaxSegName + 1) <= kMaxSegName);
+
+ MemoryMappingLayout memory_mapping(false);
+ InternalMmapVector<LoadedModule> modules;
+ modules.reserve(128);
+ memory_mapping.DumpListOfModules(&modules);
+ for (uptr i = 0; i < modules.size(); ++i) {
+ // Even when global scanning is disabled, we still need to scan
+ // system libraries for stashed pointers.
+ if (!flags()->use_globals && modules[i].instrumented()) continue;
+
+ for (const __sanitizer::LoadedModule::AddressRange &range :
+ modules[i].ranges()) {
+ // Sections storing global variables are writable and non-executable
+ if (range.executable || !range.writable) continue;
+
+ // Note: a bare "continue" in the name loop would only advance that loop,
+ // so record the match and skip the whole section range instead.
+ bool is_skipped = false;
+ for (auto name : kSkippedSecNames)
+ if (!internal_strcmp(range.name, name)) { is_skipped = true; break; }
+ if (is_skipped) continue;
+
+ ScanGlobalRange(range.beg, range.end, frontier);
+ }
+ }
+}
+
+void ProcessPlatformSpecificAllocations(Frontier *frontier) {
+ unsigned depth = 1;
+ vm_size_t size = 0;
+ vm_address_t address = 0;
+ kern_return_t err = KERN_SUCCESS;
+ mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
+
+ InternalMmapVector<RootRegion> const *root_regions = GetRootRegions();
+
+ while (err == KERN_SUCCESS) {
+ struct vm_region_submap_info_64 info;
+ err = vm_region_recurse_64(mach_task_self(), &address, &size, &depth,
+ (vm_region_info_t)&info, &count);
+
+ uptr end_address = address + size;
+
+ // libxpc stashes some pointers in the Kernel Alloc Once page; make sure
+ // not to report those as leaks.
+ if (info.user_tag == kSanitizerVmMemoryOsAllocOnce) {
+ ScanRangeForPointers(address, end_address, frontier, "GLOBAL",
+ kReachable);
+
+ // Recursing over the full memory map is very slow, break out
+ // early if we don't need the full iteration.
+ if (!flags()->use_root_regions || !root_regions->size())
+ break;
+ }
+
+ // This additional root region scan is required on Darwin in order to
+ // detect root regions contained within mmap'd memory regions, because
+ // the Darwin implementation of sanitizer_procmaps traverses images
+ // as loaded by dyld, and not the complete set of all memory regions.
+ //
+ // TODO(fjricci) - remove this once sanitizer_procmaps_mac has the same
+ // behavior as sanitizer_procmaps_linux and traverses all memory regions
+ if (flags()->use_root_regions) {
+ for (uptr i = 0; i < root_regions->size(); i++) {
+ ScanRootRegion(frontier, (*root_regions)[i], address, end_address,
+ info.protection & kProtectionRead);
+ }
+ }
+
+ address = end_address;
+ }
+}
+
+// On Darwin, we can intercept _exit gracefully and return a failing exit code
+// if required at that point. Calling Die() here is undefined behavior and
+// causes rare race conditions.
+void HandleLeaks() {}
+
+void DoStopTheWorld(StopTheWorldCallback callback, void *argument) {
+ StopTheWorld(callback, argument);
+}
+
+} // namespace __lsan
+
+#endif // CAN_SANITIZE_LEAKS && SANITIZER_MAC
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/lsan/lsan_common_mac.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/lsan/lsan_flags.inc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/lsan/lsan_flags.inc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/lsan/lsan_flags.inc (revision 351984)
@@ -0,0 +1,46 @@
+//===-- lsan_flags.inc ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// LSan runtime flags.
+//
+//===----------------------------------------------------------------------===//
+#ifndef LSAN_FLAG
+# error "Define LSAN_FLAG prior to including this file!"
+#endif
+
+// LSAN_FLAG(Type, Name, DefaultValue, Description)
+// See COMMON_FLAG in sanitizer_flags.inc for more details.
+
+LSAN_FLAG(bool, report_objects, false,
+ "Print addresses of leaked objects after main leak report.")
+LSAN_FLAG(
+ int, resolution, 0,
+ "Aggregate two objects into one leak if this many stack frames match. If "
+ "zero, the entire stack trace must match.")
+LSAN_FLAG(int, max_leaks, 0,
+          "The maximum number of leaks reported (0 means no limit).")
+
+// Flags controlling the root set of reachable memory.
+LSAN_FLAG(bool, use_globals, true,
+ "Root set: include global variables (.data and .bss)")
+LSAN_FLAG(bool, use_stacks, true, "Root set: include thread stacks")
+LSAN_FLAG(bool, use_registers, true, "Root set: include thread registers")
+LSAN_FLAG(bool, use_tls, true,
+ "Root set: include TLS and thread-specific storage")
+LSAN_FLAG(bool, use_root_regions, true,
+ "Root set: include regions added via __lsan_register_root_region().")
+LSAN_FLAG(bool, use_ld_allocations, true,
+ "Root set: mark as reachable all allocations made from dynamic "
+ "linker. This was the old way to handle dynamic TLS, and will "
+ "be removed soon. Do not use this flag.")
+
+LSAN_FLAG(bool, use_unaligned, false, "Consider unaligned pointers valid.")
+LSAN_FLAG(bool, use_poisoned, false,
+ "Consider pointers found in poisoned memory to be valid.")
+LSAN_FLAG(bool, log_pointers, false, "Debug logging of scanned pointers.")
+LSAN_FLAG(bool, log_threads, false, "Debug logging of thread scanning.")
+LSAN_FLAG(const char *, suppressions, "", "Suppressions file name.")
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/lsan/lsan_flags.inc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/lsan/lsan_interceptors.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/lsan/lsan_interceptors.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/lsan/lsan_interceptors.cc (revision 351984)
@@ -0,0 +1,465 @@
+//=-- lsan_interceptors.cc ------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// Interceptors for standalone LSan.
+//
+//===----------------------------------------------------------------------===//
+
+#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_allocator_report.h"
+#include "sanitizer_common/sanitizer_atomic.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_linux.h"
+#include "sanitizer_common/sanitizer_platform_interceptors.h"
+#include "sanitizer_common/sanitizer_platform_limits_netbsd.h"
+#include "sanitizer_common/sanitizer_platform_limits_posix.h"
+#include "sanitizer_common/sanitizer_posix.h"
+#include "sanitizer_common/sanitizer_tls_get_addr.h"
+#include "lsan.h"
+#include "lsan_allocator.h"
+#include "lsan_common.h"
+#include "lsan_thread.h"
+
+#include <stddef.h>
+
+using namespace __lsan;
+
+extern "C" {
+int pthread_attr_init(void *attr);
+int pthread_attr_destroy(void *attr);
+int pthread_attr_getdetachstate(void *attr, int *v);
+int pthread_key_create(unsigned *key, void (*destructor)(void* v));
+int pthread_setspecific(unsigned key, const void *v);
+}
+
+///// Malloc/free interceptors. /////
+
+namespace std {
+ struct nothrow_t;
+ enum class align_val_t: size_t;
+}
+
+#if !SANITIZER_MAC
+INTERCEPTOR(void*, malloc, uptr size) {
+ ENSURE_LSAN_INITED;
+ GET_STACK_TRACE_MALLOC;
+ return lsan_malloc(size, stack);
+}
+
+INTERCEPTOR(void, free, void *p) {
+ ENSURE_LSAN_INITED;
+ lsan_free(p);
+}
+
+INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) {
+ if (lsan_init_is_running) {
+ // Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
+ const uptr kCallocPoolSize = 1024;
+ static uptr calloc_memory_for_dlsym[kCallocPoolSize];
+ static uptr allocated;
+ uptr size_in_words = ((nmemb * size) + kWordSize - 1) / kWordSize;
+ void *mem = (void*)&calloc_memory_for_dlsym[allocated];
+ allocated += size_in_words;
+ CHECK(allocated < kCallocPoolSize);
+ return mem;
+ }
+ ENSURE_LSAN_INITED;
+ GET_STACK_TRACE_MALLOC;
+ return lsan_calloc(nmemb, size, stack);
+}
+
+INTERCEPTOR(void*, realloc, void *q, uptr size) {
+ ENSURE_LSAN_INITED;
+ GET_STACK_TRACE_MALLOC;
+ return lsan_realloc(q, size, stack);
+}
+
+INTERCEPTOR(void*, reallocarray, void *q, uptr nmemb, uptr size) {
+ ENSURE_LSAN_INITED;
+ GET_STACK_TRACE_MALLOC;
+ return lsan_reallocarray(q, nmemb, size, stack);
+}
+
+INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) {
+ ENSURE_LSAN_INITED;
+ GET_STACK_TRACE_MALLOC;
+ return lsan_posix_memalign(memptr, alignment, size, stack);
+}
+
+INTERCEPTOR(void*, valloc, uptr size) {
+ ENSURE_LSAN_INITED;
+ GET_STACK_TRACE_MALLOC;
+ return lsan_valloc(size, stack);
+}
+#endif
+
+#if SANITIZER_INTERCEPT_MEMALIGN
+INTERCEPTOR(void*, memalign, uptr alignment, uptr size) {
+ ENSURE_LSAN_INITED;
+ GET_STACK_TRACE_MALLOC;
+ return lsan_memalign(alignment, size, stack);
+}
+#define LSAN_MAYBE_INTERCEPT_MEMALIGN INTERCEPT_FUNCTION(memalign)
+
+INTERCEPTOR(void *, __libc_memalign, uptr alignment, uptr size) {
+ ENSURE_LSAN_INITED;
+ GET_STACK_TRACE_MALLOC;
+ void *res = lsan_memalign(alignment, size, stack);
+ DTLS_on_libc_memalign(res, size);
+ return res;
+}
+#define LSAN_MAYBE_INTERCEPT___LIBC_MEMALIGN INTERCEPT_FUNCTION(__libc_memalign)
+#else
+#define LSAN_MAYBE_INTERCEPT_MEMALIGN
+#define LSAN_MAYBE_INTERCEPT___LIBC_MEMALIGN
+#endif // SANITIZER_INTERCEPT_MEMALIGN
+
+#if SANITIZER_INTERCEPT_ALIGNED_ALLOC
+INTERCEPTOR(void*, aligned_alloc, uptr alignment, uptr size) {
+ ENSURE_LSAN_INITED;
+ GET_STACK_TRACE_MALLOC;
+ return lsan_aligned_alloc(alignment, size, stack);
+}
+#define LSAN_MAYBE_INTERCEPT_ALIGNED_ALLOC INTERCEPT_FUNCTION(aligned_alloc)
+#else
+#define LSAN_MAYBE_INTERCEPT_ALIGNED_ALLOC
+#endif
+
+#if SANITIZER_INTERCEPT_MALLOC_USABLE_SIZE
+INTERCEPTOR(uptr, malloc_usable_size, void *ptr) {
+ ENSURE_LSAN_INITED;
+ return GetMallocUsableSize(ptr);
+}
+#define LSAN_MAYBE_INTERCEPT_MALLOC_USABLE_SIZE \
+ INTERCEPT_FUNCTION(malloc_usable_size)
+#else
+#define LSAN_MAYBE_INTERCEPT_MALLOC_USABLE_SIZE
+#endif
+
+#if SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO
+struct fake_mallinfo {
+ int x[10];
+};
+
+INTERCEPTOR(struct fake_mallinfo, mallinfo, void) {
+ struct fake_mallinfo res;
+ internal_memset(&res, 0, sizeof(res));
+ return res;
+}
+#define LSAN_MAYBE_INTERCEPT_MALLINFO INTERCEPT_FUNCTION(mallinfo)
+
+INTERCEPTOR(int, mallopt, int cmd, int value) {
+ return 0;
+}
+#define LSAN_MAYBE_INTERCEPT_MALLOPT INTERCEPT_FUNCTION(mallopt)
+#else
+#define LSAN_MAYBE_INTERCEPT_MALLINFO
+#define LSAN_MAYBE_INTERCEPT_MALLOPT
+#endif // SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO
+
+#if SANITIZER_INTERCEPT_PVALLOC
+INTERCEPTOR(void*, pvalloc, uptr size) {
+ ENSURE_LSAN_INITED;
+ GET_STACK_TRACE_MALLOC;
+ return lsan_pvalloc(size, stack);
+}
+#define LSAN_MAYBE_INTERCEPT_PVALLOC INTERCEPT_FUNCTION(pvalloc)
+#else
+#define LSAN_MAYBE_INTERCEPT_PVALLOC
+#endif // SANITIZER_INTERCEPT_PVALLOC
+
+#if SANITIZER_INTERCEPT_CFREE
+INTERCEPTOR(void, cfree, void *p) ALIAS(WRAPPER_NAME(free));
+#define LSAN_MAYBE_INTERCEPT_CFREE INTERCEPT_FUNCTION(cfree)
+#else
+#define LSAN_MAYBE_INTERCEPT_CFREE
+#endif // SANITIZER_INTERCEPT_CFREE
+
+#if SANITIZER_INTERCEPT_MCHECK_MPROBE
+INTERCEPTOR(int, mcheck, void (*abortfunc)(int mstatus)) {
+ return 0;
+}
+
+INTERCEPTOR(int, mcheck_pedantic, void (*abortfunc)(int mstatus)) {
+ return 0;
+}
+
+INTERCEPTOR(int, mprobe, void *ptr) {
+ return 0;
+}
+#endif // SANITIZER_INTERCEPT_MCHECK_MPROBE
+
+
+// TODO(alekseys): throw std::bad_alloc instead of dying on OOM.
+#define OPERATOR_NEW_BODY(nothrow)\
+ ENSURE_LSAN_INITED;\
+ GET_STACK_TRACE_MALLOC;\
+ void *res = lsan_malloc(size, stack);\
+ if (!nothrow && UNLIKELY(!res)) ReportOutOfMemory(size, &stack);\
+ return res;
+#define OPERATOR_NEW_BODY_ALIGN(nothrow)\
+ ENSURE_LSAN_INITED;\
+ GET_STACK_TRACE_MALLOC;\
+ void *res = lsan_memalign((uptr)align, size, stack);\
+ if (!nothrow && UNLIKELY(!res)) ReportOutOfMemory(size, &stack);\
+ return res;
+
+#define OPERATOR_DELETE_BODY\
+ ENSURE_LSAN_INITED;\
+ lsan_free(ptr);
+
+// On OS X it's not enough to just provide our own 'operator new' and
+// 'operator delete' implementations, because they're going to be in the runtime
+// dylib, and the main executable will depend on both the runtime dylib and
+// libstdc++, each of which has its own implementation of new and delete.
+// To make sure that C++ allocation/deallocation operators are overridden on
+// OS X we need to intercept them using their mangled names.
+#if !SANITIZER_MAC
+
+INTERCEPTOR_ATTRIBUTE
+void *operator new(size_t size) { OPERATOR_NEW_BODY(false /*nothrow*/); }
+INTERCEPTOR_ATTRIBUTE
+void *operator new[](size_t size) { OPERATOR_NEW_BODY(false /*nothrow*/); }
+INTERCEPTOR_ATTRIBUTE
+void *operator new(size_t size, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY(true /*nothrow*/); }
+INTERCEPTOR_ATTRIBUTE
+void *operator new[](size_t size, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY(true /*nothrow*/); }
+INTERCEPTOR_ATTRIBUTE
+void *operator new(size_t size, std::align_val_t align)
+{ OPERATOR_NEW_BODY_ALIGN(false /*nothrow*/); }
+INTERCEPTOR_ATTRIBUTE
+void *operator new[](size_t size, std::align_val_t align)
+{ OPERATOR_NEW_BODY_ALIGN(false /*nothrow*/); }
+INTERCEPTOR_ATTRIBUTE
+void *operator new(size_t size, std::align_val_t align, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY_ALIGN(true /*nothrow*/); }
+INTERCEPTOR_ATTRIBUTE
+void *operator new[](size_t size, std::align_val_t align, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY_ALIGN(true /*nothrow*/); }
+
+INTERCEPTOR_ATTRIBUTE
+void operator delete(void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete[](void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete(void *ptr, std::nothrow_t const&) { OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete[](void *ptr, std::nothrow_t const &)
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete(void *ptr, size_t size) NOEXCEPT
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete[](void *ptr, size_t size) NOEXCEPT
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete(void *ptr, std::align_val_t) NOEXCEPT
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete[](void *ptr, std::align_val_t) NOEXCEPT
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete(void *ptr, std::align_val_t, std::nothrow_t const&)
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete[](void *ptr, std::align_val_t, std::nothrow_t const&)
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete(void *ptr, size_t size, std::align_val_t) NOEXCEPT
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete[](void *ptr, size_t size, std::align_val_t) NOEXCEPT
+{ OPERATOR_DELETE_BODY; }
+
+#else // SANITIZER_MAC
+
+INTERCEPTOR(void *, _Znwm, size_t size)
+{ OPERATOR_NEW_BODY(false /*nothrow*/); }
+INTERCEPTOR(void *, _Znam, size_t size)
+{ OPERATOR_NEW_BODY(false /*nothrow*/); }
+INTERCEPTOR(void *, _ZnwmRKSt9nothrow_t, size_t size, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY(true /*nothrow*/); }
+INTERCEPTOR(void *, _ZnamRKSt9nothrow_t, size_t size, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY(true /*nothrow*/); }
+
+INTERCEPTOR(void, _ZdlPv, void *ptr)
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR(void, _ZdaPv, void *ptr)
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR(void, _ZdlPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&)
+{ OPERATOR_DELETE_BODY; }
+INTERCEPTOR(void, _ZdaPvRKSt9nothrow_t, void *ptr, std::nothrow_t const&)
+{ OPERATOR_DELETE_BODY; }
+
+#endif // !SANITIZER_MAC
+
+
+///// Thread initialization and finalization. /////
+
+#if !SANITIZER_NETBSD && !SANITIZER_FREEBSD
+static unsigned g_thread_finalize_key;
+
+static void thread_finalize(void *v) {
+ uptr iter = (uptr)v;
+ if (iter > 1) {
+ if (pthread_setspecific(g_thread_finalize_key, (void*)(iter - 1))) {
+ Report("LeakSanitizer: failed to set thread key.\n");
+ Die();
+ }
+ return;
+ }
+ ThreadFinish();
+}
+#endif
+
+#if SANITIZER_NETBSD
+INTERCEPTOR(void, _lwp_exit) {
+ ENSURE_LSAN_INITED;
+ ThreadFinish();
+ REAL(_lwp_exit)();
+}
+#define LSAN_MAYBE_INTERCEPT__LWP_EXIT INTERCEPT_FUNCTION(_lwp_exit)
+#else
+#define LSAN_MAYBE_INTERCEPT__LWP_EXIT
+#endif
+
+#if SANITIZER_INTERCEPT_THR_EXIT
+INTERCEPTOR(void, thr_exit, tid_t *state) {
+ ENSURE_LSAN_INITED;
+ ThreadFinish();
+ REAL(thr_exit)(state);
+}
+#define LSAN_MAYBE_INTERCEPT_THR_EXIT INTERCEPT_FUNCTION(thr_exit)
+#else
+#define LSAN_MAYBE_INTERCEPT_THR_EXIT
+#endif
+
+struct ThreadParam {
+ void *(*callback)(void *arg);
+ void *param;
+ atomic_uintptr_t tid;
+};
+
+extern "C" void *__lsan_thread_start_func(void *arg) {
+ ThreadParam *p = (ThreadParam*)arg;
+ void* (*callback)(void *arg) = p->callback;
+ void *param = p->param;
+ // Wait until the last iteration to maximize the chance that we are the last
+ // destructor to run.
+#if !SANITIZER_NETBSD && !SANITIZER_FREEBSD
+ if (pthread_setspecific(g_thread_finalize_key,
+ (void*)GetPthreadDestructorIterations())) {
+ Report("LeakSanitizer: failed to set thread key.\n");
+ Die();
+ }
+#endif
+ int tid = 0;
+ while ((tid = atomic_load(&p->tid, memory_order_acquire)) == 0)
+ internal_sched_yield();
+ SetCurrentThread(tid);
+ ThreadStart(tid, GetTid());
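+ // Hand the slot back to the parent: zero signals registration is complete.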
+ atomic_store(&p->tid, 0, memory_order_release);
+ return callback(param);
+}
+
+INTERCEPTOR(int, pthread_create, void *th, void *attr,
+ void *(*callback)(void *), void *param) {
+ ENSURE_LSAN_INITED;
+ EnsureMainThreadIDIsCorrect();
+ __sanitizer_pthread_attr_t myattr;
+ if (!attr) {
+ pthread_attr_init(&myattr);
+ attr = &myattr;
+ }
+ AdjustStackSize(attr);
+ int detached = 0;
+ pthread_attr_getdetachstate(attr, &detached);
+ ThreadParam p;
+ p.callback = callback;
+ p.param = param;
+ atomic_store(&p.tid, 0, memory_order_relaxed);
+ int res;
+ {
+ // Ignore all allocations made by pthread_create: thread stack/TLS may be
+ // stored by pthread for future reuse even after thread destruction, and
+ // the linked list it's stored in doesn't even hold valid pointers to the
+ // objects; the latter are computed by obscure pointer arithmetic.
+ ScopedInterceptorDisabler disabler;
+ res = REAL(pthread_create)(th, attr, __lsan_thread_start_func, &p);
+ }
+ if (res == 0) {
+ int tid = ThreadCreate(GetCurrentThread(), *(uptr *)th,
+ IsStateDetached(detached));
+ CHECK_NE(tid, 0);
+ atomic_store(&p.tid, tid, memory_order_release);
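+ // Wait until the child has picked up its tid and cleared the slot.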
+ while (atomic_load(&p.tid, memory_order_acquire) != 0)
+ internal_sched_yield();
+ }
+ if (attr == &myattr)
+ pthread_attr_destroy(&myattr);
+ return res;
+}
+
+INTERCEPTOR(int, pthread_join, void *th, void **ret) {
+ ENSURE_LSAN_INITED;
+ int tid = ThreadTid((uptr)th);
+ int res = REAL(pthread_join)(th, ret);
+ if (res == 0)
+ ThreadJoin(tid);
+ return res;
+}
+
+INTERCEPTOR(void, _exit, int status) {
+ if (status == 0 && HasReportedLeaks()) status = common_flags()->exitcode;
+ REAL(_exit)(status);
+}
+
+#define COMMON_INTERCEPT_FUNCTION(name) INTERCEPT_FUNCTION(name)
+#include "sanitizer_common/sanitizer_signal_interceptors.inc"
+
+namespace __lsan {
+
+void InitializeInterceptors() {
+ InitializeSignalInterceptors();
+
+ INTERCEPT_FUNCTION(malloc);
+ INTERCEPT_FUNCTION(free);
+ LSAN_MAYBE_INTERCEPT_CFREE;
+ INTERCEPT_FUNCTION(calloc);
+ INTERCEPT_FUNCTION(realloc);
+ LSAN_MAYBE_INTERCEPT_MEMALIGN;
+ LSAN_MAYBE_INTERCEPT___LIBC_MEMALIGN;
+ LSAN_MAYBE_INTERCEPT_ALIGNED_ALLOC;
+ INTERCEPT_FUNCTION(posix_memalign);
+ INTERCEPT_FUNCTION(valloc);
+ LSAN_MAYBE_INTERCEPT_PVALLOC;
+ LSAN_MAYBE_INTERCEPT_MALLOC_USABLE_SIZE;
+ LSAN_MAYBE_INTERCEPT_MALLINFO;
+ LSAN_MAYBE_INTERCEPT_MALLOPT;
+ INTERCEPT_FUNCTION(pthread_create);
+ INTERCEPT_FUNCTION(pthread_join);
+ INTERCEPT_FUNCTION(_exit);
+
+ LSAN_MAYBE_INTERCEPT__LWP_EXIT;
+ LSAN_MAYBE_INTERCEPT_THR_EXIT;
+
+#if !SANITIZER_NETBSD && !SANITIZER_FREEBSD
+ if (pthread_key_create(&g_thread_finalize_key, &thread_finalize)) {
+ Report("LeakSanitizer: failed to create thread key.\n");
+ Die();
+ }
+#endif
+}
+
+} // namespace __lsan
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/lsan/lsan_linux.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/lsan/lsan_linux.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/lsan/lsan_linux.cc (revision 351984)
@@ -0,0 +1,32 @@
+//=-- lsan_linux.cc -------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer. Linux/NetBSD-specific code.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+
+#if SANITIZER_LINUX || SANITIZER_NETBSD
+
+#include "lsan_allocator.h"
+
+namespace __lsan {
+
+static THREADLOCAL u32 current_thread_tid = kInvalidTid;
+u32 GetCurrentThread() { return current_thread_tid; }
+void SetCurrentThread(u32 tid) { current_thread_tid = tid; }
+
+static THREADLOCAL AllocatorCache allocator_cache;
+AllocatorCache *GetAllocatorCache() { return &allocator_cache; }
+
+void ReplaceSystemMalloc() {}
+
+} // namespace __lsan
+
+#endif // SANITIZER_LINUX || SANITIZER_NETBSD
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/lsan/lsan_linux.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/lsan/lsan_mac.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/lsan/lsan_mac.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/lsan/lsan_mac.cc (revision 351984)
@@ -0,0 +1,191 @@
+//===-- lsan_mac.cc -------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer, a memory leak checker.
+//
+// Mac-specific details.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#if SANITIZER_MAC
+
+#include "interception/interception.h"
+#include "lsan.h"
+#include "lsan_allocator.h"
+#include "lsan_thread.h"
+
+#include <pthread.h>
+
+namespace __lsan {
+// Support for the following functions from libdispatch on Mac OS:
+// dispatch_async_f()
+// dispatch_async()
+// dispatch_sync_f()
+// dispatch_sync()
+// dispatch_after_f()
+// dispatch_after()
+// dispatch_group_async_f()
+// dispatch_group_async()
+// TODO(glider): libdispatch API contains other functions that we don't support
+// yet.
+//
+// dispatch_sync() and dispatch_sync_f() are synchronous, although chances are
+// they can cause jobs to run on a thread different from the current one.
+// TODO(glider): if so, we need a test for this (otherwise we should remove
+// them).
+//
+// The following functions use dispatch_barrier_async_f() (which isn't a library
+// function but is exported) and are thus supported:
+// dispatch_source_set_cancel_handler_f()
+// dispatch_source_set_cancel_handler()
+// dispatch_source_set_event_handler_f()
+// dispatch_source_set_event_handler()
+//
+// The reference manual for Grand Central Dispatch is available at
+// http://developer.apple.com/library/mac/#documentation/Performance/Reference/GCD_libdispatch_Ref/Reference/reference.html
+// The implementation details are at
+// http://libdispatch.macosforge.org/trac/browser/trunk/src/queue.c
+
+typedef void *dispatch_group_t;
+typedef void *dispatch_queue_t;
+typedef void *dispatch_source_t;
+typedef u64 dispatch_time_t;
+typedef void (*dispatch_function_t)(void *block);
+typedef void *(*worker_t)(void *block);
+
+// A wrapper for the ObjC blocks used to support libdispatch.
+typedef struct {
+ void *block;
+ dispatch_function_t func;
+ u32 parent_tid;
+} lsan_block_context_t;
+
+ALWAYS_INLINE
+void lsan_register_worker_thread(int parent_tid) {
+ if (GetCurrentThread() == kInvalidTid) {
+ u32 tid = ThreadCreate(parent_tid, 0, true);
+ ThreadStart(tid, GetTid());
+ SetCurrentThread(tid);
+ }
+}
+
+// For use only by those functions that allocated the context via
+// alloc_lsan_context().
+extern "C" void lsan_dispatch_call_block_and_release(void *block) {
+ lsan_block_context_t *context = (lsan_block_context_t *)block;
+ VReport(2,
+ "lsan_dispatch_call_block_and_release(): "
+ "context: %p, pthread_self: %p\n",
+ block, pthread_self());
+ lsan_register_worker_thread(context->parent_tid);
+ // Call the original dispatcher for the block.
+ context->func(context->block);
+ lsan_free(context);
+}
+
+} // namespace __lsan
+
+using namespace __lsan; // NOLINT
+
+// Wrap |ctxt| and |func| into an lsan_block_context_t.
+// The caller retains control of the allocated context.
+extern "C" lsan_block_context_t *alloc_lsan_context(void *ctxt,
+ dispatch_function_t func) {
+ GET_STACK_TRACE_THREAD;
+ lsan_block_context_t *lsan_ctxt =
+ (lsan_block_context_t *)lsan_malloc(sizeof(lsan_block_context_t), stack);
+ lsan_ctxt->block = ctxt;
+ lsan_ctxt->func = func;
+ lsan_ctxt->parent_tid = GetCurrentThread();
+ return lsan_ctxt;
+}
+
+// Define interceptor for dispatch_*_f function with the three most common
+// parameters: dispatch_queue_t, context, dispatch_function_t.
+#define INTERCEPT_DISPATCH_X_F_3(dispatch_x_f) \
+ INTERCEPTOR(void, dispatch_x_f, dispatch_queue_t dq, void *ctxt, \
+ dispatch_function_t func) { \
+ lsan_block_context_t *lsan_ctxt = alloc_lsan_context(ctxt, func); \
+ return REAL(dispatch_x_f)(dq, (void *)lsan_ctxt, \
+ lsan_dispatch_call_block_and_release); \
+ }
+
+INTERCEPT_DISPATCH_X_F_3(dispatch_async_f)
+INTERCEPT_DISPATCH_X_F_3(dispatch_sync_f)
+INTERCEPT_DISPATCH_X_F_3(dispatch_barrier_async_f)
+
+INTERCEPTOR(void, dispatch_after_f, dispatch_time_t when, dispatch_queue_t dq,
+ void *ctxt, dispatch_function_t func) {
+ lsan_block_context_t *lsan_ctxt = alloc_lsan_context(ctxt, func);
+ return REAL(dispatch_after_f)(when, dq, (void *)lsan_ctxt,
+ lsan_dispatch_call_block_and_release);
+}
+
+INTERCEPTOR(void, dispatch_group_async_f, dispatch_group_t group,
+ dispatch_queue_t dq, void *ctxt, dispatch_function_t func) {
+ lsan_block_context_t *lsan_ctxt = alloc_lsan_context(ctxt, func);
+ REAL(dispatch_group_async_f)
+ (group, dq, (void *)lsan_ctxt, lsan_dispatch_call_block_and_release);
+}
+
+#if !defined(MISSING_BLOCKS_SUPPORT)
+extern "C" {
+void dispatch_async(dispatch_queue_t dq, void (^work)(void));
+void dispatch_group_async(dispatch_group_t dg, dispatch_queue_t dq,
+ void (^work)(void));
+void dispatch_after(dispatch_time_t when, dispatch_queue_t queue,
+ void (^work)(void));
+void dispatch_source_set_cancel_handler(dispatch_source_t ds,
+ void (^work)(void));
+void dispatch_source_set_event_handler(dispatch_source_t ds,
+ void (^work)(void));
+}
+
+#define GET_LSAN_BLOCK(work) \
+ void (^lsan_block)(void); \
+ int parent_tid = GetCurrentThread(); \
+ lsan_block = ^(void) { \
+ lsan_register_worker_thread(parent_tid); \
+ work(); \
+ }
+
+INTERCEPTOR(void, dispatch_async, dispatch_queue_t dq, void (^work)(void)) {
+ GET_LSAN_BLOCK(work);
+ REAL(dispatch_async)(dq, lsan_block);
+}
+
+INTERCEPTOR(void, dispatch_group_async, dispatch_group_t dg,
+ dispatch_queue_t dq, void (^work)(void)) {
+ GET_LSAN_BLOCK(work);
+ REAL(dispatch_group_async)(dg, dq, lsan_block);
+}
+
+INTERCEPTOR(void, dispatch_after, dispatch_time_t when, dispatch_queue_t queue,
+ void (^work)(void)) {
+ GET_LSAN_BLOCK(work);
+ REAL(dispatch_after)(when, queue, lsan_block);
+}
+
+INTERCEPTOR(void, dispatch_source_set_cancel_handler, dispatch_source_t ds,
+ void (^work)(void)) {
+ if (!work) {
+ REAL(dispatch_source_set_cancel_handler)(ds, work);
+ return;
+ }
+ GET_LSAN_BLOCK(work);
+ REAL(dispatch_source_set_cancel_handler)(ds, lsan_block);
+}
+
+INTERCEPTOR(void, dispatch_source_set_event_handler, dispatch_source_t ds,
+ void (^work)(void)) {
+ GET_LSAN_BLOCK(work);
+ REAL(dispatch_source_set_event_handler)(ds, lsan_block);
+}
+#endif
+
+#endif // SANITIZER_MAC
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/lsan/lsan_mac.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/lsan/lsan_malloc_mac.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/lsan/lsan_malloc_mac.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/lsan/lsan_malloc_mac.cc (revision 351984)
@@ -0,0 +1,59 @@
+//===-- lsan_malloc_mac.cc ------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer (LSan), a memory leak detector.
+//
+// Mac-specific malloc interception.
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#if SANITIZER_MAC
+
+#include "lsan.h"
+#include "lsan_allocator.h"
+#include "lsan_thread.h"
+
+using namespace __lsan;
+#define COMMON_MALLOC_ZONE_NAME "lsan"
+#define COMMON_MALLOC_ENTER() ENSURE_LSAN_INITED
+#define COMMON_MALLOC_SANITIZER_INITIALIZED lsan_inited
+#define COMMON_MALLOC_FORCE_LOCK()
+#define COMMON_MALLOC_FORCE_UNLOCK()
+#define COMMON_MALLOC_MEMALIGN(alignment, size) \
+ GET_STACK_TRACE_MALLOC; \
+ void *p = lsan_memalign(alignment, size, stack)
+#define COMMON_MALLOC_MALLOC(size) \
+ GET_STACK_TRACE_MALLOC; \
+ void *p = lsan_malloc(size, stack)
+#define COMMON_MALLOC_REALLOC(ptr, size) \
+ GET_STACK_TRACE_MALLOC; \
+ void *p = lsan_realloc(ptr, size, stack)
+#define COMMON_MALLOC_CALLOC(count, size) \
+ GET_STACK_TRACE_MALLOC; \
+ void *p = lsan_calloc(count, size, stack)
+#define COMMON_MALLOC_POSIX_MEMALIGN(memptr, alignment, size) \
+ GET_STACK_TRACE_MALLOC; \
+ int res = lsan_posix_memalign(memptr, alignment, size, stack)
+#define COMMON_MALLOC_VALLOC(size) \
+ GET_STACK_TRACE_MALLOC; \
+ void *p = lsan_valloc(size, stack)
+#define COMMON_MALLOC_FREE(ptr) \
+ lsan_free(ptr)
+#define COMMON_MALLOC_SIZE(ptr) \
+ uptr size = lsan_mz_size(ptr)
+#define COMMON_MALLOC_FILL_STATS(zone, stats)
+#define COMMON_MALLOC_REPORT_UNKNOWN_REALLOC(ptr, zone_ptr, zone_name) \
+ (void)zone_name; \
+ Report("mz_realloc(%p) -- attempting to realloc unallocated memory.\n", ptr);
+#define COMMON_MALLOC_NAMESPACE __lsan
+#define COMMON_MALLOC_HAS_ZONE_ENUMERATOR 0
+#define COMMON_MALLOC_HAS_EXTRA_INTROSPECTION_INIT 0
+
+#include "sanitizer_common/sanitizer_malloc_mac.inc"
+
+#endif // SANITIZER_MAC
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/lsan/lsan_malloc_mac.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/lsan/lsan_preinit.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/lsan/lsan_preinit.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/lsan/lsan_preinit.cc (revision 351984)
@@ -0,0 +1,21 @@
+//===-- lsan_preinit.cc ---------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+//
+// Call __lsan_init at the very early stage of process startup.
+//===----------------------------------------------------------------------===//
+
+#include "lsan.h"
+
+#if SANITIZER_CAN_USE_PREINIT_ARRAY
+ // We force __lsan_init to be called before anyone else by placing it into
+ // the .preinit_array section.
+ __attribute__((section(".preinit_array"), used))
+ void (*__local_lsan_preinit)(void) = __lsan_init;
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/lsan/lsan_preinit.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/lsan/lsan_thread.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/lsan/lsan_thread.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/lsan/lsan_thread.cc (revision 351984)
@@ -0,0 +1,162 @@
+//=-- lsan_thread.cc ------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// See lsan_thread.h for details.
+//
+//===----------------------------------------------------------------------===//
+
+#include "lsan_thread.h"
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_thread_registry.h"
+#include "sanitizer_common/sanitizer_tls_get_addr.h"
+#include "lsan_allocator.h"
+#include "lsan_common.h"
+
+namespace __lsan {
+
+static ThreadRegistry *thread_registry;
+
+static ThreadContextBase *CreateThreadContext(u32 tid) {
+ void *mem = MmapOrDie(sizeof(ThreadContext), "ThreadContext");
+ return new(mem) ThreadContext(tid);
+}
+
+static const uptr kMaxThreads = 1 << 13;
+static const uptr kThreadQuarantineSize = 64;
+
+void InitializeThreadRegistry() {
+ static ALIGNED(64) char thread_registry_placeholder[sizeof(ThreadRegistry)];
+ thread_registry = new(thread_registry_placeholder)
+ ThreadRegistry(CreateThreadContext, kMaxThreads, kThreadQuarantineSize);
+}
+
+ThreadContext::ThreadContext(int tid)
+ : ThreadContextBase(tid),
+ stack_begin_(0),
+ stack_end_(0),
+ cache_begin_(0),
+ cache_end_(0),
+ tls_begin_(0),
+ tls_end_(0),
+ dtls_(nullptr) {}
+
+struct OnStartedArgs {
+ uptr stack_begin, stack_end,
+ cache_begin, cache_end,
+ tls_begin, tls_end;
+ DTLS *dtls;
+};
+
+void ThreadContext::OnStarted(void *arg) {
+ OnStartedArgs *args = reinterpret_cast<OnStartedArgs *>(arg);
+ stack_begin_ = args->stack_begin;
+ stack_end_ = args->stack_end;
+ tls_begin_ = args->tls_begin;
+ tls_end_ = args->tls_end;
+ cache_begin_ = args->cache_begin;
+ cache_end_ = args->cache_end;
+ dtls_ = args->dtls;
+}
+
+void ThreadContext::OnFinished() {
+ AllocatorThreadFinish();
+ DTLS_Destroy();
+}
+
+u32 ThreadCreate(u32 parent_tid, uptr user_id, bool detached) {
+ return thread_registry->CreateThread(user_id, detached, parent_tid,
+ /* arg */ nullptr);
+}
+
+void ThreadStart(u32 tid, tid_t os_id, ThreadType thread_type) {
+ OnStartedArgs args;
+ uptr stack_size = 0;
+ uptr tls_size = 0;
+ GetThreadStackAndTls(tid == 0, &args.stack_begin, &stack_size,
+ &args.tls_begin, &tls_size);
+ args.stack_end = args.stack_begin + stack_size;
+ args.tls_end = args.tls_begin + tls_size;
+ GetAllocatorCacheRange(&args.cache_begin, &args.cache_end);
+ args.dtls = DTLS_Get();
+ thread_registry->StartThread(tid, os_id, thread_type, &args);
+}
+
+void ThreadFinish() {
+ thread_registry->FinishThread(GetCurrentThread());
+ SetCurrentThread(kInvalidTid);
+}
+
+ThreadContext *CurrentThreadContext() {
+ if (!thread_registry) return nullptr;
+ if (GetCurrentThread() == kInvalidTid)
+ return nullptr;
+ // No lock needed when getting current thread.
+ return (ThreadContext *)thread_registry->GetThreadLocked(GetCurrentThread());
+}
+
+static bool FindThreadByUid(ThreadContextBase *tctx, void *arg) {
+ uptr uid = (uptr)arg;
+ if (tctx->user_id == uid && tctx->status != ThreadStatusInvalid) {
+ return true;
+ }
+ return false;
+}
+
+u32 ThreadTid(uptr uid) {
+ return thread_registry->FindThread(FindThreadByUid, (void*)uid);
+}
+
+void ThreadJoin(u32 tid) {
+ CHECK_NE(tid, kInvalidTid);
+ thread_registry->JoinThread(tid, /* arg */nullptr);
+}
+
+void EnsureMainThreadIDIsCorrect() {
+ if (GetCurrentThread() == 0)
+ CurrentThreadContext()->os_id = GetTid();
+}
+
+///// Interface to the common LSan module. /////
+
+bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
+ uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
+ uptr *cache_end, DTLS **dtls) {
+ ThreadContext *context = static_cast<ThreadContext *>(
+ thread_registry->FindThreadContextByOsIDLocked(os_id));
+ if (!context) return false;
+ *stack_begin = context->stack_begin();
+ *stack_end = context->stack_end();
+ *tls_begin = context->tls_begin();
+ *tls_end = context->tls_end();
+ *cache_begin = context->cache_begin();
+ *cache_end = context->cache_end();
+ *dtls = context->dtls();
+ return true;
+}
+
+void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback,
+ void *arg) {
+}
+
+void LockThreadRegistry() {
+ thread_registry->Lock();
+}
+
+void UnlockThreadRegistry() {
+ thread_registry->Unlock();
+}
+
+ThreadRegistry *GetThreadRegistryLocked() {
+ thread_registry->CheckLocked();
+ return thread_registry;
+}
+
+} // namespace __lsan
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/lsan/lsan_thread.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/lsan/lsan_thread.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/lsan/lsan_thread.h (revision 351984)
@@ -0,0 +1,60 @@
+//=-- lsan_thread.h -------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+// Thread registry for standalone LSan.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LSAN_THREAD_H
+#define LSAN_THREAD_H
+
+#include "sanitizer_common/sanitizer_thread_registry.h"
+
+namespace __sanitizer {
+struct DTLS;
+}
+
+namespace __lsan {
+
+class ThreadContext : public ThreadContextBase {
+ public:
+ explicit ThreadContext(int tid);
+ void OnStarted(void *arg) override;
+ void OnFinished() override;
+ uptr stack_begin() { return stack_begin_; }
+ uptr stack_end() { return stack_end_; }
+ uptr tls_begin() { return tls_begin_; }
+ uptr tls_end() { return tls_end_; }
+ uptr cache_begin() { return cache_begin_; }
+ uptr cache_end() { return cache_end_; }
+ DTLS *dtls() { return dtls_; }
+
+ private:
+ uptr stack_begin_, stack_end_,
+ cache_begin_, cache_end_,
+ tls_begin_, tls_end_;
+ DTLS *dtls_;
+};
+
+void InitializeThreadRegistry();
+
+void ThreadStart(u32 tid, tid_t os_id,
+ ThreadType thread_type = ThreadType::Regular);
+void ThreadFinish();
+u32 ThreadCreate(u32 tid, uptr uid, bool detached);
+void ThreadJoin(u32 tid);
+u32 ThreadTid(uptr uid);
+
+u32 GetCurrentThread();
+void SetCurrentThread(u32 tid);
+ThreadContext *CurrentThreadContext();
+void EnsureMainThreadIDIsCorrect();
+} // namespace __lsan
+
+#endif // LSAN_THREAD_H
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/lsan/weak_symbols.txt
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/lsan/weak_symbols.txt (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/lsan/weak_symbols.txt (revision 351984)
@@ -0,0 +1,3 @@
+___lsan_default_options
+___lsan_default_suppressions
+___lsan_is_turned_off
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/lsan/weak_symbols.txt
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/GCDAProfiling.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/GCDAProfiling.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/GCDAProfiling.c (revision 351984)
@@ -0,0 +1,656 @@
+/*===- GCDAProfiling.c - Support library for GCDA file emission -----------===*\
+|*
+|* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+|* See https://llvm.org/LICENSE.txt for license information.
+|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+|*
+|*===----------------------------------------------------------------------===*|
+|*
+|* This file implements the call back routines for the gcov profiling
+|* instrumentation pass. Link against this library when running code through
+|* the -insert-gcov-profiling LLVM pass.
+|*
+|* We emit files in a corrupt version of GCOV's "gcda" file format. These files
+|* are only close enough that LCOV will happily parse them. Anything that lcov
+|* ignores is missing.
+|*
+|* TODO: gcov is multi-process safe by having each exit open the existing file
+|* and append to it. We'd like to achieve that and be thread-safe too.
+|*
+\*===----------------------------------------------------------------------===*/
+
+#if !defined(__Fuchsia__)
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#if defined(_WIN32)
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#include "WindowsMMap.h"
+#else
+#include <sys/mman.h>
+#include <sys/file.h>
+#endif
+
+#if defined(__FreeBSD__) && defined(__i386__)
+#define I386_FREEBSD 1
+#else
+#define I386_FREEBSD 0
+#endif
+
+#if !defined(_MSC_VER) && !I386_FREEBSD
+#include <stdint.h>
+#endif
+
+#if defined(_MSC_VER)
+typedef unsigned char uint8_t;
+typedef unsigned int uint32_t;
+typedef unsigned long long uint64_t;
+#elif I386_FREEBSD
+/* System headers define 'size_t' incorrectly on x64 FreeBSD (prior to
+ * FreeBSD 10, r232261) when compiled in 32-bit mode.
+ */
+typedef unsigned char uint8_t;
+typedef unsigned int uint32_t;
+typedef unsigned long long uint64_t;
+#endif
+
+#include "InstrProfiling.h"
+#include "InstrProfilingUtil.h"
+
+/* #define DEBUG_GCDAPROFILING */
+
+/*
+ * --- GCOV file format I/O primitives ---
+ */
+
+/*
+ * The current file name we're outputting. Used primarily for error logging.
+ */
+static char *filename = NULL;
+
+/*
+ * The current file we're outputting.
+ */
+static FILE *output_file = NULL;
+
+/*
+ * Buffer that we write things into.
+ */
+#define WRITE_BUFFER_SIZE (128 * 1024)
+static unsigned char *write_buffer = NULL;
+static uint64_t cur_buffer_size = 0;
+static uint64_t cur_pos = 0;
+static uint64_t file_size = 0;
+static int new_file = 0;
+#if defined(_WIN32)
+static HANDLE mmap_handle = NULL;
+#endif
+static int fd = -1;
+
+typedef void (*fn_ptr)();
+
+typedef void* dynamic_object_id;
+// The address of this variable identifies a given dynamic object.
+static dynamic_object_id current_id;
+#define CURRENT_ID (&current_id)
+
+struct fn_node {
+ dynamic_object_id id;
+ fn_ptr fn;
+ struct fn_node* next;
+};
+
+struct fn_list {
+ struct fn_node *head, *tail;
+};
+
+/*
+ * A list of functions to write out the data, shared between all dynamic objects.
+ */
+struct fn_list writeout_fn_list;
+
+/*
+ * A list of flush functions that our __gcov_flush() function should call, shared between all dynamic objects.
+ */
+struct fn_list flush_fn_list;
+
+static void fn_list_insert(struct fn_list* list, fn_ptr fn) {
+ struct fn_node* new_node = malloc(sizeof(struct fn_node));
+ new_node->fn = fn;
+ new_node->next = NULL;
+ new_node->id = CURRENT_ID;
+
+ if (!list->head) {
+ list->head = list->tail = new_node;
+ } else {
+ list->tail->next = new_node;
+ list->tail = new_node;
+ }
+}
+
+static void fn_list_remove(struct fn_list* list) {
+ struct fn_node* curr = list->head;
+ struct fn_node* prev = NULL;
+ struct fn_node* next = NULL;
+
+ while (curr) {
+ next = curr->next;
+
+ if (curr->id == CURRENT_ID) {
+ if (curr == list->head) {
+ list->head = next;
+ }
+
+ if (curr == list->tail) {
+ list->tail = prev;
+ }
+
+ if (prev) {
+ prev->next = next;
+ }
+
+ free(curr);
+ } else {
+ prev = curr;
+ }
+
+ curr = next;
+ }
+}
+
+static void resize_write_buffer(uint64_t size) {
+ if (!new_file) return;
+ size += cur_pos;
+ if (size <= cur_buffer_size) return;
+ size = (size - 1) / WRITE_BUFFER_SIZE + 1;
+ size *= WRITE_BUFFER_SIZE;
+ write_buffer = realloc(write_buffer, size);
+ cur_buffer_size = size;
+}
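+/* Example: once the buffer holds one 128 KB chunk, a request that keeps
+ * cur_pos + size at or below 128 KB returns immediately; pushing the total to
+ * 128 KB + 1 reallocates to 2 * WRITE_BUFFER_SIZE (256 KB). */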
+
+static void write_bytes(const char *s, size_t len) {
+ resize_write_buffer(len);
+ memcpy(&write_buffer[cur_pos], s, len);
+ cur_pos += len;
+}
+
+static void write_32bit_value(uint32_t i) {
+ write_bytes((char*)&i, 4);
+}
+
+static void write_64bit_value(uint64_t i) {
+ // GCOV uses a lo-/hi-word format even on big-endian systems.
+ // See also GCOVBuffer::readInt64 in LLVM.
+ uint32_t lo = (uint32_t) i;
+ uint32_t hi = (uint32_t) (i >> 32);
+ write_32bit_value(lo);
+ write_32bit_value(hi);
+}
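+/* Example: write_64bit_value(0x0123456789abcdefULL) writes the low word
+ * 0x89abcdef followed by the high word 0x01234567; the lo-/hi-word order is
+ * fixed, while each word's bytes are emitted in host order by
+ * write_32bit_value(). */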
+
+static uint32_t length_of_string(const char *s) {
+ return (strlen(s) / 4) + 1;
+}
+
+static void write_string(const char *s) {
+ uint32_t len = length_of_string(s);
+ write_32bit_value(len);
+ write_bytes(s, strlen(s));
+ write_bytes("\0\0\0\0", 4 - (strlen(s) % 4));
+}
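+/* Example: write_string("main") writes the word count (strlen / 4) + 1 == 2,
+ * then "main" plus four NUL bytes, so the payload is always NUL-terminated
+ * and padded to a 32-bit boundary. */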
+
+static uint32_t read_32bit_value() {
+ uint32_t val;
+
+ if (new_file)
+ return (uint32_t)-1;
+
+ val = *(uint32_t*)&write_buffer[cur_pos];
+ cur_pos += 4;
+ return val;
+}
+
+static uint32_t read_le_32bit_value() {
+ uint32_t val = 0;
+ int i;
+
+ if (new_file)
+ return (uint32_t)-1;
+
+ for (i = 0; i < 4; i++)
+ val |= write_buffer[cur_pos++] << (8*i);
+ return val;
+}
+
+static uint64_t read_64bit_value() {
+ // GCOV uses a lo-/hi-word format even on big-endian systems.
+ // See also GCOVBuffer::readInt64 in LLVM.
+ uint32_t lo = read_32bit_value();
+ uint32_t hi = read_32bit_value();
+ return ((uint64_t)hi << 32) | ((uint64_t)lo);
+}
+
+static char *mangle_filename(const char *orig_filename) {
+ char *new_filename;
+ size_t prefix_len;
+ int prefix_strip;
+ const char *prefix = lprofGetPathPrefix(&prefix_strip, &prefix_len);
+
+ if (prefix == NULL)
+ return strdup(orig_filename);
+
+ new_filename = malloc(prefix_len + 1 + strlen(orig_filename) + 1);
+ lprofApplyPathPrefix(new_filename, orig_filename, prefix, prefix_len,
+ prefix_strip);
+
+ return new_filename;
+}
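+/* Example (assuming the usual GCOV_PREFIX / GCOV_PREFIX_STRIP semantics
+ * implemented by lprofApplyPathPrefix): with prefix "/tmp/cov" and
+ * prefix_strip 2, "/home/user/obj/foo.gcda" would be mangled to
+ * "/tmp/cov/obj/foo.gcda". */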
+
+static int map_file() {
+ fseek(output_file, 0L, SEEK_END);
+ file_size = ftell(output_file);
+
+  /* A size of 0 is invalid to `mmap'. Return failure here, but don't issue an
+   * error message because it should "just work" for the user. */
+ if (file_size == 0)
+ return -1;
+
+#if defined(_WIN32)
+ HANDLE mmap_fd;
+ if (fd == -1)
+ mmap_fd = INVALID_HANDLE_VALUE;
+ else
+ mmap_fd = (HANDLE)_get_osfhandle(fd);
+
+ mmap_handle = CreateFileMapping(mmap_fd, NULL, PAGE_READWRITE, DWORD_HI(file_size), DWORD_LO(file_size), NULL);
+ if (mmap_handle == NULL) {
+ fprintf(stderr, "profiling: %s: cannot create file mapping: %lu\n",
+ filename, GetLastError());
+ return -1;
+ }
+
+ write_buffer = MapViewOfFile(mmap_handle, FILE_MAP_WRITE, 0, 0, file_size);
+ if (write_buffer == NULL) {
+ fprintf(stderr, "profiling: %s: cannot map: %lu\n", filename,
+ GetLastError());
+ CloseHandle(mmap_handle);
+ return -1;
+ }
+#else
+ write_buffer = mmap(0, file_size, PROT_READ | PROT_WRITE,
+ MAP_FILE | MAP_SHARED, fd, 0);
+ if (write_buffer == (void *)-1) {
+ int errnum = errno;
+ fprintf(stderr, "profiling: %s: cannot map: %s\n", filename,
+ strerror(errnum));
+ return -1;
+ }
+#endif
+
+ return 0;
+}
+
+static void unmap_file() {
+#if defined(_WIN32)
+ if (!FlushViewOfFile(write_buffer, file_size)) {
+ fprintf(stderr, "profiling: %s: cannot flush mapped view: %lu\n", filename,
+ GetLastError());
+ }
+
+ if (!UnmapViewOfFile(write_buffer)) {
+ fprintf(stderr, "profiling: %s: cannot unmap mapped view: %lu\n", filename,
+ GetLastError());
+ }
+
+ if (!CloseHandle(mmap_handle)) {
+ fprintf(stderr, "profiling: %s: cannot close file mapping handle: %lu\n",
+ filename, GetLastError());
+ }
+
+ mmap_handle = NULL;
+#else
+ if (msync(write_buffer, file_size, MS_SYNC) == -1) {
+ int errnum = errno;
+ fprintf(stderr, "profiling: %s: cannot msync: %s\n", filename,
+ strerror(errnum));
+ }
+
+ /* We explicitly ignore errors from unmapping because at this point the data
+ * is written and we don't care.
+ */
+ (void)munmap(write_buffer, file_size);
+#endif
+
+ write_buffer = NULL;
+ file_size = 0;
+}
+
+/*
+ * --- LLVM line counter API ---
+ */
+
+/* A file in this case is a translation unit. Each .o file built with line
+ * profiling enabled will emit to a different file. Only one file may be
+ * started at a time.
+ */
+COMPILER_RT_VISIBILITY
+void llvm_gcda_start_file(const char *orig_filename, const char version[4],
+ uint32_t checksum) {
+ const char *mode = "r+b";
+ filename = mangle_filename(orig_filename);
+
+ /* Try just opening the file. */
+ new_file = 0;
+ fd = open(filename, O_RDWR | O_BINARY);
+
+ if (fd == -1) {
+ /* Try opening the file, creating it if necessary. */
+ new_file = 1;
+ mode = "w+b";
+ fd = open(filename, O_RDWR | O_CREAT | O_BINARY, 0644);
+ if (fd == -1) {
+ /* Try creating the directories first then opening the file. */
+ __llvm_profile_recursive_mkdir(filename);
+ fd = open(filename, O_RDWR | O_CREAT | O_BINARY, 0644);
+ if (fd == -1) {
+ /* Bah! It's hopeless. */
+ int errnum = errno;
+ fprintf(stderr, "profiling: %s: cannot open: %s\n", filename,
+ strerror(errnum));
+ return;
+ }
+ }
+ }
+
+ /* Try to flock the file to serialize concurrent processes writing out to the
+ * same GCDA. This can fail if the filesystem doesn't support it, but in that
+ * case we'll just carry on with the old racy behaviour and hope for the best.
+ */
+ lprofLockFd(fd);
+ output_file = fdopen(fd, mode);
+
+ /* Initialize the write buffer. */
+ write_buffer = NULL;
+ cur_buffer_size = 0;
+ cur_pos = 0;
+
+ if (new_file) {
+ resize_write_buffer(WRITE_BUFFER_SIZE);
+ memset(write_buffer, 0, WRITE_BUFFER_SIZE);
+ } else {
+ if (map_file() == -1) {
+ /* mmap failed, try to recover by clobbering */
+ new_file = 1;
+ write_buffer = NULL;
+ cur_buffer_size = 0;
+ resize_write_buffer(WRITE_BUFFER_SIZE);
+ memset(write_buffer, 0, WRITE_BUFFER_SIZE);
+ }
+ }
+
+  /* gcda magic ("adcg"), version, and stamp checksum. */
+ write_bytes("adcg", 4);
+ write_bytes(version, 4);
+ write_32bit_value(checksum);
+
+#ifdef DEBUG_GCDAPROFILING
+ fprintf(stderr, "llvmgcda: [%s]\n", orig_filename);
+#endif
+}
+
+/* Given an array of pointers to counters (counters), increment the n-th one,
+ * where we're also given a pointer to n (predecessor).
+ */
+COMPILER_RT_VISIBILITY
+void llvm_gcda_increment_indirect_counter(uint32_t *predecessor,
+ uint64_t **counters) {
+ uint64_t *counter;
+ uint32_t pred;
+
+ pred = *predecessor;
+ if (pred == 0xffffffff)
+ return;
+ counter = counters[pred];
+
+ /* Don't crash if the pred# is out of sync. This can happen due to threads,
+ or because of a TODO in GCOVProfiling.cpp buildEdgeLookupTable(). */
+ if (counter)
+ ++*counter;
+#ifdef DEBUG_GCDAPROFILING
+ else
+ fprintf(stderr,
+ "llvmgcda: increment_indirect_counter counters=%08llx, pred=%u\n",
+ *counter, *predecessor);
+#endif
+}
+
+COMPILER_RT_VISIBILITY
+void llvm_gcda_emit_function(uint32_t ident, const char *function_name,
+ uint32_t func_checksum, uint8_t use_extra_checksum,
+ uint32_t cfg_checksum) {
+ uint32_t len = 2;
+
+ if (use_extra_checksum)
+ len++;
+#ifdef DEBUG_GCDAPROFILING
+ fprintf(stderr, "llvmgcda: function id=0x%08x name=%s\n", ident,
+ function_name ? function_name : "NULL");
+#endif
+ if (!output_file) return;
+
+ /* function tag */
+ write_bytes("\0\0\0\1", 4);
+ if (function_name)
+ len += 1 + length_of_string(function_name);
+ write_32bit_value(len);
+ write_32bit_value(ident);
+ write_32bit_value(func_checksum);
+ if (use_extra_checksum)
+ write_32bit_value(cfg_checksum);
+ if (function_name)
+ write_string(function_name);
+}
+
+COMPILER_RT_VISIBILITY
+void llvm_gcda_emit_arcs(uint32_t num_counters, uint64_t *counters) {
+ uint32_t i;
+ uint64_t *old_ctrs = NULL;
+ uint32_t val = 0;
+ uint64_t save_cur_pos = cur_pos;
+
+ if (!output_file) return;
+
+ val = read_le_32bit_value();
+
+ if (val != (uint32_t)-1) {
+ /* There are counters present in the file. Merge them. */
+ if (val != 0x01a10000) {
+ fprintf(stderr, "profiling: %s: cannot merge previous GCDA file: "
+ "corrupt arc tag (0x%08x)\n",
+ filename, val);
+ return;
+ }
+
+ val = read_32bit_value();
+ if (val == (uint32_t)-1 || val / 2 != num_counters) {
+ fprintf(stderr, "profiling: %s: cannot merge previous GCDA file: "
+ "mismatched number of counters (%d)\n",
+ filename, val);
+ return;
+ }
+
+ old_ctrs = malloc(sizeof(uint64_t) * num_counters);
+ for (i = 0; i < num_counters; ++i)
+ old_ctrs[i] = read_64bit_value();
+ }
+
+ cur_pos = save_cur_pos;
+
+ /* Counter #1 (arcs) tag */
+ write_bytes("\0\0\xa1\1", 4);
+ write_32bit_value(num_counters * 2);
+ for (i = 0; i < num_counters; ++i) {
+ counters[i] += (old_ctrs ? old_ctrs[i] : 0);
+ write_64bit_value(counters[i]);
+ }
+
+ free(old_ctrs);
+
+#ifdef DEBUG_GCDAPROFILING
+ fprintf(stderr, "llvmgcda: %u arcs\n", num_counters);
+ for (i = 0; i < num_counters; ++i)
+ fprintf(stderr, "llvmgcda: %llu\n", (unsigned long long)counters[i]);
+#endif
+}
+
+COMPILER_RT_VISIBILITY
+void llvm_gcda_summary_info() {
+ const uint32_t obj_summary_len = 9; /* Length for gcov compatibility. */
+ uint32_t i;
+ uint32_t runs = 1;
+ static uint32_t run_counted = 0; // We only want to increase the run count once.
+ uint32_t val = 0;
+ uint64_t save_cur_pos = cur_pos;
+
+ if (!output_file) return;
+
+ val = read_le_32bit_value();
+
+ if (val != (uint32_t)-1) {
+ /* There are counters present in the file. Merge them. */
+ if (val != 0xa1000000) {
+ fprintf(stderr, "profiling: %s: cannot merge previous run count: "
+ "corrupt object tag (0x%08x)\n",
+ filename, val);
+ return;
+ }
+
+ val = read_32bit_value(); /* length */
+ if (val != obj_summary_len) {
+ fprintf(stderr, "profiling: %s: cannot merge previous run count: "
+ "mismatched object length (%d)\n",
+ filename, val);
+ return;
+ }
+
+ read_32bit_value(); /* checksum, unused */
+ read_32bit_value(); /* num, unused */
+ uint32_t prev_runs = read_32bit_value();
+ /* Add previous run count to new counter, if not already counted before. */
+ runs = run_counted ? prev_runs : prev_runs + 1;
+ }
+
+ cur_pos = save_cur_pos;
+
+ /* Object summary tag */
+ write_bytes("\0\0\0\xa1", 4);
+ write_32bit_value(obj_summary_len);
+ write_32bit_value(0); /* checksum, unused */
+ write_32bit_value(0); /* num, unused */
+ write_32bit_value(runs);
+ for (i = 3; i < obj_summary_len; ++i)
+ write_32bit_value(0);
+
+ /* Program summary tag */
+ write_bytes("\0\0\0\xa3", 4); /* tag indicates 1 program */
+ write_32bit_value(0); /* 0 length */
+
+ run_counted = 1;
+
+#ifdef DEBUG_GCDAPROFILING
+ fprintf(stderr, "llvmgcda: %u runs\n", runs);
+#endif
+}
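+/* Example: the first process run writes runs == 1; a later run reads
+ * prev_runs == 1 and writes runs == 2. Within one process, run_counted
+ * ensures a second dump (e.g. after __gcov_flush()) re-writes the same run
+ * count instead of incrementing it again. */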
+
+COMPILER_RT_VISIBILITY
+void llvm_gcda_end_file() {
+ /* Write out EOF record. */
+ if (output_file) {
+ write_bytes("\0\0\0\0\0\0\0\0", 8);
+
+ if (new_file) {
+ fwrite(write_buffer, cur_pos, 1, output_file);
+ free(write_buffer);
+ } else {
+ unmap_file();
+ }
+
+ fflush(output_file);
+ lprofUnlockFd(fd);
+ fclose(output_file);
+ output_file = NULL;
+ write_buffer = NULL;
+ }
+ free(filename);
+
+#ifdef DEBUG_GCDAPROFILING
+ fprintf(stderr, "llvmgcda: -----\n");
+#endif
+}
+
+COMPILER_RT_VISIBILITY
+void llvm_register_writeout_function(fn_ptr fn) {
+ fn_list_insert(&writeout_fn_list, fn);
+}
+
+COMPILER_RT_VISIBILITY
+void llvm_writeout_files(void) {
+ struct fn_node *curr = writeout_fn_list.head;
+
+ while (curr) {
+ if (curr->id == CURRENT_ID) {
+ curr->fn();
+ }
+ curr = curr->next;
+ }
+}
+
+COMPILER_RT_VISIBILITY
+void llvm_delete_writeout_function_list(void) {
+ fn_list_remove(&writeout_fn_list);
+}
+
+COMPILER_RT_VISIBILITY
+void llvm_register_flush_function(fn_ptr fn) {
+ fn_list_insert(&flush_fn_list, fn);
+}
+
+void __gcov_flush() {
+ struct fn_node* curr = flush_fn_list.head;
+
+ while (curr) {
+ curr->fn();
+ curr = curr->next;
+ }
+}
+
+COMPILER_RT_VISIBILITY
+void llvm_delete_flush_function_list(void) {
+ fn_list_remove(&flush_fn_list);
+}
+
+COMPILER_RT_VISIBILITY
+void llvm_gcov_init(fn_ptr wfn, fn_ptr ffn) {
+ static int atexit_ran = 0;
+
+ if (wfn)
+ llvm_register_writeout_function(wfn);
+
+ if (ffn)
+ llvm_register_flush_function(ffn);
+
+ if (atexit_ran == 0) {
+ atexit_ran = 1;
+
+ /* Make sure we write out the data and delete the data structures. */
+ atexit(llvm_delete_flush_function_list);
+ atexit(llvm_delete_writeout_function_list);
+ atexit(llvm_writeout_files);
+ }
+}
+
+#endif
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfData.inc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfData.inc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfData.inc (revision 351984)
@@ -0,0 +1,752 @@
+/*===-- InstrProfData.inc - instr profiling runtime structures -*- C++ -*-=== *\
+|*
+|* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+|* See https://llvm.org/LICENSE.txt for license information.
+|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+|*
+\*===----------------------------------------------------------------------===*/
+/*
+ * This is the master file that defines all the data structures, signatures,
+ * and constant literals that are shared across the profiling runtime library,
+ * compiler (instrumentation), and host tools (reader/writer). The entities
+ * defined in this file affect the profile runtime ABI, the raw profile format,
+ * or both.
+ *
+ * The file has two identical copies. The master copy lives in LLVM and
+ * the other one sits in the compiler-rt/lib/profile directory. To make changes
+ * in this file, first modify the master copy and copy it over to compiler-rt.
+ * Testing of any change in this file can start only after the two copies are
+ * synced up.
+ *
+ * The first part of the file includes macros that define types, names, and
+ * initializers for the member fields of the core data structures. The field
+ * declarations for one structure are enabled by defining the field activation
+ * macro associated with that structure. Only one field activation record
+ * can be defined at a time and the remaining definitions will be filtered
+ * out by the preprocessor.
+ *
+ * Examples of how the template is used to instantiate structure definitions:
+ * 1. To declare a structure:
+ *
+ * struct ProfData {
+ * #define INSTR_PROF_DATA(Type, LLVMType, Name, Initializer) \
+ * Type Name;
+ * #include "llvm/ProfileData/InstrProfData.inc"
+ * };
+ *
+ * 2. To construct LLVM type arrays for the struct type:
+ *
+ * Type *DataTypes[] = {
+ * #define INSTR_PROF_DATA(Type, LLVMType, Name, Initializer) \
+ * LLVMType,
+ * #include "llvm/ProfileData/InstrProfData.inc"
+ * };
+ *
+ * 3. To construct a constant array for the initializers:
+ * #define INSTR_PROF_DATA(Type, LLVMType, Name, Initializer) \
+ * Initializer,
+ * Constant *ConstantVals[] = {
+ * #include "llvm/ProfileData/InstrProfData.inc"
+ * };
+ *
+ *
+ * The second part of the file includes definitions of all other entities that
+ * are related to the runtime ABI and format. When no field activation macro is
+ * defined, this file can be included to introduce the definitions.
+ *
+\*===----------------------------------------------------------------------===*/
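+
+/* For instance, the profile runtime declares the raw header struct by
+ * activating INSTR_PROF_RAW_HEADER before including this file (see
+ * InstrProfiling.h):
+ *
+ *   typedef struct __llvm_profile_header {
+ *   #define INSTR_PROF_RAW_HEADER(Type, Name, Initializer) Type Name;
+ *   #include "InstrProfData.inc"
+ *   } __llvm_profile_header;
+ */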
+
+/* Functions marked with INSTR_PROF_VISIBILITY must have hidden visibility in
+ * the compiler runtime. */
+#ifndef INSTR_PROF_VISIBILITY
+#define INSTR_PROF_VISIBILITY
+#endif
+
+/* INSTR_PROF_DATA start. */
+/* Definition of member fields of the per-function control structure. */
+#ifndef INSTR_PROF_DATA
+#define INSTR_PROF_DATA(Type, LLVMType, Name, Initializer)
+#else
+#define INSTR_PROF_DATA_DEFINED
+#endif
+INSTR_PROF_DATA(const uint64_t, llvm::Type::getInt64Ty(Ctx), NameRef, \
+ ConstantInt::get(llvm::Type::getInt64Ty(Ctx), \
+ IndexedInstrProf::ComputeHash(getPGOFuncNameVarInitializer(Inc->getName()))))
+INSTR_PROF_DATA(const uint64_t, llvm::Type::getInt64Ty(Ctx), FuncHash, \
+ ConstantInt::get(llvm::Type::getInt64Ty(Ctx), \
+ Inc->getHash()->getZExtValue()))
+INSTR_PROF_DATA(const IntPtrT, llvm::Type::getInt64PtrTy(Ctx), CounterPtr, \
+ ConstantExpr::getBitCast(CounterPtr, \
+ llvm::Type::getInt64PtrTy(Ctx)))
+/* This is used to map function pointers for the indirect call targets to
+ * function name hashes during the conversion from raw to merged profile
+ * data.
+ */
+INSTR_PROF_DATA(const IntPtrT, llvm::Type::getInt8PtrTy(Ctx), FunctionPointer, \
+ FunctionAddr)
+INSTR_PROF_DATA(IntPtrT, llvm::Type::getInt8PtrTy(Ctx), Values, \
+ ValuesPtrExpr)
+INSTR_PROF_DATA(const uint32_t, llvm::Type::getInt32Ty(Ctx), NumCounters, \
+ ConstantInt::get(llvm::Type::getInt32Ty(Ctx), NumCounters))
+INSTR_PROF_DATA(const uint16_t, Int16ArrayTy, NumValueSites[IPVK_Last+1], \
+ ConstantArray::get(Int16ArrayTy, Int16ArrayVals))
+#undef INSTR_PROF_DATA
+/* INSTR_PROF_DATA end. */
+
+
+/* This is an internal data structure used by the value profiler. It
+ * is defined here so that the serialization code can be shared with
+ * LLVM and exercised in unit tests.
+ *
+ * typedef struct ValueProfNode {
+ * // InstrProfValueData VData;
+ * uint64_t Value;
+ * uint64_t Count;
+ * struct ValueProfNode *Next;
+ * } ValueProfNode;
+ */
+/* INSTR_PROF_VALUE_NODE start. */
+#ifndef INSTR_PROF_VALUE_NODE
+#define INSTR_PROF_VALUE_NODE(Type, LLVMType, Name, Initializer)
+#else
+#define INSTR_PROF_DATA_DEFINED
+#endif
+INSTR_PROF_VALUE_NODE(uint64_t, llvm::Type::getInt64Ty(Ctx), Value, \
+                      ConstantInt::get(llvm::Type::getInt64Ty(Ctx), 0))
+INSTR_PROF_VALUE_NODE(uint64_t, llvm::Type::getInt64Ty(Ctx), Count, \
+                      ConstantInt::get(llvm::Type::getInt64Ty(Ctx), 0))
+INSTR_PROF_VALUE_NODE(PtrToNodeT, llvm::Type::getInt8PtrTy(Ctx), Next, \
+                      ConstantInt::get(llvm::Type::getInt8PtrTy(Ctx), 0))
+#undef INSTR_PROF_VALUE_NODE
+/* INSTR_PROF_VALUE_NODE end. */
+
+/* INSTR_PROF_RAW_HEADER start */
+/* Definition of member fields of the raw profile header data structure. */
+#ifndef INSTR_PROF_RAW_HEADER
+#define INSTR_PROF_RAW_HEADER(Type, Name, Initializer)
+#else
+#define INSTR_PROF_DATA_DEFINED
+#endif
+INSTR_PROF_RAW_HEADER(uint64_t, Magic, __llvm_profile_get_magic())
+INSTR_PROF_RAW_HEADER(uint64_t, Version, __llvm_profile_get_version())
+INSTR_PROF_RAW_HEADER(uint64_t, DataSize, DataSize)
+INSTR_PROF_RAW_HEADER(uint64_t, CountersSize, CountersSize)
+INSTR_PROF_RAW_HEADER(uint64_t, NamesSize, NamesSize)
+INSTR_PROF_RAW_HEADER(uint64_t, CountersDelta, (uintptr_t)CountersBegin)
+INSTR_PROF_RAW_HEADER(uint64_t, NamesDelta, (uintptr_t)NamesBegin)
+INSTR_PROF_RAW_HEADER(uint64_t, ValueKindLast, IPVK_Last)
+#undef INSTR_PROF_RAW_HEADER
+/* INSTR_PROF_RAW_HEADER end */
+
+/* VALUE_PROF_FUNC_PARAM start */
+/* Definition of parameter types of the runtime API used to do value profiling
+ * for a given value site.
+ */
+#ifndef VALUE_PROF_FUNC_PARAM
+#define VALUE_PROF_FUNC_PARAM(ArgType, ArgName, ArgLLVMType)
+#define INSTR_PROF_COMMA
+#else
+#define INSTR_PROF_DATA_DEFINED
+#define INSTR_PROF_COMMA ,
+#endif
+VALUE_PROF_FUNC_PARAM(uint64_t, TargetValue, Type::getInt64Ty(Ctx)) \
+ INSTR_PROF_COMMA
+VALUE_PROF_FUNC_PARAM(void *, Data, Type::getInt8PtrTy(Ctx)) INSTR_PROF_COMMA
+#ifndef VALUE_RANGE_PROF
+VALUE_PROF_FUNC_PARAM(uint32_t, CounterIndex, Type::getInt32Ty(Ctx))
+#else /* VALUE_RANGE_PROF */
+VALUE_PROF_FUNC_PARAM(uint32_t, CounterIndex, Type::getInt32Ty(Ctx)) \
+ INSTR_PROF_COMMA
+VALUE_PROF_FUNC_PARAM(uint64_t, PreciseRangeStart, Type::getInt64Ty(Ctx)) \
+ INSTR_PROF_COMMA
+VALUE_PROF_FUNC_PARAM(uint64_t, PreciseRangeLast, Type::getInt64Ty(Ctx)) \
+ INSTR_PROF_COMMA
+VALUE_PROF_FUNC_PARAM(uint64_t, LargeValue, Type::getInt64Ty(Ctx))
+#endif /*VALUE_RANGE_PROF */
+#undef VALUE_PROF_FUNC_PARAM
+#undef INSTR_PROF_COMMA
+/* VALUE_PROF_FUNC_PARAM end */
+
+/* VALUE_PROF_KIND start */
+#ifndef VALUE_PROF_KIND
+#define VALUE_PROF_KIND(Enumerator, Value, Descr)
+#else
+#define INSTR_PROF_DATA_DEFINED
+#endif
+/* For indirect function call value profiling, the addresses of the target
+ * functions are profiled by the instrumented code. The target addresses are
+ * written in the raw profile data and converted to the target function
+ * name's MD5 hash by the profile reader during deserialization. Typically,
+ * this happens when the raw profile data is read during profile merging.
+ *
+ * For this remapping the ProfData is used. ProfData contains both the function
+ * name hash and the function address.
+ */
+VALUE_PROF_KIND(IPVK_IndirectCallTarget, 0, "indirect call target")
+/* For size profiling of memory intrinsic functions. */
+VALUE_PROF_KIND(IPVK_MemOPSize, 1, "memory intrinsic functions size")
+/* These two kinds must be the last to be
+ * declared. This is to make sure the string
+ * array created with the template can be
+ * indexed with the kind value.
+ */
+VALUE_PROF_KIND(IPVK_First, IPVK_IndirectCallTarget, "first")
+VALUE_PROF_KIND(IPVK_Last, IPVK_MemOPSize, "last")
+
+#undef VALUE_PROF_KIND
+/* VALUE_PROF_KIND end */
+
+/* COVMAP_FUNC_RECORD start */
+/* Definition of member fields of the function record structure in coverage
+ * map.
+ */
+#ifndef COVMAP_FUNC_RECORD
+#define COVMAP_FUNC_RECORD(Type, LLVMType, Name, Initializer)
+#else
+#define INSTR_PROF_DATA_DEFINED
+#endif
+#ifdef COVMAP_V1
+COVMAP_FUNC_RECORD(const IntPtrT, llvm::Type::getInt8PtrTy(Ctx), \
+ NamePtr, llvm::ConstantExpr::getBitCast(NamePtr, \
+ llvm::Type::getInt8PtrTy(Ctx)))
+COVMAP_FUNC_RECORD(const uint32_t, llvm::Type::getInt32Ty(Ctx), NameSize, \
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx), \
+ NameValue.size()))
+#else
+COVMAP_FUNC_RECORD(const int64_t, llvm::Type::getInt64Ty(Ctx), NameRef, \
+ llvm::ConstantInt::get(llvm::Type::getInt64Ty(Ctx), \
+ llvm::IndexedInstrProf::ComputeHash(NameValue)))
+#endif
+COVMAP_FUNC_RECORD(const uint32_t, llvm::Type::getInt32Ty(Ctx), DataSize, \
+ llvm::ConstantInt::get(llvm::Type::getInt32Ty(Ctx),\
+ CoverageMapping.size()))
+COVMAP_FUNC_RECORD(const uint64_t, llvm::Type::getInt64Ty(Ctx), FuncHash, \
+ llvm::ConstantInt::get(llvm::Type::getInt64Ty(Ctx), FuncHash))
+#undef COVMAP_FUNC_RECORD
+/* COVMAP_FUNC_RECORD end. */
+
+/* COVMAP_HEADER start */
+/* Definition of member fields of coverage map header.
+ */
+#ifndef COVMAP_HEADER
+#define COVMAP_HEADER(Type, LLVMType, Name, Initializer)
+#else
+#define INSTR_PROF_DATA_DEFINED
+#endif
+COVMAP_HEADER(uint32_t, Int32Ty, NRecords, \
+ llvm::ConstantInt::get(Int32Ty, FunctionRecords.size()))
+COVMAP_HEADER(uint32_t, Int32Ty, FilenamesSize, \
+ llvm::ConstantInt::get(Int32Ty, FilenamesSize))
+COVMAP_HEADER(uint32_t, Int32Ty, CoverageSize, \
+ llvm::ConstantInt::get(Int32Ty, CoverageMappingSize))
+COVMAP_HEADER(uint32_t, Int32Ty, Version, \
+ llvm::ConstantInt::get(Int32Ty, CovMapVersion::CurrentVersion))
+#undef COVMAP_HEADER
+/* COVMAP_HEADER end. */
+
+
+#ifdef INSTR_PROF_SECT_ENTRY
+#define INSTR_PROF_DATA_DEFINED
+INSTR_PROF_SECT_ENTRY(IPSK_data, \
+ INSTR_PROF_QUOTE(INSTR_PROF_DATA_COMMON), \
+ INSTR_PROF_DATA_COFF, "__DATA,")
+INSTR_PROF_SECT_ENTRY(IPSK_cnts, \
+ INSTR_PROF_QUOTE(INSTR_PROF_CNTS_COMMON), \
+ INSTR_PROF_CNTS_COFF, "__DATA,")
+INSTR_PROF_SECT_ENTRY(IPSK_name, \
+ INSTR_PROF_QUOTE(INSTR_PROF_NAME_COMMON), \
+ INSTR_PROF_NAME_COFF, "__DATA,")
+INSTR_PROF_SECT_ENTRY(IPSK_vals, \
+ INSTR_PROF_QUOTE(INSTR_PROF_VALS_COMMON), \
+ INSTR_PROF_VALS_COFF, "__DATA,")
+INSTR_PROF_SECT_ENTRY(IPSK_vnodes, \
+ INSTR_PROF_QUOTE(INSTR_PROF_VNODES_COMMON), \
+ INSTR_PROF_VNODES_COFF, "__DATA,")
+INSTR_PROF_SECT_ENTRY(IPSK_covmap, \
+ INSTR_PROF_QUOTE(INSTR_PROF_COVMAP_COMMON), \
+ INSTR_PROF_COVMAP_COFF, "__LLVM_COV,")
+INSTR_PROF_SECT_ENTRY(IPSK_orderfile, \
+ INSTR_PROF_QUOTE(INSTR_PROF_ORDERFILE_COMMON), \
+ INSTR_PROF_QUOTE(INSTR_PROF_ORDERFILE_COFF), "__DATA,")
+
+#undef INSTR_PROF_SECT_ENTRY
+#endif
+
+
+#ifdef INSTR_PROF_VALUE_PROF_DATA
+#define INSTR_PROF_DATA_DEFINED
+
+#define INSTR_PROF_MAX_NUM_VAL_PER_SITE 255
+/*!
+ * This is the header of the data structure that defines the on-disk
+ * layout of the value profile data of a particular kind for one function.
+ */
+typedef struct ValueProfRecord {
+ /* The kind of the value profile record. */
+ uint32_t Kind;
+ /*
+ * The number of value profile sites. It is guaranteed to be non-zero;
+ * otherwise the record for this kind won't be emitted.
+ */
+ uint32_t NumValueSites;
+ /*
+ * The first element of the array that stores the number of profiled
+ * values for each value site. The size of the array is NumValueSites.
+ * Since NumValueSites is greater than zero, there is at least one
+ * element in the array.
+ */
+ uint8_t SiteCountArray[1];
+
+ /*
+   * The fake declaration is for documentation purposes only.
+   * It aligns the start of the next field to an 8-byte boundary.
+ uint8_t Padding[X];
+ */
+
+ /* The array of value profile data. The size of the array is the sum
+ * of all elements in SiteCountArray[].
+ InstrProfValueData ValueData[];
+ */
+
+#ifdef __cplusplus
+ /*!
+ * Return the number of value sites.
+ */
+ uint32_t getNumValueSites() const { return NumValueSites; }
+ /*!
+ * Read data from this record and save it to Record.
+ */
+ void deserializeTo(InstrProfRecord &Record,
+ InstrProfSymtab *SymTab);
+ /*
+ * In-place byte swap:
+   * Do a byte swap for this instance. \c Old is the original byte order
+   * before the swap, and \c New is the new byte order.
+ */
+ void swapBytes(support::endianness Old, support::endianness New);
+#endif
+} ValueProfRecord;
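+
+/* Illustrative on-disk layout (not normative) for a record with
+ * NumValueSites == 3 whose sites have 2, 0, and 1 profiled values:
+ *
+ *   Kind (4 bytes) | NumValueSites (4) | SiteCountArray {2, 0, 1} (3)
+ *   | Padding (5)  | InstrProfValueData ValueData[3]
+ *
+ * The 11-byte header is padded to 16 bytes so ValueData starts on an
+ * 8-byte boundary, as computed by getValueProfRecordHeaderSize(). */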
+
+/*!
+ * Per-function header/control data structure for value profiling
+ * data in indexed format.
+ */
+typedef struct ValueProfData {
+ /*
+ * Total size in bytes including this field. It must be a multiple
+ * of sizeof(uint64_t).
+ */
+ uint32_t TotalSize;
+ /*
+   * The number of value profile kinds that have value profile data.
+ * In this implementation, a value profile kind is considered to
+ * have profile data if the number of value profile sites for the
+ * kind is not zero. More aggressively, the implementation can
+ * choose to check the actual data value: if none of the value sites
+ * has any profiled values, the kind can be skipped.
+ */
+ uint32_t NumValueKinds;
+
+ /*
+ * Following are a sequence of variable length records. The prefix/header
+   * of each record is defined by the ValueProfRecord type. The number of
+ * records is NumValueKinds.
+ * ValueProfRecord Record_1;
+ * ValueProfRecord Record_N;
+ */
+
+#if __cplusplus
+ /*!
+ * Return the total size in bytes of the on-disk value profile data
+ * given the data stored in Record.
+ */
+ static uint32_t getSize(const InstrProfRecord &Record);
+ /*!
+ * Return a pointer to \c ValueProfData instance ready to be streamed.
+ */
+ static std::unique_ptr<ValueProfData>
+ serializeFrom(const InstrProfRecord &Record);
+ /*!
+ * Check the integrity of the record.
+ */
+ Error checkIntegrity();
+ /*!
+ * Return a pointer to \c ValueProfileData instance ready to be read.
+ * All data in the instance are properly byte swapped. The input
+ * data is assumed to be in little endian order.
+ */
+ static Expected<std::unique_ptr<ValueProfData>>
+ getValueProfData(const unsigned char *SrcBuffer,
+ const unsigned char *const SrcBufferEnd,
+ support::endianness SrcDataEndianness);
+ /*!
+ * Swap byte order from \c Endianness order to host byte order.
+ */
+ void swapBytesToHost(support::endianness Endianness);
+ /*!
+ * Swap byte order from host byte order to \c Endianness order.
+ */
+ void swapBytesFromHost(support::endianness Endianness);
+ /*!
+ * Return the total size of \c ValueProfileData.
+ */
+ uint32_t getSize() const { return TotalSize; }
+ /*!
+ * Read data from this data and save it to \c Record.
+ */
+ void deserializeTo(InstrProfRecord &Record,
+ InstrProfSymtab *SymTab);
+ void operator delete(void *ptr) { ::operator delete(ptr); }
+#endif
+} ValueProfData;
+
+/*
+ * The closure is designed to abstract away two types of value profile data:
+ * - InstrProfRecord which is the primary data structure used to
+ * represent profile data in host tools (reader, writer, and profile-use)
+ * - value profile runtime data structure suitable to be used by C
+ * runtime library.
+ *
+ * Both sources of data need to serialize to disk/memory-buffer in a common
+ * format: ValueProfData. The abstraction allows compiler-rt's raw profiler
+ * writer to share the same format and code with indexed profile writer.
+ *
+ * For documentation of the member methods below, refer to the corresponding
+ * methods in class InstrProfRecord.
+ */
+typedef struct ValueProfRecordClosure {
+ const void *Record;
+ uint32_t (*GetNumValueKinds)(const void *Record);
+ uint32_t (*GetNumValueSites)(const void *Record, uint32_t VKind);
+ uint32_t (*GetNumValueData)(const void *Record, uint32_t VKind);
+ uint32_t (*GetNumValueDataForSite)(const void *R, uint32_t VK, uint32_t S);
+
+ /*
+ * After extracting the value profile data from the value profile record,
+ * this method is used to map the in-memory value to on-disk value. If
+ * the method is null, value will be written out untranslated.
+ */
+ uint64_t (*RemapValueData)(uint32_t, uint64_t Value);
+ void (*GetValueForSite)(const void *R, InstrProfValueData *Dst, uint32_t K,
+ uint32_t S);
+ ValueProfData *(*AllocValueProfData)(size_t TotalSizeInBytes);
+} ValueProfRecordClosure;
+
+INSTR_PROF_VISIBILITY ValueProfRecord *
+getFirstValueProfRecord(ValueProfData *VPD);
+INSTR_PROF_VISIBILITY ValueProfRecord *
+getValueProfRecordNext(ValueProfRecord *VPR);
+INSTR_PROF_VISIBILITY InstrProfValueData *
+getValueProfRecordValueData(ValueProfRecord *VPR);
+INSTR_PROF_VISIBILITY uint32_t
+getValueProfRecordHeaderSize(uint32_t NumValueSites);
+
+#undef INSTR_PROF_VALUE_PROF_DATA
+#endif /* INSTR_PROF_VALUE_PROF_DATA */
+
+
+#ifdef INSTR_PROF_COMMON_API_IMPL
+#define INSTR_PROF_DATA_DEFINED
+#ifdef __cplusplus
+#define INSTR_PROF_INLINE inline
+#define INSTR_PROF_NULLPTR nullptr
+#else
+#define INSTR_PROF_INLINE
+#define INSTR_PROF_NULLPTR NULL
+#endif
+
+#ifndef offsetof
+#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
+#endif
+
+/*!
+ * Return the \c ValueProfRecord header size including the
+ * padding bytes.
+ */
+INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
+uint32_t getValueProfRecordHeaderSize(uint32_t NumValueSites) {
+ uint32_t Size = offsetof(ValueProfRecord, SiteCountArray) +
+ sizeof(uint8_t) * NumValueSites;
+ /* Round the size to multiple of 8 bytes. */
+ Size = (Size + 7) & ~7;
+ return Size;
+}
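+/* Example: NumValueSites == 10 gives Size == 8 + 10 == 18 before rounding,
+ * and the (Size + 7) & ~7 step returns 24, keeping the trailing value data
+ * array 8-byte aligned. */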
+
+/*!
+ * Return the total size of the value profile record including the
+ * header and the value data.
+ */
+INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
+uint32_t getValueProfRecordSize(uint32_t NumValueSites,
+ uint32_t NumValueData) {
+ return getValueProfRecordHeaderSize(NumValueSites) +
+ sizeof(InstrProfValueData) * NumValueData;
+}
+
+/*!
+ * Return the pointer to the start of value data array.
+ */
+INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
+InstrProfValueData *getValueProfRecordValueData(ValueProfRecord *This) {
+ return (InstrProfValueData *)((char *)This + getValueProfRecordHeaderSize(
+ This->NumValueSites));
+}
+
+/*!
+ * Return the total number of value data for \c This record.
+ */
+INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
+uint32_t getValueProfRecordNumValueData(ValueProfRecord *This) {
+ uint32_t NumValueData = 0;
+ uint32_t I;
+ for (I = 0; I < This->NumValueSites; I++)
+ NumValueData += This->SiteCountArray[I];
+ return NumValueData;
+}
+
+/*!
+ * Use this method to advance from \c This to the next \c ValueProfRecord.
+ */
+INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
+ValueProfRecord *getValueProfRecordNext(ValueProfRecord *This) {
+ uint32_t NumValueData = getValueProfRecordNumValueData(This);
+ return (ValueProfRecord *)((char *)This +
+ getValueProfRecordSize(This->NumValueSites,
+ NumValueData));
+}
+
+/*!
+ * Return the first \c ValueProfRecord instance.
+ */
+INSTR_PROF_VISIBILITY INSTR_PROF_INLINE
+ValueProfRecord *getFirstValueProfRecord(ValueProfData *This) {
+ return (ValueProfRecord *)((char *)This + sizeof(ValueProfData));
+}
+
+/* Closure based interfaces. */
+
+/*!
+ * Return the total size in bytes of the on-disk value profile data
+ * given the data stored in Record.
+ */
+INSTR_PROF_VISIBILITY uint32_t
+getValueProfDataSize(ValueProfRecordClosure *Closure) {
+ uint32_t Kind;
+ uint32_t TotalSize = sizeof(ValueProfData);
+ const void *Record = Closure->Record;
+
+ for (Kind = IPVK_First; Kind <= IPVK_Last; Kind++) {
+ uint32_t NumValueSites = Closure->GetNumValueSites(Record, Kind);
+ if (!NumValueSites)
+ continue;
+ TotalSize += getValueProfRecordSize(NumValueSites,
+ Closure->GetNumValueData(Record, Kind));
+ }
+ return TotalSize;
+}
+
+/*!
+ * Extract value profile data of a function for the profile kind \c ValueKind
+ * from the \c Closure and serialize the data into \c This record instance.
+ */
+INSTR_PROF_VISIBILITY void
+serializeValueProfRecordFrom(ValueProfRecord *This,
+ ValueProfRecordClosure *Closure,
+ uint32_t ValueKind, uint32_t NumValueSites) {
+ uint32_t S;
+ const void *Record = Closure->Record;
+ This->Kind = ValueKind;
+ This->NumValueSites = NumValueSites;
+ InstrProfValueData *DstVD = getValueProfRecordValueData(This);
+
+ for (S = 0; S < NumValueSites; S++) {
+ uint32_t ND = Closure->GetNumValueDataForSite(Record, ValueKind, S);
+ This->SiteCountArray[S] = ND;
+ Closure->GetValueForSite(Record, DstVD, ValueKind, S);
+ DstVD += ND;
+ }
+}
+
+/*!
+ * Extract value profile data of a function from the \c Closure
+ * and serialize the data into \c DstData if it is not NULL, or into heap
+ * memory allocated by the \c Closure's allocator method. If \c
+ * DstData is not null, the caller is expected to set the TotalSize
+ * in DstData.
+ */
+INSTR_PROF_VISIBILITY ValueProfData *
+serializeValueProfDataFrom(ValueProfRecordClosure *Closure,
+ ValueProfData *DstData) {
+ uint32_t Kind;
+ uint32_t TotalSize =
+ DstData ? DstData->TotalSize : getValueProfDataSize(Closure);
+
+ ValueProfData *VPD =
+ DstData ? DstData : Closure->AllocValueProfData(TotalSize);
+
+ VPD->TotalSize = TotalSize;
+ VPD->NumValueKinds = Closure->GetNumValueKinds(Closure->Record);
+ ValueProfRecord *VR = getFirstValueProfRecord(VPD);
+ for (Kind = IPVK_First; Kind <= IPVK_Last; Kind++) {
+ uint32_t NumValueSites = Closure->GetNumValueSites(Closure->Record, Kind);
+ if (!NumValueSites)
+ continue;
+ serializeValueProfRecordFrom(VR, Closure, Kind, NumValueSites);
+ VR = getValueProfRecordNext(VR);
+ }
+ return VPD;
+}
+
+#undef INSTR_PROF_COMMON_API_IMPL
+#endif /* INSTR_PROF_COMMON_API_IMPL */
+
+/*============================================================================*/
+
+#ifndef INSTR_PROF_DATA_DEFINED
+
+#ifndef INSTR_PROF_DATA_INC
+#define INSTR_PROF_DATA_INC
+
+/* Helper macros. */
+#define INSTR_PROF_SIMPLE_QUOTE(x) #x
+#define INSTR_PROF_QUOTE(x) INSTR_PROF_SIMPLE_QUOTE(x)
+#define INSTR_PROF_SIMPLE_CONCAT(x,y) x ## y
+#define INSTR_PROF_CONCAT(x,y) INSTR_PROF_SIMPLE_CONCAT(x,y)
+
+/* Magic number to detect file format and endianness.
+ * Use 255 at one end, since no UTF-8 file can use that character. Avoid 0,
+ * so that utilities, like strings, don't grab it as a string. 129 is also
+ * invalid UTF-8, and high enough to be interesting.
+ * Use "lprofr" in the centre to stand for "LLVM Profile Raw", or "lprofR"
+ * for 32-bit platforms.
+ */
+#define INSTR_PROF_RAW_MAGIC_64 (uint64_t)255 << 56 | (uint64_t)'l' << 48 | \
+ (uint64_t)'p' << 40 | (uint64_t)'r' << 32 | (uint64_t)'o' << 24 | \
+ (uint64_t)'f' << 16 | (uint64_t)'r' << 8 | (uint64_t)129
+#define INSTR_PROF_RAW_MAGIC_32 (uint64_t)255 << 56 | (uint64_t)'l' << 48 | \
+ (uint64_t)'p' << 40 | (uint64_t)'r' << 32 | (uint64_t)'o' << 24 | \
+ (uint64_t)'f' << 16 | (uint64_t)'R' << 8 | (uint64_t)129
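+/* Expanded, INSTR_PROF_RAW_MAGIC_64 is 0xff6c70726f667281, whose bytes from
+ * most to least significant are 0xff 'l' 'p' 'r' 'o' 'f' 'r' 0x81; the
+ * 32-bit variant only swaps the trailing 'r' for 'R' (0xff6c70726f665281).
+ * A reader seeing the byte-swapped value knows the file was written with
+ * the opposite endianness. */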
+
+/* Raw profile format version (starts from 1). */
+#define INSTR_PROF_RAW_VERSION 4
+/* Indexed profile format version (starts from 1). */
+#define INSTR_PROF_INDEX_VERSION 5
+/* Coverage mapping format version (starts from 0). */
+#define INSTR_PROF_COVMAP_VERSION 2
+
+/* Profile version is always of type uint64_t. Reserve the upper 8 bits in the
+ * version for other variants of profile. We set the lowest bit of the upper 8
+ * bits (i.e. bit 56) to 1 to indicate if this is an IR-level instrumentation
+ * generated profile, and 0 if this is a Clang FE generated profile.
+ * 1 in bit 57 indicates there are context-sensitive records in the profile.
+ */
+#define VARIANT_MASKS_ALL 0xff00000000000000ULL
+#define GET_VERSION(V) ((V) & ~VARIANT_MASKS_ALL)
+#define VARIANT_MASK_IR_PROF (0x1ULL << 56)
+#define VARIANT_MASK_CSIR_PROF (0x1ULL << 57)
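+/* Example: a version word of 0x0100000000000004ULL decodes via GET_VERSION
+ * to raw version 4, with VARIANT_MASK_IR_PROF set (IR-level instrumentation)
+ * and VARIANT_MASK_CSIR_PROF clear (no context-sensitive records). */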
+#define INSTR_PROF_RAW_VERSION_VAR __llvm_profile_raw_version
+#define INSTR_PROF_PROFILE_RUNTIME_VAR __llvm_profile_runtime
+
+/* The variable that holds the name of the profile data
+ * specified via command line. */
+#define INSTR_PROF_PROFILE_NAME_VAR __llvm_profile_filename
+
+/* section name strings common to all targets other
+ than WIN32 */
+#define INSTR_PROF_DATA_COMMON __llvm_prf_data
+#define INSTR_PROF_NAME_COMMON __llvm_prf_names
+#define INSTR_PROF_CNTS_COMMON __llvm_prf_cnts
+#define INSTR_PROF_VALS_COMMON __llvm_prf_vals
+#define INSTR_PROF_VNODES_COMMON __llvm_prf_vnds
+#define INSTR_PROF_COVMAP_COMMON __llvm_covmap
+#define INSTR_PROF_ORDERFILE_COMMON __llvm_orderfile
+/* Windows section names. Because these section names contain dollar characters,
+ * they must be quoted.
+ */
+#define INSTR_PROF_DATA_COFF ".lprfd$M"
+#define INSTR_PROF_NAME_COFF ".lprfn$M"
+#define INSTR_PROF_CNTS_COFF ".lprfc$M"
+#define INSTR_PROF_VALS_COFF ".lprfv$M"
+#define INSTR_PROF_VNODES_COFF ".lprfnd$M"
+#define INSTR_PROF_COVMAP_COFF ".lcovmap$M"
+#define INSTR_PROF_ORDERFILE_COFF ".lorderfile$M"
+
+#ifdef _WIN32
+/* Runtime section names and name strings. */
+#define INSTR_PROF_DATA_SECT_NAME INSTR_PROF_DATA_COFF
+#define INSTR_PROF_NAME_SECT_NAME INSTR_PROF_NAME_COFF
+#define INSTR_PROF_CNTS_SECT_NAME INSTR_PROF_CNTS_COFF
+/* Array of pointers. Each pointer points to a list
+ * of value nodes associated with one value site.
+ */
+#define INSTR_PROF_VALS_SECT_NAME INSTR_PROF_VALS_COFF
+/* Value profile nodes section. */
+#define INSTR_PROF_VNODES_SECT_NAME INSTR_PROF_VNODES_COFF
+#define INSTR_PROF_COVMAP_SECT_NAME INSTR_PROF_COVMAP_COFF
+#define INSTR_PROF_ORDERFILE_SECT_NAME INSTR_PROF_ORDERFILE_COFF
+#else
+/* Runtime section names and name strings. */
+#define INSTR_PROF_DATA_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_DATA_COMMON)
+#define INSTR_PROF_NAME_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_NAME_COMMON)
+#define INSTR_PROF_CNTS_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_CNTS_COMMON)
+/* Array of pointers. Each pointer points to a list
+ * of value nodes associated with one value site.
+ */
+#define INSTR_PROF_VALS_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_VALS_COMMON)
+/* Value profile nodes section. */
+#define INSTR_PROF_VNODES_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_VNODES_COMMON)
+#define INSTR_PROF_COVMAP_SECT_NAME INSTR_PROF_QUOTE(INSTR_PROF_COVMAP_COMMON)
+/* Order file instrumentation. */
+#define INSTR_PROF_ORDERFILE_SECT_NAME \
+ INSTR_PROF_QUOTE(INSTR_PROF_ORDERFILE_COMMON)
+#endif
+
+#define INSTR_PROF_ORDERFILE_BUFFER_NAME _llvm_order_file_buffer
+#define INSTR_PROF_ORDERFILE_BUFFER_NAME_STR \
+ INSTR_PROF_QUOTE(INSTR_PROF_ORDERFILE_BUFFER_NAME)
+#define INSTR_PROF_ORDERFILE_BUFFER_IDX_NAME _llvm_order_file_buffer_idx
+#define INSTR_PROF_ORDERFILE_BUFFER_IDX_NAME_STR \
+ INSTR_PROF_QUOTE(INSTR_PROF_ORDERFILE_BUFFER_IDX_NAME)
+
+/* Macros to define start/stop section symbol for a given
+ * section on Linux. For instance
+ * INSTR_PROF_SECT_START(INSTR_PROF_DATA_SECT_NAME) will
+ * expand to __start___llvm_prf_data
+ */
+#define INSTR_PROF_SECT_START(Sect) \
+ INSTR_PROF_CONCAT(__start_,Sect)
+#define INSTR_PROF_SECT_STOP(Sect) \
+ INSTR_PROF_CONCAT(__stop_,Sect)
+
+/* Value Profiling API linkage name. */
+#define INSTR_PROF_VALUE_PROF_FUNC __llvm_profile_instrument_target
+#define INSTR_PROF_VALUE_PROF_FUNC_STR \
+ INSTR_PROF_QUOTE(INSTR_PROF_VALUE_PROF_FUNC)
+#define INSTR_PROF_VALUE_RANGE_PROF_FUNC __llvm_profile_instrument_range
+#define INSTR_PROF_VALUE_RANGE_PROF_FUNC_STR \
+ INSTR_PROF_QUOTE(INSTR_PROF_VALUE_RANGE_PROF_FUNC)
+
+/* InstrProfile per-function control data alignment. */
+#define INSTR_PROF_DATA_ALIGNMENT 8
+
+/* The data structure that represents a tracked value by the
+ * value profiler.
+ */
+typedef struct InstrProfValueData {
+ /* Profiled value. */
+ uint64_t Value;
+ /* Number of times the value appears in the training run. */
+ uint64_t Count;
+} InstrProfValueData;
+
+#endif /* INSTR_PROF_DATA_INC */
+
+#ifndef INSTR_ORDER_FILE_INC
+/* The maximal # of functions: 128*1024 (the buffer size will be 128*4 KB). */
+#define INSTR_ORDER_FILE_BUFFER_SIZE 131072
+#define INSTR_ORDER_FILE_BUFFER_BITS 17
+#define INSTR_ORDER_FILE_BUFFER_MASK 0x1ffff
+#endif /* INSTR_ORDER_FILE_INC */
+#else
+#undef INSTR_PROF_DATA_DEFINED
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfData.inc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfiling.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfiling.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfiling.c (revision 351984)
@@ -0,0 +1,84 @@
+/*===- InstrProfiling.c - Support library for PGO instrumentation ---------===*\
+|*
+|* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+|* See https://llvm.org/LICENSE.txt for license information.
+|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+|*
+\*===----------------------------------------------------------------------===*/
+
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "InstrProfiling.h"
+#include "InstrProfilingInternal.h"
+
+#define INSTR_PROF_VALUE_PROF_DATA
+#include "InstrProfData.inc"
+
+
+COMPILER_RT_WEAK uint64_t INSTR_PROF_RAW_VERSION_VAR = INSTR_PROF_RAW_VERSION;
+
+COMPILER_RT_VISIBILITY uint64_t __llvm_profile_get_magic(void) {
+ return sizeof(void *) == sizeof(uint64_t) ? (INSTR_PROF_RAW_MAGIC_64)
+ : (INSTR_PROF_RAW_MAGIC_32);
+}
+
+static unsigned ProfileDumped = 0;
+
+COMPILER_RT_VISIBILITY unsigned lprofProfileDumped() {
+ return ProfileDumped;
+}
+
+COMPILER_RT_VISIBILITY void lprofSetProfileDumped() {
+ ProfileDumped = 1;
+}
+
+COMPILER_RT_VISIBILITY void __llvm_profile_set_dumped() {
+ lprofSetProfileDumped();
+}
+
+/* Return the number of bytes needed to add to SizeInBytes to make the
+ * result a multiple of 8.
+ */
+COMPILER_RT_VISIBILITY uint8_t
+__llvm_profile_get_num_padding_bytes(uint64_t SizeInBytes) {
+ return 7 & (sizeof(uint64_t) - SizeInBytes % sizeof(uint64_t));
+}
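+/* Example: SizeInBytes == 13 yields 7 & (8 - 13 % 8) == 7 & 3 == 3 padding
+ * bytes; an already-aligned SizeInBytes == 16 yields 7 & 8 == 0. */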
+
+COMPILER_RT_VISIBILITY uint64_t __llvm_profile_get_version(void) {
+ return __llvm_profile_raw_version;
+}
+
+COMPILER_RT_VISIBILITY void __llvm_profile_reset_counters(void) {
+ uint64_t *I = __llvm_profile_begin_counters();
+ uint64_t *E = __llvm_profile_end_counters();
+
+ memset(I, 0, sizeof(uint64_t) * (E - I));
+
+ const __llvm_profile_data *DataBegin = __llvm_profile_begin_data();
+ const __llvm_profile_data *DataEnd = __llvm_profile_end_data();
+ const __llvm_profile_data *DI;
+ for (DI = DataBegin; DI < DataEnd; ++DI) {
+ uint64_t CurrentVSiteCount = 0;
+ uint32_t VKI, i;
+ if (!DI->Values)
+ continue;
+
+ ValueProfNode **ValueCounters = (ValueProfNode **)DI->Values;
+
+ for (VKI = IPVK_First; VKI <= IPVK_Last; ++VKI)
+ CurrentVSiteCount += DI->NumValueSites[VKI];
+
+ for (i = 0; i < CurrentVSiteCount; ++i) {
+ ValueProfNode *CurrentVNode = ValueCounters[i];
+
+ while (CurrentVNode) {
+ CurrentVNode->Count = 0;
+ CurrentVNode = CurrentVNode->Next;
+ }
+ }
+ }
+ ProfileDumped = 0;
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfiling.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfiling.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfiling.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfiling.h (revision 351984)
@@ -0,0 +1,251 @@
+/*===- InstrProfiling.h- Support library for PGO instrumentation ----------===*\
+|*
+|* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+|* See https://llvm.org/LICENSE.txt for license information.
+|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+|*
+\*===----------------------------------------------------------------------===*/
+
+#ifndef PROFILE_INSTRPROFILING_H_
+#define PROFILE_INSTRPROFILING_H_
+
+#include "InstrProfilingPort.h"
+#include <stdio.h>
+
+#define INSTR_PROF_VISIBILITY COMPILER_RT_VISIBILITY
+#include "InstrProfData.inc"
+
+enum ValueKind {
+#define VALUE_PROF_KIND(Enumerator, Value, Descr) Enumerator = Value,
+#include "InstrProfData.inc"
+};
+
+typedef void *IntPtrT;
+typedef struct COMPILER_RT_ALIGNAS(INSTR_PROF_DATA_ALIGNMENT)
+ __llvm_profile_data {
+#define INSTR_PROF_DATA(Type, LLVMType, Name, Initializer) Type Name;
+#include "InstrProfData.inc"
+} __llvm_profile_data;
+
+typedef struct __llvm_profile_header {
+#define INSTR_PROF_RAW_HEADER(Type, Name, Initializer) Type Name;
+#include "InstrProfData.inc"
+} __llvm_profile_header;
+
+typedef struct ValueProfNode * PtrToNodeT;
+typedef struct ValueProfNode {
+#define INSTR_PROF_VALUE_NODE(Type, LLVMType, Name, Initializer) Type Name;
+#include "InstrProfData.inc"
+} ValueProfNode;
+
+/*!
+ * \brief Get the number of bytes necessary to pad the argument to an
+ * eight-byte boundary.
+ */
+uint8_t __llvm_profile_get_num_padding_bytes(uint64_t SizeInBytes);
+
+/*!
+ * \brief Get required size for profile buffer.
+ */
+uint64_t __llvm_profile_get_size_for_buffer(void);
+
+/*!
+ * \brief Write instrumentation data to the given buffer.
+ *
+ * \pre \c Buffer is the start of a buffer at least as big as \a
+ * __llvm_profile_get_size_for_buffer().
+ */
+int __llvm_profile_write_buffer(char *Buffer);
+
+const __llvm_profile_data *__llvm_profile_begin_data(void);
+const __llvm_profile_data *__llvm_profile_end_data(void);
+const char *__llvm_profile_begin_names(void);
+const char *__llvm_profile_end_names(void);
+uint64_t *__llvm_profile_begin_counters(void);
+uint64_t *__llvm_profile_end_counters(void);
+ValueProfNode *__llvm_profile_begin_vnodes();
+ValueProfNode *__llvm_profile_end_vnodes();
+uint32_t *__llvm_profile_begin_orderfile();
+
+/*!
+ * \brief Clear profile counters to zero.
+ *
+ */
+void __llvm_profile_reset_counters(void);
+
+/*!
+ * \brief Merge profile data from buffer.
+ *
+ * Read profile data from buffer \p Profile and merge it with the
+ * in-process profile counters. The client is expected to have
+ * checked, or to already know, that the profile data in the
+ * buffer matches the in-process counter structure before
+ * calling this function.
+ */
+void __llvm_profile_merge_from_buffer(const char *Profile, uint64_t Size);
+
+/*! \brief Check if profile in buffer matches the current binary.
+ *
+ * Returns 0 (success) if the profile data in buffer \p Profile with size
+ * \p Size was generated by the same binary and therefore structurally
+ * matches the in-process counters. If the profile data in the buffer is
+ * not compatible, the interface returns 1 (failure).
+ */
+int __llvm_profile_check_compatibility(const char *Profile,
+ uint64_t Size);
+
+/*!
+ * \brief Counts the number of times a target value is seen.
+ *
+ * Records the target value for the CounterIndex if not seen before. Otherwise,
+ * increments the counter associated with the target value.
+ * void __llvm_profile_instrument_target(uint64_t TargetValue, void *Data,
+ * uint32_t CounterIndex);
+ */
+void INSTR_PROF_VALUE_PROF_FUNC(
+#define VALUE_PROF_FUNC_PARAM(ArgType, ArgName, ArgLLVMType) ArgType ArgName
+#include "InstrProfData.inc"
+ );
+
+void __llvm_profile_instrument_target_value(uint64_t TargetValue, void *Data,
+ uint32_t CounterIndex,
+ uint64_t CounterValue);
+
+/*!
+ * \brief Write instrumentation data to the current file.
+ *
+ * Writes to the file with the last name given to
+ * \a __llvm_profile_set_filename(),
+ * or if it hasn't been called, the \c LLVM_PROFILE_FILE environment variable,
+ * or if that's not set, the last name set to INSTR_PROF_PROFILE_NAME_VAR,
+ * or if that's not set, \c "default.profraw".
+ */
+int __llvm_profile_write_file(void);
+
+int __llvm_orderfile_write_file(void);
+/*!
+ * \brief This is a wrapper interface to \c __llvm_profile_write_file.
+ * After this interface is invoked, an already-dumped flag will be set
+ * so that the profile won't be dumped again during program exit.
+ * Invocation of the interface __llvm_profile_reset_counters will clear
+ * the flag. This interface is designed to be used to collect profile
+ * data from user selected hot regions. The use model is
+ * __llvm_profile_reset_counters();
+ * ... hot region 1
+ * __llvm_profile_dump();
+ * .. some other code
+ * __llvm_profile_reset_counters();
+ * ... hot region 2
+ * __llvm_profile_dump();
+ *
+ * It is expected that online profile merging is on, with the \c %m specifier
+ * used in the profile filename. If merging is not turned on, the user is
+ * expected to invoke __llvm_profile_set_filename to specify different profile
+ * names for different regions before dumping, to avoid clobbering earlier
+ * profile writes.
+ */
+int __llvm_profile_dump(void);
+
+int __llvm_orderfile_dump(void);
+
+/*!
+ * \brief Set the filename for writing instrumentation data.
+ *
+ * Sets the filename to be used for subsequent calls to
+ * \a __llvm_profile_write_file().
+ *
+ * \c Name is not copied, so it must remain valid. Passing NULL resets the
+ * filename logic to the default behaviour.
+ */
+void __llvm_profile_set_filename(const char *Name);
+
+/*!
+ * \brief Set the FILE object for writing instrumentation data.
+ *
+ * Sets the FILE object to be used for subsequent calls to
+ * \a __llvm_profile_write_file(). The profile file name set by environment
+ * variable, command-line option, or calls to \a __llvm_profile_set_filename
+ * will be ignored.
+ *
+ * \c File will not be closed after a call to \a __llvm_profile_write_file() but
+ * it may be flushed. Passing NULL restores default behavior.
+ *
+ * If \c EnableMerge is nonzero, the runtime will always merge profiling data
+ * with the contents of the profiling file. If EnableMerge is zero, the runtime
+ * may still merge the data if it would have merged for another reason (for
+ * example, because of a %m specifier in the file name).
+ */
+void __llvm_profile_set_file_object(FILE *File, int EnableMerge);
+
+/*! \brief Register to write instrumentation data to file at exit. */
+int __llvm_profile_register_write_file_atexit(void);
+
+/*! \brief Initialize file handling. */
+void __llvm_profile_initialize_file(void);
+
+/*!
+ * \brief Return the path prefix (excluding the base filename) of the profile
+ * data. This is useful for users using \c -fprofile-generate=./path_prefix who
+ * do not care about the default raw profile name. It is also useful for
+ * collecting more than one profile data file dumped in the same directory
+ * (online merge mode is turned on for instrumented programs with shared libs).
+ * Side effect: this API call allocates memory with malloc.
+ */
+const char *__llvm_profile_get_path_prefix();
+
+/*!
+ * \brief Return the filename (including path) of the profile data. Note that
+ * if the user calls __llvm_profile_set_filename after invoking this interface,
+ * the actual file name may differ from what is returned here.
+ * Side effect: this API call invokes malloc to allocate memory.
+ */
+const char *__llvm_profile_get_filename();
+
+/*! \brief Get the magic token for the file format. */
+uint64_t __llvm_profile_get_magic(void);
+
+/*! \brief Get the version of the file format. */
+uint64_t __llvm_profile_get_version(void);
+
+/*! \brief Get the number of entries in the profile data section. */
+uint64_t __llvm_profile_get_data_size(const __llvm_profile_data *Begin,
+ const __llvm_profile_data *End);
+
+/*!
+ * \brief Set the flag that profile data has been dumped to the file.
+ * This is useful for disabling the profile dump for processes that
+ * don't have permission to write to disk, where attempting to do so
+ * could cause side effects such as crashes.
+ */
+void __llvm_profile_set_dumped();
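A minimal sketch of that scenario; running_in_sandbox() is a hypothetical predicate for a process that must not write to disk.

    if (running_in_sandbox())      /* hypothetical check */
      __llvm_profile_set_dumped(); /* the atexit writer becomes a no-op */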
+
+/*!
+ * This variable is defined in InstrProfilingRuntime.cc as a hidden
+ * symbol. Its main purpose is to let users of the profile runtime
+ * bypass the runtime initialization code -- if the client code explicitly
+ * defines this variable, then InstrProfilingRuntime.o won't be linked in.
+ * Note that this variable's visibility needs to be hidden so that the
+ * definition of this variable in an instrumented shared library won't
+ * affect the runtime initialization decision of the main program.
+ * __llvm_profile_runtime. */
+COMPILER_RT_VISIBILITY extern int INSTR_PROF_PROFILE_RUNTIME_VAR;
+
+/*!
+ * This variable is defined in InstrProfiling.c. Its main purpose is to
+ * encode the raw profile version value and other format related information
+ * such as whether the profile is from IR based instrumentation. The variable
+ * is defined as weak so that the compiler can emit an overriding definition
+ * depending on user options. Since we don't support mixing FE- and IR-based
+ * data in the same raw profile data file (in other words, shared libs and
+ * main program are expected to be instrumented in the same way), there is
+ * no need for this variable to be hidden.
+ */
+extern uint64_t INSTR_PROF_RAW_VERSION_VAR; /* __llvm_profile_raw_version */
+
+/*!
+ * This variable is a weak symbol defined in InstrProfiling.c. It allows
+ * compiler instrumentation to provide overriding definition with value
+ * from compiler command line. This variable has default visibility.
+ */
+extern char INSTR_PROF_PROFILE_NAME_VAR[1]; /* __llvm_profile_filename. */
+
+#endif /* PROFILE_INSTRPROFILING_H_ */
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfiling.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingBuffer.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingBuffer.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingBuffer.c (revision 351984)
@@ -0,0 +1,67 @@
+/*===- InstrProfilingBuffer.c - Write instrumentation to a memory buffer --===*\
+|*
+|* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+|* See https://llvm.org/LICENSE.txt for license information.
+|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+|*
+\*===----------------------------------------------------------------------===*/
+
+#include "InstrProfiling.h"
+#include "InstrProfilingInternal.h"
+
+COMPILER_RT_VISIBILITY
+uint64_t __llvm_profile_get_size_for_buffer(void) {
+ const __llvm_profile_data *DataBegin = __llvm_profile_begin_data();
+ const __llvm_profile_data *DataEnd = __llvm_profile_end_data();
+ const uint64_t *CountersBegin = __llvm_profile_begin_counters();
+ const uint64_t *CountersEnd = __llvm_profile_end_counters();
+ const char *NamesBegin = __llvm_profile_begin_names();
+ const char *NamesEnd = __llvm_profile_end_names();
+
+ return __llvm_profile_get_size_for_buffer_internal(
+ DataBegin, DataEnd, CountersBegin, CountersEnd, NamesBegin, NamesEnd);
+}
+
+COMPILER_RT_VISIBILITY
+uint64_t __llvm_profile_get_data_size(const __llvm_profile_data *Begin,
+ const __llvm_profile_data *End) {
+ intptr_t BeginI = (intptr_t)Begin, EndI = (intptr_t)End;
+ return ((EndI + sizeof(__llvm_profile_data) - 1) - BeginI) /
+ sizeof(__llvm_profile_data);
+}
+
+COMPILER_RT_VISIBILITY
+uint64_t __llvm_profile_get_size_for_buffer_internal(
+ const __llvm_profile_data *DataBegin, const __llvm_profile_data *DataEnd,
+ const uint64_t *CountersBegin, const uint64_t *CountersEnd,
+ const char *NamesBegin, const char *NamesEnd) {
+ /* Match logic in __llvm_profile_write_buffer(). */
+ const uint64_t NamesSize = (NamesEnd - NamesBegin) * sizeof(char);
+ const uint8_t Padding = __llvm_profile_get_num_padding_bytes(NamesSize);
+ return sizeof(__llvm_profile_header) +
+ (__llvm_profile_get_data_size(DataBegin, DataEnd) *
+ sizeof(__llvm_profile_data)) +
+ (CountersEnd - CountersBegin) * sizeof(uint64_t) + NamesSize + Padding;
+}
+
+COMPILER_RT_VISIBILITY
+void initBufferWriter(ProfDataWriter *BufferWriter, char *Buffer) {
+ BufferWriter->Write = lprofBufferWriter;
+ BufferWriter->WriterCtx = Buffer;
+}
+
+COMPILER_RT_VISIBILITY int __llvm_profile_write_buffer(char *Buffer) {
+ ProfDataWriter BufferWriter;
+ initBufferWriter(&BufferWriter, Buffer);
+ return lprofWriteData(&BufferWriter, 0, 0);
+}
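A hedged sketch of the buffer API defined here: size the buffer first, then serialize into it; what to do with the bytes afterwards is up to the caller.

    uint64_t Size = __llvm_profile_get_size_for_buffer();
    char *Buf = (char *)malloc(Size); /* needs <stdlib.h> */
    if (Buf && __llvm_profile_write_buffer(Buf) == 0) {
      /* Buf[0..Size) now holds a raw profile; send or store it */
    }
    free(Buf);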
+
+COMPILER_RT_VISIBILITY int __llvm_profile_write_buffer_internal(
+ char *Buffer, const __llvm_profile_data *DataBegin,
+ const __llvm_profile_data *DataEnd, const uint64_t *CountersBegin,
+ const uint64_t *CountersEnd, const char *NamesBegin, const char *NamesEnd) {
+ ProfDataWriter BufferWriter;
+ initBufferWriter(&BufferWriter, Buffer);
+ return lprofWriteDataImpl(&BufferWriter, DataBegin, DataEnd, CountersBegin,
+ CountersEnd, 0, NamesBegin, NamesEnd, 0);
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingBuffer.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingFile.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingFile.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingFile.c (revision 351984)
@@ -0,0 +1,792 @@
+/*===- InstrProfilingFile.c - Write instrumentation to a file -------------===*\
+|*
+|* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+|* See https://llvm.org/LICENSE.txt for license information.
+|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+|*
+\*===----------------------------------------------------------------------===*/
+
+#if !defined(__Fuchsia__)
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#ifdef _MSC_VER
+/* For _alloca. */
+#include <malloc.h>
+#endif
+#if defined(_WIN32)
+#include "WindowsMMap.h"
+/* For _chsize_s */
+#include <io.h>
+#include <process.h>
+#else
+#include <sys/file.h>
+#include <sys/mman.h>
+#include <unistd.h>
+#if defined(__linux__)
+#include <sys/types.h>
+#endif
+#endif
+
+#include "InstrProfiling.h"
+#include "InstrProfilingInternal.h"
+#include "InstrProfilingUtil.h"
+
+/* Where the profile name is specified from.
+ * The order of the enumerators defines
+ * their precedence; re-ordering them may
+ * change runtime behavior. */
+typedef enum ProfileNameSpecifier {
+ PNS_unknown = 0,
+ PNS_default,
+ PNS_command_line,
+ PNS_environment,
+ PNS_runtime_api
+} ProfileNameSpecifier;
+
+static const char *getPNSStr(ProfileNameSpecifier PNS) {
+ switch (PNS) {
+ case PNS_default:
+ return "default setting";
+ case PNS_command_line:
+ return "command line";
+ case PNS_environment:
+ return "environment variable";
+ case PNS_runtime_api:
+ return "runtime API";
+ default:
+ return "Unknown";
+ }
+}
+
+#define MAX_PID_SIZE 16
+/* Data structure holding the result of the parsed filename pattern. */
+typedef struct lprofFilename {
+ /* File name string possibly with %p or %h specifiers. */
+ const char *FilenamePat;
+ /* A flag indicating if FilenamePat's memory is allocated
+ * by runtime. */
+ unsigned OwnsFilenamePat;
+ const char *ProfilePathPrefix;
+ const char *Filename;
+ char PidChars[MAX_PID_SIZE];
+ char Hostname[COMPILER_RT_MAX_HOSTLEN];
+ unsigned NumPids;
+ unsigned NumHosts;
+ /* When in-process merging is enabled, this parameter specifies
+ * the total number of profile data files shared by all the processes
+ * spawned from the same binary. By default the value is 1. If merging
+ * is not enabled, its value should be 0. This parameter is specified
+ * by the %[0-9]m specifier. For instance, %2m enables merging using
+ * 2 profile data files; %1m is equivalent to %m. Also, the %m specifier
+ * can appear only once, at the end of the name pattern. */
+ unsigned MergePoolSize;
+ ProfileNameSpecifier PNS;
+} lprofFilename;
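Illustrative patterns exercising the specifiers described above (all names hypothetical): "prof-%p.profraw" substitutes the PID, "prof-%h.profraw" the hostname, and "prof-%2m.profraw" selects a merge pool of two files; %1m is equivalent to %m, and %m may appear only once.

    __llvm_profile_set_filename("prof-%h-%p-%2m.profraw"); /* hypothetical pattern */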
+
+COMPILER_RT_WEAK lprofFilename lprofCurFilename = {0, 0, 0, 0, {0},
+ {0}, 0, 0, 0, PNS_unknown};
+
+static int ProfileMergeRequested = 0;
+static int isProfileMergeRequested() { return ProfileMergeRequested; }
+static void setProfileMergeRequested(int EnableMerge) {
+ ProfileMergeRequested = EnableMerge;
+}
+
+static FILE *ProfileFile = NULL;
+static FILE *getProfileFile() { return ProfileFile; }
+static void setProfileFile(FILE *File) { ProfileFile = File; }
+
+COMPILER_RT_VISIBILITY void __llvm_profile_set_file_object(FILE *File,
+ int EnableMerge) {
+ setProfileFile(File);
+ setProfileMergeRequested(EnableMerge);
+}
+
+static int getCurFilenameLength();
+static const char *getCurFilename(char *FilenameBuf, int ForceUseBuf);
+static unsigned doMerging() {
+ return lprofCurFilename.MergePoolSize || isProfileMergeRequested();
+}
+
+/* Return 1 if there is an error, otherwise return 0. */
+static uint32_t fileWriter(ProfDataWriter *This, ProfDataIOVec *IOVecs,
+ uint32_t NumIOVecs) {
+ uint32_t I;
+ FILE *File = (FILE *)This->WriterCtx;
+ for (I = 0; I < NumIOVecs; I++) {
+ if (IOVecs[I].Data) {
+ if (fwrite(IOVecs[I].Data, IOVecs[I].ElmSize, IOVecs[I].NumElm, File) !=
+ IOVecs[I].NumElm)
+ return 1;
+ } else {
+ if (fseek(File, IOVecs[I].ElmSize * IOVecs[I].NumElm, SEEK_CUR) == -1)
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/* TODO: Make the buffer size controllable by an internal option; the compiler
+ * can pass the size to the runtime via a variable. */
+static uint32_t orderFileWriter(FILE *File, const uint32_t *DataStart) {
+ if (fwrite(DataStart, sizeof(uint32_t), INSTR_ORDER_FILE_BUFFER_SIZE, File) !=
+ INSTR_ORDER_FILE_BUFFER_SIZE)
+ return 1;
+ return 0;
+}
+
+static void initFileWriter(ProfDataWriter *This, FILE *File) {
+ This->Write = fileWriter;
+ This->WriterCtx = File;
+}
+
+COMPILER_RT_VISIBILITY ProfBufferIO *
+lprofCreateBufferIOInternal(void *File, uint32_t BufferSz) {
+ FreeHook = &free;
+ DynamicBufferIOBuffer = (uint8_t *)calloc(BufferSz, 1);
+ VPBufferSize = BufferSz;
+ ProfDataWriter *fileWriter =
+ (ProfDataWriter *)calloc(sizeof(ProfDataWriter), 1);
+ initFileWriter(fileWriter, File);
+ ProfBufferIO *IO = lprofCreateBufferIO(fileWriter);
+ IO->OwnFileWriter = 1;
+ return IO;
+}
+
+static void setupIOBuffer() {
+ const char *BufferSzStr = 0;
+ BufferSzStr = getenv("LLVM_VP_BUFFER_SIZE");
+ if (BufferSzStr && BufferSzStr[0]) {
+ VPBufferSize = atoi(BufferSzStr);
+ DynamicBufferIOBuffer = (uint8_t *)calloc(VPBufferSize, 1);
+ }
+}
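A sketch of driving this knob: a hypothetical launcher process sets the variable before exec'ing the instrumented binary, so it is visible by the time setupIOBuffer() runs.

    setenv("LLVM_VP_BUFFER_SIZE", "65536", /*overwrite=*/1); /* needs <stdlib.h> */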
+
+/* Read profile data in \c ProfileFile and merge it with the in-memory
+   profile counters. Returns -1 on a fatal error; otherwise
+   returns 0. Returning 0 does not mean a merge was actually
+   performed. If a merge is actually done, *MergeDone is set to 1.
+*/
+static int doProfileMerging(FILE *ProfileFile, int *MergeDone) {
+ uint64_t ProfileFileSize;
+ char *ProfileBuffer;
+
+ if (fseek(ProfileFile, 0L, SEEK_END) == -1) {
+ PROF_ERR("Unable to merge profile data, unable to get size: %s\n",
+ strerror(errno));
+ return -1;
+ }
+ ProfileFileSize = ftell(ProfileFile);
+
+ /* Restore file offset. */
+ if (fseek(ProfileFile, 0L, SEEK_SET) == -1) {
+ PROF_ERR("Unable to merge profile data, unable to rewind: %s\n",
+ strerror(errno));
+ return -1;
+ }
+
+ /* Nothing to merge. */
+ if (ProfileFileSize < sizeof(__llvm_profile_header)) {
+ if (ProfileFileSize)
+ PROF_WARN("Unable to merge profile data: %s\n",
+ "source profile file is too small.");
+ return 0;
+ }
+
+ ProfileBuffer = mmap(NULL, ProfileFileSize, PROT_READ, MAP_SHARED | MAP_FILE,
+ fileno(ProfileFile), 0);
+ if (ProfileBuffer == MAP_FAILED) {
+ PROF_ERR("Unable to merge profile data, mmap failed: %s\n",
+ strerror(errno));
+ return -1;
+ }
+
+ if (__llvm_profile_check_compatibility(ProfileBuffer, ProfileFileSize)) {
+ (void)munmap(ProfileBuffer, ProfileFileSize);
+ PROF_WARN("Unable to merge profile data: %s\n",
+ "source profile file is not compatible.");
+ return 0;
+ }
+
+ /* Now start merging */
+ __llvm_profile_merge_from_buffer(ProfileBuffer, ProfileFileSize);
+
+ // Truncate the file in case value-profile merging did not happen, to
+ // avoid leaving garbage data at the end of the profile file.
+ COMPILER_RT_FTRUNCATE(ProfileFile, __llvm_profile_get_size_for_buffer());
+
+ (void)munmap(ProfileBuffer, ProfileFileSize);
+ *MergeDone = 1;
+
+ return 0;
+}
+
+/* Create the directory holding the file, if needed. */
+static void createProfileDir(const char *Filename) {
+ size_t Length = strlen(Filename);
+ if (lprofFindFirstDirSeparator(Filename)) {
+ char *Copy = (char *)COMPILER_RT_ALLOCA(Length + 1);
+ strncpy(Copy, Filename, Length + 1);
+ __llvm_profile_recursive_mkdir(Copy);
+ }
+}
+
+/* Open the profile data for merging. It opens the file in r+b mode with
+ * file locking. If the file has content which is compatible with the
+ * current process, it also reads in the profile data from the file and merges
+ * it with the in-memory counters. After the profile data is merged in memory,
+ * the original profile data is truncated, ready for the profile
+ * dumper. With profile merging enabled, each executable, as well as any of
+ * its instrumented shared libraries, dumps profile data into its own data file.
+*/
+static FILE *openFileForMerging(const char *ProfileFileName, int *MergeDone) {
+ FILE *ProfileFile = NULL;
+ int rc;
+
+ ProfileFile = getProfileFile();
+ if (ProfileFile) {
+ lprofLockFileHandle(ProfileFile);
+ } else {
+ createProfileDir(ProfileFileName);
+ ProfileFile = lprofOpenFileEx(ProfileFileName);
+ }
+ if (!ProfileFile)
+ return NULL;
+
+ rc = doProfileMerging(ProfileFile, MergeDone);
+ if (rc || (!*MergeDone && COMPILER_RT_FTRUNCATE(ProfileFile, 0L)) ||
+ fseek(ProfileFile, 0L, SEEK_SET) == -1) {
+ PROF_ERR("Profile Merging of file %s failed: %s\n", ProfileFileName,
+ strerror(errno));
+ fclose(ProfileFile);
+ return NULL;
+ }
+ return ProfileFile;
+}
+
+static FILE *getFileObject(const char *OutputName) {
+ FILE *File;
+ File = getProfileFile();
+ if (File != NULL) {
+ return File;
+ }
+
+ return fopen(OutputName, "ab");
+}
+
+/* Write profile data to file \c OutputName. */
+static int writeFile(const char *OutputName) {
+ int RetVal;
+ FILE *OutputFile;
+
+ int MergeDone = 0;
+ VPMergeHook = &lprofMergeValueProfData;
+ if (doMerging())
+ OutputFile = openFileForMerging(OutputName, &MergeDone);
+ else
+ OutputFile = getFileObject(OutputName);
+
+ if (!OutputFile)
+ return -1;
+
+ FreeHook = &free;
+ setupIOBuffer();
+ ProfDataWriter fileWriter;
+ initFileWriter(&fileWriter, OutputFile);
+ RetVal = lprofWriteData(&fileWriter, lprofGetVPDataReader(), MergeDone);
+
+ if (OutputFile == getProfileFile()) {
+ fflush(OutputFile);
+ if (doMerging()) {
+ lprofUnlockFileHandle(OutputFile);
+ }
+ } else {
+ fclose(OutputFile);
+ }
+
+ return RetVal;
+}
+
+/* Write order data to file \c OutputName. */
+static int writeOrderFile(const char *OutputName) {
+ int RetVal;
+ FILE *OutputFile;
+
+ OutputFile = fopen(OutputName, "w");
+
+ if (!OutputFile) {
+ PROF_WARN("can't open file with mode w: %s\n", OutputName);
+ return -1;
+ }
+
+ FreeHook = &free;
+ setupIOBuffer();
+ const uint32_t *DataBegin = __llvm_profile_begin_orderfile();
+ RetVal = orderFileWriter(OutputFile, DataBegin);
+
+ fclose(OutputFile);
+ return RetVal;
+}
+
+static void truncateCurrentFile(void) {
+ const char *Filename;
+ char *FilenameBuf;
+ FILE *File;
+ int Length;
+
+ Length = getCurFilenameLength();
+ FilenameBuf = (char *)COMPILER_RT_ALLOCA(Length + 1);
+ Filename = getCurFilename(FilenameBuf, 0);
+ if (!Filename)
+ return;
+
+ /* Bypass file truncation to allow online raw profile
+ * merging. */
+ if (lprofCurFilename.MergePoolSize)
+ return;
+
+ createProfileDir(Filename);
+
+ /* Truncate the file. Later we'll reopen and append. */
+ File = fopen(Filename, "w");
+ if (!File)
+ return;
+ fclose(File);
+}
+
+static const char *DefaultProfileName = "default.profraw";
+static void resetFilenameToDefault(void) {
+ if (lprofCurFilename.FilenamePat && lprofCurFilename.OwnsFilenamePat) {
+ free((void *)lprofCurFilename.FilenamePat);
+ }
+ memset(&lprofCurFilename, 0, sizeof(lprofCurFilename));
+ lprofCurFilename.FilenamePat = DefaultProfileName;
+ lprofCurFilename.PNS = PNS_default;
+}
+
+static int containsMergeSpecifier(const char *FilenamePat, int I) {
+ return (FilenamePat[I] == 'm' ||
+ (FilenamePat[I] >= '1' && FilenamePat[I] <= '9' &&
+ /* If FilenamePat[I] is not '\0', the next byte is guaranteed
+ * to be in bounds, as the string is null-terminated. */
+ FilenamePat[I + 1] == 'm'));
+}
+
+/* Parses the pattern string \p FilenamePat and stores the result in the
+ * lprofCurFilename structure. */
+static int parseFilenamePattern(const char *FilenamePat,
+ unsigned CopyFilenamePat) {
+ int NumPids = 0, NumHosts = 0, I;
+ char *PidChars = &lprofCurFilename.PidChars[0];
+ char *Hostname = &lprofCurFilename.Hostname[0];
+ int MergingEnabled = 0;
+
+ /* Clean up cached prefix and filename. */
+ if (lprofCurFilename.ProfilePathPrefix)
+ free((void *)lprofCurFilename.ProfilePathPrefix);
+ if (lprofCurFilename.Filename)
+ free((void *)lprofCurFilename.Filename);
+
+ if (lprofCurFilename.FilenamePat && lprofCurFilename.OwnsFilenamePat) {
+ free((void *)lprofCurFilename.FilenamePat);
+ }
+
+ memset(&lprofCurFilename, 0, sizeof(lprofCurFilename));
+
+ if (!CopyFilenamePat)
+ lprofCurFilename.FilenamePat = FilenamePat;
+ else {
+ lprofCurFilename.FilenamePat = strdup(FilenamePat);
+ lprofCurFilename.OwnsFilenamePat = 1;
+ }
+ /* Check the filename for "%p", which indicates a pid-substitution. */
+ for (I = 0; FilenamePat[I]; ++I)
+ if (FilenamePat[I] == '%') {
+ if (FilenamePat[++I] == 'p') {
+ if (!NumPids++) {
+ if (snprintf(PidChars, MAX_PID_SIZE, "%ld", (long)getpid()) <= 0) {
+ PROF_WARN("Unable to get pid for filename pattern %s. Using the "
+ "default name.",
+ FilenamePat);
+ return -1;
+ }
+ }
+ } else if (FilenamePat[I] == 'h') {
+ if (!NumHosts++)
+ if (COMPILER_RT_GETHOSTNAME(Hostname, COMPILER_RT_MAX_HOSTLEN)) {
+ PROF_WARN("Unable to get hostname for filename pattern %s. Using "
+ "the default name.",
+ FilenamePat);
+ return -1;
+ }
+ } else if (containsMergeSpecifier(FilenamePat, I)) {
+ if (MergingEnabled) {
+ PROF_WARN("%%m specifier can only be specified once in %s.\n",
+ FilenamePat);
+ return -1;
+ }
+ MergingEnabled = 1;
+ if (FilenamePat[I] == 'm')
+ lprofCurFilename.MergePoolSize = 1;
+ else {
+ lprofCurFilename.MergePoolSize = FilenamePat[I] - '0';
+ I++; /* advance to 'm' */
+ }
+ }
+ }
+
+ lprofCurFilename.NumPids = NumPids;
+ lprofCurFilename.NumHosts = NumHosts;
+ return 0;
+}
+
+static void parseAndSetFilename(const char *FilenamePat,
+ ProfileNameSpecifier PNS,
+ unsigned CopyFilenamePat) {
+
+ const char *OldFilenamePat = lprofCurFilename.FilenamePat;
+ ProfileNameSpecifier OldPNS = lprofCurFilename.PNS;
+
+ if (PNS < OldPNS)
+ return;
+
+ if (!FilenamePat)
+ FilenamePat = DefaultProfileName;
+
+ if (OldFilenamePat && !strcmp(OldFilenamePat, FilenamePat)) {
+ lprofCurFilename.PNS = PNS;
+ return;
+ }
+
+ /* When PNS >= OldPNS, the last one wins. */
+ if (!FilenamePat || parseFilenamePattern(FilenamePat, CopyFilenamePat))
+ resetFilenameToDefault();
+ lprofCurFilename.PNS = PNS;
+
+ if (!OldFilenamePat) {
+ if (getenv("LLVM_PROFILE_VERBOSE"))
+ PROF_NOTE("Set profile file path to \"%s\" via %s.\n",
+ lprofCurFilename.FilenamePat, getPNSStr(PNS));
+ } else {
+ if (getenv("LLVM_PROFILE_VERBOSE"))
+ PROF_NOTE("Override old profile path \"%s\" via %s to \"%s\" via %s.\n",
+ OldFilenamePat, getPNSStr(OldPNS), lprofCurFilename.FilenamePat,
+ getPNSStr(PNS));
+ }
+
+ truncateCurrentFile();
+}
+
+/* Return the buffer length required to store the current profile
+ * filename with PID and hostname substitutions. */
+/* SIGLEN: length needed to hold a uint64_t signature followed by a
+ * 2-digit pool id, including the '_' separator. */
+#define SIGLEN 24
+static int getCurFilenameLength() {
+ int Len;
+ if (!lprofCurFilename.FilenamePat || !lprofCurFilename.FilenamePat[0])
+ return 0;
+
+ if (!(lprofCurFilename.NumPids || lprofCurFilename.NumHosts ||
+ lprofCurFilename.MergePoolSize))
+ return strlen(lprofCurFilename.FilenamePat);
+
+ Len = strlen(lprofCurFilename.FilenamePat) +
+ lprofCurFilename.NumPids * (strlen(lprofCurFilename.PidChars) - 2) +
+ lprofCurFilename.NumHosts * (strlen(lprofCurFilename.Hostname) - 2);
+ if (lprofCurFilename.MergePoolSize)
+ Len += SIGLEN;
+ return Len;
+}
+
+/* Return the pointer to the current profile file name (after substituting
+ * PIDs and hostnames in the filename pattern). \p FilenameBuf is the buffer
+ * used to store the resulting filename. If no substitution is needed, the
+ * current filename pattern string is returned directly, unless ForceUseBuf
+ * is enabled. */
+static const char *getCurFilename(char *FilenameBuf, int ForceUseBuf) {
+ int I, J, PidLength, HostNameLength, FilenamePatLength;
+ const char *FilenamePat = lprofCurFilename.FilenamePat;
+
+ if (!lprofCurFilename.FilenamePat || !lprofCurFilename.FilenamePat[0])
+ return 0;
+
+ if (!(lprofCurFilename.NumPids || lprofCurFilename.NumHosts ||
+ lprofCurFilename.MergePoolSize)) {
+ if (!ForceUseBuf)
+ return lprofCurFilename.FilenamePat;
+
+ FilenamePatLength = strlen(lprofCurFilename.FilenamePat);
+ memcpy(FilenameBuf, lprofCurFilename.FilenamePat, FilenamePatLength);
+ FilenameBuf[FilenamePatLength] = '\0';
+ return FilenameBuf;
+ }
+
+ PidLength = strlen(lprofCurFilename.PidChars);
+ HostNameLength = strlen(lprofCurFilename.Hostname);
+ /* Construct the new filename. */
+ for (I = 0, J = 0; FilenamePat[I]; ++I)
+ if (FilenamePat[I] == '%') {
+ if (FilenamePat[++I] == 'p') {
+ memcpy(FilenameBuf + J, lprofCurFilename.PidChars, PidLength);
+ J += PidLength;
+ } else if (FilenamePat[I] == 'h') {
+ memcpy(FilenameBuf + J, lprofCurFilename.Hostname, HostNameLength);
+ J += HostNameLength;
+ } else if (containsMergeSpecifier(FilenamePat, I)) {
+ char LoadModuleSignature[SIGLEN];
+ int S;
+ int ProfilePoolId = getpid() % lprofCurFilename.MergePoolSize;
+ S = snprintf(LoadModuleSignature, SIGLEN, "%" PRIu64 "_%d",
+ lprofGetLoadModuleSignature(), ProfilePoolId);
+ if (S == -1 || S > SIGLEN)
+ S = SIGLEN;
+ memcpy(FilenameBuf + J, LoadModuleSignature, S);
+ J += S;
+ if (FilenamePat[I] != 'm')
+ I++;
+ }
+ /* Drop any unknown substitutions. */
+ } else
+ FilenameBuf[J++] = FilenamePat[I];
+ FilenameBuf[J] = 0;
+
+ return FilenameBuf;
+}
+
+/* Returns the pointer to the environment variable
+ * string. Returns null if the env var is not set. */
+static const char *getFilenamePatFromEnv(void) {
+ const char *Filename = getenv("LLVM_PROFILE_FILE");
+ if (!Filename || !Filename[0])
+ return 0;
+ return Filename;
+}
+
+COMPILER_RT_VISIBILITY
+const char *__llvm_profile_get_path_prefix(void) {
+ int Length;
+ char *FilenameBuf, *Prefix;
+ const char *Filename, *PrefixEnd;
+
+ if (lprofCurFilename.ProfilePathPrefix)
+ return lprofCurFilename.ProfilePathPrefix;
+
+ Length = getCurFilenameLength();
+ FilenameBuf = (char *)COMPILER_RT_ALLOCA(Length + 1);
+ Filename = getCurFilename(FilenameBuf, 0);
+ if (!Filename)
+ return "\0";
+
+ PrefixEnd = lprofFindLastDirSeparator(Filename);
+ if (!PrefixEnd)
+ return "\0";
+
+ Length = PrefixEnd - Filename + 1;
+ Prefix = (char *)malloc(Length + 1);
+ if (!Prefix) {
+ PROF_ERR("Failed to %s\n", "allocate memory.");
+ return "\0";
+ }
+ memcpy(Prefix, Filename, Length);
+ Prefix[Length] = '\0';
+ lprofCurFilename.ProfilePathPrefix = Prefix;
+ return Prefix;
+}
+
+COMPILER_RT_VISIBILITY
+const char *__llvm_profile_get_filename(void) {
+ int Length;
+ char *FilenameBuf;
+ const char *Filename;
+
+ if (lprofCurFilename.Filename)
+ return lprofCurFilename.Filename;
+
+ Length = getCurFilenameLength();
+ FilenameBuf = (char *)malloc(Length + 1);
+ if (!FilenameBuf) {
+ PROF_ERR("Failed to %s\n", "allocate memory.");
+ return "\0";
+ }
+ Filename = getCurFilename(FilenameBuf, 1);
+ if (!Filename)
+ return "\0";
+
+ lprofCurFilename.Filename = FilenameBuf;
+ return FilenameBuf;
+}
+
+/* This method is invoked by the runtime initialization hook
+ * InstrProfilingRuntime.o if it is linked in. Both the user-specified
+ * profile path (via -fprofile-instr-generate=) and the LLVM_PROFILE_FILE
+ * environment variable can override this default value. */
+COMPILER_RT_VISIBILITY
+void __llvm_profile_initialize_file(void) {
+ const char *EnvFilenamePat;
+ const char *SelectedPat = NULL;
+ ProfileNameSpecifier PNS = PNS_unknown;
+ int hasCommandLineOverrider = (INSTR_PROF_PROFILE_NAME_VAR[0] != 0);
+
+ EnvFilenamePat = getFilenamePatFromEnv();
+ if (EnvFilenamePat) {
+ /* Pass CopyFilenamePat = 1 to ensure that the filename is still valid
+ when __llvm_profile_write_file() gets executed. */
+ parseAndSetFilename(EnvFilenamePat, PNS_environment, 1);
+ return;
+ } else if (hasCommandLineOverrider) {
+ SelectedPat = INSTR_PROF_PROFILE_NAME_VAR;
+ PNS = PNS_command_line;
+ } else {
+ SelectedPat = NULL;
+ PNS = PNS_default;
+ }
+
+ parseAndSetFilename(SelectedPat, PNS, 0);
+}
+
+/* This API is called directly by the user application code. It takes
+ * precedence over both the LLVM_PROFILE_FILE environment variable and
+ * the command line option -fprofile-instr-generate=<profile_name>.
+ */
+COMPILER_RT_VISIBILITY
+void __llvm_profile_set_filename(const char *FilenamePat) {
+ parseAndSetFilename(FilenamePat, PNS_runtime_api, 1);
+}
+
+/* The public API for writing profile data into the file with the name
+ * set by previous calls to __llvm_profile_set_filename or
+ * __llvm_profile_override_default_filename or
+ * __llvm_profile_initialize_file. */
+COMPILER_RT_VISIBILITY
+int __llvm_profile_write_file(void) {
+ int rc, Length;
+ const char *Filename;
+ char *FilenameBuf;
+ int PDeathSig = 0;
+
+ if (lprofProfileDumped()) {
+ PROF_NOTE("Profile data not written to file: %s.\n", "already written");
+ return 0;
+ }
+
+ Length = getCurFilenameLength();
+ FilenameBuf = (char *)COMPILER_RT_ALLOCA(Length + 1);
+ Filename = getCurFilename(FilenameBuf, 0);
+
+ /* Check the filename. */
+ if (!Filename) {
+ PROF_ERR("Failed to write file : %s\n", "Filename not set");
+ return -1;
+ }
+
+ /* Check if there is llvm/runtime version mismatch. */
+ if (GET_VERSION(__llvm_profile_get_version()) != INSTR_PROF_RAW_VERSION) {
+ PROF_ERR("Runtime and instrumentation version mismatch: "
+ "expected %d, but got %d\n",
+ INSTR_PROF_RAW_VERSION,
+ (int)GET_VERSION(__llvm_profile_get_version()));
+ return -1;
+ }
+
+ // Temporarily suspend getting SIGKILL when the parent exits.
+ PDeathSig = lprofSuspendSigKill();
+
+ /* Write profile data to the file. */
+ rc = writeFile(Filename);
+ if (rc)
+ PROF_ERR("Failed to write file \"%s\": %s\n", Filename, strerror(errno));
+
+ // Restore SIGKILL.
+ if (PDeathSig == 1)
+ lprofRestoreSigKill();
+
+ return rc;
+}
+
+COMPILER_RT_VISIBILITY
+int __llvm_profile_dump(void) {
+ if (!doMerging())
+ PROF_WARN("Later invocation of __llvm_profile_dump can lead to clobbering "
+ "of previously dumped profile data: %s. Either use %%m "
+ "in the profile name or change the profile name before dumping.\n",
+ "online profile merging is not on");
+ int rc = __llvm_profile_write_file();
+ lprofSetProfileDumped();
+ return rc;
+}
+
+/* Order file data will be saved in a file with the suffix .order. */
+static const char *OrderFileSuffix = ".order";
+
+COMPILER_RT_VISIBILITY
+int __llvm_orderfile_write_file(void) {
+ int rc, Length, LengthBeforeAppend, SuffixLength;
+ const char *Filename;
+ char *FilenameBuf;
+ int PDeathSig = 0;
+
+ SuffixLength = strlen(OrderFileSuffix);
+ Length = getCurFilenameLength() + SuffixLength;
+ FilenameBuf = (char *)COMPILER_RT_ALLOCA(Length + 1);
+ Filename = getCurFilename(FilenameBuf, 1);
+
+ /* Check the filename. */
+ if (!Filename) {
+ PROF_ERR("Failed to write file : %s\n", "Filename not set");
+ return -1;
+ }
+
+ /* Append order file suffix */
+ LengthBeforeAppend = strlen(Filename);
+ memcpy(FilenameBuf + LengthBeforeAppend, OrderFileSuffix, SuffixLength);
+ FilenameBuf[LengthBeforeAppend + SuffixLength] = '\0';
+
+ /* Check if there is llvm/runtime version mismatch. */
+ if (GET_VERSION(__llvm_profile_get_version()) != INSTR_PROF_RAW_VERSION) {
+ PROF_ERR("Runtime and instrumentation version mismatch: "
+ "expected %d, but got %d\n",
+ INSTR_PROF_RAW_VERSION,
+ (int)GET_VERSION(__llvm_profile_get_version()));
+ return -1;
+ }
+
+ // Temporarily suspend getting SIGKILL when the parent exits.
+ PDeathSig = lprofSuspendSigKill();
+
+ /* Write order data to the file. */
+ rc = writeOrderFile(Filename);
+ if (rc)
+ PROF_ERR("Failed to write file \"%s\": %s\n", Filename, strerror(errno));
+
+ // Restore SIGKILL.
+ if (PDeathSig == 1)
+ lprofRestoreSigKill();
+
+ return rc;
+}
+
+COMPILER_RT_VISIBILITY
+int __llvm_orderfile_dump(void) {
+ int rc = __llvm_orderfile_write_file();
+ return rc;
+}
+
+static void writeFileWithoutReturn(void) { __llvm_profile_write_file(); }
+
+COMPILER_RT_VISIBILITY
+int __llvm_profile_register_write_file_atexit(void) {
+ static int HasBeenRegistered = 0;
+
+ if (HasBeenRegistered)
+ return 0;
+
+ lprofSetupValueProfiler();
+
+ HasBeenRegistered = 1;
+ return atexit(writeFileWithoutReturn);
+}
+
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingFile.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingInternal.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingInternal.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingInternal.h (revision 351984)
@@ -0,0 +1,189 @@
+/*===- InstrProfilingInternal.h - Support library for PGO instrumentation -===*\
+|*
+|* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+|* See https://llvm.org/LICENSE.txt for license information.
+|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+|*
+\*===----------------------------------------------------------------------===*/
+
+#ifndef PROFILE_INSTRPROFILING_INTERNALH_
+#define PROFILE_INSTRPROFILING_INTERNALH_
+
+#include <stddef.h>
+
+#include "InstrProfiling.h"
+
+/*!
+ * \brief Write instrumentation data to the given buffer, given explicit
+ * pointers to the live data in memory. This function is probably not what you
+ * want. Use __llvm_profile_get_size_for_buffer instead. Use this function if
+ * your program has a custom memory layout.
+ */
+uint64_t __llvm_profile_get_size_for_buffer_internal(
+ const __llvm_profile_data *DataBegin, const __llvm_profile_data *DataEnd,
+ const uint64_t *CountersBegin, const uint64_t *CountersEnd,
+ const char *NamesBegin, const char *NamesEnd);
+
+/*!
+ * \brief Write instrumentation data to the given buffer, given explicit
+ * pointers to the live data in memory. This function is probably not what you
+ * want. Use __llvm_profile_write_buffer instead. Use this function if your
+ * program has a custom memory layout.
+ *
+ * \pre \c Buffer is the start of a buffer at least as big as \a
+ * __llvm_profile_get_size_for_buffer_internal().
+ */
+int __llvm_profile_write_buffer_internal(
+ char *Buffer, const __llvm_profile_data *DataBegin,
+ const __llvm_profile_data *DataEnd, const uint64_t *CountersBegin,
+ const uint64_t *CountersEnd, const char *NamesBegin, const char *NamesEnd);
+
+/*!
+ * The data structure describing the data to be written by the
+ * low level writer callback function.
+ */
+typedef struct ProfDataIOVec {
+ const void *Data;
+ size_t ElmSize;
+ size_t NumElm;
+} ProfDataIOVec;
+
+struct ProfDataWriter;
+typedef uint32_t (*WriterCallback)(struct ProfDataWriter *This, ProfDataIOVec *,
+ uint32_t NumIOVecs);
+
+typedef struct ProfDataWriter {
+ WriterCallback Write;
+ void *WriterCtx;
+} ProfDataWriter;
+
+/*!
+ * The data structure for buffered IO of profile data.
+ */
+typedef struct ProfBufferIO {
+ ProfDataWriter *FileWriter;
+ uint32_t OwnFileWriter;
+ /* The start of the buffer. */
+ uint8_t *BufferStart;
+ /* Total size of the buffer. */
+ uint32_t BufferSz;
+ /* Current byte offset from the start of the buffer. */
+ uint32_t CurOffset;
+} ProfBufferIO;
+
+/* The creator interface used by testing. */
+ProfBufferIO *lprofCreateBufferIOInternal(void *File, uint32_t BufferSz);
+
+/*!
+ * This is the interface to create a handle for buffered IO.
+ */
+ProfBufferIO *lprofCreateBufferIO(ProfDataWriter *FileWriter);
+
+/*!
+ * The interface to destroy the bufferIO handle and reclaim
+ * the memory.
+ */
+void lprofDeleteBufferIO(ProfBufferIO *BufferIO);
+
+/*!
+ * This is the interface to write \c Data of \c Size bytes through
+ * \c BufferIO. Returns 0 if successful; otherwise returns -1.
+ */
+int lprofBufferIOWrite(ProfBufferIO *BufferIO, const uint8_t *Data,
+ uint32_t Size);
+/*!
+ * The interface to flush the remaining data in the buffer
+ * through the low-level writer callback.
+ */
+int lprofBufferIOFlush(ProfBufferIO *BufferIO);
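A hedged sketch of the buffered-IO flow declared above; Writer, Data, and Size are assumptions standing in for an initialized ProfDataWriter and a caller-owned byte span.

    ProfBufferIO *IO = lprofCreateBufferIO(Writer);
    if (lprofBufferIOWrite(IO, Data, Size) == 0 &&
        lprofBufferIOFlush(IO) == 0) {
      /* every byte has been handed to Writer->Write */
    }
    lprofDeleteBufferIO(IO);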
+
+/* The low level interface to write data into a buffer. It is used as the
+ * callback by other high level writer methods such as buffered IO writer
+ * and profile data writer. */
+uint32_t lprofBufferWriter(ProfDataWriter *This, ProfDataIOVec *IOVecs,
+ uint32_t NumIOVecs);
+void initBufferWriter(ProfDataWriter *BufferWriter, char *Buffer);
+
+struct ValueProfData;
+struct ValueProfRecord;
+struct InstrProfValueData;
+struct ValueProfNode;
+
+/*!
+ * The class that defines a set of methods to read value profile
+ * data for streaming/serialization from the instrumentation runtime.
+ */
+typedef struct VPDataReaderType {
+ uint32_t (*InitRTRecord)(const __llvm_profile_data *Data,
+ uint8_t *SiteCountArray[]);
+ /* Function pointer to getValueProfRecordHeader method. */
+ uint32_t (*GetValueProfRecordHeaderSize)(uint32_t NumSites);
+ /* Function pointer to getFirstValueProfRecord method. */
+ struct ValueProfRecord *(*GetFirstValueProfRecord)(struct ValueProfData *);
+ /* Return the number of value data for site \p Site. */
+ uint32_t (*GetNumValueDataForSite)(uint32_t VK, uint32_t Site);
+ /* Return the total size of the value profile data of the
+ * current function. */
+ uint32_t (*GetValueProfDataSize)(void);
+ /*!
+ * Read the next \p N value data for site \p Site and store the data
+ * in \p Dst. \p StartNode is the first value node to start with if
+ * it is not null. The function returns the pointer to the value
+ * node pointer to be used as the \p StartNode of the next batch reading.
+ * If there is nothing left, it returns NULL.
+ */
+ struct ValueProfNode *(*GetValueData)(uint32_t ValueKind, uint32_t Site,
+ struct InstrProfValueData *Dst,
+ struct ValueProfNode *StartNode,
+ uint32_t N);
+} VPDataReaderType;
+
+/* Write profile data to the destination. If SkipNameDataWrite is set to 1,
+   the name data is already in the destination, and we just skip over it. */
+int lprofWriteData(ProfDataWriter *Writer, VPDataReaderType *VPDataReader,
+ int SkipNameDataWrite);
+int lprofWriteDataImpl(ProfDataWriter *Writer,
+ const __llvm_profile_data *DataBegin,
+ const __llvm_profile_data *DataEnd,
+ const uint64_t *CountersBegin,
+ const uint64_t *CountersEnd,
+ VPDataReaderType *VPDataReader, const char *NamesBegin,
+ const char *NamesEnd, int SkipNameDataWrite);
+
+/* Merge value profile data pointed to by SrcValueProfData into
+ * in-memory profile counters pointed to by DstData. */
+void lprofMergeValueProfData(struct ValueProfData *SrcValueProfData,
+ __llvm_profile_data *DstData);
+
+VPDataReaderType *lprofGetVPDataReader();
+
+/* Internal interface used by tests to reset the max number of
+ * tracked values per value site to \p MaxVals.
+ */
+void lprofSetMaxValsPerSite(uint32_t MaxVals);
+void lprofSetupValueProfiler();
+
+/* Return the profile header 'signature' value associated with the current
+ * executable or shared library. The signature value can be used to form
+ * a profile name that is unique to this load module so that it does not
+ * collide with profiles from other binaries. It also allows shared libraries
+ * to dump merged profile data into their own profile files. */
+uint64_t lprofGetLoadModuleSignature();
+
+/*
+ * Return a nonzero value if the profile data has already been
+ * dumped to the file.
+ */
+unsigned lprofProfileDumped();
+void lprofSetProfileDumped();
+
+COMPILER_RT_VISIBILITY extern void (*FreeHook)(void *);
+COMPILER_RT_VISIBILITY extern uint8_t *DynamicBufferIOBuffer;
+COMPILER_RT_VISIBILITY extern uint32_t VPBufferSize;
+COMPILER_RT_VISIBILITY extern uint32_t VPMaxNumValsPerSite;
+/* Pointer to the start of static value counters to be allocated. */
+COMPILER_RT_VISIBILITY extern ValueProfNode *CurrentVNode;
+COMPILER_RT_VISIBILITY extern ValueProfNode *EndVNode;
+extern void (*VPMergeHook)(struct ValueProfData *, __llvm_profile_data *);
+
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingInternal.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingMerge.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingMerge.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingMerge.c (revision 351984)
@@ -0,0 +1,132 @@
+/*===- InstrProfilingMerge.c - Profile in-process Merging ---------------===*\
+|*
+|* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+|* See https://llvm.org/LICENSE.txt for license information.
+|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+|*
+|*===----------------------------------------------------------------------===*
+|* This file defines the API needed for in-process merging of profile data
+|* stored in a memory buffer.
+\*===---------------------------------------------------------------------===*/
+
+#include "InstrProfiling.h"
+#include "InstrProfilingInternal.h"
+#include "InstrProfilingUtil.h"
+
+#define INSTR_PROF_VALUE_PROF_DATA
+#include "InstrProfData.inc"
+
+COMPILER_RT_VISIBILITY
+void (*VPMergeHook)(ValueProfData *, __llvm_profile_data *);
+
+COMPILER_RT_VISIBILITY
+uint64_t lprofGetLoadModuleSignature() {
+ /* A very fast way to compute a module signature. */
+ uint64_t CounterSize = (uint64_t)(__llvm_profile_end_counters() -
+ __llvm_profile_begin_counters());
+ uint64_t DataSize = __llvm_profile_get_data_size(__llvm_profile_begin_data(),
+ __llvm_profile_end_data());
+ uint64_t NamesSize =
+ (uint64_t)(__llvm_profile_end_names() - __llvm_profile_begin_names());
+ uint64_t NumVnodes =
+ (uint64_t)(__llvm_profile_end_vnodes() - __llvm_profile_begin_vnodes());
+ const __llvm_profile_data *FirstD = __llvm_profile_begin_data();
+
+ return (NamesSize << 40) + (CounterSize << 30) + (DataSize << 20) +
+ (NumVnodes << 10) + (DataSize > 0 ? FirstD->NameRef : 0);
+}
+
+/* Returns 1 if the profile is not structurally compatible. */
+COMPILER_RT_VISIBILITY
+int __llvm_profile_check_compatibility(const char *ProfileData,
+ uint64_t ProfileSize) {
+ /* Check profile header only for now */
+ __llvm_profile_header *Header = (__llvm_profile_header *)ProfileData;
+ __llvm_profile_data *SrcDataStart, *SrcDataEnd, *SrcData, *DstData;
+ SrcDataStart =
+ (__llvm_profile_data *)(ProfileData + sizeof(__llvm_profile_header));
+ SrcDataEnd = SrcDataStart + Header->DataSize;
+
+ if (ProfileSize < sizeof(__llvm_profile_header))
+ return 1;
+
+ /* Check the header first. */
+ if (Header->Magic != __llvm_profile_get_magic() ||
+ Header->Version != __llvm_profile_get_version() ||
+ Header->DataSize !=
+ __llvm_profile_get_data_size(__llvm_profile_begin_data(),
+ __llvm_profile_end_data()) ||
+ Header->CountersSize != (uint64_t)(__llvm_profile_end_counters() -
+ __llvm_profile_begin_counters()) ||
+ Header->NamesSize != (uint64_t)(__llvm_profile_end_names() -
+ __llvm_profile_begin_names()) ||
+ Header->ValueKindLast != IPVK_Last)
+ return 1;
+
+ if (ProfileSize < sizeof(__llvm_profile_header) +
+ Header->DataSize * sizeof(__llvm_profile_data) +
+ Header->NamesSize + Header->CountersSize)
+ return 1;
+
+ for (SrcData = SrcDataStart,
+ DstData = (__llvm_profile_data *)__llvm_profile_begin_data();
+ SrcData < SrcDataEnd; ++SrcData, ++DstData) {
+ if (SrcData->NameRef != DstData->NameRef ||
+ SrcData->FuncHash != DstData->FuncHash ||
+ SrcData->NumCounters != DstData->NumCounters)
+ return 1;
+ }
+
+ /* Matched! */
+ return 0;
+}
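A sketch mirroring the merge flow in InstrProfilingFile.c: Buf and Sz are assumptions standing in for a raw profile already read or mapped into memory; compatibility is checked before merging.

    if (__llvm_profile_check_compatibility(Buf, Sz) == 0)
      __llvm_profile_merge_from_buffer(Buf, Sz); /* adds Buf's counts in place */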
+
+COMPILER_RT_VISIBILITY
+void __llvm_profile_merge_from_buffer(const char *ProfileData,
+ uint64_t ProfileSize) {
+ __llvm_profile_data *SrcDataStart, *SrcDataEnd, *SrcData, *DstData;
+ __llvm_profile_header *Header = (__llvm_profile_header *)ProfileData;
+ uint64_t *SrcCountersStart;
+ const char *SrcNameStart;
+ ValueProfData *SrcValueProfDataStart, *SrcValueProfData;
+
+ SrcDataStart =
+ (__llvm_profile_data *)(ProfileData + sizeof(__llvm_profile_header));
+ SrcDataEnd = SrcDataStart + Header->DataSize;
+ SrcCountersStart = (uint64_t *)SrcDataEnd;
+ SrcNameStart = (const char *)(SrcCountersStart + Header->CountersSize);
+ SrcValueProfDataStart =
+ (ValueProfData *)(SrcNameStart + Header->NamesSize +
+ __llvm_profile_get_num_padding_bytes(
+ Header->NamesSize));
+
+ for (SrcData = SrcDataStart,
+ DstData = (__llvm_profile_data *)__llvm_profile_begin_data(),
+ SrcValueProfData = SrcValueProfDataStart;
+ SrcData < SrcDataEnd; ++SrcData, ++DstData) {
+ uint64_t *SrcCounters;
+ uint64_t *DstCounters = (uint64_t *)DstData->CounterPtr;
+ unsigned I, NC, NVK = 0;
+
+ NC = SrcData->NumCounters;
+ SrcCounters = SrcCountersStart +
+ ((size_t)SrcData->CounterPtr - Header->CountersDelta) /
+ sizeof(uint64_t);
+ for (I = 0; I < NC; I++)
+ DstCounters[I] += SrcCounters[I];
+
+ /* Now merge value profile data. */
+ if (!VPMergeHook)
+ continue;
+
+ for (I = 0; I <= IPVK_Last; I++)
+ NVK += (SrcData->NumValueSites[I] != 0);
+
+ if (!NVK)
+ continue;
+
+ VPMergeHook(SrcValueProfData, DstData);
+ SrcValueProfData = (ValueProfData *)((char *)SrcValueProfData +
+ SrcValueProfData->TotalSize);
+ }
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingMerge.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingMergeFile.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingMergeFile.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingMergeFile.c (revision 351984)
@@ -0,0 +1,45 @@
+/*===- InstrProfilingMergeFile.c - Profile in-process Merging ------------===*\
+|*
+|* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+|* See https://llvm.org/LICENSE.txt for license information.
+|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+|*
+|*===----------------------------------------------------------------------===
+|* This file defines APIs needed to support in-process merging for profile data
+|* stored in files.
+\*===----------------------------------------------------------------------===*/
+
+#if !defined(__Fuchsia__)
+
+#include "InstrProfiling.h"
+#include "InstrProfilingInternal.h"
+#include "InstrProfilingUtil.h"
+
+#define INSTR_PROF_VALUE_PROF_DATA
+#include "InstrProfData.inc"
+
+/* Merge value profile data pointed to by SrcValueProfData into
+ * in-memory profile counters pointed to by DstData. */
+COMPILER_RT_VISIBILITY
+void lprofMergeValueProfData(ValueProfData *SrcValueProfData,
+ __llvm_profile_data *DstData) {
+ unsigned I, S, V, DstIndex = 0;
+ InstrProfValueData *VData;
+ ValueProfRecord *VR = getFirstValueProfRecord(SrcValueProfData);
+ for (I = 0; I < SrcValueProfData->NumValueKinds; I++) {
+ VData = getValueProfRecordValueData(VR);
+ unsigned SrcIndex = 0;
+ for (S = 0; S < VR->NumValueSites; S++) {
+ uint8_t NV = VR->SiteCountArray[S];
+ for (V = 0; V < NV; V++) {
+ __llvm_profile_instrument_target_value(VData[SrcIndex].Value, DstData,
+ DstIndex, VData[SrcIndex].Count);
+ ++SrcIndex;
+ }
+ ++DstIndex;
+ }
+ VR = getValueProfRecordNext(VR);
+ }
+}
+
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingMergeFile.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingNameVar.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingNameVar.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingNameVar.c (revision 351984)
@@ -0,0 +1,17 @@
+/*===- InstrProfilingNameVar.c - profile name variable setup -------------===*\
+|*
+|* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+|* See https://llvm.org/LICENSE.txt for license information.
+|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+|*
+\*===----------------------------------------------------------------------===*/
+
+#include "InstrProfiling.h"
+
+/* char __llvm_profile_filename[1]
+ *
+ * The runtime should only provide its own definition of this symbol when the
+ * user has not specified one. Set this up by moving the runtime's copy of this
+ * symbol to an object file within the archive.
+ */
+COMPILER_RT_WEAK char INSTR_PROF_PROFILE_NAME_VAR[1] = {0};
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingNameVar.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingPlatformDarwin.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingPlatformDarwin.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingPlatformDarwin.c (revision 351984)
@@ -0,0 +1,67 @@
+/*===- InstrProfilingPlatformDarwin.c - Profile data on Darwin ------------===*\
+|*
+|* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+|* See https://llvm.org/LICENSE.txt for license information.
+|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+|*
+\*===----------------------------------------------------------------------===*/
+
+#include "InstrProfiling.h"
+
+#if defined(__APPLE__)
+/* Use linker magic to find the bounds of the Data section. */
+COMPILER_RT_VISIBILITY
+extern __llvm_profile_data
+ DataStart __asm("section$start$__DATA$" INSTR_PROF_DATA_SECT_NAME);
+COMPILER_RT_VISIBILITY
+extern __llvm_profile_data
+ DataEnd __asm("section$end$__DATA$" INSTR_PROF_DATA_SECT_NAME);
+COMPILER_RT_VISIBILITY
+extern char
+ NamesStart __asm("section$start$__DATA$" INSTR_PROF_NAME_SECT_NAME);
+COMPILER_RT_VISIBILITY
+extern char NamesEnd __asm("section$end$__DATA$" INSTR_PROF_NAME_SECT_NAME);
+COMPILER_RT_VISIBILITY
+extern uint64_t
+ CountersStart __asm("section$start$__DATA$" INSTR_PROF_CNTS_SECT_NAME);
+COMPILER_RT_VISIBILITY
+extern uint64_t
+ CountersEnd __asm("section$end$__DATA$" INSTR_PROF_CNTS_SECT_NAME);
+COMPILER_RT_VISIBILITY
+extern uint32_t
+ OrderFileStart __asm("section$start$__DATA$" INSTR_PROF_ORDERFILE_SECT_NAME);
+
+COMPILER_RT_VISIBILITY
+extern ValueProfNode
+ VNodesStart __asm("section$start$__DATA$" INSTR_PROF_VNODES_SECT_NAME);
+COMPILER_RT_VISIBILITY
+extern ValueProfNode
+ VNodesEnd __asm("section$end$__DATA$" INSTR_PROF_VNODES_SECT_NAME);
+
+COMPILER_RT_VISIBILITY
+const __llvm_profile_data *__llvm_profile_begin_data(void) {
+ return &DataStart;
+}
+COMPILER_RT_VISIBILITY
+const __llvm_profile_data *__llvm_profile_end_data(void) { return &DataEnd; }
+COMPILER_RT_VISIBILITY
+const char *__llvm_profile_begin_names(void) { return &NamesStart; }
+COMPILER_RT_VISIBILITY
+const char *__llvm_profile_end_names(void) { return &NamesEnd; }
+COMPILER_RT_VISIBILITY
+uint64_t *__llvm_profile_begin_counters(void) { return &CountersStart; }
+COMPILER_RT_VISIBILITY
+uint64_t *__llvm_profile_end_counters(void) { return &CountersEnd; }
+COMPILER_RT_VISIBILITY
+uint32_t *__llvm_profile_begin_orderfile(void) { return &OrderFileStart; }
+
+COMPILER_RT_VISIBILITY
+ValueProfNode *__llvm_profile_begin_vnodes(void) {
+ return &VNodesStart;
+}
+COMPILER_RT_VISIBILITY
+ValueProfNode *__llvm_profile_end_vnodes(void) { return &VNodesEnd; }
+
+COMPILER_RT_VISIBILITY ValueProfNode *CurrentVNode = &VNodesStart;
+COMPILER_RT_VISIBILITY ValueProfNode *EndVNode = &VNodesEnd;
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingPlatformDarwin.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingPlatformFuchsia.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingPlatformFuchsia.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingPlatformFuchsia.c (revision 351984)
@@ -0,0 +1,182 @@
+/*===- InstrProfilingPlatformFuchsia.c - Profile data Fuchsia platform ----===*\
+|*
+|* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+|* See https://llvm.org/LICENSE.txt for license information.
+|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+|*
+\*===----------------------------------------------------------------------===*/
+/*
+ * This file implements the profiling runtime for Fuchsia and defines the
+ * shared profile runtime interface. Each module (executable or DSO) statically
+ * links in the whole profile runtime to satisfy the calls from its
+ * instrumented code. Several modules in the same program might be separately
+ * compiled and even use different versions of the instrumentation ABI and data
+ * format. All they share in common is the VMO and the offset, which live in
+ * exported globals so that exactly one definition will be shared across all
+ * modules. Each module has its own independent runtime that registers its own
+ * atexit hook to append its own data into the shared VMO, which is published
+ * via the data-sink hook provided by Fuchsia's dynamic linker.
+ */
+
+#if defined(__Fuchsia__)
+
+#include <inttypes.h>
+#include <stdarg.h>
+#include <stdbool.h>
+#include <stdlib.h>
+
+#include <zircon/process.h>
+#include <zircon/sanitizer.h>
+#include <zircon/syscalls.h>
+
+#include "InstrProfiling.h"
+#include "InstrProfilingInternal.h"
+#include "InstrProfilingUtil.h"
+
+/* VMO that contains the coverage data shared across all modules. This symbol
+ * has default visibility and is exported in each module (executable or DSO)
+ * that statically links in the profiling runtime.
+ */
+zx_handle_t __llvm_profile_vmo;
+/* Current offset within the VMO where data should be written next. This symbol
+ * has default visibility and is exported in each module (executable or DSO)
+ * that statically links in the profiling runtime.
+ */
+uint64_t __llvm_profile_offset;
+
+static const char ProfileSinkName[] = "llvm-profile";
+
+static inline void lprofWrite(const char *fmt, ...) {
+ char s[256];
+
+ va_list ap;
+ va_start(ap, fmt);
+ int ret = vsnprintf(s, sizeof(s), fmt, ap);
+ va_end(ap);
+
+ __sanitizer_log_write(s, ret + 1);
+}
+
+static uint32_t lprofVMOWriter(ProfDataWriter *This, ProfDataIOVec *IOVecs,
+ uint32_t NumIOVecs) {
+ /* Allocate VMO if it hasn't been created yet. */
+ if (__llvm_profile_vmo == ZX_HANDLE_INVALID) {
+ /* Get information about the current process. */
+ zx_info_handle_basic_t Info;
+ zx_status_t Status =
+ _zx_object_get_info(_zx_process_self(), ZX_INFO_HANDLE_BASIC, &Info,
+ sizeof(Info), NULL, NULL);
+ if (Status != ZX_OK)
+ return -1;
+
+ /* Create VMO to hold the profile data. */
+ Status = _zx_vmo_create(0, ZX_VMO_RESIZABLE, &__llvm_profile_vmo);
+ if (Status != ZX_OK)
+ return -1;
+
+ /* Give the VMO a name including our process KOID so it's easy to spot. */
+ char VmoName[ZX_MAX_NAME_LEN];
+ snprintf(VmoName, sizeof(VmoName), "%s.%" PRIu64, ProfileSinkName,
+ Info.koid);
+ _zx_object_set_property(__llvm_profile_vmo, ZX_PROP_NAME, VmoName,
+ strlen(VmoName));
+
+ /* Duplicate the handle since __sanitizer_publish_data consumes it. */
+ zx_handle_t Handle;
+ Status =
+ _zx_handle_duplicate(__llvm_profile_vmo, ZX_RIGHT_SAME_RIGHTS, &Handle);
+ if (Status != ZX_OK)
+ return -1;
+
+ /* Publish the VMO which contains profile data to the system. */
+ __sanitizer_publish_data(ProfileSinkName, Handle);
+
+ /* Use the dumpfile symbolizer markup element to write the name of the VMO. */
+ lprofWrite("LLVM Profile: {{{dumpfile:%s:%s}}}\n",
+ ProfileSinkName, VmoName);
+ }
+
+ /* Compute the total length of data to be written. */
+ size_t Length = 0;
+ for (uint32_t I = 0; I < NumIOVecs; I++)
+ Length += IOVecs[I].ElmSize * IOVecs[I].NumElm;
+
+ /* Resize the VMO to ensure there's sufficient space for the data. */
+ zx_status_t Status =
+ _zx_vmo_set_size(__llvm_profile_vmo, __llvm_profile_offset + Length);
+ if (Status != ZX_OK)
+ return -1;
+
+ /* Copy the data into VMO. */
+ for (uint32_t I = 0; I < NumIOVecs; I++) {
+ size_t Length = IOVecs[I].ElmSize * IOVecs[I].NumElm;
+ if (IOVecs[I].Data) {
+ Status = _zx_vmo_write(__llvm_profile_vmo, IOVecs[I].Data,
+ __llvm_profile_offset, Length);
+ if (Status != ZX_OK)
+ return -1;
+ }
+ __llvm_profile_offset += Length;
+ }
+
+ return 0;
+}
+
+static void initVMOWriter(ProfDataWriter *This) {
+ This->Write = lprofVMOWriter;
+ This->WriterCtx = NULL;
+}
+
+static int dump(void) {
+ if (lprofProfileDumped()) {
+ lprofWrite("Profile data not published: already written.\n");
+ return 0;
+ }
+
+ /* Check if there is llvm/runtime version mismatch. */
+ if (GET_VERSION(__llvm_profile_get_version()) != INSTR_PROF_RAW_VERSION) {
+ lprofWrite("Runtime and instrumentation version mismatch : "
+ "expected %d, but got %d\n",
+ INSTR_PROF_RAW_VERSION,
+ (int)GET_VERSION(__llvm_profile_get_version()));
+ return -1;
+ }
+
+ /* Write the profile data into the mapped region. */
+ ProfDataWriter VMOWriter;
+ initVMOWriter(&VMOWriter);
+ if (lprofWriteData(&VMOWriter, lprofGetVPDataReader(), 0) != 0)
+ return -1;
+
+ return 0;
+}
+
+COMPILER_RT_VISIBILITY
+int __llvm_profile_dump(void) {
+ int rc = dump();
+ lprofSetProfileDumped();
+ return rc;
+}
+
+static void dumpWithoutReturn(void) { dump(); }
+
+/* This method is invoked by the runtime initialization hook
+ * InstrProfilingRuntime.o if it is linked in.
+ */
+COMPILER_RT_VISIBILITY
+void __llvm_profile_initialize_file(void) {}
+
+COMPILER_RT_VISIBILITY
+int __llvm_profile_register_write_file_atexit(void) {
+ static bool HasBeenRegistered = false;
+
+ if (HasBeenRegistered)
+ return 0;
+
+ lprofSetupValueProfiler();
+
+ HasBeenRegistered = true;
+ return atexit(dumpWithoutReturn);
+}
+
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingPlatformFuchsia.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingPlatformLinux.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingPlatformLinux.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingPlatformLinux.c (revision 351984)
@@ -0,0 +1,83 @@
+/*===- InstrProfilingPlatformLinux.c - Profile data Linux platform ------===*\
+|*
+|* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+|* See https://llvm.org/LICENSE.txt for license information.
+|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+|*
+\*===----------------------------------------------------------------------===*/
+
+#if defined(__linux__) || defined(__FreeBSD__) || defined(__Fuchsia__) || \
+ (defined(__sun__) && defined(__svr4__)) || defined(__NetBSD__)
+
+#include <stdlib.h>
+
+#include "InstrProfiling.h"
+
+#define PROF_DATA_START INSTR_PROF_SECT_START(INSTR_PROF_DATA_COMMON)
+#define PROF_DATA_STOP INSTR_PROF_SECT_STOP(INSTR_PROF_DATA_COMMON)
+#define PROF_NAME_START INSTR_PROF_SECT_START(INSTR_PROF_NAME_COMMON)
+#define PROF_NAME_STOP INSTR_PROF_SECT_STOP(INSTR_PROF_NAME_COMMON)
+#define PROF_CNTS_START INSTR_PROF_SECT_START(INSTR_PROF_CNTS_COMMON)
+#define PROF_CNTS_STOP INSTR_PROF_SECT_STOP(INSTR_PROF_CNTS_COMMON)
+#define PROF_ORDERFILE_START INSTR_PROF_SECT_START(INSTR_PROF_ORDERFILE_COMMON)
+#define PROF_VNODES_START INSTR_PROF_SECT_START(INSTR_PROF_VNODES_COMMON)
+#define PROF_VNODES_STOP INSTR_PROF_SECT_STOP(INSTR_PROF_VNODES_COMMON)
+
+/* Declare section start and stop symbols for various sections
+ * generated by compiler instrumentation.
+ */
+extern __llvm_profile_data PROF_DATA_START COMPILER_RT_VISIBILITY;
+extern __llvm_profile_data PROF_DATA_STOP COMPILER_RT_VISIBILITY;
+extern uint64_t PROF_CNTS_START COMPILER_RT_VISIBILITY;
+extern uint64_t PROF_CNTS_STOP COMPILER_RT_VISIBILITY;
+extern uint32_t PROF_ORDERFILE_START COMPILER_RT_VISIBILITY;
+extern char PROF_NAME_START COMPILER_RT_VISIBILITY;
+extern char PROF_NAME_STOP COMPILER_RT_VISIBILITY;
+extern ValueProfNode PROF_VNODES_START COMPILER_RT_VISIBILITY;
+extern ValueProfNode PROF_VNODES_STOP COMPILER_RT_VISIBILITY;
+
+/* Add dummy data to ensure the section is always created. */
+__llvm_profile_data
+ __prof_data_sect_data[0] COMPILER_RT_SECTION(INSTR_PROF_DATA_SECT_NAME);
+uint64_t
+ __prof_cnts_sect_data[0] COMPILER_RT_SECTION(INSTR_PROF_CNTS_SECT_NAME);
+uint32_t
+ __prof_orderfile_sect_data[0] COMPILER_RT_SECTION(INSTR_PROF_ORDERFILE_SECT_NAME);
+char __prof_nms_sect_data[0] COMPILER_RT_SECTION(INSTR_PROF_NAME_SECT_NAME);
+ValueProfNode __prof_vnodes_sect_data[0] COMPILER_RT_SECTION(INSTR_PROF_VNODES_SECT_NAME);
+
+COMPILER_RT_VISIBILITY const __llvm_profile_data *
+__llvm_profile_begin_data(void) {
+ return &PROF_DATA_START;
+}
+COMPILER_RT_VISIBILITY const __llvm_profile_data *
+__llvm_profile_end_data(void) {
+ return &PROF_DATA_STOP;
+}
+COMPILER_RT_VISIBILITY const char *__llvm_profile_begin_names(void) {
+ return &PROF_NAME_START;
+}
+COMPILER_RT_VISIBILITY const char *__llvm_profile_end_names(void) {
+ return &PROF_NAME_STOP;
+}
+COMPILER_RT_VISIBILITY uint64_t *__llvm_profile_begin_counters(void) {
+ return &PROF_CNTS_START;
+}
+COMPILER_RT_VISIBILITY uint64_t *__llvm_profile_end_counters(void) {
+ return &PROF_CNTS_STOP;
+}
+COMPILER_RT_VISIBILITY uint32_t *__llvm_profile_begin_orderfile(void) {
+ return &PROF_ORDERFILE_START;
+}
+
+COMPILER_RT_VISIBILITY ValueProfNode *
+__llvm_profile_begin_vnodes(void) {
+ return &PROF_VNODES_START;
+}
+COMPILER_RT_VISIBILITY ValueProfNode *__llvm_profile_end_vnodes(void) {
+ return &PROF_VNODES_STOP;
+}
+COMPILER_RT_VISIBILITY ValueProfNode *CurrentVNode = &PROF_VNODES_START;
+COMPILER_RT_VISIBILITY ValueProfNode *EndVNode = &PROF_VNODES_STOP;
+
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingPlatformLinux.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingPlatformOther.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingPlatformOther.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingPlatformOther.c (revision 351984)
@@ -0,0 +1,100 @@
+/*===- InstrProfilingPlatformOther.c - Profile data default platform ------===*\
+|*
+|* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+|* See https://llvm.org/LICENSE.txt for license information.
+|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+|*
+\*===----------------------------------------------------------------------===*/
+
+#if !defined(__APPLE__) && !defined(__linux__) && !defined(__FreeBSD__) && \
+ !(defined(__sun__) && defined(__svr4__)) && !defined(__NetBSD__) && \
+ !defined(_WIN32)
+
+#include <stdlib.h>
+#include <stdio.h>
+
+#include "InstrProfiling.h"
+
+static const __llvm_profile_data *DataFirst = NULL;
+static const __llvm_profile_data *DataLast = NULL;
+static const char *NamesFirst = NULL;
+static const char *NamesLast = NULL;
+static uint64_t *CountersFirst = NULL;
+static uint64_t *CountersLast = NULL;
+static uint32_t *OrderFileFirst = NULL;
+
+static const void *getMinAddr(const void *A1, const void *A2) {
+ return A1 < A2 ? A1 : A2;
+}
+
+static const void *getMaxAddr(const void *A1, const void *A2) {
+ return A1 > A2 ? A1 : A2;
+}
+
+/*!
+ * \brief Register an instrumented function.
+ *
+ * Calls to this are emitted by clang with -fprofile-instr-generate. Such
+ * calls are only required (and only emitted) on targets where we haven't
+ * implemented linker magic to find the bounds of the sections.
+ */
+COMPILER_RT_VISIBILITY
+void __llvm_profile_register_function(void *Data_) {
+ /* TODO: Only emit this function if we can't use linker magic. */
+ const __llvm_profile_data *Data = (__llvm_profile_data *)Data_;
+ if (!DataFirst) {
+ DataFirst = Data;
+ DataLast = Data + 1;
+ CountersFirst = Data->CounterPtr;
+ CountersLast = (uint64_t *)Data->CounterPtr + Data->NumCounters;
+ return;
+ }
+
+ DataFirst = (const __llvm_profile_data *)getMinAddr(DataFirst, Data);
+ CountersFirst = (uint64_t *)getMinAddr(CountersFirst, Data->CounterPtr);
+
+ DataLast = (const __llvm_profile_data *)getMaxAddr(DataLast, Data + 1);
+ CountersLast = (uint64_t *)getMaxAddr(
+ CountersLast, (uint64_t *)Data->CounterPtr + Data->NumCounters);
+}
+
+COMPILER_RT_VISIBILITY
+void __llvm_profile_register_names_function(void *NamesStart,
+ uint64_t NamesSize) {
+ if (!NamesFirst) {
+ NamesFirst = (const char *)NamesStart;
+ NamesLast = (const char *)NamesStart + NamesSize;
+ return;
+ }
+ NamesFirst = (const char *)getMinAddr(NamesFirst, NamesStart);
+ NamesLast =
+ (const char *)getMaxAddr(NamesLast, (const char *)NamesStart + NamesSize);
+}
+
+COMPILER_RT_VISIBILITY
+const __llvm_profile_data *__llvm_profile_begin_data(void) { return DataFirst; }
+COMPILER_RT_VISIBILITY
+const __llvm_profile_data *__llvm_profile_end_data(void) { return DataLast; }
+COMPILER_RT_VISIBILITY
+const char *__llvm_profile_begin_names(void) { return NamesFirst; }
+COMPILER_RT_VISIBILITY
+const char *__llvm_profile_end_names(void) { return NamesLast; }
+COMPILER_RT_VISIBILITY
+uint64_t *__llvm_profile_begin_counters(void) { return CountersFirst; }
+COMPILER_RT_VISIBILITY
+uint64_t *__llvm_profile_end_counters(void) { return CountersLast; }
+/* TODO: correctly set up OrderFileFirst. */
+COMPILER_RT_VISIBILITY
+uint32_t *__llvm_profile_begin_orderfile(void) { return OrderFileFirst; }
+
+COMPILER_RT_VISIBILITY
+ValueProfNode *__llvm_profile_begin_vnodes(void) {
+ return 0;
+}
+COMPILER_RT_VISIBILITY
+ValueProfNode *__llvm_profile_end_vnodes(void) { return 0; }
+
+COMPILER_RT_VISIBILITY ValueProfNode *CurrentVNode = 0;
+COMPILER_RT_VISIBILITY ValueProfNode *EndVNode = 0;
+
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingPlatformOther.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingPlatformWindows.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingPlatformWindows.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingPlatformWindows.c (revision 351984)
@@ -0,0 +1,68 @@
+/*===- InstrProfilingPlatformWindows.c - Profile data on Windows ----------===*\
+|*
+|* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+|* See https://llvm.org/LICENSE.txt for license information.
+|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+|*
+\*===----------------------------------------------------------------------===*/
+
+#include "InstrProfiling.h"
+
+#if defined(_WIN32)
+
+#if defined(_MSC_VER)
+/* Merge read-write sections into .data. */
+#pragma comment(linker, "/MERGE:.lprfc=.data")
+#pragma comment(linker, "/MERGE:.lprfd=.data")
+#pragma comment(linker, "/MERGE:.lprfv=.data")
+#pragma comment(linker, "/MERGE:.lprfnd=.data")
+/* Do *NOT* merge .lprfn and .lcovmap into .rdata. llvm-cov must be able to
+ * find them after the fact.
+ */
+
+/* Allocate read-only section bounds. */
+#pragma section(".lprfn$A", read)
+#pragma section(".lprfn$Z", read)
+
+/* Allocate read-write section bounds. */
+#pragma section(".lprfd$A", read, write)
+#pragma section(".lprfd$Z", read, write)
+#pragma section(".lprfc$A", read, write)
+#pragma section(".lprfc$Z", read, write)
+#pragma section(".lorderfile$A", read, write)
+#pragma section(".lprfnd$A", read, write)
+#pragma section(".lprfnd$Z", read, write)
+#endif
+
+__llvm_profile_data COMPILER_RT_SECTION(".lprfd$A") DataStart = {0};
+__llvm_profile_data COMPILER_RT_SECTION(".lprfd$Z") DataEnd = {0};
+
+const char COMPILER_RT_SECTION(".lprfn$A") NamesStart = '\0';
+const char COMPILER_RT_SECTION(".lprfn$Z") NamesEnd = '\0';
+
+uint64_t COMPILER_RT_SECTION(".lprfc$A") CountersStart;
+uint64_t COMPILER_RT_SECTION(".lprfc$Z") CountersEnd;
+uint32_t COMPILER_RT_SECTION(".lorderfile$A") OrderFileStart;
+
+ValueProfNode COMPILER_RT_SECTION(".lprfnd$A") VNodesStart;
+ValueProfNode COMPILER_RT_SECTION(".lprfnd$Z") VNodesEnd;
+
+const __llvm_profile_data *__llvm_profile_begin_data(void) {
+ return &DataStart + 1;
+}
+const __llvm_profile_data *__llvm_profile_end_data(void) { return &DataEnd; }
+
+const char *__llvm_profile_begin_names(void) { return &NamesStart + 1; }
+const char *__llvm_profile_end_names(void) { return &NamesEnd; }
+
+uint64_t *__llvm_profile_begin_counters(void) { return &CountersStart + 1; }
+uint64_t *__llvm_profile_end_counters(void) { return &CountersEnd; }
+uint32_t *__llvm_profile_begin_orderfile(void) { return &OrderFileStart; }
+
+ValueProfNode *__llvm_profile_begin_vnodes(void) { return &VNodesStart + 1; }
+ValueProfNode *__llvm_profile_end_vnodes(void) { return &VNodesEnd; }
+
+ValueProfNode *CurrentVNode = &VNodesStart + 1;
+ValueProfNode *EndVNode = &VNodesEnd;
+
+#endif
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingPort.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingPort.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingPort.h (revision 351984)
@@ -0,0 +1,131 @@
+/*===- InstrProfilingPort.h- Support library for PGO instrumentation ------===*\
+|*
+|* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+|* See https://llvm.org/LICENSE.txt for license information.
+|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+|*
+\*===----------------------------------------------------------------------===*/
+
+/* This header must be included after all others so it can provide fallback
+ definitions for stuff missing in system headers. */
+
+#ifndef PROFILE_INSTRPROFILING_PORT_H_
+#define PROFILE_INSTRPROFILING_PORT_H_
+
+#ifdef _MSC_VER
+#define COMPILER_RT_ALIGNAS(x) __declspec(align(x))
+#define COMPILER_RT_VISIBILITY
+/* FIXME: selectany does not have the same semantics as weak. */
+#define COMPILER_RT_WEAK __declspec(selectany)
+/* Need to include <windows.h> */
+#define COMPILER_RT_ALLOCA _alloca
+/* Need to include <stdio.h> and <io.h> */
+#define COMPILER_RT_FTRUNCATE(f,l) _chsize(_fileno(f),l)
+#define COMPILER_RT_ALWAYS_INLINE __forceinline
+#elif __GNUC__
+#define COMPILER_RT_ALIGNAS(x) __attribute__((aligned(x)))
+#define COMPILER_RT_VISIBILITY __attribute__((visibility("hidden")))
+#define COMPILER_RT_WEAK __attribute__((weak))
+#define COMPILER_RT_ALLOCA __builtin_alloca
+#define COMPILER_RT_FTRUNCATE(f,l) ftruncate(fileno(f),l)
+#define COMPILER_RT_ALWAYS_INLINE inline __attribute((always_inline))
+#endif
+
+#if defined(__APPLE__)
+#define COMPILER_RT_SEG "__DATA,"
+#else
+#define COMPILER_RT_SEG ""
+#endif
+
+#ifdef _MSC_VER
+#define COMPILER_RT_SECTION(Sect) __declspec(allocate(Sect))
+#else
+#define COMPILER_RT_SECTION(Sect) __attribute__((section(Sect)))
+#endif
+
+#define COMPILER_RT_MAX_HOSTLEN 128
+#ifdef __ORBIS__
+#define COMPILER_RT_GETHOSTNAME(Name, Len) ((void)(Name), (void)(Len), (-1))
+#else
+#define COMPILER_RT_GETHOSTNAME(Name, Len) lprofGetHostName(Name, Len)
+#endif
+
+#if COMPILER_RT_HAS_ATOMICS == 1
+#ifdef _MSC_VER
+#include <windows.h>
+#if _MSC_VER < 1900
+#define snprintf _snprintf
+#endif
+#if defined(_WIN64)
+#define COMPILER_RT_BOOL_CMPXCHG(Ptr, OldV, NewV) \
+ (InterlockedCompareExchange64((LONGLONG volatile *)Ptr, (LONGLONG)NewV, \
+ (LONGLONG)OldV) == (LONGLONG)OldV)
+#define COMPILER_RT_PTR_FETCH_ADD(DomType, PtrVar, PtrIncr) \
+ (DomType *)InterlockedExchangeAdd64((LONGLONG volatile *)&PtrVar, \
+ (LONGLONG)sizeof(DomType) * PtrIncr)
+#else /* !defined(_WIN64) */
+#define COMPILER_RT_BOOL_CMPXCHG(Ptr, OldV, NewV) \
+ (InterlockedCompareExchange((LONG volatile *)Ptr, (LONG)NewV, (LONG)OldV) == \
+ (LONG)OldV)
+#define COMPILER_RT_PTR_FETCH_ADD(DomType, PtrVar, PtrIncr) \
+ (DomType *)InterlockedExchangeAdd((LONG volatile *)&PtrVar, \
+ (LONG)sizeof(DomType) * PtrIncr)
+#endif
+#else /* !defined(_MSC_VER) */
+#define COMPILER_RT_BOOL_CMPXCHG(Ptr, OldV, NewV) \
+ __sync_bool_compare_and_swap(Ptr, OldV, NewV)
+#define COMPILER_RT_PTR_FETCH_ADD(DomType, PtrVar, PtrIncr) \
+ (DomType *)__sync_fetch_and_add((long *)&PtrVar, sizeof(DomType) * PtrIncr)
+#endif
+#else /* COMPILER_RT_HAS_ATOMICS != 1 */
+#include "InstrProfilingUtil.h"
+#define COMPILER_RT_BOOL_CMPXCHG(Ptr, OldV, NewV) \
+ lprofBoolCmpXchg((void **)Ptr, OldV, NewV)
+#define COMPILER_RT_PTR_FETCH_ADD(DomType, PtrVar, PtrIncr) \
+ (DomType *)lprofPtrFetchAdd((void **)&PtrVar, sizeof(DomType) * PtrIncr)
+#endif
+
+#if defined(_WIN32)
+#define DIR_SEPARATOR '\\'
+#define DIR_SEPARATOR_2 '/'
+#else
+#define DIR_SEPARATOR '/'
+#endif
+
+#ifndef DIR_SEPARATOR_2
+#define IS_DIR_SEPARATOR(ch) ((ch) == DIR_SEPARATOR)
+#else /* DIR_SEPARATOR_2 */
+#define IS_DIR_SEPARATOR(ch) \
+ (((ch) == DIR_SEPARATOR) || ((ch) == DIR_SEPARATOR_2))
+#endif /* DIR_SEPARATOR_2 */
+
+#define PROF_ERR(Format, ...) \
+ fprintf(stderr, "LLVM Profile Error: " Format, __VA_ARGS__);
+
+#define PROF_WARN(Format, ...) \
+ fprintf(stderr, "LLVM Profile Warning: " Format, __VA_ARGS__);
+
+#define PROF_NOTE(Format, ...) \
+ fprintf(stderr, "LLVM Profile Note: " Format, __VA_ARGS__);
+
+#ifndef MAP_FILE
+#define MAP_FILE 0
+#endif
+
+#ifndef O_BINARY
+#define O_BINARY 0
+#endif
+
+#if defined(__FreeBSD__)
+
+#include <inttypes.h>
+#include <sys/types.h>
+
+#else /* defined(__FreeBSD__) */
+
+#include <inttypes.h>
+#include <stdint.h>
+
+#endif /* defined(__FreeBSD__) */
+
+#endif /* PROFILE_INSTRPROFILING_PORT_H_ */
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingPort.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingRuntime.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingRuntime.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingRuntime.cc (revision 351984)
@@ -0,0 +1,29 @@
+//===- InstrProfilingRuntime.cpp - PGO runtime initialization -------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+extern "C" {
+
+#include "InstrProfiling.h"
+
+/* int __llvm_profile_runtime */
+COMPILER_RT_VISIBILITY int INSTR_PROF_PROFILE_RUNTIME_VAR;
+}
+
+namespace {
+
+class RegisterRuntime {
+public:
+ RegisterRuntime() {
+ __llvm_profile_register_write_file_atexit();
+ __llvm_profile_initialize_file();
+ }
+};
+
+RegisterRuntime Registration;
+
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingRuntime.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingUtil.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingUtil.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingUtil.c (revision 351984)
@@ -0,0 +1,309 @@
+/*===- InstrProfilingUtil.c - Support library for PGO instrumentation -----===*\
+|*
+|* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+|* See https://llvm.org/LICENSE.txt for license information.
+|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+|*
+\*===----------------------------------------------------------------------===*/
+
+#ifdef _WIN32
+#include <direct.h>
+#include <process.h>
+#include <windows.h>
+#include "WindowsMMap.h"
+#else
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <errno.h>
+#endif
+
+#ifdef COMPILER_RT_HAS_UNAME
+#include <sys/utsname.h>
+#endif
+
+#include <stdlib.h>
+#include <string.h>
+
+#if defined(__linux__)
+#include <signal.h>
+#include <sys/prctl.h>
+#endif
+
+#include "InstrProfiling.h"
+#include "InstrProfilingUtil.h"
+
+COMPILER_RT_WEAK unsigned lprofDirMode = 0755;
+
+COMPILER_RT_VISIBILITY
+void __llvm_profile_recursive_mkdir(char *path) {
+ int i;
+
+ for (i = 1; path[i] != '\0'; ++i) {
+ char save = path[i];
+ if (!IS_DIR_SEPARATOR(path[i]))
+ continue;
+ path[i] = '\0';
+#ifdef _WIN32
+ _mkdir(path);
+#else
+ /* Some of these will fail, ignore it. */
+ mkdir(path, __llvm_profile_get_dir_mode());
+#endif
+ path[i] = save;
+ }
+}
+
+COMPILER_RT_VISIBILITY
+void __llvm_profile_set_dir_mode(unsigned Mode) { lprofDirMode = Mode; }
+
+COMPILER_RT_VISIBILITY
+unsigned __llvm_profile_get_dir_mode(void) { return lprofDirMode; }
+
+#if COMPILER_RT_HAS_ATOMICS != 1
+COMPILER_RT_VISIBILITY
+uint32_t lprofBoolCmpXchg(void **Ptr, void *OldV, void *NewV) {
+ void *R = *Ptr;
+ if (R == OldV) {
+ *Ptr = NewV;
+ return 1;
+ }
+ return 0;
+}
+COMPILER_RT_VISIBILITY
+void *lprofPtrFetchAdd(void **Mem, long ByteIncr) {
+ void *Old = *Mem;
+ *((char **)Mem) += ByteIncr;
+ return Old;
+}
+
+#endif
+
+#ifdef _WIN32
+COMPILER_RT_VISIBILITY int lprofGetHostName(char *Name, int Len) {
+ WCHAR Buffer[COMPILER_RT_MAX_HOSTLEN];
+ DWORD BufferSize = sizeof(Buffer);
+ BOOL Result =
+ GetComputerNameExW(ComputerNameDnsFullyQualified, Buffer, &BufferSize);
+ if (!Result)
+ return -1;
+ if (WideCharToMultiByte(CP_UTF8, 0, Buffer, -1, Name, Len, NULL, NULL) == 0)
+ return -1;
+ return 0;
+}
+#elif defined(COMPILER_RT_HAS_UNAME)
+COMPILER_RT_VISIBILITY int lprofGetHostName(char *Name, int Len) {
+ struct utsname N;
+ int R = uname(&N);
+ if (R >= 0) {
+ strncpy(Name, N.nodename, Len);
+ return 0;
+ }
+ return R;
+}
+#endif
+
+COMPILER_RT_VISIBILITY int lprofLockFd(int fd) {
+#ifdef COMPILER_RT_HAS_FCNTL_LCK
+ struct flock s_flock;
+
+ s_flock.l_whence = SEEK_SET;
+ s_flock.l_start = 0;
+ s_flock.l_len = 0; /* Until EOF. */
+ s_flock.l_pid = getpid();
+ s_flock.l_type = F_WRLCK;
+
+ while (fcntl(fd, F_SETLKW, &s_flock) == -1) {
+ if (errno != EINTR) {
+ if (errno == ENOLCK) {
+ return -1;
+ }
+ break;
+ }
+ }
+ return 0;
+#else
+ flock(fd, LOCK_EX);
+ return 0;
+#endif
+}
+
+COMPILER_RT_VISIBILITY int lprofUnlockFd(int fd) {
+#ifdef COMPILER_RT_HAS_FCNTL_LCK
+ struct flock s_flock;
+
+ s_flock.l_whence = SEEK_SET;
+ s_flock.l_start = 0;
+ s_flock.l_len = 0; /* Until EOF. */
+ s_flock.l_pid = getpid();
+ s_flock.l_type = F_UNLCK;
+
+ while (fcntl(fd, F_SETLKW, &s_flock) == -1) {
+ if (errno != EINTR) {
+ if (errno == ENOLCK) {
+ return -1;
+ }
+ break;
+ }
+ }
+ return 0;
+#else
+ flock(fd, LOCK_UN);
+ return 0;
+#endif
+}
+
+COMPILER_RT_VISIBILITY int lprofLockFileHandle(FILE *F) {
+ int fd;
+#if defined(_WIN32)
+ fd = _fileno(F);
+#else
+ fd = fileno(F);
+#endif
+ return lprofLockFd(fd);
+}
+
+COMPILER_RT_VISIBILITY int lprofUnlockFileHandle(FILE *F) {
+ int fd;
+#if defined(_WIN32)
+ fd = _fileno(F);
+#else
+ fd = fileno(F);
+#endif
+ return lprofUnlockFd(fd);
+}
+
+COMPILER_RT_VISIBILITY FILE *lprofOpenFileEx(const char *ProfileName) {
+ FILE *f;
+ int fd;
+#ifdef COMPILER_RT_HAS_FCNTL_LCK
+ fd = open(ProfileName, O_RDWR | O_CREAT, 0666);
+ if (fd < 0)
+ return NULL;
+
+ if (lprofLockFd(fd) != 0)
+ PROF_WARN("Data may be corrupted during profile merging : %s\n",
+ "Fail to obtain file lock due to system limit.");
+
+ f = fdopen(fd, "r+b");
+#elif defined(_WIN32)
+ // FIXME: Use the wide variants to handle Unicode filenames.
+ HANDLE h = CreateFileA(ProfileName, GENERIC_READ | GENERIC_WRITE, 0, 0,
+ OPEN_ALWAYS, FILE_ATTRIBUTE_NORMAL, 0);
+ if (h == INVALID_HANDLE_VALUE)
+ return NULL;
+
+ fd = _open_osfhandle((intptr_t)h, 0);
+ if (fd == -1) {
+ CloseHandle(h);
+ return NULL;
+ }
+
+ f = _fdopen(fd, "r+b");
+ if (f == 0) {
+ CloseHandle(h);
+ return NULL;
+ }
+#else
+  /* Worst case: no locking applied. */
+  PROF_WARN("Concurrent file access is not supported: %s\n",
+            "lack of file locking");
+ fd = open(ProfileName, O_RDWR | O_CREAT, 0666);
+ if (fd < 0)
+ return NULL;
+ f = fdopen(fd, "r+b");
+#endif
+
+ return f;
+}
+
+COMPILER_RT_VISIBILITY const char *lprofGetPathPrefix(int *PrefixStrip,
+ size_t *PrefixLen) {
+ const char *Prefix = getenv("GCOV_PREFIX");
+ const char *PrefixStripStr = getenv("GCOV_PREFIX_STRIP");
+
+ *PrefixLen = 0;
+ *PrefixStrip = 0;
+ if (Prefix == NULL || Prefix[0] == '\0')
+ return NULL;
+
+ if (PrefixStripStr) {
+ *PrefixStrip = atoi(PrefixStripStr);
+
+ /* Negative GCOV_PREFIX_STRIP values are ignored */
+ if (*PrefixStrip < 0)
+ *PrefixStrip = 0;
+ } else {
+ *PrefixStrip = 0;
+ }
+ *PrefixLen = strlen(Prefix);
+
+ return Prefix;
+}
+
+COMPILER_RT_VISIBILITY void
+lprofApplyPathPrefix(char *Dest, const char *PathStr, const char *Prefix,
+ size_t PrefixLen, int PrefixStrip) {
+
+ const char *Ptr;
+ int Level;
+ const char *StrippedPathStr = PathStr;
+
+ for (Level = 0, Ptr = PathStr + 1; Level < PrefixStrip; ++Ptr) {
+ if (*Ptr == '\0')
+ break;
+
+ if (!IS_DIR_SEPARATOR(*Ptr))
+ continue;
+
+ StrippedPathStr = Ptr;
+ ++Level;
+ }
+
+ memcpy(Dest, Prefix, PrefixLen);
+
+ if (!IS_DIR_SEPARATOR(Prefix[PrefixLen - 1]))
+ Dest[PrefixLen++] = DIR_SEPARATOR;
+
+ memcpy(Dest + PrefixLen, StrippedPathStr, strlen(StrippedPathStr) + 1);
+}
+
+COMPILER_RT_VISIBILITY const char *
+lprofFindFirstDirSeparator(const char *Path) {
+ const char *Sep = strchr(Path, DIR_SEPARATOR);
+#if defined(DIR_SEPARATOR_2)
+ const char *Sep2 = strchr(Path, DIR_SEPARATOR_2);
+ if (Sep2 && (!Sep || Sep2 < Sep))
+ Sep = Sep2;
+#endif
+ return Sep;
+}
+
+COMPILER_RT_VISIBILITY const char *lprofFindLastDirSeparator(const char *Path) {
+ const char *Sep = strrchr(Path, DIR_SEPARATOR);
+#if defined(DIR_SEPARATOR_2)
+ const char *Sep2 = strrchr(Path, DIR_SEPARATOR_2);
+ if (Sep2 && (!Sep || Sep2 > Sep))
+ Sep = Sep2;
+#endif
+ return Sep;
+}
+
+COMPILER_RT_VISIBILITY int lprofSuspendSigKill() {
+#if defined(__linux__)
+  int PDeathSig = 0;
+  /* Temporarily suspend getting SIGKILL upon exit of the parent process. */
+  if (prctl(PR_GET_PDEATHSIG, &PDeathSig) == 0 && PDeathSig == SIGKILL)
+    prctl(PR_SET_PDEATHSIG, 0);
+  return (PDeathSig == SIGKILL);
+#else
+ return 0;
+#endif
+}
+
+COMPILER_RT_VISIBILITY void lprofRestoreSigKill() {
+#if defined(__linux__)
+ prctl(PR_SET_PDEATHSIG, SIGKILL);
+#endif
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingUtil.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingUtil.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingUtil.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingUtil.h (revision 351984)
@@ -0,0 +1,72 @@
+/*===- InstrProfilingUtil.h - Support library for PGO instrumentation -----===*\
+|*
+|* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+|* See https://llvm.org/LICENSE.txt for license information.
+|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+|*
+\*===----------------------------------------------------------------------===*/
+
+#ifndef PROFILE_INSTRPROFILINGUTIL_H
+#define PROFILE_INSTRPROFILINGUTIL_H
+
+#include <stddef.h>
+#include <stdio.h>
+
+/*! \brief Create a directory tree. */
+void __llvm_profile_recursive_mkdir(char *Pathname);
+
+/*! Set the mode used when creating profile directories. */
+void __llvm_profile_set_dir_mode(unsigned Mode);
+
+/*! Return the directory creation mode. */
+unsigned __llvm_profile_get_dir_mode(void);
+
+int lprofLockFd(int fd);
+int lprofUnlockFd(int fd);
+int lprofLockFileHandle(FILE *F);
+int lprofUnlockFileHandle(FILE *F);
+
+/*! Open file \c Filename for read+write with write
+ * lock for exclusive access. The caller will block
+ * if the lock is already held by another process. */
+FILE *lprofOpenFileEx(const char *Filename);
+/* PS4 doesn't have getenv. Define a shim. */
+#if __ORBIS__
+static inline char *getenv(const char *name) { return NULL; }
+#endif /* #if __ORBIS__ */
+
+/* GCOV_PREFIX and GCOV_PREFIX_STRIP support */
+/* Return the path prefix specified by GCOV_PREFIX environment variable.
+ * If GCOV_PREFIX_STRIP is also specified, the strip level (integer value)
+ * is returned via \c *PrefixStrip. The prefix length is stored in *PrefixLen.
+ */
+const char *lprofGetPathPrefix(int *PrefixStrip, size_t *PrefixLen);
+/* Apply the path prefix specified in \c Prefix to the path string in \c PathStr,
+ * and store the result in the buffer pointed to by \c Dest. If \c PrefixStrip
+ * is not zero, path prefixes are stripped from \c PathStr (the level of
+ * stripping is specified by \c PrefixStrip) before \c Prefix is added.
+ */
+void lprofApplyPathPrefix(char *Dest, const char *PathStr, const char *Prefix,
+ size_t PrefixLen, int PrefixStrip);
+
+/* Returns a pointer to the first occurrence of a directory separator
+ * character in the string \c Path, or NULL if none is found. */
+const char *lprofFindFirstDirSeparator(const char *Path);
+/* Returns a pointer to the last occurrence of a directory separator
+ * character in the string \c Path, or NULL if none is found. */
+const char *lprofFindLastDirSeparator(const char *Path);
+
+int lprofGetHostName(char *Name, int Len);
+
+unsigned lprofBoolCmpXchg(void **Ptr, void *OldV, void *NewV);
+void *lprofPtrFetchAdd(void **Mem, long ByteIncr);
+
+/* Temporarily suspend SIGKILL. Return value of 1 means a restore is needed.
+ * Other return values mean no restore is needed.
+ */
+int lprofSuspendSigKill();
+
+/* Restore previously suspended SIGKILL. */
+void lprofRestoreSigKill();
+
+#endif /* PROFILE_INSTRPROFILINGUTIL_H */
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingUtil.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingValue.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingValue.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingValue.c (revision 351984)
@@ -0,0 +1,371 @@
+/*===- InstrProfilingValue.c - Support library for PGO instrumentation ----===*\
+|*
+|* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+|* See https://llvm.org/LICENSE.txt for license information.
+|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+|*
+\*===----------------------------------------------------------------------===*/
+
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "InstrProfiling.h"
+#include "InstrProfilingInternal.h"
+#include "InstrProfilingUtil.h"
+
+#define INSTR_PROF_VALUE_PROF_DATA
+#define INSTR_PROF_COMMON_API_IMPL
+#include "InstrProfData.inc"
+
+static int hasStaticCounters = 1;
+static int OutOfNodesWarnings = 0;
+static int hasNonDefaultValsPerSite = 0;
+#define INSTR_PROF_MAX_VP_WARNS 10
+#define INSTR_PROF_DEFAULT_NUM_VAL_PER_SITE 16
+#define INSTR_PROF_VNODE_POOL_SIZE 1024
+
+#ifndef _MSC_VER
+/* A shared static pool in addition to the vnodes statically
+ * allocated by the compiler. */
+COMPILER_RT_VISIBILITY ValueProfNode
+ lprofValueProfNodes[INSTR_PROF_VNODE_POOL_SIZE] COMPILER_RT_SECTION(
+ COMPILER_RT_SEG INSTR_PROF_VNODES_SECT_NAME);
+#endif
+
+COMPILER_RT_VISIBILITY uint32_t VPMaxNumValsPerSite =
+ INSTR_PROF_DEFAULT_NUM_VAL_PER_SITE;
+
+COMPILER_RT_VISIBILITY void lprofSetupValueProfiler() {
+ const char *Str = 0;
+ Str = getenv("LLVM_VP_MAX_NUM_VALS_PER_SITE");
+ if (Str && Str[0]) {
+ VPMaxNumValsPerSite = atoi(Str);
+ hasNonDefaultValsPerSite = 1;
+ }
+ if (VPMaxNumValsPerSite > INSTR_PROF_MAX_NUM_VAL_PER_SITE)
+ VPMaxNumValsPerSite = INSTR_PROF_MAX_NUM_VAL_PER_SITE;
+}
+
+COMPILER_RT_VISIBILITY void lprofSetMaxValsPerSite(uint32_t MaxVals) {
+ VPMaxNumValsPerSite = MaxVals;
+ hasNonDefaultValsPerSite = 1;
+}
+
+/* This method is only used in value profiler mock testing. */
+COMPILER_RT_VISIBILITY void
+__llvm_profile_set_num_value_sites(__llvm_profile_data *Data,
+ uint32_t ValueKind, uint16_t NumValueSites) {
+ *((uint16_t *)&Data->NumValueSites[ValueKind]) = NumValueSites;
+}
+
+/* This method is only used in value profiler mock testing. */
+COMPILER_RT_VISIBILITY const __llvm_profile_data *
+__llvm_profile_iterate_data(const __llvm_profile_data *Data) {
+ return Data + 1;
+}
+
+/* This method is only used in value profiler mock testing. */
+COMPILER_RT_VISIBILITY void *
+__llvm_get_function_addr(const __llvm_profile_data *Data) {
+ return Data->FunctionPointer;
+}
+
+/* Allocate an array that holds the pointers to the linked lists of
+ * value profile counter nodes. The number of elements in the array
+ * is the total number of value profile sites instrumented. Returns
+ * 0 if allocation fails.
+ */
+
+static int allocateValueProfileCounters(__llvm_profile_data *Data) {
+ uint64_t NumVSites = 0;
+ uint32_t VKI;
+
+  /* This function will never be called when the value site array is allocated
+     statically at compile time. */
+  hasStaticCounters = 0;
+  /* When dynamic allocation is enabled, allow tracking up to the maximum
+   * number of values allowed. */
+ if (!hasNonDefaultValsPerSite)
+ VPMaxNumValsPerSite = INSTR_PROF_MAX_NUM_VAL_PER_SITE;
+
+ for (VKI = IPVK_First; VKI <= IPVK_Last; ++VKI)
+ NumVSites += Data->NumValueSites[VKI];
+
+ ValueProfNode **Mem =
+ (ValueProfNode **)calloc(NumVSites, sizeof(ValueProfNode *));
+ if (!Mem)
+ return 0;
+ if (!COMPILER_RT_BOOL_CMPXCHG(&Data->Values, 0, Mem)) {
+ free(Mem);
+ return 0;
+ }
+ return 1;
+}
+
+static ValueProfNode *allocateOneNode(void) {
+ ValueProfNode *Node;
+
+ if (!hasStaticCounters)
+ return (ValueProfNode *)calloc(1, sizeof(ValueProfNode));
+
+  /* Early check to avoid the node pointer wrapping around. */
+ if (CurrentVNode + 1 > EndVNode) {
+ if (OutOfNodesWarnings++ < INSTR_PROF_MAX_VP_WARNS) {
+ PROF_WARN("Unable to track new values: %s. "
+ " Consider using option -mllvm -vp-counters-per-site=<n> to "
+ "allocate more"
+ " value profile counters at compile time. \n",
+ "Running out of static counters");
+ }
+ return 0;
+ }
+ Node = COMPILER_RT_PTR_FETCH_ADD(ValueProfNode, CurrentVNode, 1);
+  /* Due to section padding, EndVNode may point one past an incomplete VNode,
+   * so we need to skip the last incomplete node. */
+ if (Node + 1 > EndVNode)
+ return 0;
+
+ return Node;
+}
+
+static COMPILER_RT_ALWAYS_INLINE void
+instrumentTargetValueImpl(uint64_t TargetValue, void *Data,
+ uint32_t CounterIndex, uint64_t CountValue) {
+ __llvm_profile_data *PData = (__llvm_profile_data *)Data;
+ if (!PData)
+ return;
+ if (!CountValue)
+ return;
+ if (!PData->Values) {
+ if (!allocateValueProfileCounters(PData))
+ return;
+ }
+
+ ValueProfNode **ValueCounters = (ValueProfNode **)PData->Values;
+ ValueProfNode *PrevVNode = NULL;
+ ValueProfNode *MinCountVNode = NULL;
+ ValueProfNode *CurVNode = ValueCounters[CounterIndex];
+ uint64_t MinCount = UINT64_MAX;
+
+ uint8_t VDataCount = 0;
+ while (CurVNode) {
+ if (TargetValue == CurVNode->Value) {
+ CurVNode->Count += CountValue;
+ return;
+ }
+ if (CurVNode->Count < MinCount) {
+ MinCount = CurVNode->Count;
+ MinCountVNode = CurVNode;
+ }
+ PrevVNode = CurVNode;
+ CurVNode = CurVNode->Next;
+ ++VDataCount;
+ }
+
+ if (VDataCount >= VPMaxNumValsPerSite) {
+ /* Bump down the min count node's count. If it reaches 0,
+ * evict it. This eviction/replacement policy makes hot
+ * targets more sticky while cold targets less so. In other
+ * words, it makes it less likely for the hot targets to be
+       * prematurely evicted during the warmup/establishment period,
+ * when their counts are still low. In a special case when
+ * the number of values tracked is reduced to only one, this
+ * policy will guarantee that the dominating target with >50%
+ * total count will survive in the end. Note that this scheme
+ * allows the runtime to track the min count node in an adaptive
+ * manner. It can correct previous mistakes and eventually
+       * lock onto a cold target that is already in a stable state.
+ *
+ * In very rare cases, this replacement scheme may still lead
+ * to target loss. For instance, out of \c N value slots, \c N-1
+       * slots are occupied by lukewarm targets during the warmup
+       * period and the remaining slot is contended for by two or more
+ * very hot targets. If those hot targets occur in an interleaved
+ * way, none of them will survive (gain enough weight to throw out
+ * other established entries) due to the ping-pong effect.
+ * To handle this situation, user can choose to increase the max
+ * number of tracked values per value site. Alternatively, a more
+ * expensive eviction mechanism can be implemented. It requires
+ * the runtime to track the total number of evictions per-site.
+       * When the total number of evictions reaches a certain threshold,
+ * the runtime can wipe out more than one lowest count entries
+ * to give space for hot targets.
+ */
+ if (MinCountVNode->Count <= CountValue) {
+ CurVNode = MinCountVNode;
+ CurVNode->Value = TargetValue;
+ CurVNode->Count = CountValue;
+ } else
+ MinCountVNode->Count -= CountValue;
+
+ return;
+ }
+
+ CurVNode = allocateOneNode();
+ if (!CurVNode)
+ return;
+ CurVNode->Value = TargetValue;
+ CurVNode->Count += CountValue;
+
+ uint32_t Success = 0;
+ if (!ValueCounters[CounterIndex])
+ Success =
+ COMPILER_RT_BOOL_CMPXCHG(&ValueCounters[CounterIndex], 0, CurVNode);
+ else if (PrevVNode && !PrevVNode->Next)
+ Success = COMPILER_RT_BOOL_CMPXCHG(&(PrevVNode->Next), 0, CurVNode);
+
+ if (!Success && !hasStaticCounters) {
+ free(CurVNode);
+ return;
+ }
+}
+
+COMPILER_RT_VISIBILITY void
+__llvm_profile_instrument_target(uint64_t TargetValue, void *Data,
+ uint32_t CounterIndex) {
+ instrumentTargetValueImpl(TargetValue, Data, CounterIndex, 1);
+}
+COMPILER_RT_VISIBILITY void
+__llvm_profile_instrument_target_value(uint64_t TargetValue, void *Data,
+ uint32_t CounterIndex,
+ uint64_t CountValue) {
+ instrumentTargetValueImpl(TargetValue, Data, CounterIndex, CountValue);
+}
+
+/*
+ * The target values are partitioned into multiple regions/ranges. There is one
+ * contiguous region which is precise -- every value in the range is tracked
+ * individually. A value outside the precise region will be collapsed into one
+ * value depending on the region it falls in.
+ *
+ * There are three regions:
+ * 1. (-inf, PreciseRangeStart) and (PreciseRangeLast, LargeValue) belong
+ * to one region -- all values here should be mapped to one value of
+ * "PreciseRangeLast + 1".
+ * 2. [PreciseRangeStart, PreciseRangeLast]
+ * 3. Large values: [LargeValue, +inf) maps to one value of LargeValue.
+ *
+ * The range for large values is optional. The default value of INT64_MIN
+ * indicates it is not specified.
+ */
+COMPILER_RT_VISIBILITY void __llvm_profile_instrument_range(
+ uint64_t TargetValue, void *Data, uint32_t CounterIndex,
+ int64_t PreciseRangeStart, int64_t PreciseRangeLast, int64_t LargeValue) {
+
+ if (LargeValue != INT64_MIN && (int64_t)TargetValue >= LargeValue)
+ TargetValue = LargeValue;
+ else if ((int64_t)TargetValue < PreciseRangeStart ||
+ (int64_t)TargetValue > PreciseRangeLast)
+ TargetValue = PreciseRangeLast + 1;
+
+ __llvm_profile_instrument_target(TargetValue, Data, CounterIndex);
+}
+
+/*
+ * A wrapper struct that represents value profile runtime data.
+ * Like the InstrProfRecord class used by profiling host tools,
+ * ValueProfRuntimeRecord also implements the abstract interfaces defined in
+ * ValueProfRecordClosure so that the runtime data can be serialized using
+ * shared C implementation.
+ */
+typedef struct ValueProfRuntimeRecord {
+ const __llvm_profile_data *Data;
+ ValueProfNode **NodesKind[IPVK_Last + 1];
+ uint8_t **SiteCountArray;
+} ValueProfRuntimeRecord;
+
+/* ValueProfRecordClosure Interface implementation. */
+
+static uint32_t getNumValueSitesRT(const void *R, uint32_t VK) {
+ return ((const ValueProfRuntimeRecord *)R)->Data->NumValueSites[VK];
+}
+
+static uint32_t getNumValueDataRT(const void *R, uint32_t VK) {
+ uint32_t S = 0, I;
+ const ValueProfRuntimeRecord *Record = (const ValueProfRuntimeRecord *)R;
+ if (Record->SiteCountArray[VK] == INSTR_PROF_NULLPTR)
+ return 0;
+ for (I = 0; I < Record->Data->NumValueSites[VK]; I++)
+ S += Record->SiteCountArray[VK][I];
+ return S;
+}
+
+static uint32_t getNumValueDataForSiteRT(const void *R, uint32_t VK,
+ uint32_t S) {
+ const ValueProfRuntimeRecord *Record = (const ValueProfRuntimeRecord *)R;
+ return Record->SiteCountArray[VK][S];
+}
+
+static ValueProfRuntimeRecord RTRecord;
+static ValueProfRecordClosure RTRecordClosure = {
+ &RTRecord, INSTR_PROF_NULLPTR, /* GetNumValueKinds */
+ getNumValueSitesRT, getNumValueDataRT, getNumValueDataForSiteRT,
+ INSTR_PROF_NULLPTR, /* RemapValueData */
+ INSTR_PROF_NULLPTR, /* GetValueForSite, */
+ INSTR_PROF_NULLPTR /* AllocValueProfData */
+};
+
+static uint32_t
+initializeValueProfRuntimeRecord(const __llvm_profile_data *Data,
+ uint8_t *SiteCountArray[]) {
+ unsigned I, J, S = 0, NumValueKinds = 0;
+ ValueProfNode **Nodes = (ValueProfNode **)Data->Values;
+ RTRecord.Data = Data;
+ RTRecord.SiteCountArray = SiteCountArray;
+ for (I = 0; I <= IPVK_Last; I++) {
+ uint16_t N = Data->NumValueSites[I];
+ if (!N)
+ continue;
+
+ NumValueKinds++;
+
+ RTRecord.NodesKind[I] = Nodes ? &Nodes[S] : INSTR_PROF_NULLPTR;
+ for (J = 0; J < N; J++) {
+ /* Compute value count for each site. */
+ uint32_t C = 0;
+ ValueProfNode *Site =
+ Nodes ? RTRecord.NodesKind[I][J] : INSTR_PROF_NULLPTR;
+ while (Site) {
+ C++;
+ Site = Site->Next;
+ }
+ if (C > UCHAR_MAX)
+ C = UCHAR_MAX;
+ RTRecord.SiteCountArray[I][J] = C;
+ }
+ S += N;
+ }
+ return NumValueKinds;
+}
+
+static ValueProfNode *getNextNValueData(uint32_t VK, uint32_t Site,
+ InstrProfValueData *Dst,
+ ValueProfNode *StartNode, uint32_t N) {
+ unsigned I;
+ ValueProfNode *VNode = StartNode ? StartNode : RTRecord.NodesKind[VK][Site];
+ for (I = 0; I < N; I++) {
+ Dst[I].Value = VNode->Value;
+ Dst[I].Count = VNode->Count;
+ VNode = VNode->Next;
+ }
+ return VNode;
+}
+
+static uint32_t getValueProfDataSizeWrapper(void) {
+ return getValueProfDataSize(&RTRecordClosure);
+}
+
+static uint32_t getNumValueDataForSiteWrapper(uint32_t VK, uint32_t S) {
+ return getNumValueDataForSiteRT(&RTRecord, VK, S);
+}
+
+static VPDataReaderType TheVPDataReader = {
+ initializeValueProfRuntimeRecord, getValueProfRecordHeaderSize,
+ getFirstValueProfRecord, getNumValueDataForSiteWrapper,
+ getValueProfDataSizeWrapper, getNextNValueData};
+
+COMPILER_RT_VISIBILITY VPDataReaderType *lprofGetVPDataReader() {
+ return &TheVPDataReader;
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingValue.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingWriter.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingWriter.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingWriter.c (revision 351984)
@@ -0,0 +1,286 @@
+/*===- InstrProfilingWriter.c - Write instrumentation to a file or buffer -===*\
+|*
+|* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+|* See https://llvm.org/LICENSE.txt for license information.
+|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+|*
+\*===----------------------------------------------------------------------===*/
+
+#ifdef _MSC_VER
+/* For _alloca */
+#include <malloc.h>
+#endif
+#include <string.h>
+
+#include "InstrProfiling.h"
+#include "InstrProfilingInternal.h"
+
+#define INSTR_PROF_VALUE_PROF_DATA
+#include "InstrProfData.inc"
+
+COMPILER_RT_VISIBILITY void (*FreeHook)(void *) = NULL;
+static ProfBufferIO TheBufferIO;
+#define VP_BUFFER_SIZE (8 * 1024)
+static uint8_t BufferIOBuffer[VP_BUFFER_SIZE];
+static InstrProfValueData VPDataArray[16];
+static uint32_t VPDataArraySize = sizeof(VPDataArray) / sizeof(*VPDataArray);
+
+COMPILER_RT_VISIBILITY uint8_t *DynamicBufferIOBuffer = 0;
+COMPILER_RT_VISIBILITY uint32_t VPBufferSize = 0;
+
+/* The buffer writer is responsible for keeping the writer state
+ * across calls.
+ */
+COMPILER_RT_VISIBILITY uint32_t lprofBufferWriter(ProfDataWriter *This,
+ ProfDataIOVec *IOVecs,
+ uint32_t NumIOVecs) {
+ uint32_t I;
+ char **Buffer = (char **)&This->WriterCtx;
+ for (I = 0; I < NumIOVecs; I++) {
+ size_t Length = IOVecs[I].ElmSize * IOVecs[I].NumElm;
+ if (IOVecs[I].Data)
+ memcpy(*Buffer, IOVecs[I].Data, Length);
+ *Buffer += Length;
+ }
+ return 0;
+}
+
+static void llvmInitBufferIO(ProfBufferIO *BufferIO, ProfDataWriter *FileWriter,
+ uint8_t *Buffer, uint32_t BufferSz) {
+ BufferIO->FileWriter = FileWriter;
+ BufferIO->OwnFileWriter = 0;
+ BufferIO->BufferStart = Buffer;
+ BufferIO->BufferSz = BufferSz;
+ BufferIO->CurOffset = 0;
+}
+
+COMPILER_RT_VISIBILITY ProfBufferIO *
+lprofCreateBufferIO(ProfDataWriter *FileWriter) {
+ uint8_t *Buffer = DynamicBufferIOBuffer;
+ uint32_t BufferSize = VPBufferSize;
+ if (!Buffer) {
+ Buffer = &BufferIOBuffer[0];
+ BufferSize = sizeof(BufferIOBuffer);
+ }
+ llvmInitBufferIO(&TheBufferIO, FileWriter, Buffer, BufferSize);
+ return &TheBufferIO;
+}
+
+COMPILER_RT_VISIBILITY void lprofDeleteBufferIO(ProfBufferIO *BufferIO) {
+ if (BufferIO->OwnFileWriter)
+ FreeHook(BufferIO->FileWriter);
+ if (DynamicBufferIOBuffer) {
+ FreeHook(DynamicBufferIOBuffer);
+ DynamicBufferIOBuffer = 0;
+ VPBufferSize = 0;
+ }
+}
+
+COMPILER_RT_VISIBILITY int
+lprofBufferIOWrite(ProfBufferIO *BufferIO, const uint8_t *Data, uint32_t Size) {
+  /* The buffer is not large enough; flush it before writing. */
+ if (Size + BufferIO->CurOffset > BufferIO->BufferSz) {
+ if (lprofBufferIOFlush(BufferIO) != 0)
+ return -1;
+ }
+  /* Special case: data larger than the buffer bypasses it completely. */
+ ProfDataIOVec IO[] = {{Data, sizeof(uint8_t), Size}};
+ if (Size > BufferIO->BufferSz) {
+ if (BufferIO->FileWriter->Write(BufferIO->FileWriter, IO, 1))
+ return -1;
+ } else {
+ /* Write the data to buffer */
+ uint8_t *Buffer = BufferIO->BufferStart + BufferIO->CurOffset;
+ ProfDataWriter BufferWriter;
+ initBufferWriter(&BufferWriter, (char *)Buffer);
+ lprofBufferWriter(&BufferWriter, IO, 1);
+ BufferIO->CurOffset =
+ (uint8_t *)BufferWriter.WriterCtx - BufferIO->BufferStart;
+ }
+ return 0;
+}
+
+COMPILER_RT_VISIBILITY int lprofBufferIOFlush(ProfBufferIO *BufferIO) {
+ if (BufferIO->CurOffset) {
+ ProfDataIOVec IO[] = {
+ {BufferIO->BufferStart, sizeof(uint8_t), BufferIO->CurOffset}};
+ if (BufferIO->FileWriter->Write(BufferIO->FileWriter, IO, 1))
+ return -1;
+ BufferIO->CurOffset = 0;
+ }
+ return 0;
+}
+
+/* Write out the value profile data for the function specified by \c Data.
+ * The implementation does not use the method \c serializeValueProfData
+ * which depends on dynamic memory allocation. In this implementation,
+ * value profile data is written out to \c BufferIO piecemeal.
+ */
+static int writeOneValueProfData(ProfBufferIO *BufferIO,
+ VPDataReaderType *VPDataReader,
+ const __llvm_profile_data *Data) {
+ unsigned I, NumValueKinds = 0;
+ ValueProfData VPHeader;
+ uint8_t *SiteCountArray[IPVK_Last + 1];
+
+ for (I = 0; I <= IPVK_Last; I++) {
+ if (!Data->NumValueSites[I])
+ SiteCountArray[I] = 0;
+ else {
+ uint32_t Sz =
+ VPDataReader->GetValueProfRecordHeaderSize(Data->NumValueSites[I]) -
+ offsetof(ValueProfRecord, SiteCountArray);
+ /* Only use alloca for this small byte array to avoid excessive
+ * stack growth. */
+ SiteCountArray[I] = (uint8_t *)COMPILER_RT_ALLOCA(Sz);
+ memset(SiteCountArray[I], 0, Sz);
+ }
+ }
+
+  /* If the NumValueKinds returned is 0, there is nothing to write; report
+     success and return. This should match the raw profile reader's behavior. */
+ if (!(NumValueKinds = VPDataReader->InitRTRecord(Data, SiteCountArray)))
+ return 0;
+
+ /* First write the header structure. */
+ VPHeader.TotalSize = VPDataReader->GetValueProfDataSize();
+ VPHeader.NumValueKinds = NumValueKinds;
+ if (lprofBufferIOWrite(BufferIO, (const uint8_t *)&VPHeader,
+ sizeof(ValueProfData)))
+ return -1;
+
+ /* Make sure nothing else needs to be written before value profile
+ * records. */
+ if ((void *)VPDataReader->GetFirstValueProfRecord(&VPHeader) !=
+ (void *)(&VPHeader + 1))
+ return -1;
+
+ /* Write out the value profile record for each value kind
+ * one by one. */
+ for (I = 0; I <= IPVK_Last; I++) {
+ uint32_t J;
+ ValueProfRecord RecordHeader;
+ /* The size of the value prof record header without counting the
+     * site count array. */
+ uint32_t RecordHeaderSize = offsetof(ValueProfRecord, SiteCountArray);
+ uint32_t SiteCountArraySize;
+
+ if (!Data->NumValueSites[I])
+ continue;
+
+ /* Write out the record header. */
+ RecordHeader.Kind = I;
+ RecordHeader.NumValueSites = Data->NumValueSites[I];
+ if (lprofBufferIOWrite(BufferIO, (const uint8_t *)&RecordHeader,
+ RecordHeaderSize))
+ return -1;
+
+ /* Write out the site value count array including padding space. */
+ SiteCountArraySize =
+ VPDataReader->GetValueProfRecordHeaderSize(Data->NumValueSites[I]) -
+ RecordHeaderSize;
+ if (lprofBufferIOWrite(BufferIO, SiteCountArray[I], SiteCountArraySize))
+ return -1;
+
+ /* Write out the value profile data for each value site. */
+ for (J = 0; J < Data->NumValueSites[I]; J++) {
+ uint32_t NRead, NRemain;
+ ValueProfNode *NextStartNode = 0;
+ NRemain = VPDataReader->GetNumValueDataForSite(I, J);
+ if (!NRemain)
+ continue;
+ /* Read and write out value data in small chunks till it is done. */
+ do {
+ NRead = (NRemain > VPDataArraySize ? VPDataArraySize : NRemain);
+ NextStartNode =
+ VPDataReader->GetValueData(I, /* ValueKind */
+ J, /* Site */
+ &VPDataArray[0], NextStartNode, NRead);
+ if (lprofBufferIOWrite(BufferIO, (const uint8_t *)&VPDataArray[0],
+ NRead * sizeof(InstrProfValueData)))
+ return -1;
+ NRemain -= NRead;
+ } while (NRemain != 0);
+ }
+ }
+  /* All done, report success. */
+ return 0;
+}
+
+static int writeValueProfData(ProfDataWriter *Writer,
+ VPDataReaderType *VPDataReader,
+ const __llvm_profile_data *DataBegin,
+ const __llvm_profile_data *DataEnd) {
+ ProfBufferIO *BufferIO;
+ const __llvm_profile_data *DI = 0;
+
+ if (!VPDataReader)
+ return 0;
+
+ BufferIO = lprofCreateBufferIO(Writer);
+
+ for (DI = DataBegin; DI < DataEnd; DI++) {
+ if (writeOneValueProfData(BufferIO, VPDataReader, DI))
+ return -1;
+ }
+
+ if (lprofBufferIOFlush(BufferIO) != 0)
+ return -1;
+ lprofDeleteBufferIO(BufferIO);
+
+ return 0;
+}
+
+COMPILER_RT_VISIBILITY int lprofWriteData(ProfDataWriter *Writer,
+ VPDataReaderType *VPDataReader,
+ int SkipNameDataWrite) {
+ /* Match logic in __llvm_profile_write_buffer(). */
+ const __llvm_profile_data *DataBegin = __llvm_profile_begin_data();
+ const __llvm_profile_data *DataEnd = __llvm_profile_end_data();
+ const uint64_t *CountersBegin = __llvm_profile_begin_counters();
+ const uint64_t *CountersEnd = __llvm_profile_end_counters();
+ const char *NamesBegin = __llvm_profile_begin_names();
+ const char *NamesEnd = __llvm_profile_end_names();
+ return lprofWriteDataImpl(Writer, DataBegin, DataEnd, CountersBegin,
+ CountersEnd, VPDataReader, NamesBegin, NamesEnd,
+ SkipNameDataWrite);
+}
+
+COMPILER_RT_VISIBILITY int
+lprofWriteDataImpl(ProfDataWriter *Writer, const __llvm_profile_data *DataBegin,
+ const __llvm_profile_data *DataEnd,
+ const uint64_t *CountersBegin, const uint64_t *CountersEnd,
+ VPDataReaderType *VPDataReader, const char *NamesBegin,
+ const char *NamesEnd, int SkipNameDataWrite) {
+
+ /* Calculate size of sections. */
+ const uint64_t DataSize = __llvm_profile_get_data_size(DataBegin, DataEnd);
+ const uint64_t CountersSize = CountersEnd - CountersBegin;
+ const uint64_t NamesSize = NamesEnd - NamesBegin;
+ const uint64_t Padding = __llvm_profile_get_num_padding_bytes(NamesSize);
+
+ /* Enough zeroes for padding. */
+ const char Zeroes[sizeof(uint64_t)] = {0};
+
+ /* Create the header. */
+ __llvm_profile_header Header;
+
+ if (!DataSize)
+ return 0;
+
+/* Initialize header structure. */
+#define INSTR_PROF_RAW_HEADER(Type, Name, Init) Header.Name = Init;
+#include "InstrProfData.inc"
+
+ /* Write the data. */
+ ProfDataIOVec IOVec[] = {
+ {&Header, sizeof(__llvm_profile_header), 1},
+ {DataBegin, sizeof(__llvm_profile_data), DataSize},
+ {CountersBegin, sizeof(uint64_t), CountersSize},
+ {SkipNameDataWrite ? NULL : NamesBegin, sizeof(uint8_t), NamesSize},
+ {Zeroes, sizeof(uint8_t), Padding}};
+ if (Writer->Write(Writer, IOVec, sizeof(IOVec) / sizeof(*IOVec)))
+ return -1;
+
+ return writeValueProfData(Writer, VPDataReader, DataBegin, DataEnd);
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/InstrProfilingWriter.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/WindowsMMap.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/WindowsMMap.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/WindowsMMap.h (revision 351984)
@@ -0,0 +1,66 @@
+/*===- WindowsMMap.h - Support library for PGO instrumentation ------------===*\
+|*
+|* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+|* See https://llvm.org/LICENSE.txt for license information.
+|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+|*
+\*===----------------------------------------------------------------------===*/
+
+#ifndef PROFILE_INSTRPROFILING_WINDOWS_MMAP_H
+#define PROFILE_INSTRPROFILING_WINDOWS_MMAP_H
+
+#if defined(_WIN32)
+
+#include <basetsd.h>
+#include <io.h>
+#include <sys/types.h>
+
+/*
+ * mmap() flags
+ */
+#define PROT_READ 0x1
+#define PROT_WRITE 0x2
+#define PROT_EXEC 0x0
+
+#define MAP_FILE 0x00
+#define MAP_SHARED 0x01
+#define MAP_PRIVATE 0x02
+#define MAP_ANONYMOUS 0x20
+#define MAP_ANON MAP_ANONYMOUS
+#define MAP_FAILED ((void *) -1)
+
+/*
+ * msync() flags
+ */
+#define MS_ASYNC 0x0001 /* return immediately */
+#define MS_INVALIDATE 0x0002 /* invalidate all cached data */
+#define MS_SYNC 0x0010 /* msync synchronously */
+
+/*
+ * flock() operations
+ */
+#define LOCK_SH 1 /* shared lock */
+#define LOCK_EX 2 /* exclusive lock */
+#define LOCK_NB 4 /* don't block when locking */
+#define LOCK_UN 8 /* unlock */
+
+#ifdef __USE_FILE_OFFSET64
+# define DWORD_HI(x) (x >> 32)
+# define DWORD_LO(x) ((x) & 0xffffffff)
+#else
+# define DWORD_HI(x) (0)
+# define DWORD_LO(x) (x)
+#endif
+
+void *mmap(void *start, size_t length, int prot, int flags, int fd,
+ off_t offset);
+
+void munmap(void *addr, size_t length);
+
+int msync(void *addr, size_t length, int flags);
+
+int flock(int fd, int operation);
+
+#endif /* _WIN32 */
+
+#endif /* PROFILE_INSTRPROFILING_WINDOWS_MMAP_H */
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/WindowsMMap.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/WindowsMMap.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/WindowsMMap.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/WindowsMMap.c (revision 351984)
@@ -0,0 +1,176 @@
+/*
+ * This code is derived from uClibc (original license follows).
+ * https://git.uclibc.org/uClibc/tree/utils/mmap-windows.c
+ */
+/* mmap() replacement for Windows
+ *
+ * Author: Mike Frysinger <vapier@gentoo.org>
+ * Placed into the public domain
+ */
+
+/* References:
+ * CreateFileMapping: http://msdn.microsoft.com/en-us/library/aa366537(VS.85).aspx
+ * CloseHandle: http://msdn.microsoft.com/en-us/library/ms724211(VS.85).aspx
+ * MapViewOfFile: http://msdn.microsoft.com/en-us/library/aa366761(VS.85).aspx
+ * UnmapViewOfFile: http://msdn.microsoft.com/en-us/library/aa366882(VS.85).aspx
+ */
+
+#if defined(_WIN32)
+
+#include "WindowsMMap.h"
+
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+
+#include "InstrProfiling.h"
+
+COMPILER_RT_VISIBILITY
+void *mmap(void *start, size_t length, int prot, int flags, int fd, off_t offset)
+{
+ if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC))
+ return MAP_FAILED;
+ if (fd == -1) {
+ if (!(flags & MAP_ANON) || offset)
+ return MAP_FAILED;
+ } else if (flags & MAP_ANON)
+ return MAP_FAILED;
+
+ DWORD flProtect;
+ if (prot & PROT_WRITE) {
+ if (prot & PROT_EXEC)
+ flProtect = PAGE_EXECUTE_READWRITE;
+ else
+ flProtect = PAGE_READWRITE;
+ } else if (prot & PROT_EXEC) {
+ if (prot & PROT_READ)
+ flProtect = PAGE_EXECUTE_READ;
+ else /* PROT_EXEC is already known to be set in this branch */
+ flProtect = PAGE_EXECUTE;
+ } else
+ flProtect = PAGE_READONLY;
+
+ off_t end = length + offset;
+ HANDLE mmap_fd, h;
+ if (fd == -1)
+ mmap_fd = INVALID_HANDLE_VALUE;
+ else
+ mmap_fd = (HANDLE)_get_osfhandle(fd);
+ h = CreateFileMapping(mmap_fd, NULL, flProtect, DWORD_HI(end), DWORD_LO(end), NULL);
+ if (h == NULL)
+ return MAP_FAILED;
+
+ DWORD dwDesiredAccess;
+ if (prot & PROT_WRITE)
+ dwDesiredAccess = FILE_MAP_WRITE;
+ else
+ dwDesiredAccess = FILE_MAP_READ;
+ if (prot & PROT_EXEC)
+ dwDesiredAccess |= FILE_MAP_EXECUTE;
+ if (flags & MAP_PRIVATE)
+ dwDesiredAccess |= FILE_MAP_COPY;
+ void *ret = MapViewOfFile(h, dwDesiredAccess, DWORD_HI(offset), DWORD_LO(offset), length);
+ if (ret == NULL) {
+ CloseHandle(h);
+ ret = MAP_FAILED;
+ }
+ return ret;
+}
+
+COMPILER_RT_VISIBILITY
+void munmap(void *addr, size_t length)
+{
+ UnmapViewOfFile(addr);
+ /* FIXME: the handle returned by CreateFileMapping() is leaked here. */
+}
+
+COMPILER_RT_VISIBILITY
+int msync(void *addr, size_t length, int flags)
+{
+ if (flags & MS_INVALIDATE)
+ return -1; /* Not supported. */
+
+ /* Exactly one of MS_ASYNC or MS_SYNC must be specified. */
+ switch (flags & (MS_ASYNC | MS_SYNC)) {
+ case MS_SYNC:
+ case MS_ASYNC:
+ break;
+ default:
+ return -1;
+ }
+
+ if (!FlushViewOfFile(addr, length))
+ return -1;
+
+ if (flags & MS_SYNC) {
+ /* FIXME: No longer have access to handle from CreateFileMapping(). */
+ /*
+ * if (!FlushFileBuffers(h))
+ * return -1;
+ */
+ }
+
+ return 0;
+}
+
+COMPILER_RT_VISIBILITY
+int lock(HANDLE handle, DWORD lockType, BOOL blocking) {
+ DWORD flags = lockType;
+ if (!blocking)
+ flags |= LOCKFILE_FAIL_IMMEDIATELY;
+
+ OVERLAPPED overlapped;
+ ZeroMemory(&overlapped, sizeof(OVERLAPPED));
+ overlapped.hEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
+ BOOL result = LockFileEx(handle, flags, 0, MAXDWORD, MAXDWORD, &overlapped);
+ if (!result) {
+ DWORD dw = GetLastError();
+
+ // In non-blocking mode, return an error if the file is locked.
+ if (!blocking && dw == ERROR_LOCK_VIOLATION)
+ return -1; // EWOULDBLOCK
+
+ // If the error is ERROR_IO_PENDING, we need to wait until the operation
+ // finishes. Otherwise, we return an error.
+ if (dw != ERROR_IO_PENDING)
+ return -1;
+
+ DWORD dwNumBytes;
+ if (!GetOverlappedResult(handle, &overlapped, &dwNumBytes, TRUE))
+ return -1;
+ }
+
+ return 0;
+}
+
+COMPILER_RT_VISIBILITY
+int flock(int fd, int operation) {
+ HANDLE handle = (HANDLE)_get_osfhandle(fd);
+ if (handle == INVALID_HANDLE_VALUE)
+ return -1;
+
+ BOOL blocking = (operation & LOCK_NB) == 0;
+ int op = operation & ~LOCK_NB;
+
+ switch (op) {
+ case LOCK_EX:
+ return lock(handle, LOCKFILE_EXCLUSIVE_LOCK, blocking);
+
+ case LOCK_SH:
+ return lock(handle, 0, blocking);
+
+ case LOCK_UN:
+ if (!UnlockFile(handle, 0, 0, MAXDWORD, MAXDWORD))
+ return -1;
+ break;
+
+ default:
+ return -1;
+ }
+
+ return 0;
+}
+
+#undef DWORD_HI
+#undef DWORD_LO
+
+#endif /* _WIN32 */
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/profile/WindowsMMap.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
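
Two details of the emulation above are worth calling out. First,
CreateFileMapping()/MapViewOfFile() take 64-bit sizes split into two
DWORDs, which is what the DWORD_HI()/DWORD_LO() macros provide. Second,
flock() is rebuilt on LockFileEx()/UnlockFile(), so the usual advisory
locking idiom carries over unchanged. A short sketch, assuming only the
shim's declared functions (the read-modify-write step is an illustrative
placeholder):

#if defined(_WIN32)
#include "WindowsMMap.h"

/* Take an exclusive, non-blocking lock around a read-modify-write. */
static int with_exclusive_lock(int fd) {
  if (flock(fd, LOCK_EX | LOCK_NB) == -1)
    return -1;               /* lock held elsewhere; LockFileEx failed fast */
  /* ... merge or rewrite the file under the lock ... */
  return flock(fd, LOCK_UN); /* maps to UnlockFile() */
}
#endif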
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/safestack/safestack.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/safestack/safestack.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/safestack/safestack.cc (revision 351984)
@@ -0,0 +1,310 @@
+//===-- safestack.cc ------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements the runtime support for the safe stack protection
+// mechanism. The runtime manages allocation/deallocation of the unsafe stack
+// for the main thread, as well as all pthreads that are created/destroyed
+// during program execution.
+//
+//===----------------------------------------------------------------------===//
+
+#include "safestack_platform.h"
+#include "safestack_util.h"
+
+#include <errno.h>
+#include <sys/resource.h>
+
+#include "interception/interception.h"
+
+using namespace safestack;
+
+// TODO: To make accessing the unsafe stack pointer faster, we plan to
+// eventually store it directly in the thread control block data structure on
+// platforms where this structure is pointed to by %fs or %gs. This is exactly
+// the same mechanism as currently being used by the traditional stack
+// protector pass to store the stack guard (see getStackCookieLocation()
+// function above). Doing so requires changing the tcbhead_t struct in glibc
+// on Linux and tcb struct in libc on FreeBSD.
+//
+// For now, store it in a thread-local variable.
+extern "C" {
+__attribute__((visibility(
+ "default"))) __thread void *__safestack_unsafe_stack_ptr = nullptr;
+}
+
+namespace {
+
+// TODO: The runtime library does not currently protect the safe stack beyond
+// relying on the system-enforced ASLR. The protection of the (safe) stack can
+// be provided by three alternative features:
+//
+// 1) Protection via hardware segmentation on x86-32 and some x86-64
+// architectures: the (safe) stack segment (implicitly accessed via the %ss
+// segment register) can be separated from the data segment (implicitly
+// accessed via the %ds segment register). Dereferencing a pointer to the safe
+// segment would result in a segmentation fault.
+//
+// 2) Protection via software fault isolation: memory writes that are not meant
+// to access the safe stack can be prevented from doing so through runtime
+// instrumentation. One way to do it is to allocate the safe stack(s) in the
+// upper half of the userspace and bitmask the corresponding upper bit of the
+// memory addresses of memory writes that are not meant to access the safe
+// stack.
+//
+// 3) Protection via information hiding on 64 bit architectures: the location
+// of the safe stack(s) can be randomized through secure mechanisms, and the
+// leakage of the stack pointer can be prevented. Currently, libc can leak the
+// stack pointer in several ways (e.g. in longjmp, signal handling, user-level
+// context switching related functions, etc.). These can be fixed in libc and
+// in other low-level libraries, by either eliminating the escaping/dumping of
+// the stack pointer (i.e., %rsp) when that's possible, or by using
+// encryption/PTR_MANGLE (XOR-ing the dumped stack pointer with another secret
+// we control and protect better, as is already done for setjmp in glibc.)
+// Furthermore, a static machine-code-level verifier can be run after code
+// generation to make sure that the stack pointer is never written to memory,
+// or, if it is, that it is written to the safe stack.
+//
+// Finally, while the Unsafe Stack pointer is currently stored in a thread
+// local variable, with libc support it could be stored in the TCB (thread
+// control block) as well, eliminating another level of indirection and making
+// such accesses faster. Alternatively, dedicating a separate register for
+// storing it would also be possible.
+
+/// Minimum stack alignment for the unsafe stack.
+const unsigned kStackAlign = 16;
+
+/// Default size of the unsafe stack. This value is only used if the stack
+/// size rlimit is set to infinity.
+const unsigned kDefaultUnsafeStackSize = 0x2800000;
+
+// Per-thread unsafe stack information. It's not frequently accessed, so it
+// can be kept out of the tcb in normal thread-local variables.
+__thread void *unsafe_stack_start = nullptr;
+__thread size_t unsafe_stack_size = 0;
+__thread size_t unsafe_stack_guard = 0;
+
+inline void *unsafe_stack_alloc(size_t size, size_t guard) {
+ SFS_CHECK(size + guard >= size);
+ void *addr = Mmap(nullptr, size + guard, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANON, -1, 0);
+ SFS_CHECK(MAP_FAILED != addr);
+ Mprotect(addr, guard, PROT_NONE);
+ return (char *)addr + guard;
+}
+
+inline void unsafe_stack_setup(void *start, size_t size, size_t guard) {
+ SFS_CHECK((char *)start + size >= (char *)start);
+ SFS_CHECK((char *)start + guard >= (char *)start);
+ void *stack_ptr = (char *)start + size;
+ SFS_CHECK((((size_t)stack_ptr) & (kStackAlign - 1)) == 0);
+
+ __safestack_unsafe_stack_ptr = stack_ptr;
+ unsafe_stack_start = start;
+ unsafe_stack_size = size;
+ unsafe_stack_guard = guard;
+}
+
+/// Thread data for the cleanup handler
+pthread_key_t thread_cleanup_key;
+
+/// Safe stack per-thread information passed to the thread_start function
+struct tinfo {
+ void *(*start_routine)(void *);
+ void *start_routine_arg;
+
+ void *unsafe_stack_start;
+ size_t unsafe_stack_size;
+ size_t unsafe_stack_guard;
+};
+
+/// Wrap the thread function in order to deallocate the unsafe stack when the
+/// thread terminates by returning from its main function.
+void *thread_start(void *arg) {
+ struct tinfo *tinfo = (struct tinfo *)arg;
+
+ void *(*start_routine)(void *) = tinfo->start_routine;
+ void *start_routine_arg = tinfo->start_routine_arg;
+
+ // Set up the unsafe stack; this will overwrite the tinfo contents
+ unsafe_stack_setup(tinfo->unsafe_stack_start, tinfo->unsafe_stack_size,
+ tinfo->unsafe_stack_guard);
+
+ // Make sure our thread-specific destructor will be called
+ pthread_setspecific(thread_cleanup_key, (void *)1);
+
+ return start_routine(start_routine_arg);
+}
+
+/// Linked list used to store exiting threads' stack/thread information.
+struct thread_stack_ll {
+ struct thread_stack_ll *next;
+ void *stack_base;
+ size_t size;
+ pid_t pid;
+ ThreadId tid;
+};
+
+/// Linked list of unsafe stacks for threads that are exiting. We delay
+/// unmapping them until the thread has actually terminated.
+thread_stack_ll *thread_stacks = nullptr;
+pthread_mutex_t thread_stacks_mutex = PTHREAD_MUTEX_INITIALIZER;
+
+/// Thread-specific data destructor. We want to free the unsafe stack only after
+/// this thread is terminated. libc can call functions in safestack-instrumented
+/// code (like free) after thread-specific data destructors have run.
+void thread_cleanup_handler(void *_iter) {
+ SFS_CHECK(unsafe_stack_start != nullptr);
+ pthread_setspecific(thread_cleanup_key, NULL);
+
+ pthread_mutex_lock(&thread_stacks_mutex);
+ // Temporary list to hold the previous threads' stacks so we don't hold the
+ // thread_stacks_mutex for long.
+ thread_stack_ll *temp_stacks = thread_stacks;
+ thread_stacks = nullptr;
+ pthread_mutex_unlock(&thread_stacks_mutex);
+
+ pid_t pid = getpid();
+ ThreadId tid = GetTid();
+
+ // Free stacks for dead threads
+ thread_stack_ll **stackp = &temp_stacks;
+ while (*stackp) {
+ thread_stack_ll *stack = *stackp;
+ if (stack->pid != pid ||
+ (-1 == TgKill(stack->pid, stack->tid, 0) && errno == ESRCH)) {
+ Munmap(stack->stack_base, stack->size);
+ *stackp = stack->next;
+ free(stack);
+ } else
+ stackp = &stack->next;
+ }
+
+ thread_stack_ll *cur_stack =
+ (thread_stack_ll *)malloc(sizeof(thread_stack_ll));
+ cur_stack->stack_base = (char *)unsafe_stack_start - unsafe_stack_guard;
+ cur_stack->size = unsafe_stack_size + unsafe_stack_guard;
+ cur_stack->pid = pid;
+ cur_stack->tid = tid;
+
+ pthread_mutex_lock(&thread_stacks_mutex);
+ // Merge thread_stacks with the current thread's stack and any remaining
+ // temp_stacks
+ *stackp = thread_stacks;
+ cur_stack->next = temp_stacks;
+ thread_stacks = cur_stack;
+ pthread_mutex_unlock(&thread_stacks_mutex);
+
+ unsafe_stack_start = nullptr;
+}
+
+void EnsureInterceptorsInitialized();
+
+/// Intercept thread creation operation to allocate and setup the unsafe stack
+INTERCEPTOR(int, pthread_create, pthread_t *thread,
+ const pthread_attr_t *attr,
+ void *(*start_routine)(void*), void *arg) {
+ EnsureInterceptorsInitialized();
+ size_t size = 0;
+ size_t guard = 0;
+
+ if (attr) {
+ pthread_attr_getstacksize(attr, &size);
+ pthread_attr_getguardsize(attr, &guard);
+ } else {
+ // get pthread default stack size
+ pthread_attr_t tmpattr;
+ pthread_attr_init(&tmpattr);
+ pthread_attr_getstacksize(&tmpattr, &size);
+ pthread_attr_getguardsize(&tmpattr, &guard);
+ pthread_attr_destroy(&tmpattr);
+ }
+
+ SFS_CHECK(size);
+ size = RoundUpTo(size, kStackAlign);
+
+ void *addr = unsafe_stack_alloc(size, guard);
+ // Put tinfo at the end of the buffer. guard may not be page-aligned;
+ // if so, some bytes immediately after addr may have been mprotected.
+ struct tinfo *tinfo =
+ (struct tinfo *)(((char *)addr) + size - sizeof(struct tinfo));
+ tinfo->start_routine = start_routine;
+ tinfo->start_routine_arg = arg;
+ tinfo->unsafe_stack_start = addr;
+ tinfo->unsafe_stack_size = size;
+ tinfo->unsafe_stack_guard = guard;
+
+ return REAL(pthread_create)(thread, attr, thread_start, tinfo);
+}
+
+pthread_mutex_t interceptor_init_mutex = PTHREAD_MUTEX_INITIALIZER;
+bool interceptors_inited = false;
+
+void EnsureInterceptorsInitialized() {
+ MutexLock lock(interceptor_init_mutex);
+ if (interceptors_inited)
+ return;
+
+ // Initialize pthread interceptors for thread allocation
+ INTERCEPT_FUNCTION(pthread_create);
+
+ interceptors_inited = true;
+}
+
+} // namespace
+
+extern "C" __attribute__((visibility("default")))
+#if !SANITIZER_CAN_USE_PREINIT_ARRAY
+// On ELF platforms, the constructor is invoked using .preinit_array (see below)
+__attribute__((constructor(0)))
+#endif
+void __safestack_init() {
+ // Determine the stack size for the main thread.
+ size_t size = kDefaultUnsafeStackSize;
+ size_t guard = 4096;
+
+ struct rlimit limit;
+ if (getrlimit(RLIMIT_STACK, &limit) == 0 && limit.rlim_cur != RLIM_INFINITY)
+ size = limit.rlim_cur;
+
+ // Allocate unsafe stack for main thread
+ void *addr = unsafe_stack_alloc(size, guard);
+ unsafe_stack_setup(addr, size, guard);
+
+ // Setup the cleanup handler
+ pthread_key_create(&thread_cleanup_key, thread_cleanup_handler);
+}
+
+#if SANITIZER_CAN_USE_PREINIT_ARRAY
+// On ELF platforms, run safestack initialization before any other constructors.
+// On other platforms we use the constructor attribute to arrange to run our
+// initialization early.
+extern "C" {
+__attribute__((section(".preinit_array"),
+ used)) void (*__safestack_preinit)(void) = __safestack_init;
+}
+#endif
+
+extern "C"
+ __attribute__((visibility("default"))) void *__get_unsafe_stack_bottom() {
+ return unsafe_stack_start;
+}
+
+extern "C"
+ __attribute__((visibility("default"))) void *__get_unsafe_stack_top() {
+ return (char*)unsafe_stack_start + unsafe_stack_size;
+}
+
+extern "C"
+ __attribute__((visibility("default"))) void *__get_unsafe_stack_start() {
+ return unsafe_stack_start;
+}
+
+extern "C"
+ __attribute__((visibility("default"))) void *__get_unsafe_stack_ptr() {
+ return __safestack_unsafe_stack_ptr;
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/safestack/safestack.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
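
The unsafe-stack layout used by unsafe_stack_alloc()/unsafe_stack_setup()
above is one mapping of size + guard bytes, with the low guard bytes
mprotect()ed to PROT_NONE so that overrunning the stack (which grows down)
faults. A standalone sketch of the same layout, assuming a POSIX mmap()
and the 16-byte kStackAlign from the file above:

#include <assert.h>
#include <stddef.h>
#include <sys/mman.h>

// Returns the usable stack base; *stack_top is where the stack pointer
// starts. Mirrors the runtime's layout, without the TLS bookkeeping.
static void *alloc_unsafe_stack(size_t size, size_t guard, void **stack_top) {
  void *base = mmap(nullptr, size + guard, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANON, -1, 0);
  if (base == MAP_FAILED)
    return nullptr;
  mprotect(base, guard, PROT_NONE);       // faulting guard below the stack
  char *start = (char *)base + guard;     // usable region begins here
  *stack_top = start + size;              // stacks grow down from the top
  assert(((size_t)*stack_top & 15) == 0); // kStackAlign == 16
  return start;
}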
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/safestack/safestack_platform.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/safestack/safestack_platform.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/safestack/safestack_platform.h (revision 351984)
@@ -0,0 +1,124 @@
+//===-- safestack_platform.h ----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file implements platform specific parts of SafeStack runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SAFESTACK_PLATFORM_H
+#define SAFESTACK_PLATFORM_H
+
+#include "safestack_util.h"
+#include "sanitizer_common/sanitizer_platform.h"
+
+#include <dlfcn.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/mman.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+#if !(SANITIZER_NETBSD || SANITIZER_FREEBSD || SANITIZER_LINUX)
+#error "Support for your platform has not been implemented"
+#endif
+
+#if SANITIZER_NETBSD
+#include <lwp.h>
+
+extern "C" void *__mmap(void *, size_t, int, int, int, int, off_t);
+#endif
+
+#if SANITIZER_FREEBSD
+#include <sys/thr.h>
+#endif
+
+namespace safestack {
+
+#if SANITIZER_NETBSD
+static void *GetRealLibcAddress(const char *symbol) {
+ void *real = dlsym(RTLD_NEXT, symbol);
+ if (!real)
+ real = dlsym(RTLD_DEFAULT, symbol);
+ if (!real) {
+ fprintf(stderr, "safestack GetRealLibcAddress failed for symbol=%s",
+ symbol);
+ abort();
+ }
+ return real;
+}
+
+#define _REAL(func, ...) real##_##func(__VA_ARGS__)
+#define DEFINE__REAL(ret_type, func, ...) \
+ static ret_type (*real_##func)(__VA_ARGS__) = NULL; \
+ if (!real_##func) { \
+ real_##func = (ret_type(*)(__VA_ARGS__))GetRealLibcAddress(#func); \
+ } \
+ SFS_CHECK(real_##func);
+#endif
+
+using ThreadId = uint64_t;
+
+inline ThreadId GetTid() {
+#if SANITIZER_NETBSD
+ DEFINE__REAL(int, _lwp_self);
+ return _REAL(_lwp_self);
+#elif SANITIZER_FREEBSD
+ long Tid;
+ thr_self(&Tid);
+ return Tid;
+#else
+ return syscall(SYS_gettid);
+#endif
+}
+
+inline int TgKill(pid_t pid, ThreadId tid, int sig) {
+#if SANITIZER_NETBSD
+ DEFINE__REAL(int, _lwp_kill, int a, int b);
+ (void)pid;
+ return _REAL(_lwp_kill, tid, sig);
+#elif SANITIZER_FREEBSD
+ return syscall(SYS_thr_kill2, pid, tid, sig);
+#else
+ return syscall(SYS_tgkill, pid, tid, sig);
+#endif
+}
+
+inline void *Mmap(void *addr, size_t length, int prot, int flags, int fd,
+ off_t offset) {
+#if SANITIZER_NETBSD
+ return __mmap(addr, length, prot, flags, fd, 0, offset);
+#elif defined(__x86_64__) && (SANITIZER_FREEBSD)
+ return (void *)__syscall(SYS_mmap, addr, length, prot, flags, fd, offset);
+#else
+ return (void *)syscall(SYS_mmap, addr, length, prot, flags, fd, offset);
+#endif
+}
+
+inline int Munmap(void *addr, size_t length) {
+#if SANITIZER_NETBSD
+ DEFINE__REAL(int, munmap, void *a, size_t b);
+ return _REAL(munmap, addr, length);
+#else
+ return syscall(SYS_munmap, addr, length);
+#endif
+}
+
+inline int Mprotect(void *addr, size_t length, int prot) {
+#if SANITIZER_NETBSD
+ DEFINE__REAL(int, mprotect, void *a, size_t b, int c);
+ return _REAL(mprotect, addr, length, prot);
+#else
+ return syscall(SYS_mprotect, addr, length, prot);
+#endif
+}
+
+} // namespace safestack
+
+#endif // SAFESTACK_PLATFORM_H
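
The TgKill() wrapper above exists mostly so thread_cleanup_handler() in
safestack.cc can probe whether a thread is still alive before unmapping
its stack: signal 0 performs only the existence check. A Linux-only
sketch of that probe (the NetBSD and FreeBSD variants substitute
_lwp_kill and thr_kill2, as shown above):

#include <errno.h>
#include <sys/syscall.h>
#include <unistd.h>

// True once the kernel no longer knows the thread, i.e. its unsafe
// stack can be unmapped safely.
static bool thread_is_dead(pid_t pid, pid_t tid) {
  return syscall(SYS_tgkill, pid, tid, 0) == -1 && errno == ESRCH;
}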
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/safestack/safestack_util.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/safestack/safestack_util.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/safestack/safestack_util.h (revision 351984)
@@ -0,0 +1,49 @@
+//===-- safestack_util.h --------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file contains utility code for SafeStack implementation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SAFESTACK_UTIL_H
+#define SAFESTACK_UTIL_H
+
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+namespace safestack {
+
+#define SFS_CHECK(a) \
+ do { \
+ if (!(a)) { \
+ fprintf(stderr, "safestack CHECK failed: %s:%d %s\n", __FILE__, \
+ __LINE__, #a); \
+ abort(); \
+ } \
+ } while (false)
+
+inline size_t RoundUpTo(size_t size, size_t boundary) {
+ SFS_CHECK((boundary & (boundary - 1)) == 0);
+ return (size + boundary - 1) & ~(boundary - 1);
+}
+
+class MutexLock {
+ public:
+ explicit MutexLock(pthread_mutex_t &mutex) : mutex_(&mutex) {
+ pthread_mutex_lock(mutex_);
+ }
+ ~MutexLock() { pthread_mutex_unlock(mutex_); }
+
+ private:
+ pthread_mutex_t *mutex_ = nullptr;
+};
+
+} // namespace safestack
+
+#endif // SAFESTACK_UTIL_H
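
RoundUpTo() relies on boundary being a power of two: adding boundary - 1
and then clearing the low bits lands on the next multiple. A few worked
values, checked at compile time:

#include <cstddef>

constexpr std::size_t round_up(std::size_t size, std::size_t boundary) {
  return (size + boundary - 1) & ~(boundary - 1);
}
static_assert(round_up(17, 16) == 32, "crosses into the next 16-byte slot");
static_assert(round_up(32, 16) == 32, "aligned sizes are unchanged");
static_assert(round_up(1, 4096) == 4096, "one byte rounds up to a full page");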
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_allocator.cpp
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_allocator.cpp (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_allocator.cpp (revision 351984)
@@ -0,0 +1,820 @@
+//===-- scudo_allocator.cpp -------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Scudo Hardened Allocator implementation.
+/// It uses the sanitizer_common allocator as a base and aims at mitigating
+/// heap corruption vulnerabilities. It provides a checksum-guarded chunk
+/// header, a delayed free list, and additional sanity checks.
+///
+//===----------------------------------------------------------------------===//
+
+#include "scudo_allocator.h"
+#include "scudo_crc32.h"
+#include "scudo_errors.h"
+#include "scudo_flags.h"
+#include "scudo_interface_internal.h"
+#include "scudo_tsd.h"
+#include "scudo_utils.h"
+
+#include "sanitizer_common/sanitizer_allocator_checks.h"
+#include "sanitizer_common/sanitizer_allocator_interface.h"
+#include "sanitizer_common/sanitizer_quarantine.h"
+
+#ifdef GWP_ASAN_HOOKS
+# include "gwp_asan/guarded_pool_allocator.h"
+# include "gwp_asan/optional/backtrace.h"
+# include "gwp_asan/optional/options_parser.h"
+#endif // GWP_ASAN_HOOKS
+
+#include <errno.h>
+#include <string.h>
+
+namespace __scudo {
+
+// Global static cookie, initialized at start-up.
+static u32 Cookie;
+
+// We default to software CRC32 if the alternatives are not supported, either
+// at compilation or at runtime.
+static atomic_uint8_t HashAlgorithm = { CRC32Software };
+
+INLINE u32 computeCRC32(u32 Crc, uptr Value, uptr *Array, uptr ArraySize) {
+ // If the hardware CRC32 feature is defined here, it was enabled everywhere,
+ // as opposed to only for scudo_crc32.cpp. This means that other hardware
+ // specific instructions were likely emitted elsewhere as well, so there
+ // is no reason not to use it here.
+#if defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
+ Crc = CRC32_INTRINSIC(Crc, Value);
+ for (uptr i = 0; i < ArraySize; i++)
+ Crc = CRC32_INTRINSIC(Crc, Array[i]);
+ return Crc;
+#else
+ if (atomic_load_relaxed(&HashAlgorithm) == CRC32Hardware) {
+ Crc = computeHardwareCRC32(Crc, Value);
+ for (uptr i = 0; i < ArraySize; i++)
+ Crc = computeHardwareCRC32(Crc, Array[i]);
+ return Crc;
+ }
+ Crc = computeSoftwareCRC32(Crc, Value);
+ for (uptr i = 0; i < ArraySize; i++)
+ Crc = computeSoftwareCRC32(Crc, Array[i]);
+ return Crc;
+#endif // defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
+}
+
+static BackendT &getBackend();
+
+namespace Chunk {
+ static INLINE AtomicPackedHeader *getAtomicHeader(void *Ptr) {
+ return reinterpret_cast<AtomicPackedHeader *>(reinterpret_cast<uptr>(Ptr) -
+ getHeaderSize());
+ }
+ static INLINE
+ const AtomicPackedHeader *getConstAtomicHeader(const void *Ptr) {
+ return reinterpret_cast<const AtomicPackedHeader *>(
+ reinterpret_cast<uptr>(Ptr) - getHeaderSize());
+ }
+
+ static INLINE bool isAligned(const void *Ptr) {
+ return IsAligned(reinterpret_cast<uptr>(Ptr), MinAlignment);
+ }
+
+ // We can't use the offset member of the chunk itself, as we would double
+ // fetch it without any guarantee that it hadn't been tampered with in the
+ // meantime. To prevent this, we work with a local copy of the header.
+ static INLINE void *getBackendPtr(const void *Ptr, UnpackedHeader *Header) {
+ return reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) -
+ getHeaderSize() - (Header->Offset << MinAlignmentLog));
+ }
+
+ // Returns the usable size for a chunk, meaning the number of bytes from the
+ // beginning of the user data to the end of the backend-allocated chunk.
+ static INLINE uptr getUsableSize(const void *Ptr, UnpackedHeader *Header) {
+ const uptr ClassId = Header->ClassId;
+ if (ClassId)
+ return PrimaryT::ClassIdToSize(ClassId) - getHeaderSize() -
+ (Header->Offset << MinAlignmentLog);
+ return SecondaryT::GetActuallyAllocatedSize(
+ getBackendPtr(Ptr, Header)) - getHeaderSize();
+ }
+
+ // Returns the size the user requested when allocating the chunk.
+ static INLINE uptr getSize(const void *Ptr, UnpackedHeader *Header) {
+ const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes;
+ if (Header->ClassId)
+ return SizeOrUnusedBytes;
+ return SecondaryT::GetActuallyAllocatedSize(
+ getBackendPtr(Ptr, Header)) - getHeaderSize() - SizeOrUnusedBytes;
+ }
+
+ // Compute the checksum of the chunk pointer and its header.
+ static INLINE u16 computeChecksum(const void *Ptr, UnpackedHeader *Header) {
+ UnpackedHeader ZeroChecksumHeader = *Header;
+ ZeroChecksumHeader.Checksum = 0;
+ uptr HeaderHolder[sizeof(UnpackedHeader) / sizeof(uptr)];
+ memcpy(&HeaderHolder, &ZeroChecksumHeader, sizeof(HeaderHolder));
+ const u32 Crc = computeCRC32(Cookie, reinterpret_cast<uptr>(Ptr),
+ HeaderHolder, ARRAY_SIZE(HeaderHolder));
+ return static_cast<u16>(Crc);
+ }
+
+ // Checks the validity of a chunk by verifying its checksum. It does not
+ // terminate the process in the event of an invalid chunk.
+ static INLINE bool isValid(const void *Ptr) {
+ PackedHeader NewPackedHeader =
+ atomic_load_relaxed(getConstAtomicHeader(Ptr));
+ UnpackedHeader NewUnpackedHeader =
+ bit_cast<UnpackedHeader>(NewPackedHeader);
+ return (NewUnpackedHeader.Checksum ==
+ computeChecksum(Ptr, &NewUnpackedHeader));
+ }
+
+ // Ensure that ChunkAvailable is 0, so that if a 0 checksum ever turns out
+ // to be valid for a fully zeroed-out header, its state reads as available.
+ COMPILER_CHECK(ChunkAvailable == 0);
+
+ // Loads and unpacks the header, verifying the checksum in the process.
+ static INLINE
+ void loadHeader(const void *Ptr, UnpackedHeader *NewUnpackedHeader) {
+ PackedHeader NewPackedHeader =
+ atomic_load_relaxed(getConstAtomicHeader(Ptr));
+ *NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
+ if (UNLIKELY(NewUnpackedHeader->Checksum !=
+ computeChecksum(Ptr, NewUnpackedHeader)))
+ dieWithMessage("corrupted chunk header at address %p\n", Ptr);
+ }
+
+ // Packs and stores the header, computing the checksum in the process.
+ static INLINE void storeHeader(void *Ptr, UnpackedHeader *NewUnpackedHeader) {
+ NewUnpackedHeader->Checksum = computeChecksum(Ptr, NewUnpackedHeader);
+ PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
+ atomic_store_relaxed(getAtomicHeader(Ptr), NewPackedHeader);
+ }
+
+ // Packs and stores the header, computing the checksum in the process. We
+ // compare the current header with the expected provided one to ensure that
+ // we are not being raced by a corruption occurring in another thread.
+ static INLINE void compareExchangeHeader(void *Ptr,
+ UnpackedHeader *NewUnpackedHeader,
+ UnpackedHeader *OldUnpackedHeader) {
+ NewUnpackedHeader->Checksum = computeChecksum(Ptr, NewUnpackedHeader);
+ PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
+ PackedHeader OldPackedHeader = bit_cast<PackedHeader>(*OldUnpackedHeader);
+ if (UNLIKELY(!atomic_compare_exchange_strong(
+ getAtomicHeader(Ptr), &OldPackedHeader, NewPackedHeader,
+ memory_order_relaxed)))
+ dieWithMessage("race on chunk header at address %p\n", Ptr);
+ }
+} // namespace Chunk
+
+struct QuarantineCallback {
+ explicit QuarantineCallback(AllocatorCacheT *Cache)
+ : Cache_(Cache) {}
+
+ // Chunk recycling function, returns a quarantined chunk to the backend,
+ // first making sure it hasn't been tampered with.
+ void Recycle(void *Ptr) {
+ UnpackedHeader Header;
+ Chunk::loadHeader(Ptr, &Header);
+ if (UNLIKELY(Header.State != ChunkQuarantine))
+ dieWithMessage("invalid chunk state when recycling address %p\n", Ptr);
+ UnpackedHeader NewHeader = Header;
+ NewHeader.State = ChunkAvailable;
+ Chunk::compareExchangeHeader(Ptr, &NewHeader, &Header);
+ void *BackendPtr = Chunk::getBackendPtr(Ptr, &Header);
+ if (Header.ClassId)
+ getBackend().deallocatePrimary(Cache_, BackendPtr, Header.ClassId);
+ else
+ getBackend().deallocateSecondary(BackendPtr);
+ }
+
+ // Internal quarantine allocation and deallocation functions. We first check
+ // that the batches are indeed serviced by the Primary.
+ // TODO(kostyak): figure out the best way to protect the batches.
+ void *Allocate(uptr Size) {
+ const uptr BatchClassId = SizeClassMap::ClassID(sizeof(QuarantineBatch));
+ return getBackend().allocatePrimary(Cache_, BatchClassId);
+ }
+
+ void Deallocate(void *Ptr) {
+ const uptr BatchClassId = SizeClassMap::ClassID(sizeof(QuarantineBatch));
+ getBackend().deallocatePrimary(Cache_, Ptr, BatchClassId);
+ }
+
+ AllocatorCacheT *Cache_;
+ COMPILER_CHECK(sizeof(QuarantineBatch) < SizeClassMap::kMaxSize);
+};
+
+typedef Quarantine<QuarantineCallback, void> QuarantineT;
+typedef QuarantineT::Cache QuarantineCacheT;
+COMPILER_CHECK(sizeof(QuarantineCacheT) <=
+ sizeof(ScudoTSD::QuarantineCachePlaceHolder));
+
+QuarantineCacheT *getQuarantineCache(ScudoTSD *TSD) {
+ return reinterpret_cast<QuarantineCacheT *>(TSD->QuarantineCachePlaceHolder);
+}
+
+#ifdef GWP_ASAN_HOOKS
+static gwp_asan::GuardedPoolAllocator GuardedAlloc;
+#endif // GWP_ASAN_HOOKS
+
+struct Allocator {
+ static const uptr MaxAllowedMallocSize =
+ FIRST_32_SECOND_64(2UL << 30, 1ULL << 40);
+
+ BackendT Backend;
+ QuarantineT Quarantine;
+
+ u32 QuarantineChunksUpToSize;
+
+ bool DeallocationTypeMismatch;
+ bool ZeroContents;
+ bool DeleteSizeMismatch;
+
+ bool CheckRssLimit;
+ uptr HardRssLimitMb;
+ uptr SoftRssLimitMb;
+ atomic_uint8_t RssLimitExceeded;
+ atomic_uint64_t RssLastCheckedAtNS;
+
+ explicit Allocator(LinkerInitialized)
+ : Quarantine(LINKER_INITIALIZED) {}
+
+ NOINLINE void performSanityChecks();
+
+ void init() {
+ SanitizerToolName = "Scudo";
+ PrimaryAllocatorName = "ScudoPrimary";
+ SecondaryAllocatorName = "ScudoSecondary";
+
+ initFlags();
+
+ performSanityChecks();
+
+ // Check if hardware CRC32 is supported in the binary and by the platform,
+ // if so, opt for the CRC32 hardware version of the checksum.
+ if (&computeHardwareCRC32 && hasHardwareCRC32())
+ atomic_store_relaxed(&HashAlgorithm, CRC32Hardware);
+
+ SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
+ Backend.init(common_flags()->allocator_release_to_os_interval_ms);
+ HardRssLimitMb = common_flags()->hard_rss_limit_mb;
+ SoftRssLimitMb = common_flags()->soft_rss_limit_mb;
+ Quarantine.Init(
+ static_cast<uptr>(getFlags()->QuarantineSizeKb) << 10,
+ static_cast<uptr>(getFlags()->ThreadLocalQuarantineSizeKb) << 10);
+ QuarantineChunksUpToSize = (Quarantine.GetCacheSize() == 0) ? 0 :
+ getFlags()->QuarantineChunksUpToSize;
+ DeallocationTypeMismatch = getFlags()->DeallocationTypeMismatch;
+ DeleteSizeMismatch = getFlags()->DeleteSizeMismatch;
+ ZeroContents = getFlags()->ZeroContents;
+
+ if (UNLIKELY(!GetRandom(reinterpret_cast<void *>(&Cookie), sizeof(Cookie),
+ /*blocking=*/false))) {
+ Cookie = static_cast<u32>((NanoTime() >> 12) ^
+ (reinterpret_cast<uptr>(this) >> 4));
+ }
+
+ CheckRssLimit = HardRssLimitMb || SoftRssLimitMb;
+ if (CheckRssLimit)
+ atomic_store_relaxed(&RssLastCheckedAtNS, MonotonicNanoTime());
+ }
+
+ // Helper function that checks for a valid Scudo chunk. nullptr isn't.
+ bool isValidPointer(const void *Ptr) {
+ initThreadMaybe();
+ if (UNLIKELY(!Ptr))
+ return false;
+ if (!Chunk::isAligned(Ptr))
+ return false;
+ return Chunk::isValid(Ptr);
+ }
+
+ NOINLINE bool isRssLimitExceeded();
+
+ // Allocates a chunk.
+ void *allocate(uptr Size, uptr Alignment, AllocType Type,
+ bool ForceZeroContents = false) {
+ initThreadMaybe();
+
+#ifdef GWP_ASAN_HOOKS
+ if (UNLIKELY(GuardedAlloc.shouldSample())) {
+ if (void *Ptr = GuardedAlloc.allocate(Size))
+ return Ptr;
+ }
+#endif // GWP_ASAN_HOOKS
+
+ if (UNLIKELY(Alignment > MaxAlignment)) {
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ reportAllocationAlignmentTooBig(Alignment, MaxAlignment);
+ }
+ if (UNLIKELY(Alignment < MinAlignment))
+ Alignment = MinAlignment;
+
+ const uptr NeededSize = RoundUpTo(Size ? Size : 1, MinAlignment) +
+ Chunk::getHeaderSize();
+ const uptr AlignedSize = (Alignment > MinAlignment) ?
+ NeededSize + (Alignment - Chunk::getHeaderSize()) : NeededSize;
+ if (UNLIKELY(Size >= MaxAllowedMallocSize) ||
+ UNLIKELY(AlignedSize >= MaxAllowedMallocSize)) {
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ reportAllocationSizeTooBig(Size, AlignedSize, MaxAllowedMallocSize);
+ }
+
+ if (CheckRssLimit && UNLIKELY(isRssLimitExceeded())) {
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ reportRssLimitExceeded();
+ }
+
+ // Primary and Secondary backed allocations have a different treatment. We
+ // deal with alignment requirements of Primary serviced allocations here,
+ // but the Secondary will take care of its own alignment needs.
+ void *BackendPtr;
+ uptr BackendSize;
+ u8 ClassId;
+ if (PrimaryT::CanAllocate(AlignedSize, MinAlignment)) {
+ BackendSize = AlignedSize;
+ ClassId = SizeClassMap::ClassID(BackendSize);
+ bool UnlockRequired;
+ ScudoTSD *TSD = getTSDAndLock(&UnlockRequired);
+ BackendPtr = Backend.allocatePrimary(&TSD->Cache, ClassId);
+ if (UnlockRequired)
+ TSD->unlock();
+ } else {
+ BackendSize = NeededSize;
+ ClassId = 0;
+ BackendPtr = Backend.allocateSecondary(BackendSize, Alignment);
+ }
+ if (UNLIKELY(!BackendPtr)) {
+ SetAllocatorOutOfMemory();
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ reportOutOfMemory(Size);
+ }
+
+ // If requested, we will zero out the entire contents of the returned chunk.
+ if ((ForceZeroContents || ZeroContents) && ClassId)
+ memset(BackendPtr, 0, PrimaryT::ClassIdToSize(ClassId));
+
+ UnpackedHeader Header = {};
+ uptr UserPtr = reinterpret_cast<uptr>(BackendPtr) + Chunk::getHeaderSize();
+ if (UNLIKELY(!IsAligned(UserPtr, Alignment))) {
+ // Since the Secondary takes care of alignment, a non-aligned pointer
+ // means it is from the Primary. It is also the only case where the offset
+ // field of the header would be non-zero.
+ DCHECK(ClassId);
+ const uptr AlignedUserPtr = RoundUpTo(UserPtr, Alignment);
+ Header.Offset = (AlignedUserPtr - UserPtr) >> MinAlignmentLog;
+ UserPtr = AlignedUserPtr;
+ }
+ DCHECK_LE(UserPtr + Size, reinterpret_cast<uptr>(BackendPtr) + BackendSize);
+ Header.State = ChunkAllocated;
+ Header.AllocType = Type;
+ if (ClassId) {
+ Header.ClassId = ClassId;
+ Header.SizeOrUnusedBytes = Size;
+ } else {
+ // The secondary fits the allocations to a page, so the amount of unused
+ // bytes is the difference between the end of the user allocation and the
+ // next page boundary.
+ const uptr PageSize = GetPageSizeCached();
+ const uptr TrailingBytes = (UserPtr + Size) & (PageSize - 1);
+ if (TrailingBytes)
+ Header.SizeOrUnusedBytes = PageSize - TrailingBytes;
+ }
+ void *Ptr = reinterpret_cast<void *>(UserPtr);
+ Chunk::storeHeader(Ptr, &Header);
+ if (SCUDO_CAN_USE_HOOKS && &__sanitizer_malloc_hook)
+ __sanitizer_malloc_hook(Ptr, Size);
+ return Ptr;
+ }
+
+ // Place a chunk in the quarantine or directly deallocate it in the event of
+ // a zero-sized quarantine, or if the size of the chunk is greater than the
+ // quarantine chunk size threshold.
+ void quarantineOrDeallocateChunk(void *Ptr, UnpackedHeader *Header,
+ uptr Size) {
+ const bool BypassQuarantine = !Size || (Size > QuarantineChunksUpToSize);
+ if (BypassQuarantine) {
+ UnpackedHeader NewHeader = *Header;
+ NewHeader.State = ChunkAvailable;
+ Chunk::compareExchangeHeader(Ptr, &NewHeader, Header);
+ void *BackendPtr = Chunk::getBackendPtr(Ptr, Header);
+ if (Header->ClassId) {
+ bool UnlockRequired;
+ ScudoTSD *TSD = getTSDAndLock(&UnlockRequired);
+ getBackend().deallocatePrimary(&TSD->Cache, BackendPtr,
+ Header->ClassId);
+ if (UnlockRequired)
+ TSD->unlock();
+ } else {
+ getBackend().deallocateSecondary(BackendPtr);
+ }
+ } else {
+ // If a small amount of memory was allocated with a larger alignment, we
+ // want to take that into account. Otherwise the Quarantine would be filled
+ // with tiny chunks while holding on to a lot of VA memory. This is an
+ // approximation of the usable size that allows us to avoid calling
+ // GetActuallyAllocatedSize.
+ const uptr EstimatedSize = Size + (Header->Offset << MinAlignmentLog);
+ UnpackedHeader NewHeader = *Header;
+ NewHeader.State = ChunkQuarantine;
+ Chunk::compareExchangeHeader(Ptr, &NewHeader, Header);
+ bool UnlockRequired;
+ ScudoTSD *TSD = getTSDAndLock(&UnlockRequired);
+ Quarantine.Put(getQuarantineCache(TSD), QuarantineCallback(&TSD->Cache),
+ Ptr, EstimatedSize);
+ if (UnlockRequired)
+ TSD->unlock();
+ }
+ }
+
+ // Deallocates a Chunk, which means either adding it to the quarantine or
+ // directly returning it to the backend if criteria are met.
+ void deallocate(void *Ptr, uptr DeleteSize, uptr DeleteAlignment,
+ AllocType Type) {
+ // For a deallocation, we only ensure minimal initialization, meaning thread
+ // local data will be left uninitialized for now (when using ELF TLS). The
+ // fallback cache will be used instead. This is a workaround for a situation
+ // where the only heap operation performed in a thread would be a free past
+ // the TLS destructors, ending up in initialized thread specific data never
+ // being destroyed properly. Any other heap operation will do a full init.
+ initThreadMaybe(/*MinimalInit=*/true);
+ if (SCUDO_CAN_USE_HOOKS && &__sanitizer_free_hook)
+ __sanitizer_free_hook(Ptr);
+ if (UNLIKELY(!Ptr))
+ return;
+
+#ifdef GWP_ASAN_HOOKS
+ if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr))) {
+ GuardedAlloc.deallocate(Ptr);
+ return;
+ }
+#endif // GWP_ASAN_HOOKS
+
+ if (UNLIKELY(!Chunk::isAligned(Ptr)))
+ dieWithMessage("misaligned pointer when deallocating address %p\n", Ptr);
+ UnpackedHeader Header;
+ Chunk::loadHeader(Ptr, &Header);
+ if (UNLIKELY(Header.State != ChunkAllocated))
+ dieWithMessage("invalid chunk state when deallocating address %p\n", Ptr);
+ if (DeallocationTypeMismatch) {
+ // The deallocation type has to match the allocation one.
+ if (Header.AllocType != Type) {
+ // With the exception of memalign'd Chunks, which can still be free'd.
+ if (Header.AllocType != FromMemalign || Type != FromMalloc)
+ dieWithMessage("allocation type mismatch when deallocating address "
+ "%p\n", Ptr);
+ }
+ }
+ const uptr Size = Chunk::getSize(Ptr, &Header);
+ if (DeleteSizeMismatch) {
+ if (DeleteSize && DeleteSize != Size)
+ dieWithMessage("invalid sized delete when deallocating address %p\n",
+ Ptr);
+ }
+ (void)DeleteAlignment; // TODO(kostyak): verify that the alignment matches.
+ quarantineOrDeallocateChunk(Ptr, &Header, Size);
+ }
+
+ // Reallocates a chunk. We can save on a new allocation if the new requested
+ // size still fits in the chunk.
+ void *reallocate(void *OldPtr, uptr NewSize) {
+ initThreadMaybe();
+
+#ifdef GWP_ASAN_HOOKS
+ if (UNLIKELY(GuardedAlloc.pointerIsMine(OldPtr))) {
+ size_t OldSize = GuardedAlloc.getSize(OldPtr);
+ void *NewPtr = allocate(NewSize, MinAlignment, FromMalloc);
+ if (NewPtr)
+ memcpy(NewPtr, OldPtr, (NewSize < OldSize) ? NewSize : OldSize);
+ GuardedAlloc.deallocate(OldPtr);
+ return NewPtr;
+ }
+#endif // GWP_ASAN_HOOKS
+
+ if (UNLIKELY(!Chunk::isAligned(OldPtr)))
+ dieWithMessage("misaligned address when reallocating address %p\n",
+ OldPtr);
+ UnpackedHeader OldHeader;
+ Chunk::loadHeader(OldPtr, &OldHeader);
+ if (UNLIKELY(OldHeader.State != ChunkAllocated))
+ dieWithMessage("invalid chunk state when reallocating address %p\n",
+ OldPtr);
+ if (DeallocationTypeMismatch) {
+ if (UNLIKELY(OldHeader.AllocType != FromMalloc))
+ dieWithMessage("allocation type mismatch when reallocating address "
+ "%p\n", OldPtr);
+ }
+ const uptr UsableSize = Chunk::getUsableSize(OldPtr, &OldHeader);
+ // The new size still fits in the current chunk, and the size difference
+ // is reasonable.
+ if (NewSize <= UsableSize &&
+ (UsableSize - NewSize) < (SizeClassMap::kMaxSize / 2)) {
+ UnpackedHeader NewHeader = OldHeader;
+ NewHeader.SizeOrUnusedBytes =
+ OldHeader.ClassId ? NewSize : UsableSize - NewSize;
+ Chunk::compareExchangeHeader(OldPtr, &NewHeader, &OldHeader);
+ return OldPtr;
+ }
+ // Otherwise, we have to allocate a new chunk and copy the contents of the
+ // old one.
+ void *NewPtr = allocate(NewSize, MinAlignment, FromMalloc);
+ if (NewPtr) {
+ const uptr OldSize = OldHeader.ClassId ? OldHeader.SizeOrUnusedBytes :
+ UsableSize - OldHeader.SizeOrUnusedBytes;
+ memcpy(NewPtr, OldPtr, Min(NewSize, UsableSize));
+ quarantineOrDeallocateChunk(OldPtr, &OldHeader, OldSize);
+ }
+ return NewPtr;
+ }
+
+ // Helper function that returns the actual usable size of a chunk.
+ uptr getUsableSize(const void *Ptr) {
+ initThreadMaybe();
+ if (UNLIKELY(!Ptr))
+ return 0;
+
+#ifdef GWP_ASAN_HOOKS
+ if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr)))
+ return GuardedAlloc.getSize(Ptr);
+#endif // GWP_ASAN_HOOKS
+
+ UnpackedHeader Header;
+ Chunk::loadHeader(Ptr, &Header);
+ // Getting the usable size of a chunk only makes sense if it's allocated.
+ if (UNLIKELY(Header.State != ChunkAllocated))
+ dieWithMessage("invalid chunk state when sizing address %p\n", Ptr);
+ return Chunk::getUsableSize(Ptr, &Header);
+ }
+
+ void *calloc(uptr NMemB, uptr Size) {
+ initThreadMaybe();
+ if (UNLIKELY(CheckForCallocOverflow(NMemB, Size))) {
+ if (AllocatorMayReturnNull())
+ return nullptr;
+ reportCallocOverflow(NMemB, Size);
+ }
+ return allocate(NMemB * Size, MinAlignment, FromMalloc, true);
+ }
+
+ void commitBack(ScudoTSD *TSD) {
+ Quarantine.Drain(getQuarantineCache(TSD), QuarantineCallback(&TSD->Cache));
+ Backend.destroyCache(&TSD->Cache);
+ }
+
+ uptr getStats(AllocatorStat StatType) {
+ initThreadMaybe();
+ uptr stats[AllocatorStatCount];
+ Backend.getStats(stats);
+ return stats[StatType];
+ }
+
+ bool canReturnNull() {
+ initThreadMaybe();
+ return AllocatorMayReturnNull();
+ }
+
+ void setRssLimit(uptr LimitMb, bool HardLimit) {
+ if (HardLimit)
+ HardRssLimitMb = LimitMb;
+ else
+ SoftRssLimitMb = LimitMb;
+ CheckRssLimit = HardRssLimitMb || SoftRssLimitMb;
+ }
+
+ void printStats() {
+ initThreadMaybe();
+ Backend.printStats();
+ }
+};
+
+NOINLINE void Allocator::performSanityChecks() {
+ // Verify that the header offset field can hold the maximum offset. In the
+ // case of the Secondary allocator, it takes care of alignment and the
+ // offset will always be 0. In the case of the Primary, the worst case
+ // scenario happens in the last size class, when the backend allocation
+ // would already be aligned on the requested alignment, which would happen
+ // to be the maximum alignment that would fit in that size class. As a
+ // result, the maximum offset will be at most the maximum alignment for the
+ // last size class minus the header size, in multiples of MinAlignment.
+ UnpackedHeader Header = {};
+ const uptr MaxPrimaryAlignment =
+ 1 << MostSignificantSetBitIndex(SizeClassMap::kMaxSize - MinAlignment);
+ const uptr MaxOffset =
+ (MaxPrimaryAlignment - Chunk::getHeaderSize()) >> MinAlignmentLog;
+ Header.Offset = MaxOffset;
+ if (Header.Offset != MaxOffset)
+ dieWithMessage("maximum possible offset doesn't fit in header\n");
+ // Verify that we can fit the maximum size or amount of unused bytes in the
+ // header. Given that the Secondary fits the allocation to a page, the worst
+ // case scenario happens in the Primary. It will depend on the second to
+ // last and last class sizes, as well as the dynamic base for the Primary.
+ // The following is an over-approximation that works for our needs.
+ const uptr MaxSizeOrUnusedBytes = SizeClassMap::kMaxSize - 1;
+ Header.SizeOrUnusedBytes = MaxSizeOrUnusedBytes;
+ if (Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes)
+ dieWithMessage("maximum possible unused bytes doesn't fit in header\n");
+
+ const uptr LargestClassId = SizeClassMap::kLargestClassID;
+ Header.ClassId = LargestClassId;
+ if (Header.ClassId != LargestClassId)
+ dieWithMessage("largest class ID doesn't fit in header\n");
+}
+
+// Opportunistic RSS limit check. This updates the RSS limit status at most
+// once every 250ms; otherwise it just returns the cached status.
+NOINLINE bool Allocator::isRssLimitExceeded() {
+ u64 LastCheck = atomic_load_relaxed(&RssLastCheckedAtNS);
+ const u64 CurrentCheck = MonotonicNanoTime();
+ if (LIKELY(CurrentCheck < LastCheck + (250ULL * 1000000ULL)))
+ return atomic_load_relaxed(&RssLimitExceeded);
+ if (!atomic_compare_exchange_weak(&RssLastCheckedAtNS, &LastCheck,
+ CurrentCheck, memory_order_relaxed))
+ return atomic_load_relaxed(&RssLimitExceeded);
+ // TODO(kostyak): We currently use sanitizer_common's GetRSS which reads the
+ // RSS from /proc/self/statm by default. We might want to
+ // call getrusage directly, even if it's less accurate.
+ const uptr CurrentRssMb = GetRSS() >> 20;
+ if (HardRssLimitMb && UNLIKELY(HardRssLimitMb < CurrentRssMb))
+ dieWithMessage("hard RSS limit exhausted (%zdMb vs %zdMb)\n",
+ HardRssLimitMb, CurrentRssMb);
+ if (SoftRssLimitMb) {
+ if (atomic_load_relaxed(&RssLimitExceeded)) {
+ if (CurrentRssMb <= SoftRssLimitMb)
+ atomic_store_relaxed(&RssLimitExceeded, false);
+ } else {
+ if (CurrentRssMb > SoftRssLimitMb) {
+ atomic_store_relaxed(&RssLimitExceeded, true);
+ Printf("Scudo INFO: soft RSS limit exhausted (%zdMb vs %zdMb)\n",
+ SoftRssLimitMb, CurrentRssMb);
+ }
+ }
+ }
+ return atomic_load_relaxed(&RssLimitExceeded);
+}
+
+static Allocator Instance(LINKER_INITIALIZED);
+
+static BackendT &getBackend() {
+ return Instance.Backend;
+}
+
+void initScudo() {
+ Instance.init();
+#ifdef GWP_ASAN_HOOKS
+ gwp_asan::options::initOptions();
+ gwp_asan::options::Options &Opts = gwp_asan::options::getOptions();
+ Opts.Backtrace = gwp_asan::options::getBacktraceFunction();
+ Opts.PrintBacktrace = gwp_asan::options::getPrintBacktraceFunction();
+ GuardedAlloc.init(Opts);
+#endif // GWP_ASAN_HOOKS
+}
+
+void ScudoTSD::init() {
+ getBackend().initCache(&Cache);
+ memset(QuarantineCachePlaceHolder, 0, sizeof(QuarantineCachePlaceHolder));
+}
+
+void ScudoTSD::commitBack() {
+ Instance.commitBack(this);
+}
+
+void *scudoAllocate(uptr Size, uptr Alignment, AllocType Type) {
+ if (Alignment && UNLIKELY(!IsPowerOfTwo(Alignment))) {
+ errno = EINVAL;
+ if (Instance.canReturnNull())
+ return nullptr;
+ reportAllocationAlignmentNotPowerOfTwo(Alignment);
+ }
+ return SetErrnoOnNull(Instance.allocate(Size, Alignment, Type));
+}
+
+void scudoDeallocate(void *Ptr, uptr Size, uptr Alignment, AllocType Type) {
+ Instance.deallocate(Ptr, Size, Alignment, Type);
+}
+
+void *scudoRealloc(void *Ptr, uptr Size) {
+ if (!Ptr)
+ return SetErrnoOnNull(Instance.allocate(Size, MinAlignment, FromMalloc));
+ if (Size == 0) {
+ Instance.deallocate(Ptr, 0, 0, FromMalloc);
+ return nullptr;
+ }
+ return SetErrnoOnNull(Instance.reallocate(Ptr, Size));
+}
+
+void *scudoCalloc(uptr NMemB, uptr Size) {
+ return SetErrnoOnNull(Instance.calloc(NMemB, Size));
+}
+
+void *scudoValloc(uptr Size) {
+ return SetErrnoOnNull(
+ Instance.allocate(Size, GetPageSizeCached(), FromMemalign));
+}
+
+void *scudoPvalloc(uptr Size) {
+ const uptr PageSize = GetPageSizeCached();
+ if (UNLIKELY(CheckForPvallocOverflow(Size, PageSize))) {
+ errno = ENOMEM;
+ if (Instance.canReturnNull())
+ return nullptr;
+ reportPvallocOverflow(Size);
+ }
+ // pvalloc(0) should allocate one page.
+ Size = Size ? RoundUpTo(Size, PageSize) : PageSize;
+ return SetErrnoOnNull(Instance.allocate(Size, PageSize, FromMemalign));
+}
+
+int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size) {
+ if (UNLIKELY(!CheckPosixMemalignAlignment(Alignment))) {
+ if (!Instance.canReturnNull())
+ reportInvalidPosixMemalignAlignment(Alignment);
+ return EINVAL;
+ }
+ void *Ptr = Instance.allocate(Size, Alignment, FromMemalign);
+ if (UNLIKELY(!Ptr))
+ return ENOMEM;
+ *MemPtr = Ptr;
+ return 0;
+}
+
+void *scudoAlignedAlloc(uptr Alignment, uptr Size) {
+ if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(Alignment, Size))) {
+ errno = EINVAL;
+ if (Instance.canReturnNull())
+ return nullptr;
+ reportInvalidAlignedAllocAlignment(Size, Alignment);
+ }
+ return SetErrnoOnNull(Instance.allocate(Size, Alignment, FromMalloc));
+}
+
+uptr scudoMallocUsableSize(void *Ptr) {
+ return Instance.getUsableSize(Ptr);
+}
+
+} // namespace __scudo
+
+using namespace __scudo;
+
+// MallocExtension helper functions
+
+uptr __sanitizer_get_current_allocated_bytes() {
+ return Instance.getStats(AllocatorStatAllocated);
+}
+
+uptr __sanitizer_get_heap_size() {
+ return Instance.getStats(AllocatorStatMapped);
+}
+
+uptr __sanitizer_get_free_bytes() {
+ return 1;
+}
+
+uptr __sanitizer_get_unmapped_bytes() {
+ return 1;
+}
+
+uptr __sanitizer_get_estimated_allocated_size(uptr Size) {
+ return Size;
+}
+
+int __sanitizer_get_ownership(const void *Ptr) {
+ return Instance.isValidPointer(Ptr);
+}
+
+uptr __sanitizer_get_allocated_size(const void *Ptr) {
+ return Instance.getUsableSize(Ptr);
+}
+
+#if !SANITIZER_SUPPORTS_WEAK_HOOKS
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_malloc_hook,
+ void *Ptr, uptr Size) {
+ (void)Ptr;
+ (void)Size;
+}
+
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_free_hook, void *Ptr) {
+ (void)Ptr;
+}
+#endif
+
+// Interface functions
+
+void __scudo_set_rss_limit(uptr LimitMb, s32 HardLimit) {
+ if (!SCUDO_CAN_USE_PUBLIC_INTERFACE)
+ return;
+ Instance.setRssLimit(LimitMb, !!HardLimit);
+}
+
+void __scudo_print_stats() {
+ Instance.printStats();
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_allocator.cpp
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
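
The checksum scheme in Chunk::computeChecksum() above hashes the chunk
pointer and the header with the Checksum field zeroed, so the stored and
recomputed values can be compared directly. A simplified, self-contained
sketch of the same idea; the bitwise CRC-32 below is a stand-in for the
hardware/software routines the allocator selects at start-up, and the
two-field Header is a deliberate simplification:

#include <cstdint>
#include <cstring>

// Minimal bitwise CRC-32 (reflected polynomial 0xEDB88320).
static uint32_t crc32_u64(uint32_t crc, uint64_t v) {
  for (int i = 0; i < 64; i++) {
    uint32_t bit = (crc ^ (uint32_t)(v >> i)) & 1u;
    crc = (crc >> 1) ^ (bit ? 0xEDB88320u : 0u);
  }
  return crc;
}

struct Header {          // simplified: checksum plus "everything else"
  uint64_t Checksum : 16;
  uint64_t Rest : 48;
};

static uint16_t header_checksum(uint32_t cookie, const void *ptr, Header h) {
  h.Checksum = 0;        // always hash with the checksum field zeroed
  uint64_t bits;
  std::memcpy(&bits, &h, sizeof(bits));
  uint32_t crc = crc32_u64(cookie, (uint64_t)(uintptr_t)ptr);
  crc = crc32_u64(crc, bits);
  return (uint16_t)crc;  // the header only stores the low 16 bits
}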
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_allocator.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_allocator.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_allocator.h (revision 351984)
@@ -0,0 +1,125 @@
+//===-- scudo_allocator.h ---------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Header for scudo_allocator.cpp.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_ALLOCATOR_H_
+#define SCUDO_ALLOCATOR_H_
+
+#include "scudo_platform.h"
+
+namespace __scudo {
+
+enum AllocType : u8 {
+ FromMalloc = 0, // Memory block came from malloc, realloc, calloc, etc.
+ FromNew = 1, // Memory block came from operator new.
+ FromNewArray = 2, // Memory block came from operator new [].
+ FromMemalign = 3, // Memory block came from memalign, posix_memalign, etc.
+};
+
+enum ChunkState : u8 {
+ ChunkAvailable = 0,
+ ChunkAllocated = 1,
+ ChunkQuarantine = 2
+};
+
+// Our header requires 64 bits of storage. Having the offset saves us from
+// using functions such as GetBlockBegin, which are fairly costly. Our first
+// implementation used the MetaData as well, which offers the advantage of
+// being stored away from the chunk itself, but accessing it was costly as
+// well. The header is atomically loaded and stored.
+typedef u64 PackedHeader;
+struct UnpackedHeader {
+ u64 Checksum : 16;
+ u64 ClassId : 8;
+ u64 SizeOrUnusedBytes : 20; // Size for Primary backed allocations, amount of
+ // unused bytes in the chunk for Secondary ones.
+ u64 State : 2; // available, allocated, or quarantined
+ u64 AllocType : 2; // malloc, new, new[], or memalign
+ u64 Offset : 16; // Offset from the beginning of the backend
+ // allocation to the beginning of the chunk
+ // itself, in multiples of MinAlignment. See
+ // comment about its maximum value and in init().
+};
+
+typedef atomic_uint64_t AtomicPackedHeader;
+COMPILER_CHECK(sizeof(UnpackedHeader) == sizeof(PackedHeader));
+
+// Minimum alignment of 8 bytes for 32-bit, 16 for 64-bit
+const uptr MinAlignmentLog = FIRST_32_SECOND_64(3, 4);
+const uptr MaxAlignmentLog = 24; // 16 MB
+const uptr MinAlignment = 1 << MinAlignmentLog;
+const uptr MaxAlignment = 1 << MaxAlignmentLog;
+
+// constexpr version of __sanitizer::RoundUp without the extraneous CHECK.
+// This way we can use it in constexpr variables and function declarations.
+constexpr uptr RoundUpTo(uptr Size, uptr Boundary) {
+ return (Size + Boundary - 1) & ~(Boundary - 1);
+}
+
+namespace Chunk {
+ constexpr uptr getHeaderSize() {
+ return RoundUpTo(sizeof(PackedHeader), MinAlignment);
+ }
+}
+
+#if SANITIZER_CAN_USE_ALLOCATOR64
+const uptr AllocatorSpace = ~0ULL;
+struct AP64 {
+ static const uptr kSpaceBeg = AllocatorSpace;
+ static const uptr kSpaceSize = AllocatorSize;
+ static const uptr kMetadataSize = 0;
+ typedef __scudo::SizeClassMap SizeClassMap;
+ typedef NoOpMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags =
+ SizeClassAllocator64FlagMasks::kRandomShuffleChunks;
+ using AddressSpaceView = LocalAddressSpaceView;
+};
+typedef SizeClassAllocator64<AP64> PrimaryT;
+#else
+struct AP32 {
+ static const uptr kSpaceBeg = 0;
+ static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
+ static const uptr kMetadataSize = 0;
+ typedef __scudo::SizeClassMap SizeClassMap;
+ static const uptr kRegionSizeLog = RegionSizeLog;
+ using AddressSpaceView = LocalAddressSpaceView;
+ typedef NoOpMapUnmapCallback MapUnmapCallback;
+ static const uptr kFlags =
+ SizeClassAllocator32FlagMasks::kRandomShuffleChunks |
+ SizeClassAllocator32FlagMasks::kUseSeparateSizeClassForBatch;
+};
+typedef SizeClassAllocator32<AP32> PrimaryT;
+#endif // SANITIZER_CAN_USE_ALLOCATOR64
+
+#include "scudo_allocator_secondary.h"
+
+typedef LargeMmapAllocator SecondaryT;
+
+#include "scudo_allocator_combined.h"
+
+typedef CombinedAllocator BackendT;
+typedef CombinedAllocator::AllocatorCache AllocatorCacheT;
+
+void initScudo();
+
+void *scudoAllocate(uptr Size, uptr Alignment, AllocType Type);
+void scudoDeallocate(void *Ptr, uptr Size, uptr Alignment, AllocType Type);
+void *scudoRealloc(void *Ptr, uptr Size);
+void *scudoCalloc(uptr NMemB, uptr Size);
+void *scudoValloc(uptr Size);
+void *scudoPvalloc(uptr Size);
+int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size);
+void *scudoAlignedAlloc(uptr Alignment, uptr Size);
+uptr scudoMallocUsableSize(void *Ptr);
+
+} // namespace __scudo
+
+#endif // SCUDO_ALLOCATOR_H_
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_allocator.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
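
The UnpackedHeader bitfields above are sized to sum to exactly 64 bits
(16 + 8 + 20 + 2 + 2 + 16), which is what allows the header to be loaded
and stored as a single atomic u64. That invariant can be checked the same
way the file's own COMPILER_CHECK does:

#include <cstdint>

struct PackedCheck {     // same field widths as UnpackedHeader
  uint64_t Checksum : 16;
  uint64_t ClassId : 8;
  uint64_t SizeOrUnusedBytes : 20;
  uint64_t State : 2;
  uint64_t AllocType : 2;
  uint64_t Offset : 16;
};
static_assert(sizeof(PackedCheck) == sizeof(uint64_t),
              "header must pack into one atomically accessible word");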
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_allocator_combined.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_allocator_combined.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_allocator_combined.h (revision 351984)
@@ -0,0 +1,75 @@
+//===-- scudo_allocator_combined.h ------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Scudo Combined Allocator, dispatches allocation & deallocation requests to
+/// the Primary or the Secondary backend allocators.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_ALLOCATOR_COMBINED_H_
+#define SCUDO_ALLOCATOR_COMBINED_H_
+
+#ifndef SCUDO_ALLOCATOR_H_
+# error "This file must be included inside scudo_allocator.h."
+#endif
+
+class CombinedAllocator {
+ public:
+ using PrimaryAllocator = PrimaryT;
+ using SecondaryAllocator = SecondaryT;
+ using AllocatorCache = typename PrimaryAllocator::AllocatorCache;
+ void init(s32 ReleaseToOSIntervalMs) {
+ Primary.Init(ReleaseToOSIntervalMs);
+ Secondary.Init();
+ Stats.Init();
+ }
+
+ // Primary allocations are always MinAlignment aligned, and as such do not
+ // require an Alignment parameter.
+ void *allocatePrimary(AllocatorCache *Cache, uptr ClassId) {
+ return Cache->Allocate(&Primary, ClassId);
+ }
+
+ // Secondary allocations do not require a Cache, but do require an Alignment
+ // parameter.
+ void *allocateSecondary(uptr Size, uptr Alignment) {
+ return Secondary.Allocate(&Stats, Size, Alignment);
+ }
+
+ void deallocatePrimary(AllocatorCache *Cache, void *Ptr, uptr ClassId) {
+ Cache->Deallocate(&Primary, ClassId, Ptr);
+ }
+
+ void deallocateSecondary(void *Ptr) {
+ Secondary.Deallocate(&Stats, Ptr);
+ }
+
+ void initCache(AllocatorCache *Cache) {
+ Cache->Init(&Stats);
+ }
+
+ void destroyCache(AllocatorCache *Cache) {
+ Cache->Destroy(&Primary, &Stats);
+ }
+
+ void getStats(AllocatorStatCounters StatType) const {
+ Stats.Get(StatType);
+ }
+
+ void printStats() {
+ Primary.PrintStats();
+ Secondary.PrintStats();
+ }
+
+ private:
+ PrimaryAllocator Primary;
+ SecondaryAllocator Secondary;
+ AllocatorGlobalStats Stats;
+};
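+
+// Illustrative sketch, not part of the upstream source: a frontend would
+// typically dispatch on the size class, along the lines of
+//   const uptr ClassId = SizeClassMap::ClassID(AlignedSize);
+//   void *Ptr = ClassId ? Backend.allocatePrimary(Cache, ClassId)
+//                       : Backend.allocateSecondary(AlignedSize, Alignment);
+// where Cache is a pointer to the calling thread's AllocatorCache.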
+
+#endif // SCUDO_ALLOCATOR_COMBINED_H_
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_allocator_combined.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_allocator_secondary.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_allocator_secondary.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_allocator_secondary.h (revision 351984)
@@ -0,0 +1,192 @@
+//===-- scudo_allocator_secondary.h -----------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Scudo Secondary Allocator.
+/// This services allocations that are too large to be serviced by the Primary
+/// Allocator. It is directly backed by the memory mapping functions of the
+/// operating system.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_ALLOCATOR_SECONDARY_H_
+#define SCUDO_ALLOCATOR_SECONDARY_H_
+
+#ifndef SCUDO_ALLOCATOR_H_
+# error "This file must be included inside scudo_allocator.h."
+#endif
+
+// Secondary-backed allocations are standalone chunks that contain extra
+// information stored in a LargeChunk::Header prior to the frontend's header.
+//
+// The secondary takes care of alignment requirements (so that it can release
+// unnecessary pages in the rare event of larger alignments), and as such must
+// know about the frontend's header size.
+//
+// Since Windows doesn't support partial releasing of a reserved memory region,
+// we have to keep track of both the reserved and the committed memory.
+//
+// The resulting chunk resembles the following:
+//
+// +--------------------+
+// | Guard page(s) |
+// +--------------------+
+// | Unused space* |
+// +--------------------+
+// | LargeChunk::Header |
+// +--------------------+
+// | {Unp,P}ackedHeader |
+// +--------------------+
+// | Data (aligned) |
+// +--------------------+
+// | Unused space** |
+// +--------------------+
+// | Guard page(s) |
+// +--------------------+
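+//
+// Illustrative arithmetic, not part of the upstream source: with both headers
+// rounded up to MinAlignment, the user pointer starts at
+//   UserBeg = ReservedBeg + PageSize /* guard */ + LargeChunk::getHeaderSize()
+//                                                + Chunk::getHeaderSize();
+// and is rounded up further when Alignment > MinAlignment.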
+
+namespace LargeChunk {
+ struct Header {
+ ReservedAddressRange StoredRange;
+ uptr CommittedSize;
+ uptr Size;
+ };
+ constexpr uptr getHeaderSize() {
+ return RoundUpTo(sizeof(Header), MinAlignment);
+ }
+ static Header *getHeader(uptr Ptr) {
+ return reinterpret_cast<Header *>(Ptr - getHeaderSize());
+ }
+ static Header *getHeader(const void *Ptr) {
+ return getHeader(reinterpret_cast<uptr>(Ptr));
+ }
+} // namespace LargeChunk
+
+class LargeMmapAllocator {
+ public:
+ void Init() {
+ internal_memset(this, 0, sizeof(*this));
+ }
+
+ void *Allocate(AllocatorStats *Stats, uptr Size, uptr Alignment) {
+ const uptr UserSize = Size - Chunk::getHeaderSize();
+ // The Scudo frontend prevents us from allocating more than
+ // MaxAllowedMallocSize, so integer overflow checks would be superfluous.
+ uptr ReservedSize = Size + LargeChunk::getHeaderSize();
+ if (UNLIKELY(Alignment > MinAlignment))
+ ReservedSize += Alignment;
+ const uptr PageSize = GetPageSizeCached();
+ ReservedSize = RoundUpTo(ReservedSize, PageSize);
+ // Account for 2 guard pages, one before and one after the chunk.
+ ReservedSize += 2 * PageSize;
+
+ ReservedAddressRange AddressRange;
+ uptr ReservedBeg = AddressRange.Init(ReservedSize, SecondaryAllocatorName);
+ if (UNLIKELY(ReservedBeg == ~static_cast<uptr>(0)))
+ return nullptr;
+ // A page-aligned pointer is assumed after that, so check it now.
+ DCHECK(IsAligned(ReservedBeg, PageSize));
+ uptr ReservedEnd = ReservedBeg + ReservedSize;
+ // The beginning of the user area for that allocation comes after the
+ // initial guard page and both headers. This is the pointer that has to
+ // abide by alignment requirements.
+ uptr CommittedBeg = ReservedBeg + PageSize;
+ uptr UserBeg = CommittedBeg + HeadersSize;
+ uptr UserEnd = UserBeg + UserSize;
+ uptr CommittedEnd = RoundUpTo(UserEnd, PageSize);
+
+ // In the rare event of larger alignments, we will attempt to fit the mmap
+ // area better and unmap extraneous memory. This will also ensure that the
+ // offset and unused bytes field of the header stay small.
+ if (UNLIKELY(Alignment > MinAlignment)) {
+ if (!IsAligned(UserBeg, Alignment)) {
+ UserBeg = RoundUpTo(UserBeg, Alignment);
+ CommittedBeg = RoundDownTo(UserBeg - HeadersSize, PageSize);
+ const uptr NewReservedBeg = CommittedBeg - PageSize;
+ DCHECK_GE(NewReservedBeg, ReservedBeg);
+ if (!SANITIZER_WINDOWS && NewReservedBeg != ReservedBeg) {
+ AddressRange.Unmap(ReservedBeg, NewReservedBeg - ReservedBeg);
+ ReservedBeg = NewReservedBeg;
+ }
+ UserEnd = UserBeg + UserSize;
+ CommittedEnd = RoundUpTo(UserEnd, PageSize);
+ }
+ const uptr NewReservedEnd = CommittedEnd + PageSize;
+ DCHECK_LE(NewReservedEnd, ReservedEnd);
+ if (!SANITIZER_WINDOWS && NewReservedEnd != ReservedEnd) {
+ AddressRange.Unmap(NewReservedEnd, ReservedEnd - NewReservedEnd);
+ ReservedEnd = NewReservedEnd;
+ }
+ }
+
+ DCHECK_LE(UserEnd, CommittedEnd);
+ const uptr CommittedSize = CommittedEnd - CommittedBeg;
+ // Actually mmap the memory, preserving the guard pages on either side.
+ CHECK_EQ(CommittedBeg, AddressRange.Map(CommittedBeg, CommittedSize));
+ const uptr Ptr = UserBeg - Chunk::getHeaderSize();
+ LargeChunk::Header *H = LargeChunk::getHeader(Ptr);
+ H->StoredRange = AddressRange;
+ H->Size = CommittedEnd - Ptr;
+ H->CommittedSize = CommittedSize;
+
+ // The primary adds the whole class size to the stats when allocating a
+ // chunk, so we will do something similar here. But we will not account for
+ // the guard pages.
+ {
+ SpinMutexLock l(&StatsMutex);
+ Stats->Add(AllocatorStatAllocated, CommittedSize);
+ Stats->Add(AllocatorStatMapped, CommittedSize);
+ AllocatedBytes += CommittedSize;
+ if (LargestSize < CommittedSize)
+ LargestSize = CommittedSize;
+ NumberOfAllocs++;
+ }
+
+ return reinterpret_cast<void *>(Ptr);
+ }
+
+ void Deallocate(AllocatorStats *Stats, void *Ptr) {
+ LargeChunk::Header *H = LargeChunk::getHeader(Ptr);
+ // Since we're unmapping the entire region covered by the
+ // ReservedAddressRange, copy it onto the stack first.
+ ReservedAddressRange AddressRange = H->StoredRange;
+ const uptr Size = H->CommittedSize;
+ {
+ SpinMutexLock l(&StatsMutex);
+ Stats->Sub(AllocatorStatAllocated, Size);
+ Stats->Sub(AllocatorStatMapped, Size);
+ FreedBytes += Size;
+ NumberOfFrees++;
+ }
+ AddressRange.Unmap(reinterpret_cast<uptr>(AddressRange.base()),
+ AddressRange.size());
+ }
+
+ static uptr GetActuallyAllocatedSize(void *Ptr) {
+ return LargeChunk::getHeader(Ptr)->Size;
+ }
+
+ void PrintStats() {
+ Printf("Stats: LargeMmapAllocator: allocated %zd times (%zd K), "
+ "freed %zd times (%zd K), remains %zd (%zd K) max %zd M\n",
+ NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees,
+ FreedBytes >> 10, NumberOfAllocs - NumberOfFrees,
+ (AllocatedBytes - FreedBytes) >> 10, LargestSize >> 20);
+ }
+
+ private:
+ static constexpr uptr HeadersSize =
+ LargeChunk::getHeaderSize() + Chunk::getHeaderSize();
+
+ StaticSpinMutex StatsMutex;
+ u32 NumberOfAllocs;
+ u32 NumberOfFrees;
+ uptr AllocatedBytes;
+ uptr FreedBytes;
+ uptr LargestSize;
+};
+
+#endif // SCUDO_ALLOCATOR_SECONDARY_H_
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_allocator_secondary.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_crc32.cpp
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_crc32.cpp (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_crc32.cpp (revision 351984)
@@ -0,0 +1,24 @@
+//===-- scudo_crc32.cpp -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// CRC32 function leveraging hardware-specific instructions. This has to be
+/// kept separate to restrict the use of compiler-specific flags to this file.
+///
+//===----------------------------------------------------------------------===//
+
+#include "scudo_crc32.h"
+
+namespace __scudo {
+
+#if defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
+u32 computeHardwareCRC32(u32 Crc, uptr Data) {
+ return CRC32_INTRINSIC(Crc, Data);
+}
+#endif // defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
+
+} // namespace __scudo
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_crc32.cpp
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_crc32.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_crc32.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_crc32.h (revision 351984)
@@ -0,0 +1,100 @@
+//===-- scudo_crc32.h -------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Scudo chunk header checksum related definitions.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_CRC32_H_
+#define SCUDO_CRC32_H_
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+
+// Hardware CRC32 is supported at compilation via the following:
+// - for i386 & x86_64: -msse4.2
+// - for ARM & AArch64: -march=armv8-a+crc or -mcrc
+// An additional check must be performed at runtime as well to make sure the
+// emitted instructions are valid on the target host.
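+// (Typically the runtime check queries CPUID on x86, or the ELF auxiliary
+// vector on ARM, e.g. getauxval(AT_HWCAP) & HWCAP_CRC32 on AArch64; the exact
+// mechanism lives outside of this header.)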
+
+#if defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
+# ifdef __SSE4_2__
+# include <smmintrin.h>
+# define CRC32_INTRINSIC FIRST_32_SECOND_64(_mm_crc32_u32, _mm_crc32_u64)
+# endif
+# ifdef __ARM_FEATURE_CRC32
+# include <arm_acle.h>
+# define CRC32_INTRINSIC FIRST_32_SECOND_64(__crc32cw, __crc32cd)
+# endif
+#endif // defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
+
+namespace __scudo {
+
+enum : u8 {
+ CRC32Software = 0,
+ CRC32Hardware = 1,
+};
+
+static const u32 CRC32Table[] = {
+ 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, 0x706af48f,
+ 0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
+ 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91, 0x1db71064, 0x6ab020f2,
+ 0xf3b97148, 0x84be41de, 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
+ 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9,
+ 0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
+ 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, 0x35b5a8fa, 0x42b2986c,
+ 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
+ 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423,
+ 0xcfba9599, 0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
+ 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190, 0x01db7106,
+ 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
+ 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d,
+ 0x91646c97, 0xe6635c01, 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
+ 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950,
+ 0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
+ 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, 0x4adfa541, 0x3dd895d7,
+ 0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
+ 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa,
+ 0xbe0b1010, 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
+ 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, 0x2eb40d81,
+ 0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a,
+ 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683, 0xe3630b12, 0x94643b84,
+ 0x0d6d6a3e, 0x7a6a5aa8, 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
+ 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb,
+ 0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc,
+ 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, 0xd6d6a3e8, 0xa1d1937e,
+ 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
+ 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55,
+ 0x316e8eef, 0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
+ 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe, 0xb2bd0b28,
+ 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
+ 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, 0x9c0906a9, 0xeb0e363f,
+ 0x72076785, 0x05005713, 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38,
+ 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242,
+ 0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
+ 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, 0x8f659eff, 0xf862ae69,
+ 0x616bffd3, 0x166ccf45, 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2,
+ 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc,
+ 0x40df0b66, 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
+ 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605, 0xcdd70693,
+ 0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
+ 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d
+};
+
+INLINE u32 computeSoftwareCRC32(u32 Crc, uptr Data) {
+ for (uptr i = 0; i < sizeof(Data); i++) {
+ Crc = CRC32Table[(Crc ^ Data) & 0xff] ^ (Crc >> 8);
+ Data >>= 8;
+ }
+ return Crc;
+}
+
+SANITIZER_WEAK_ATTRIBUTE u32 computeHardwareCRC32(u32 Crc, uptr Data);
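+
+// A minimal dispatch sketch, not part of the upstream source: a caller that
+// detected hardware support at runtime can fall back to the table-based
+// variant whenever the weak hardware symbol is absent, e.g.
+//   u32 computeCRC32(u8 Type, u32 Crc, uptr Data) {
+//     if (Type == CRC32Hardware && &computeHardwareCRC32)
+//       return computeHardwareCRC32(Crc, Data);
+//     return computeSoftwareCRC32(Crc, Data);
+//   }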
+
+} // namespace __scudo
+
+#endif // SCUDO_CRC32_H_
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_crc32.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_errors.cpp
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_errors.cpp (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_errors.cpp (revision 351984)
@@ -0,0 +1,76 @@
+//===-- scudo_errors.cpp ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Verbose termination functions.
+///
+//===----------------------------------------------------------------------===//
+
+#include "scudo_utils.h"
+
+#include "sanitizer_common/sanitizer_flags.h"
+
+namespace __scudo {
+
+void NORETURN reportCallocOverflow(uptr Count, uptr Size) {
+ dieWithMessage("calloc parameters overflow: count * size (%zd * %zd) cannot "
+ "be represented with type size_t\n", Count, Size);
+}
+
+void NORETURN reportPvallocOverflow(uptr Size) {
+ dieWithMessage("pvalloc parameters overflow: size 0x%zx rounded up to system "
+ "page size 0x%zx cannot be represented in type size_t\n", Size,
+ GetPageSizeCached());
+}
+
+void NORETURN reportAllocationAlignmentTooBig(uptr Alignment,
+ uptr MaxAlignment) {
+ dieWithMessage("invalid allocation alignment: %zd exceeds maximum supported "
+ "allocation of %zd\n", Alignment, MaxAlignment);
+}
+
+void NORETURN reportAllocationAlignmentNotPowerOfTwo(uptr Alignment) {
+ dieWithMessage("invalid allocation alignment: %zd, alignment must be a power "
+ "of two\n", Alignment);
+}
+
+void NORETURN reportInvalidPosixMemalignAlignment(uptr Alignment) {
+ dieWithMessage("invalid alignment requested in posix_memalign: %zd, alignment"
+ " must be a power of two and a multiple of sizeof(void *) == %zd\n",
+ Alignment, sizeof(void *)); // NOLINT
+}
+
+void NORETURN reportInvalidAlignedAllocAlignment(uptr Size, uptr Alignment) {
+#if SANITIZER_POSIX
+ dieWithMessage("invalid alignment requested in aligned_alloc: %zd, alignment "
+ "must be a power of two and the requested size 0x%zx must be a multiple "
+ "of alignment\n", Alignment, Size);
+#else
+ dieWithMessage("invalid alignment requested in aligned_alloc: %zd, the "
+ "requested size 0x%zx must be a multiple of alignment\n", Alignment,
+ Size);
+#endif
+}
+
+void NORETURN reportAllocationSizeTooBig(uptr UserSize, uptr TotalSize,
+ uptr MaxSize) {
+ dieWithMessage("requested allocation size 0x%zx (0x%zx after adjustments) "
+ "exceeds maximum supported size of 0x%zx\n", UserSize, TotalSize,
+ MaxSize);
+}
+
+void NORETURN reportRssLimitExceeded() {
+ dieWithMessage("specified RSS limit exceeded, currently set to "
+ "soft_rss_limit_mb=%zd\n", common_flags()->soft_rss_limit_mb);
+}
+
+void NORETURN reportOutOfMemory(uptr RequestedSize) {
+ dieWithMessage("allocator is out of memory trying to allocate 0x%zx bytes\n",
+ RequestedSize);
+}
+
+} // namespace __scudo
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_errors.cpp
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_errors.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_errors.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_errors.h (revision 351984)
@@ -0,0 +1,34 @@
+//===-- scudo_errors.h ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Header for scudo_errors.cpp.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_ERRORS_H_
+#define SCUDO_ERRORS_H_
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+
+namespace __scudo {
+
+void NORETURN reportCallocOverflow(uptr Count, uptr Size);
+void NORETURN reportPvallocOverflow(uptr Size);
+void NORETURN reportAllocationAlignmentTooBig(uptr Alignment,
+ uptr MaxAlignment);
+void NORETURN reportAllocationAlignmentNotPowerOfTwo(uptr Alignment);
+void NORETURN reportInvalidPosixMemalignAlignment(uptr Alignment);
+void NORETURN reportInvalidAlignedAllocAlignment(uptr Size, uptr Alignment);
+void NORETURN reportAllocationSizeTooBig(uptr UserSize, uptr TotalSize,
+ uptr MaxSize);
+void NORETURN reportRssLimitExceeded();
+void NORETURN reportOutOfMemory(uptr RequestedSize);
+
+} // namespace __scudo
+
+#endif // SCUDO_ERRORS_H_
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_errors.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_flags.cpp
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_flags.cpp (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_flags.cpp (revision 351984)
@@ -0,0 +1,136 @@
+//===-- scudo_flags.cpp -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Hardened Allocator flag parsing logic.
+///
+//===----------------------------------------------------------------------===//
+
+#include "scudo_flags.h"
+#include "scudo_interface_internal.h"
+#include "scudo_utils.h"
+
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_flag_parser.h"
+
+namespace __scudo {
+
+static Flags ScudoFlags; // Use via getFlags().
+
+void Flags::setDefaults() {
+#define SCUDO_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
+#include "scudo_flags.inc"
+#undef SCUDO_FLAG
+}
+
+static void RegisterScudoFlags(FlagParser *parser, Flags *f) {
+#define SCUDO_FLAG(Type, Name, DefaultValue, Description) \
+ RegisterFlag(parser, #Name, Description, &f->Name);
+#include "scudo_flags.inc"
+#undef SCUDO_FLAG
+}
+
+static const char *getCompileDefinitionScudoDefaultOptions() {
+#ifdef SCUDO_DEFAULT_OPTIONS
+ return SANITIZER_STRINGIFY(SCUDO_DEFAULT_OPTIONS);
+#else
+ return "";
+#endif
+}
+
+static const char *getScudoDefaultOptions() {
+ return (&__scudo_default_options) ? __scudo_default_options() : "";
+}
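+
+// Illustrative only, not part of the upstream source: a binary can supply its
+// own defaults by defining the weak interface function, e.g.
+//   extern "C" const char *__scudo_default_options() {
+//     return "QuarantineSizeKb=128:ZeroContents=1";
+//   }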
+
+void initFlags() {
+ SetCommonFlagsDefaults();
+ {
+ CommonFlags cf;
+ cf.CopyFrom(*common_flags());
+ cf.exitcode = 1;
+ OverrideCommonFlags(cf);
+ }
+ Flags *f = getFlags();
+ f->setDefaults();
+
+ FlagParser ScudoParser;
+ RegisterScudoFlags(&ScudoParser, f);
+ RegisterCommonFlags(&ScudoParser);
+
+ // Override from compile definition.
+ ScudoParser.ParseString(getCompileDefinitionScudoDefaultOptions());
+
+ // Override from user-specified string.
+ ScudoParser.ParseString(getScudoDefaultOptions());
+
+ // Override from environment.
+ ScudoParser.ParseStringFromEnv("SCUDO_OPTIONS");
+
+ InitializeCommonFlags();
+
+ // Sanity checks and default settings for the Quarantine parameters.
+
+ if (f->QuarantineSizeMb >= 0) {
+ // Backward compatible logic if QuarantineSizeMb is set.
+ if (f->QuarantineSizeKb >= 0) {
+ dieWithMessage("ERROR: please use either QuarantineSizeMb (deprecated) "
+ "or QuarantineSizeKb, but not both\n");
+ }
+ if (f->QuarantineChunksUpToSize >= 0) {
+ dieWithMessage("ERROR: QuarantineChunksUpToSize cannot be used in "
+ " conjunction with the deprecated QuarantineSizeMb option\n");
+ }
+ // If everything is in order, update QuarantineSizeKb accordingly.
+ f->QuarantineSizeKb = f->QuarantineSizeMb * 1024;
+ } else {
+ // Otherwise proceed with the new options.
+ if (f->QuarantineSizeKb < 0) {
+ const int DefaultQuarantineSizeKb = FIRST_32_SECOND_64(64, 256);
+ f->QuarantineSizeKb = DefaultQuarantineSizeKb;
+ }
+ if (f->QuarantineChunksUpToSize < 0) {
+ const int DefaultQuarantineChunksUpToSize = FIRST_32_SECOND_64(512, 2048);
+ f->QuarantineChunksUpToSize = DefaultQuarantineChunksUpToSize;
+ }
+ }
+
+ // We enforce an upper limit for the chunk quarantine threshold of 4 MB.
+ if (f->QuarantineChunksUpToSize > (4 * 1024 * 1024)) {
+ dieWithMessage("ERROR: the chunk quarantine threshold is too large\n");
+ }
+
+ // We enforce an upper limit for the quarantine size of 32 MB.
+ if (f->QuarantineSizeKb > (32 * 1024)) {
+ dieWithMessage("ERROR: the quarantine size is too large\n");
+ }
+
+ if (f->ThreadLocalQuarantineSizeKb < 0) {
+ const int DefaultThreadLocalQuarantineSizeKb = FIRST_32_SECOND_64(16, 64);
+ f->ThreadLocalQuarantineSizeKb = DefaultThreadLocalQuarantineSizeKb;
+ }
+ // And an upper limit of 8 MB for the thread quarantine cache.
+ if (f->ThreadLocalQuarantineSizeKb > (8 * 1024)) {
+ dieWithMessage("ERROR: the per thread quarantine cache size is too "
+ "large\n");
+ }
+ if (f->ThreadLocalQuarantineSizeKb == 0 && f->QuarantineSizeKb > 0) {
+ dieWithMessage("ERROR: ThreadLocalQuarantineSizeKb can be set to 0 only "
+ "when QuarantineSizeKb is set to 0\n");
+ }
+}
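+
+// Note, not part of the upstream source: the same flags can be set at run time
+// through the environment, e.g.
+//   SCUDO_OPTIONS="QuarantineSizeKb=256:DeleteSizeMismatch=0" ./binary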
+
+Flags *getFlags() {
+ return &ScudoFlags;
+}
+
+} // namespace __scudo
+
+#if !SANITIZER_SUPPORTS_WEAK_HOOKS
+SANITIZER_INTERFACE_WEAK_DEF(const char*, __scudo_default_options, void) {
+ return "";
+}
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_flags.cpp
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_flags.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_flags.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_flags.h (revision 351984)
@@ -0,0 +1,32 @@
+//===-- scudo_flags.h -------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Header for scudo_flags.cpp.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_FLAGS_H_
+#define SCUDO_FLAGS_H_
+
+namespace __scudo {
+
+struct Flags {
+#define SCUDO_FLAG(Type, Name, DefaultValue, Description) Type Name;
+#include "scudo_flags.inc"
+#undef SCUDO_FLAG
+
+ void setDefaults();
+};
+
+Flags *getFlags();
+
+void initFlags();
+
+} // namespace __scudo
+
+#endif // SCUDO_FLAGS_H_
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_flags.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_flags.inc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_flags.inc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_flags.inc (revision 351984)
@@ -0,0 +1,48 @@
+//===-- scudo_flags.inc -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Hardened Allocator runtime flags.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_FLAG
+# error "Define SCUDO_FLAG prior to including this file!"
+#endif
+
+SCUDO_FLAG(int, QuarantineSizeMb, -1,
+ "Deprecated. Please use QuarantineSizeKb.")
+
+// Default value is set in scudo_flags.cpp based on architecture.
+SCUDO_FLAG(int, QuarantineSizeKb, -1,
+ "Size in KB of quarantine used to delay the actual deallocation of "
+ "chunks. Lower value may reduce memory usage but decrease the "
+ "effectiveness of the mitigation. Defaults to 64KB (32-bit) or "
+ "256KB (64-bit)")
+
+// Default value is set in scudo_flags.cpp based on architecture.
+SCUDO_FLAG(int, ThreadLocalQuarantineSizeKb, -1,
+ "Size in KB of per-thread cache used to offload the global "
+ "quarantine. Lower value may reduce memory usage but might increase "
+ "the contention on the global quarantine. Defaults to 16KB (32-bit) "
+ "or 64KB (64-bit)")
+
+// Default value is set in scudo_flags.cpp based on architecture.
+SCUDO_FLAG(int, QuarantineChunksUpToSize, -1,
+ "Size in bytes up to which chunks will be quarantined (if lower than"
+ "or equal to). Defaults to 256 (32-bit) or 2048 (64-bit)")
+
+// Disable the deallocation type check by default on Android, as it causes too
+// many issues with third-party libraries.
+SCUDO_FLAG(bool, DeallocationTypeMismatch, !SANITIZER_ANDROID,
+ "Report errors on malloc/delete, new/free, new/delete[], etc.")
+
+SCUDO_FLAG(bool, DeleteSizeMismatch, true,
+ "Report errors on mismatch between size of new and delete.")
+
+SCUDO_FLAG(bool, ZeroContents, false,
+ "Zero chunk contents on allocation and deallocation.")
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_flags.inc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_interface_internal.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_interface_internal.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_interface_internal.h (revision 351984)
@@ -0,0 +1,32 @@
+//===-- scudo_interface_internal.h ------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Private Scudo interface header.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_INTERFACE_INTERNAL_H_
+#define SCUDO_INTERFACE_INTERNAL_H_
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+
+using __sanitizer::uptr;
+using __sanitizer::s32;
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+const char* __scudo_default_options();
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __scudo_set_rss_limit(uptr LimitMb, s32 HardLimit);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __scudo_print_stats();
+} // extern "C"
+
+#endif // SCUDO_INTERFACE_INTERNAL_H_
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_interface_internal.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_malloc.cpp
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_malloc.cpp (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_malloc.cpp (revision 351984)
@@ -0,0 +1,84 @@
+//===-- scudo_malloc.cpp ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Interceptors for malloc related functions.
+///
+//===----------------------------------------------------------------------===//
+
+#include "scudo_allocator.h"
+
+#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_platform_interceptors.h"
+
+#include <stddef.h>
+
+using namespace __scudo;
+
+extern "C" {
+INTERCEPTOR_ATTRIBUTE void free(void *ptr) {
+ scudoDeallocate(ptr, 0, 0, FromMalloc);
+}
+
+INTERCEPTOR_ATTRIBUTE void *malloc(size_t size) {
+ return scudoAllocate(size, 0, FromMalloc);
+}
+
+INTERCEPTOR_ATTRIBUTE void *realloc(void *ptr, size_t size) {
+ return scudoRealloc(ptr, size);
+}
+
+INTERCEPTOR_ATTRIBUTE void *calloc(size_t nmemb, size_t size) {
+ return scudoCalloc(nmemb, size);
+}
+
+INTERCEPTOR_ATTRIBUTE void *valloc(size_t size) {
+ return scudoValloc(size);
+}
+
+INTERCEPTOR_ATTRIBUTE
+int posix_memalign(void **memptr, size_t alignment, size_t size) {
+ return scudoPosixMemalign(memptr, alignment, size);
+}
+
+#if SANITIZER_INTERCEPT_CFREE
+INTERCEPTOR_ATTRIBUTE void cfree(void *ptr) ALIAS("free");
+#endif
+
+#if SANITIZER_INTERCEPT_MEMALIGN
+INTERCEPTOR_ATTRIBUTE void *memalign(size_t alignment, size_t size) {
+ return scudoAllocate(size, alignment, FromMemalign);
+}
+
+INTERCEPTOR_ATTRIBUTE
+void *__libc_memalign(size_t alignment, size_t size) ALIAS("memalign");
+#endif
+
+#if SANITIZER_INTERCEPT_PVALLOC
+INTERCEPTOR_ATTRIBUTE void *pvalloc(size_t size) {
+ return scudoPvalloc(size);
+}
+#endif
+
+#if SANITIZER_INTERCEPT_ALIGNED_ALLOC
+INTERCEPTOR_ATTRIBUTE void *aligned_alloc(size_t alignment, size_t size) {
+ return scudoAlignedAlloc(alignment, size);
+}
+#endif
+
+#if SANITIZER_INTERCEPT_MALLOC_USABLE_SIZE
+INTERCEPTOR_ATTRIBUTE size_t malloc_usable_size(void *ptr) {
+ return scudoMallocUsableSize(ptr);
+}
+#endif
+
+#if SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO
+INTERCEPTOR_ATTRIBUTE int mallopt(int cmd, int value) {
+ return 0;
+}
+#endif
+} // extern "C"
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_malloc.cpp
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_new_delete.cpp
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_new_delete.cpp (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_new_delete.cpp (revision 351984)
@@ -0,0 +1,107 @@
+//===-- scudo_new_delete.cpp ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Interceptors for operators new and delete.
+///
+//===----------------------------------------------------------------------===//
+
+#include "scudo_allocator.h"
+#include "scudo_errors.h"
+
+#include "interception/interception.h"
+
+#include <stddef.h>
+
+using namespace __scudo;
+
+#define CXX_OPERATOR_ATTRIBUTE INTERCEPTOR_ATTRIBUTE
+
+// Fake std::nothrow_t to avoid including <new>.
+namespace std {
+struct nothrow_t {};
+enum class align_val_t: size_t {};
+} // namespace std
+
+// TODO(alekseys): throw std::bad_alloc instead of dying on OOM.
+#define OPERATOR_NEW_BODY_ALIGN(Type, Align, NoThrow) \
+ void *Ptr = scudoAllocate(size, static_cast<uptr>(Align), Type); \
+ if (!NoThrow && UNLIKELY(!Ptr)) reportOutOfMemory(size); \
+ return Ptr;
+#define OPERATOR_NEW_BODY(Type, NoThrow) \
+ OPERATOR_NEW_BODY_ALIGN(Type, 0, NoThrow)
+
+CXX_OPERATOR_ATTRIBUTE
+void *operator new(size_t size)
+{ OPERATOR_NEW_BODY(FromNew, /*NoThrow=*/false); }
+CXX_OPERATOR_ATTRIBUTE
+void *operator new[](size_t size)
+{ OPERATOR_NEW_BODY(FromNewArray, /*NoThrow=*/false); }
+CXX_OPERATOR_ATTRIBUTE
+void *operator new(size_t size, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY(FromNew, /*NoThrow=*/true); }
+CXX_OPERATOR_ATTRIBUTE
+void *operator new[](size_t size, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY(FromNewArray, /*NoThrow=*/true); }
+CXX_OPERATOR_ATTRIBUTE
+void *operator new(size_t size, std::align_val_t align)
+{ OPERATOR_NEW_BODY_ALIGN(FromNew, align, /*NoThrow=*/false); }
+CXX_OPERATOR_ATTRIBUTE
+void *operator new[](size_t size, std::align_val_t align)
+{ OPERATOR_NEW_BODY_ALIGN(FromNewArray, align, /*NoThrow=*/false); }
+CXX_OPERATOR_ATTRIBUTE
+void *operator new(size_t size, std::align_val_t align, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY_ALIGN(FromNew, align, /*NoThrow=*/true); }
+CXX_OPERATOR_ATTRIBUTE
+void *operator new[](size_t size, std::align_val_t align, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY_ALIGN(FromNewArray, align, /*NoThrow=*/true); }
+
+#define OPERATOR_DELETE_BODY(Type) \
+ scudoDeallocate(ptr, 0, 0, Type);
+#define OPERATOR_DELETE_BODY_SIZE(Type) \
+ scudoDeallocate(ptr, size, 0, Type);
+#define OPERATOR_DELETE_BODY_ALIGN(Type) \
+ scudoDeallocate(ptr, 0, static_cast<uptr>(align), Type);
+#define OPERATOR_DELETE_BODY_SIZE_ALIGN(Type) \
+ scudoDeallocate(ptr, size, static_cast<uptr>(align), Type);
+
+CXX_OPERATOR_ATTRIBUTE
+void operator delete(void *ptr) NOEXCEPT
+{ OPERATOR_DELETE_BODY(FromNew); }
+CXX_OPERATOR_ATTRIBUTE
+void operator delete[](void *ptr) NOEXCEPT
+{ OPERATOR_DELETE_BODY(FromNewArray); }
+CXX_OPERATOR_ATTRIBUTE
+void operator delete(void *ptr, std::nothrow_t const&)
+{ OPERATOR_DELETE_BODY(FromNew); }
+CXX_OPERATOR_ATTRIBUTE
+void operator delete[](void *ptr, std::nothrow_t const&)
+{ OPERATOR_DELETE_BODY(FromNewArray); }
+CXX_OPERATOR_ATTRIBUTE
+void operator delete(void *ptr, size_t size) NOEXCEPT
+{ OPERATOR_DELETE_BODY_SIZE(FromNew); }
+CXX_OPERATOR_ATTRIBUTE
+void operator delete[](void *ptr, size_t size) NOEXCEPT
+{ OPERATOR_DELETE_BODY_SIZE(FromNewArray); }
+CXX_OPERATOR_ATTRIBUTE
+void operator delete(void *ptr, std::align_val_t align) NOEXCEPT
+{ OPERATOR_DELETE_BODY_ALIGN(FromNew); }
+CXX_OPERATOR_ATTRIBUTE
+void operator delete[](void *ptr, std::align_val_t align) NOEXCEPT
+{ OPERATOR_DELETE_BODY_ALIGN(FromNewArray); }
+CXX_OPERATOR_ATTRIBUTE
+void operator delete(void *ptr, std::align_val_t align, std::nothrow_t const&)
+{ OPERATOR_DELETE_BODY_ALIGN(FromNew); }
+CXX_OPERATOR_ATTRIBUTE
+void operator delete[](void *ptr, std::align_val_t align, std::nothrow_t const&)
+{ OPERATOR_DELETE_BODY_ALIGN(FromNewArray); }
+CXX_OPERATOR_ATTRIBUTE
+void operator delete(void *ptr, size_t size, std::align_val_t align) NOEXCEPT
+{ OPERATOR_DELETE_BODY_SIZE_ALIGN(FromNew); }
+CXX_OPERATOR_ATTRIBUTE
+void operator delete[](void *ptr, size_t size, std::align_val_t align) NOEXCEPT
+{ OPERATOR_DELETE_BODY_SIZE_ALIGN(FromNewArray); }
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_new_delete.cpp
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_platform.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_platform.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_platform.h (revision 351984)
@@ -0,0 +1,93 @@
+//===-- scudo_platform.h ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Scudo platform specific definitions.
+/// TODO(kostyak): add tests for the compile time defines.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_PLATFORM_H_
+#define SCUDO_PLATFORM_H_
+
+#include "sanitizer_common/sanitizer_allocator.h"
+
+#if !SANITIZER_LINUX && !SANITIZER_FUCHSIA
+# error "The Scudo hardened allocator is not supported on this platform."
+#endif
+
+#define SCUDO_TSD_EXCLUSIVE_SUPPORTED (!SANITIZER_ANDROID && !SANITIZER_FUCHSIA)
+
+#ifndef SCUDO_TSD_EXCLUSIVE
+// SCUDO_TSD_EXCLUSIVE wasn't defined; use a default TSD model for the platform.
+# if SANITIZER_ANDROID || SANITIZER_FUCHSIA
+// Android and Fuchsia use a pool of TSDs shared between threads.
+# define SCUDO_TSD_EXCLUSIVE 0
+# elif SANITIZER_LINUX && !SANITIZER_ANDROID
+// Non-Android Linux uses an exclusive TSD per thread.
+# define SCUDO_TSD_EXCLUSIVE 1
+# else
+# error "No default TSD model defined for this platform."
+# endif // SANITIZER_ANDROID || SANITIZER_FUCHSIA
+#endif // SCUDO_TSD_EXCLUSIVE
+
+// If the exclusive TSD model is chosen, make sure the platform supports it.
+#if SCUDO_TSD_EXCLUSIVE && !SCUDO_TSD_EXCLUSIVE_SUPPORTED
+# error "The exclusive TSD model is not supported on this platform."
+#endif
+
+// Maximum number of TSDs that can be created for the Shared model.
+#ifndef SCUDO_SHARED_TSD_POOL_SIZE
+# if SANITIZER_ANDROID
+# define SCUDO_SHARED_TSD_POOL_SIZE 2U
+# else
+# define SCUDO_SHARED_TSD_POOL_SIZE 32U
+# endif // SANITIZER_ANDROID
+#endif // SCUDO_SHARED_TSD_POOL_SIZE
+
+// The following allows the public interface functions to be disabled.
+#ifndef SCUDO_CAN_USE_PUBLIC_INTERFACE
+# define SCUDO_CAN_USE_PUBLIC_INTERFACE 1
+#endif
+
+// Hooks in the allocation & deallocation paths can become a security concern if
+// implemented improperly, or if overwritten by an attacker. Use with caution.
+#ifndef SCUDO_CAN_USE_HOOKS
+# if SANITIZER_FUCHSIA
+# define SCUDO_CAN_USE_HOOKS 1
+# else
+# define SCUDO_CAN_USE_HOOKS 0
+# endif // SANITIZER_FUCHSIA
+#endif // SCUDO_CAN_USE_HOOKS
+
+namespace __scudo {
+
+#if SANITIZER_CAN_USE_ALLOCATOR64
+# if defined(__aarch64__) && SANITIZER_ANDROID
+const uptr AllocatorSize = 0x4000000000ULL; // 256G.
+# elif defined(__aarch64__)
+const uptr AllocatorSize = 0x10000000000ULL; // 1T.
+# else
+const uptr AllocatorSize = 0x40000000000ULL; // 4T.
+# endif
+#else
+const uptr RegionSizeLog = SANITIZER_ANDROID ? 19 : 20;
+#endif // SANITIZER_CAN_USE_ALLOCATOR64
+
+#if !defined(SCUDO_SIZE_CLASS_MAP)
+# define SCUDO_SIZE_CLASS_MAP Dense
+#endif
+
+#define SIZE_CLASS_MAP_TYPE SIZE_CLASS_MAP_TYPE_(SCUDO_SIZE_CLASS_MAP)
+#define SIZE_CLASS_MAP_TYPE_(T) SIZE_CLASS_MAP_TYPE__(T)
+#define SIZE_CLASS_MAP_TYPE__(T) T##SizeClassMap
+
+typedef SIZE_CLASS_MAP_TYPE SizeClassMap;
+
+} // namespace __scudo
+
+#endif // SCUDO_PLATFORM_H_
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_platform.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_termination.cpp
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_termination.cpp (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_termination.cpp (revision 351984)
@@ -0,0 +1,41 @@
+//===-- scudo_termination.cpp -----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// This file contains bare-bones termination functions to replace the
+/// __sanitizer ones, in order to avoid any potential abuse of the callback
+/// functionality.
+///
+//===----------------------------------------------------------------------===//
+
+#include "scudo_utils.h"
+
+#include "sanitizer_common/sanitizer_common.h"
+
+namespace __sanitizer {
+
+bool AddDieCallback(DieCallbackType Callback) { return true; }
+
+bool RemoveDieCallback(DieCallbackType Callback) { return true; }
+
+void SetUserDieCallback(DieCallbackType Callback) {}
+
+void NORETURN Die() {
+ if (common_flags()->abort_on_error)
+ Abort();
+ internal__exit(common_flags()->exitcode);
+}
+
+void SetCheckFailedCallback(CheckFailedCallbackType callback) {}
+
+void NORETURN CheckFailed(const char *File, int Line, const char *Condition,
+ u64 Value1, u64 Value2) {
+ __scudo::dieWithMessage("CHECK failed at %s:%d %s (%lld, %lld)\n",
+ File, Line, Condition, Value1, Value2);
+}
+
+} // namespace __sanitizer
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_termination.cpp
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_tsd.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_tsd.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_tsd.h (revision 351984)
@@ -0,0 +1,65 @@
+//===-- scudo_tsd.h ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Scudo thread specific data definition.
+/// Implementation will differ based on the thread local storage primitives
+/// offered by the underlying platform.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_TSD_H_
+#define SCUDO_TSD_H_
+
+#include "scudo_allocator.h"
+#include "scudo_utils.h"
+
+#include <pthread.h>
+
+namespace __scudo {
+
+struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) ScudoTSD {
+ AllocatorCacheT Cache;
+ uptr QuarantineCachePlaceHolder[4];
+
+ void init();
+ void commitBack();
+
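+ // Comment not in the upstream source: Precedence records roughly when this
+ // TSD was first found contended; getTSDAndLockSlow prefers the candidate
+ // with the lowest non-zero value, i.e. the one waited on the longest.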
+ INLINE bool tryLock() {
+ if (Mutex.TryLock()) {
+ atomic_store_relaxed(&Precedence, 0);
+ return true;
+ }
+ if (atomic_load_relaxed(&Precedence) == 0)
+ atomic_store_relaxed(&Precedence, static_cast<uptr>(
+ MonotonicNanoTime() >> FIRST_32_SECOND_64(16, 0)));
+ return false;
+ }
+
+ INLINE void lock() {
+ atomic_store_relaxed(&Precedence, 0);
+ Mutex.Lock();
+ }
+
+ INLINE void unlock() { Mutex.Unlock(); }
+
+ INLINE uptr getPrecedence() { return atomic_load_relaxed(&Precedence); }
+
+ private:
+ StaticSpinMutex Mutex;
+ atomic_uintptr_t Precedence;
+};
+
+void initThread(bool MinimalInit);
+
+// TSD model specific fastpath functions definitions.
+#include "scudo_tsd_exclusive.inc"
+#include "scudo_tsd_shared.inc"
+
+} // namespace __scudo
+
+#endif // SCUDO_TSD_H_
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_tsd.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_tsd_exclusive.cpp
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_tsd_exclusive.cpp (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_tsd_exclusive.cpp (revision 351984)
@@ -0,0 +1,67 @@
+//===-- scudo_tsd_exclusive.cpp ---------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Scudo exclusive TSD implementation.
+///
+//===----------------------------------------------------------------------===//
+
+#include "scudo_tsd.h"
+
+#if SCUDO_TSD_EXCLUSIVE
+
+namespace __scudo {
+
+static pthread_once_t GlobalInitialized = PTHREAD_ONCE_INIT;
+static pthread_key_t PThreadKey;
+
+__attribute__((tls_model("initial-exec")))
+THREADLOCAL ThreadState ScudoThreadState = ThreadNotInitialized;
+__attribute__((tls_model("initial-exec")))
+THREADLOCAL ScudoTSD TSD;
+
+// Fallback TSD for when the thread isn't initialized yet or is torn down. It
+// can be shared between multiple threads and as such must be locked.
+ScudoTSD FallbackTSD;
+
+static void teardownThread(void *Ptr) {
+ uptr I = reinterpret_cast<uptr>(Ptr);
+ // The glibc POSIX thread-local-storage deallocation routine calls
+ // user-provided destructors in a loop of up to PTHREAD_DESTRUCTOR_ITERATIONS
+ // rounds. We want to be called last since other destructors might call free
+ // and the like, so we wait until the last iteration before draining the
+ // quarantine and releasing the cache.
+ if (I > 1) {
+ // If pthread_setspecific fails, we will go ahead with the teardown.
+ if (LIKELY(pthread_setspecific(PThreadKey,
+ reinterpret_cast<void *>(I - 1)) == 0))
+ return;
+ }
+ TSD.commitBack();
+ ScudoThreadState = ThreadTornDown;
+}
+
+static void initOnce() {
+ CHECK_EQ(pthread_key_create(&PThreadKey, teardownThread), 0);
+ initScudo();
+ FallbackTSD.init();
+}
+
+void initThread(bool MinimalInit) {
+ CHECK_EQ(pthread_once(&GlobalInitialized, initOnce), 0);
+ if (UNLIKELY(MinimalInit))
+ return;
+ CHECK_EQ(pthread_setspecific(PThreadKey, reinterpret_cast<void *>(
+ GetPthreadDestructorIterations())), 0);
+ TSD.init();
+ ScudoThreadState = ThreadInitialized;
+}
+
+} // namespace __scudo
+
+#endif // SCUDO_TSD_EXCLUSIVE
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_tsd_exclusive.cpp
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_tsd_exclusive.inc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_tsd_exclusive.inc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_tsd_exclusive.inc (revision 351984)
@@ -0,0 +1,47 @@
+//===-- scudo_tsd_exclusive.inc ---------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Scudo exclusive TSD fastpath functions implementation.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_TSD_H_
+# error "This file must be included inside scudo_tsd.h."
+#endif // SCUDO_TSD_H_
+
+#if SCUDO_TSD_EXCLUSIVE
+
+enum ThreadState : u8 {
+ ThreadNotInitialized = 0,
+ ThreadInitialized,
+ ThreadTornDown,
+};
+__attribute__((tls_model("initial-exec")))
+extern THREADLOCAL ThreadState ScudoThreadState;
+__attribute__((tls_model("initial-exec")))
+extern THREADLOCAL ScudoTSD TSD;
+
+extern ScudoTSD FallbackTSD;
+
+ALWAYS_INLINE void initThreadMaybe(bool MinimalInit = false) {
+ if (LIKELY(ScudoThreadState != ThreadNotInitialized))
+ return;
+ initThread(MinimalInit);
+}
+
+ALWAYS_INLINE ScudoTSD *getTSDAndLock(bool *UnlockRequired) {
+ if (UNLIKELY(ScudoThreadState != ThreadInitialized)) {
+ FallbackTSD.lock();
+ *UnlockRequired = true;
+ return &FallbackTSD;
+ }
+ *UnlockRequired = false;
+ return &TSD;
+}
+
+#endif // SCUDO_TSD_EXCLUSIVE
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_tsd_exclusive.inc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_tsd_shared.cpp
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_tsd_shared.cpp (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_tsd_shared.cpp (revision 351984)
@@ -0,0 +1,107 @@
+//===-- scudo_tsd_shared.cpp ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Scudo shared TSD implementation.
+///
+//===----------------------------------------------------------------------===//
+
+#include "scudo_tsd.h"
+
+#if !SCUDO_TSD_EXCLUSIVE
+
+namespace __scudo {
+
+static pthread_once_t GlobalInitialized = PTHREAD_ONCE_INIT;
+pthread_key_t PThreadKey;
+
+static atomic_uint32_t CurrentIndex;
+static ScudoTSD *TSDs;
+static u32 NumberOfTSDs;
+static u32 CoPrimes[SCUDO_SHARED_TSD_POOL_SIZE];
+static u32 NumberOfCoPrimes = 0;
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+__attribute__((tls_model("initial-exec")))
+THREADLOCAL ScudoTSD *CurrentTSD;
+#endif
+
+static void initOnce() {
+ CHECK_EQ(pthread_key_create(&PThreadKey, NULL), 0);
+ initScudo();
+ NumberOfTSDs = Min(Max(1U, GetNumberOfCPUsCached()),
+ static_cast<u32>(SCUDO_SHARED_TSD_POOL_SIZE));
+ TSDs = reinterpret_cast<ScudoTSD *>(
+ MmapOrDie(sizeof(ScudoTSD) * NumberOfTSDs, "ScudoTSDs"));
+ for (u32 I = 0; I < NumberOfTSDs; I++) {
+ TSDs[I].init();
+ u32 A = I + 1;
+ u32 B = NumberOfTSDs;
+ while (B != 0) { const u32 T = A; A = B; B = T % B; }
+ if (A == 1)
+ CoPrimes[NumberOfCoPrimes++] = I + 1;
+ }
+}
+
+ALWAYS_INLINE void setCurrentTSD(ScudoTSD *TSD) {
+#if SANITIZER_ANDROID
+ *get_android_tls_ptr() = reinterpret_cast<uptr>(TSD);
+#elif SANITIZER_LINUX
+ CurrentTSD = TSD;
+#else
+ CHECK_EQ(pthread_setspecific(PThreadKey, reinterpret_cast<void *>(TSD)), 0);
+#endif // SANITIZER_ANDROID
+}
+
+void initThread(bool MinimalInit) {
+ pthread_once(&GlobalInitialized, initOnce);
+ // Initial context assignment is done in a plain round-robin fashion.
+ u32 Index = atomic_fetch_add(&CurrentIndex, 1, memory_order_relaxed);
+ setCurrentTSD(&TSDs[Index % NumberOfTSDs]);
+}
+
+ScudoTSD *getTSDAndLockSlow(ScudoTSD *TSD) {
+ if (NumberOfTSDs > 1) {
+    // Use the Precedence of the current TSD as our random seed. Since we are
+    // in the slow path, tryLock failed, so it's very likely that said
+    // Precedence is non-zero.
+ u32 RandState = static_cast<u32>(TSD->getPrecedence());
+ const u32 R = Rand(&RandState);
+ const u32 Inc = CoPrimes[R % NumberOfCoPrimes];
+ u32 Index = R % NumberOfTSDs;
+ uptr LowestPrecedence = UINTPTR_MAX;
+ ScudoTSD *CandidateTSD = nullptr;
+ // Go randomly through at most 4 contexts and find a candidate.
+ for (u32 I = 0; I < Min(4U, NumberOfTSDs); I++) {
+ if (TSDs[Index].tryLock()) {
+ setCurrentTSD(&TSDs[Index]);
+ return &TSDs[Index];
+ }
+ const uptr Precedence = TSDs[Index].getPrecedence();
+ // A 0 precedence here means another thread just locked this TSD.
+ if (Precedence && Precedence < LowestPrecedence) {
+ CandidateTSD = &TSDs[Index];
+ LowestPrecedence = Precedence;
+ }
+ Index += Inc;
+ if (Index >= NumberOfTSDs)
+ Index -= NumberOfTSDs;
+ }
+ if (CandidateTSD) {
+ CandidateTSD->lock();
+ setCurrentTSD(CandidateTSD);
+ return CandidateTSD;
+ }
+ }
+ // Last resort, stick with the current one.
+ TSD->lock();
+ return TSD;
+}
+
+} // namespace __scudo
+
+#endif // !SCUDO_TSD_EXCLUSIVE
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_tsd_shared.cpp
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
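The co-primes gathered in initOnce() are what make the scan in getTSDAndLockSlow() sound: stepping through a pool of size N with an increment co-prime to N cycles through all N slots before repeating. A standalone sketch of the property (values are illustrative, not from the runtime):

  #include <cstdio>

  int main() {
    const unsigned N = 6;   // Stands in for NumberOfTSDs.
    const unsigned Inc = 5; // gcd(5, 6) == 1, as computed in initOnce().
    unsigned Index = 2;     // Arbitrary starting slot.
    for (unsigned I = 0; I < N; I++) {
      printf("%u ", Index); // Prints 2 1 0 5 4 3: each slot exactly once.
      Index += Inc;
      if (Index >= N)
        Index -= N;
    }
    return 0;
  }

The runtime only probes Min(4, NumberOfTSDs) contexts per slow-path call, but the co-prime stride keeps those probes spread evenly across the pool.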
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_tsd_shared.inc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_tsd_shared.inc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_tsd_shared.inc (revision 351984)
@@ -0,0 +1,55 @@
+//===-- scudo_tsd_shared.inc ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Scudo shared TSD fastpath functions implementation.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_TSD_H_
+# error "This file must be included inside scudo_tsd.h."
+#endif // SCUDO_TSD_H_
+
+#if !SCUDO_TSD_EXCLUSIVE
+
+extern pthread_key_t PThreadKey;
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+__attribute__((tls_model("initial-exec")))
+extern THREADLOCAL ScudoTSD *CurrentTSD;
+#endif
+
+ALWAYS_INLINE ScudoTSD* getCurrentTSD() {
+#if SANITIZER_ANDROID
+ return reinterpret_cast<ScudoTSD *>(*get_android_tls_ptr());
+#elif SANITIZER_LINUX
+ return CurrentTSD;
+#else
+ return reinterpret_cast<ScudoTSD *>(pthread_getspecific(PThreadKey));
+#endif // SANITIZER_ANDROID
+}
+
+ALWAYS_INLINE void initThreadMaybe(bool MinimalInit = false) {
+ if (LIKELY(getCurrentTSD()))
+ return;
+ initThread(MinimalInit);
+}
+
+ScudoTSD *getTSDAndLockSlow(ScudoTSD *TSD);
+
+ALWAYS_INLINE ScudoTSD *getTSDAndLock(bool *UnlockRequired) {
+ ScudoTSD *TSD = getCurrentTSD();
+ DCHECK(TSD && "No TSD associated with the current thread!");
+ *UnlockRequired = true;
+ // Try to lock the currently associated context.
+ if (TSD->tryLock())
+ return TSD;
+ // If it failed, go the slow path.
+ return getTSDAndLockSlow(TSD);
+}
+
+#endif // !SCUDO_TSD_EXCLUSIVE
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_tsd_shared.inc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_utils.cpp
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_utils.cpp (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_utils.cpp (revision 351984)
@@ -0,0 +1,134 @@
+//===-- scudo_utils.cpp -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Platform specific utility functions.
+///
+//===----------------------------------------------------------------------===//
+
+#include "scudo_utils.h"
+
+#if defined(__x86_64__) || defined(__i386__)
+# include <cpuid.h>
+#elif defined(__arm__) || defined(__aarch64__)
+# include "sanitizer_common/sanitizer_getauxval.h"
+# if SANITIZER_FUCHSIA
+# include <zircon/syscalls.h>
+# include <zircon/features.h>
+# elif SANITIZER_POSIX
+# include "sanitizer_common/sanitizer_posix.h"
+# include <fcntl.h>
+# endif
+#endif
+
+#include <stdarg.h>
+
+// TODO(kostyak): remove __sanitizer *Printf uses in favor of our own less
+//                complicated string formatting code. The following is a
+//                temporary workaround to be able to use __sanitizer::VSNPrintf.
+namespace __sanitizer {
+
+extern int VSNPrintf(char *buff, int buff_length, const char *format,
+ va_list args);
+
+} // namespace __sanitizer
+
+namespace __scudo {
+
+FORMAT(1, 2) void NORETURN dieWithMessage(const char *Format, ...) {
+ static const char ScudoError[] = "Scudo ERROR: ";
+ static constexpr uptr PrefixSize = sizeof(ScudoError) - 1;
+  // Our messages are tiny; 256 characters is more than enough.
+ char Message[256];
+ va_list Args;
+ va_start(Args, Format);
+ internal_memcpy(Message, ScudoError, PrefixSize);
+ VSNPrintf(Message + PrefixSize, sizeof(Message) - PrefixSize, Format, Args);
+ va_end(Args);
+ LogMessageOnPrintf(Message);
+ if (common_flags()->abort_on_error)
+ SetAbortMessage(Message);
+ RawWrite(Message);
+ Die();
+}
+
+#if defined(__x86_64__) || defined(__i386__)
+// i386 and x86_64 specific code to detect CRC32 hardware support via CPUID.
+// CRC32 requires the SSE 4.2 instruction set.
+# ifndef bit_SSE4_2
+# define bit_SSE4_2 bit_SSE42 // clang and gcc have different defines.
+# endif
+bool hasHardwareCRC32() {
+ u32 Eax, Ebx, Ecx, Edx;
+ __get_cpuid(0, &Eax, &Ebx, &Ecx, &Edx);
+ const bool IsIntel = (Ebx == signature_INTEL_ebx) &&
+ (Edx == signature_INTEL_edx) &&
+ (Ecx == signature_INTEL_ecx);
+ const bool IsAMD = (Ebx == signature_AMD_ebx) &&
+ (Edx == signature_AMD_edx) &&
+ (Ecx == signature_AMD_ecx);
+ if (!IsIntel && !IsAMD)
+ return false;
+ __get_cpuid(1, &Eax, &Ebx, &Ecx, &Edx);
+ return !!(Ecx & bit_SSE4_2);
+}
+#elif defined(__arm__) || defined(__aarch64__)
+// For ARM and AArch64, hardware CRC32 support is indicated in the AT_HWCAP
+// auxiliary vector.
+# ifndef AT_HWCAP
+# define AT_HWCAP 16
+# endif
+# ifndef HWCAP_CRC32
+# define HWCAP_CRC32 (1 << 7) // HWCAP_CRC32 is missing on older platforms.
+# endif
+# if SANITIZER_POSIX
+bool hasHardwareCRC32ARMPosix() {
+ uptr F = internal_open("/proc/self/auxv", O_RDONLY);
+ if (internal_iserror(F))
+ return false;
+ struct { uptr Tag; uptr Value; } Entry = { 0, 0 };
+ for (;;) {
+ uptr N = internal_read(F, &Entry, sizeof(Entry));
+ if (internal_iserror(N) || N != sizeof(Entry) ||
+ (Entry.Tag == 0 && Entry.Value == 0) || Entry.Tag == AT_HWCAP)
+ break;
+ }
+ internal_close(F);
+ return (Entry.Tag == AT_HWCAP && (Entry.Value & HWCAP_CRC32) != 0);
+}
+# else
+bool hasHardwareCRC32ARMPosix() { return false; }
+# endif // SANITIZER_POSIX
+
+// Bionic doesn't initialize its globals early enough. This causes issues when
+// trying to access them from a preinit_array (b/25751302) or from another
+// constructor called before the libc one (b/68046352). __progname is
+// initialized after the other globals, so we can check its value to know if
+// calling getauxval is safe.
+extern "C" SANITIZER_WEAK_ATTRIBUTE char *__progname;
+INLINE bool areBionicGlobalsInitialized() {
+ return !SANITIZER_ANDROID || (&__progname && __progname);
+}
+
+bool hasHardwareCRC32() {
+#if SANITIZER_FUCHSIA
+ u32 HWCap;
+ zx_status_t Status = zx_system_get_features(ZX_FEATURE_KIND_CPU, &HWCap);
+ if (Status != ZX_OK || (HWCap & ZX_ARM64_FEATURE_ISA_CRC32) == 0)
+ return false;
+ return true;
+#else
+ if (&getauxval && areBionicGlobalsInitialized())
+ return !!(getauxval(AT_HWCAP) & HWCAP_CRC32);
+ return hasHardwareCRC32ARMPosix();
+#endif // SANITIZER_FUCHSIA
+}
+#else
+bool hasHardwareCRC32() { return false; }
+#endif // defined(__x86_64__) || defined(__i386__)
+
+} // namespace __scudo
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_utils.cpp
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
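dieWithMessage() is the allocator's fatal-error primitive: it prefixes the message with "Scudo ERROR: ", formats it, logs it, and aborts. An illustrative call site (the check and the constant are hypothetical):

  if (UNLIKELY(Size > MaxAllowedMallocSize))
    dieWithMessage("invalid allocation size: %zu\n", Size);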
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_utils.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_utils.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_utils.h (revision 351984)
@@ -0,0 +1,36 @@
+//===-- scudo_utils.h -------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Header for scudo_utils.cpp.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_UTILS_H_
+#define SCUDO_UTILS_H_
+
+#include "sanitizer_common/sanitizer_common.h"
+
+#include <string.h>
+
+namespace __scudo {
+
+template <class Dest, class Source>
+INLINE Dest bit_cast(const Source& source) {
+ static_assert(sizeof(Dest) == sizeof(Source), "Sizes are not equal!");
+ Dest dest;
+ memcpy(&dest, &source, sizeof(dest));
+ return dest;
+}
+
+void NORETURN dieWithMessage(const char *Format, ...);
+
+bool hasHardwareCRC32();
+
+} // namespace __scudo
+
+#endif // SCUDO_UTILS_H_
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/scudo_utils.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
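bit_cast replaces type-punning pointer casts with a memcpy that the compiler folds away, keeping the reinterpretation free of undefined behavior. A quick sketch:

  double D = 1.0;
  u64 Bits = bit_cast<u64>(D); // 0x3ff0000000000000: same object representation.
  // bit_cast<u32>(D) would fail the static_assert: sizes must match exactly.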
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/allocator_config.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/allocator_config.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/allocator_config.h (revision 351984)
@@ -0,0 +1,80 @@
+//===-- allocator_config.h --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_ALLOCATOR_CONFIG_H_
+#define SCUDO_ALLOCATOR_CONFIG_H_
+
+#include "combined.h"
+#include "common.h"
+#include "flags.h"
+#include "primary32.h"
+#include "primary64.h"
+#include "size_class_map.h"
+#include "tsd_exclusive.h"
+#include "tsd_shared.h"
+
+namespace scudo {
+
+// Default configurations for various platforms.
+
+struct DefaultConfig {
+ using SizeClassMap = DefaultSizeClassMap;
+#if SCUDO_CAN_USE_PRIMARY64
+ // 1GB Regions
+ typedef SizeClassAllocator64<SizeClassMap, 30U> Primary;
+#else
+ // 512KB regions
+ typedef SizeClassAllocator32<SizeClassMap, 19U> Primary;
+#endif
+ template <class A> using TSDRegistryT = TSDRegistryExT<A>; // Exclusive
+};
+
+struct AndroidConfig {
+ using SizeClassMap = AndroidSizeClassMap;
+#if SCUDO_CAN_USE_PRIMARY64
+ // 1GB regions
+ typedef SizeClassAllocator64<SizeClassMap, 30U> Primary;
+#else
+ // 512KB regions
+ typedef SizeClassAllocator32<SizeClassMap, 19U> Primary;
+#endif
+ template <class A>
+ using TSDRegistryT = TSDRegistrySharedT<A, 2U>; // Shared, max 2 TSDs.
+};
+
+struct AndroidSvelteConfig {
+ using SizeClassMap = SvelteSizeClassMap;
+#if SCUDO_CAN_USE_PRIMARY64
+ // 512MB regions
+ typedef SizeClassAllocator64<SizeClassMap, 29U> Primary;
+#else
+ // 256KB regions
+ typedef SizeClassAllocator32<SizeClassMap, 18U> Primary;
+#endif
+ template <class A>
+ using TSDRegistryT = TSDRegistrySharedT<A, 1U>; // Shared, only 1 TSD.
+};
+
+struct FuchsiaConfig {
+ // 1GB Regions
+ typedef SizeClassAllocator64<DefaultSizeClassMap, 30U> Primary;
+ template <class A>
+ using TSDRegistryT = TSDRegistrySharedT<A, 8U>; // Shared, max 8 TSDs.
+};
+
+#if SCUDO_ANDROID
+typedef AndroidConfig Config;
+#elif SCUDO_FUCHSIA
+typedef FuchsiaConfig Config;
+#else
+typedef DefaultConfig Config;
+#endif
+
+} // namespace scudo
+
+#endif // SCUDO_ALLOCATOR_CONFIG_H_
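A config is just a traits struct, so a port can define its own by supplying the same three members. A hypothetical variant following the pattern above:

  struct MyConfig {
    using SizeClassMap = DefaultSizeClassMap;
    // 256MB regions (2^28); assumes a 64-bit target with Primary64 available.
    typedef SizeClassAllocator64<SizeClassMap, 28U> Primary;
    template <class A>
    using TSDRegistryT = TSDRegistrySharedT<A, 4U>; // Shared, max 4 TSDs.
  };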
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/atomic_helpers.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/atomic_helpers.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/atomic_helpers.h (revision 351984)
@@ -0,0 +1,139 @@
+//===-- atomic_helpers.h ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_ATOMIC_H_
+#define SCUDO_ATOMIC_H_
+
+#include "internal_defs.h"
+
+namespace scudo {
+
+enum memory_order {
+ memory_order_relaxed = 0,
+ memory_order_consume = 1,
+ memory_order_acquire = 2,
+ memory_order_release = 3,
+ memory_order_acq_rel = 4,
+ memory_order_seq_cst = 5
+};
+COMPILER_CHECK(memory_order_relaxed == __ATOMIC_RELAXED);
+COMPILER_CHECK(memory_order_consume == __ATOMIC_CONSUME);
+COMPILER_CHECK(memory_order_acquire == __ATOMIC_ACQUIRE);
+COMPILER_CHECK(memory_order_release == __ATOMIC_RELEASE);
+COMPILER_CHECK(memory_order_acq_rel == __ATOMIC_ACQ_REL);
+COMPILER_CHECK(memory_order_seq_cst == __ATOMIC_SEQ_CST);
+
+struct atomic_u8 {
+ typedef u8 Type;
+ volatile Type ValDoNotUse;
+};
+
+struct atomic_u16 {
+ typedef u16 Type;
+ volatile Type ValDoNotUse;
+};
+
+struct atomic_s32 {
+ typedef s32 Type;
+ volatile Type ValDoNotUse;
+};
+
+struct atomic_u32 {
+ typedef u32 Type;
+ volatile Type ValDoNotUse;
+};
+
+struct atomic_u64 {
+ typedef u64 Type;
+ // On 32-bit platforms u64 is not necessarily aligned on 8 bytes.
+ ALIGNED(8) volatile Type ValDoNotUse;
+};
+
+struct atomic_uptr {
+ typedef uptr Type;
+ volatile Type ValDoNotUse;
+};
+
+template <typename T>
+INLINE typename T::Type atomic_load(const volatile T *A, memory_order MO) {
+ DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
+ typename T::Type V;
+ __atomic_load(&A->ValDoNotUse, &V, MO);
+ return V;
+}
+
+template <typename T>
+INLINE void atomic_store(volatile T *A, typename T::Type V, memory_order MO) {
+ DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
+ __atomic_store(&A->ValDoNotUse, &V, MO);
+}
+
+INLINE void atomic_thread_fence(memory_order) { __sync_synchronize(); }
+
+template <typename T>
+INLINE typename T::Type atomic_fetch_add(volatile T *A, typename T::Type V,
+ memory_order MO) {
+ DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
+ return __atomic_fetch_add(&A->ValDoNotUse, V, MO);
+}
+
+template <typename T>
+INLINE typename T::Type atomic_fetch_sub(volatile T *A, typename T::Type V,
+ memory_order MO) {
+ DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
+ return __atomic_fetch_sub(&A->ValDoNotUse, V, MO);
+}
+
+template <typename T>
+INLINE typename T::Type atomic_exchange(volatile T *A, typename T::Type V,
+ memory_order MO) {
+ DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
+ typename T::Type R;
+ __atomic_exchange(&A->ValDoNotUse, &V, &R, MO);
+ return R;
+}
+
+template <typename T>
+INLINE bool atomic_compare_exchange_strong(volatile T *A, typename T::Type *Cmp,
+ typename T::Type Xchg,
+ memory_order MO) {
+ return __atomic_compare_exchange(&A->ValDoNotUse, Cmp, &Xchg, false, MO,
+ __ATOMIC_RELAXED);
+}
+
+template <typename T>
+INLINE bool atomic_compare_exchange_weak(volatile T *A, typename T::Type *Cmp,
+ typename T::Type Xchg,
+ memory_order MO) {
+ return __atomic_compare_exchange(&A->ValDoNotUse, Cmp, &Xchg, true, MO,
+ __ATOMIC_RELAXED);
+}
+
+// Clutter-reducing helpers.
+
+template <typename T>
+INLINE typename T::Type atomic_load_relaxed(const volatile T *A) {
+ return atomic_load(A, memory_order_relaxed);
+}
+
+template <typename T>
+INLINE void atomic_store_relaxed(volatile T *A, typename T::Type V) {
+ atomic_store(A, V, memory_order_relaxed);
+}
+
+template <typename T>
+INLINE typename T::Type atomic_compare_exchange(volatile T *A,
+ typename T::Type Cmp,
+ typename T::Type Xchg) {
+ atomic_compare_exchange_strong(A, &Cmp, Xchg, memory_order_acquire);
+ return Cmp;
+}
+
+} // namespace scudo
+
+#endif // SCUDO_ATOMIC_H_
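These helpers map one-to-one onto the __atomic builtins and are used like their sanitizer_common counterparts. A brief sketch:

  static atomic_u32 Counter;

  u32 bumpCounter() {
    // Returns the pre-increment value, per __atomic_fetch_add semantics.
    return atomic_fetch_add(&Counter, 1U, memory_order_relaxed);
  }

  bool claimOnce(atomic_u8 *Flag) {
    u8 Expected = 0;
    // Strong CAS: exactly one caller transitions the flag from 0 to 1.
    return atomic_compare_exchange_strong(Flag, &Expected, 1, memory_order_acquire);
  }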
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/bytemap.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/bytemap.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/bytemap.h (revision 351984)
@@ -0,0 +1,111 @@
+//===-- bytemap.h -----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_BYTEMAP_H_
+#define SCUDO_BYTEMAP_H_
+
+#include "atomic_helpers.h"
+#include "common.h"
+#include "mutex.h"
+
+namespace scudo {
+
+template <uptr Size> class FlatByteMap {
+public:
+ void initLinkerInitialized() {
+ Map = reinterpret_cast<u8 *>(map(nullptr, Size, "scudo:bytemap"));
+ }
+ void init() { initLinkerInitialized(); }
+
+ void unmapTestOnly() { unmap(reinterpret_cast<void *>(Map), Size); }
+
+ void set(uptr Index, u8 Value) {
+ DCHECK_LT(Index, Size);
+ DCHECK_EQ(0U, Map[Index]);
+ Map[Index] = Value;
+ }
+ u8 operator[](uptr Index) {
+ DCHECK_LT(Index, Size);
+ return Map[Index];
+ }
+
+private:
+ u8 *Map;
+};
+
+template <uptr Level1Size, uptr Level2Size> class TwoLevelByteMap {
+public:
+ void initLinkerInitialized() {
+ Level1Map = reinterpret_cast<atomic_uptr *>(
+ map(nullptr, sizeof(atomic_uptr) * Level1Size, "scudo:bytemap"));
+ }
+ void init() {
+ Mutex.init();
+ initLinkerInitialized();
+ }
+
+ void reset() {
+ for (uptr I = 0; I < Level1Size; I++) {
+ u8 *P = get(I);
+ if (!P)
+ continue;
+ unmap(P, Level2Size);
+ }
+ memset(Level1Map, 0, sizeof(atomic_uptr) * Level1Size);
+ }
+
+ void unmapTestOnly() {
+ reset();
+ unmap(reinterpret_cast<void *>(Level1Map),
+ sizeof(atomic_uptr) * Level1Size);
+ }
+
+ uptr size() const { return Level1Size * Level2Size; }
+
+ void set(uptr Index, u8 Value) {
+ DCHECK_LT(Index, Level1Size * Level2Size);
+ u8 *Level2Map = getOrCreate(Index / Level2Size);
+ DCHECK_EQ(0U, Level2Map[Index % Level2Size]);
+ Level2Map[Index % Level2Size] = Value;
+ }
+
+ u8 operator[](uptr Index) const {
+ DCHECK_LT(Index, Level1Size * Level2Size);
+ u8 *Level2Map = get(Index / Level2Size);
+ if (!Level2Map)
+ return 0;
+ return Level2Map[Index % Level2Size];
+ }
+
+private:
+ u8 *get(uptr Index) const {
+ DCHECK_LT(Index, Level1Size);
+ return reinterpret_cast<u8 *>(
+ atomic_load(&Level1Map[Index], memory_order_acquire));
+ }
+
+ u8 *getOrCreate(uptr Index) {
+ u8 *Res = get(Index);
+ if (!Res) {
+ ScopedLock L(Mutex);
+ if (!(Res = get(Index))) {
+ Res = reinterpret_cast<u8 *>(map(nullptr, Level2Size, "scudo:bytemap"));
+ atomic_store(&Level1Map[Index], reinterpret_cast<uptr>(Res),
+ memory_order_release);
+ }
+ }
+ return Res;
+ }
+
+ atomic_uptr *Level1Map;
+ HybridMutex Mutex;
+};
+
+} // namespace scudo
+
+#endif // SCUDO_BYTEMAP_H_
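The two-level map commits level-2 regions lazily, which keeps sparse index spaces cheap; reads of untouched ranges simply return 0. A usage sketch with illustrative sizes:

  TwoLevelByteMap<128, 4096> Map; // Covers 128 * 4096 indices.
  Map.init();
  Map.set(70000, 42); // Maps the owning level-2 region on demand.
  const u8 V = Map[70000]; // 42.
  const u8 Zero = Map[5];  // 0: that level-2 region was never mapped.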
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/checksum.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/checksum.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/checksum.cc (revision 351984)
@@ -0,0 +1,70 @@
+//===-- checksum.cc ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "checksum.h"
+#include "atomic_helpers.h"
+
+#if defined(__x86_64__) || defined(__i386__)
+#include <cpuid.h>
+#elif defined(__arm__) || defined(__aarch64__)
+#if SCUDO_FUCHSIA
+#include <zircon/features.h>
+#include <zircon/syscalls.h>
+#else
+#include <sys/auxv.h>
+#endif
+#endif
+
+namespace scudo {
+
+Checksum HashAlgorithm = {Checksum::BSD};
+
+#if defined(__x86_64__) || defined(__i386__)
+// i386 and x86_64 specific code to detect CRC32 hardware support via CPUID.
+// CRC32 requires the SSE 4.2 instruction set.
+#ifndef bit_SSE4_2
+#define bit_SSE4_2 bit_SSE42 // clang and gcc have different defines.
+#endif
+
+bool hasHardwareCRC32() {
+ u32 Eax, Ebx = 0, Ecx = 0, Edx = 0;
+ __get_cpuid(0, &Eax, &Ebx, &Ecx, &Edx);
+ const bool IsIntel = (Ebx == signature_INTEL_ebx) &&
+ (Edx == signature_INTEL_edx) &&
+ (Ecx == signature_INTEL_ecx);
+ const bool IsAMD = (Ebx == signature_AMD_ebx) && (Edx == signature_AMD_edx) &&
+ (Ecx == signature_AMD_ecx);
+ if (!IsIntel && !IsAMD)
+ return false;
+ __get_cpuid(1, &Eax, &Ebx, &Ecx, &Edx);
+ return !!(Ecx & bit_SSE4_2);
+}
+
+#elif defined(__arm__) || defined(__aarch64__)
+#ifndef AT_HWCAP
+#define AT_HWCAP 16
+#endif
+#ifndef HWCAP_CRC32
+#define HWCAP_CRC32 (1U << 7) // HWCAP_CRC32 is missing on older platforms.
+#endif
+
+bool hasHardwareCRC32() {
+#if SCUDO_FUCHSIA
+ u32 HWCap;
+ const zx_status_t Status =
+ zx_system_get_features(ZX_FEATURE_KIND_CPU, &HWCap);
+ if (Status != ZX_OK)
+ return false;
+ return !!(HWCap & ZX_ARM64_FEATURE_ISA_CRC32);
+#else
+ return !!(getauxval(AT_HWCAP) & HWCAP_CRC32);
+#endif // SCUDO_FUCHSIA
+}
+#endif // defined(__x86_64__) || defined(__i386__)
+
+} // namespace scudo
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/checksum.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/checksum.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/checksum.h (revision 351984)
@@ -0,0 +1,54 @@
+//===-- checksum.h ----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_CHECKSUM_H_
+#define SCUDO_CHECKSUM_H_
+
+#include "internal_defs.h"
+
+// Hardware CRC32 is supported at compilation via the following:
+// - for i386 & x86_64: -msse4.2
+// - for ARM & AArch64: -march=armv8-a+crc or -mcrc
+// An additional check must be performed at runtime as well to make sure the
+// emitted instructions are valid on the target host.
+
+#ifdef __SSE4_2__
+#include <smmintrin.h>
+#define CRC32_INTRINSIC FIRST_32_SECOND_64(_mm_crc32_u32, _mm_crc32_u64)
+#endif
+#ifdef __ARM_FEATURE_CRC32
+#include <arm_acle.h>
+#define CRC32_INTRINSIC FIRST_32_SECOND_64(__crc32cw, __crc32cd)
+#endif
+
+namespace scudo {
+
+enum class Checksum : u8 {
+ BSD = 0,
+ HardwareCRC32 = 1,
+};
+
+// The BSD checksum, unlike a software CRC32, doesn't use any array lookup. On
+// platforms that do not support hardware CRC32, this saves significantly on
+// memory accesses, as well as the 1K CRC32 table. The checksum itself is
+// 16-bit, which is at odds with CRC32, but enough for our needs.
+INLINE u16 computeBSDChecksum(u16 Sum, uptr Data) {
+ for (u8 I = 0; I < sizeof(Data); I++) {
+ Sum = static_cast<u16>((Sum >> 1) | ((Sum & 1) << 15));
+ Sum = static_cast<u16>(Sum + (Data & 0xff));
+ Data >>= 8;
+ }
+ return Sum;
+}
+
+bool hasHardwareCRC32();
+WEAK u32 computeHardwareCRC32(u32 Crc, uptr Data);
+
+} // namespace scudo
+
+#endif // SCUDO_CHECKSUM_H_
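computeBSDChecksum consumes a word one byte at a time, rotating the 16-bit sum right by one before adding each byte. A worked example (64-bit target, so 8 iterations per word):

  u16 Sum = computeBSDChecksum(0, 0x0102); // Yields 0x0800.
  // Checksums over several words are chained by feeding the sum back in:
  Sum = computeBSDChecksum(Sum, 0xdeadbeef);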
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/chunk.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/chunk.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/chunk.h (revision 351984)
@@ -0,0 +1,156 @@
+//===-- chunk.h -------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_CHUNK_H_
+#define SCUDO_CHUNK_H_
+
+#include "platform.h"
+
+#include "atomic_helpers.h"
+#include "checksum.h"
+#include "common.h"
+#include "report.h"
+
+namespace scudo {
+
+extern Checksum HashAlgorithm;
+
+INLINE u16 computeChecksum(u32 Seed, uptr Value, uptr *Array, uptr ArraySize) {
+ // If the hardware CRC32 feature is defined here, it was enabled everywhere,
+ // as opposed to only for crc32_hw.cc. This means that other hardware specific
+ // instructions were likely emitted at other places, and as a result there is
+  // no reason not to use it here.
+#if defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
+ u32 Crc = static_cast<u32>(CRC32_INTRINSIC(Seed, Value));
+ for (uptr I = 0; I < ArraySize; I++)
+ Crc = static_cast<u32>(CRC32_INTRINSIC(Crc, Array[I]));
+ return static_cast<u16>((Crc & 0xffff) ^ (Crc >> 16));
+#else
+ if (HashAlgorithm == Checksum::HardwareCRC32) {
+ u32 Crc = computeHardwareCRC32(Seed, Value);
+ for (uptr I = 0; I < ArraySize; I++)
+ Crc = computeHardwareCRC32(Crc, Array[I]);
+ return static_cast<u16>((Crc & 0xffff) ^ (Crc >> 16));
+ } else {
+ u16 Checksum = computeBSDChecksum(static_cast<u16>(Seed & 0xffff), Value);
+ for (uptr I = 0; I < ArraySize; I++)
+ Checksum = computeBSDChecksum(Checksum, Array[I]);
+ return Checksum;
+ }
+#endif // defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
+}
+
+namespace Chunk {
+
+// Note that in an ideal world, `State` and `Origin` would be `enum class`, and
+// the associated `UnpackedHeader` fields would be of the respective enum class
+// types, but https://gcc.gnu.org/bugzilla/show_bug.cgi?id=61414 prevents this:
+// the compiler errors out, complaining that the number of bits is not enough.
+enum Origin : u8 {
+ Malloc = 0,
+ New = 1,
+ NewArray = 2,
+ Memalign = 3,
+};
+
+enum State : u8 { Available = 0, Allocated = 1, Quarantined = 2 };
+
+typedef u64 PackedHeader;
+// Update the 'Mask' constants to reflect changes in this structure.
+struct UnpackedHeader {
+ u64 Checksum : 16;
+ u64 ClassId : 8;
+ u64 SizeOrUnusedBytes : 20;
+ u8 State : 2;
+ u8 Origin : 2;
+ u64 Offset : 16;
+};
+typedef atomic_u64 AtomicPackedHeader;
+COMPILER_CHECK(sizeof(UnpackedHeader) == sizeof(PackedHeader));
+
+// Those constants are required to silence some -Werror=conversion errors when
+// assigning values to the related bitfield variables.
+constexpr uptr ChecksumMask = (1UL << 16) - 1;
+constexpr uptr ClassIdMask = (1UL << 8) - 1;
+constexpr uptr SizeOrUnusedBytesMask = (1UL << 20) - 1;
+constexpr uptr StateMask = (1UL << 2) - 1;
+constexpr uptr OriginMask = (1UL << 2) - 1;
+constexpr uptr OffsetMask = (1UL << 16) - 1;
+
+constexpr uptr getHeaderSize() {
+ return roundUpTo(sizeof(PackedHeader), 1U << SCUDO_MIN_ALIGNMENT_LOG);
+}
+
+INLINE AtomicPackedHeader *getAtomicHeader(void *Ptr) {
+ return reinterpret_cast<AtomicPackedHeader *>(reinterpret_cast<uptr>(Ptr) -
+ getHeaderSize());
+}
+
+INLINE
+const AtomicPackedHeader *getConstAtomicHeader(const void *Ptr) {
+ return reinterpret_cast<const AtomicPackedHeader *>(
+ reinterpret_cast<uptr>(Ptr) - getHeaderSize());
+}
+
+// We do not need a cryptographically strong hash for the checksum, but a CRC
+// type function that can alert us in the event a header is invalid or
+// corrupted. Ideally slightly better than a simple xor of all fields.
+static INLINE u16 computeHeaderChecksum(u32 Cookie, const void *Ptr,
+ UnpackedHeader *Header) {
+ UnpackedHeader ZeroChecksumHeader = *Header;
+ ZeroChecksumHeader.Checksum = 0;
+ uptr HeaderHolder[sizeof(UnpackedHeader) / sizeof(uptr)];
+ memcpy(&HeaderHolder, &ZeroChecksumHeader, sizeof(HeaderHolder));
+ return computeChecksum(Cookie, reinterpret_cast<uptr>(Ptr), HeaderHolder,
+ ARRAY_SIZE(HeaderHolder));
+}
+
+INLINE void storeHeader(u32 Cookie, void *Ptr,
+ UnpackedHeader *NewUnpackedHeader) {
+ NewUnpackedHeader->Checksum =
+ computeHeaderChecksum(Cookie, Ptr, NewUnpackedHeader);
+ PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
+ atomic_store_relaxed(getAtomicHeader(Ptr), NewPackedHeader);
+}
+
+INLINE
+void loadHeader(u32 Cookie, const void *Ptr,
+ UnpackedHeader *NewUnpackedHeader) {
+ PackedHeader NewPackedHeader = atomic_load_relaxed(getConstAtomicHeader(Ptr));
+ *NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
+ if (UNLIKELY(NewUnpackedHeader->Checksum !=
+ computeHeaderChecksum(Cookie, Ptr, NewUnpackedHeader)))
+ reportHeaderCorruption(const_cast<void *>(Ptr));
+}
+
+INLINE void compareExchangeHeader(u32 Cookie, void *Ptr,
+ UnpackedHeader *NewUnpackedHeader,
+ UnpackedHeader *OldUnpackedHeader) {
+ NewUnpackedHeader->Checksum =
+ computeHeaderChecksum(Cookie, Ptr, NewUnpackedHeader);
+ PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
+ PackedHeader OldPackedHeader = bit_cast<PackedHeader>(*OldUnpackedHeader);
+ if (UNLIKELY(!atomic_compare_exchange_strong(
+ getAtomicHeader(Ptr), &OldPackedHeader, NewPackedHeader,
+ memory_order_relaxed)))
+ reportHeaderRace(Ptr);
+}
+
+INLINE
+bool isValid(u32 Cookie, const void *Ptr, UnpackedHeader *NewUnpackedHeader) {
+ PackedHeader NewPackedHeader = atomic_load_relaxed(getConstAtomicHeader(Ptr));
+ *NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
+ return NewUnpackedHeader->Checksum ==
+ computeHeaderChecksum(Cookie, Ptr, NewUnpackedHeader);
+}
+
+} // namespace Chunk
+
+} // namespace scudo
+
+#endif // SCUDO_CHUNK_H_
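A header round trip through these primitives looks as follows (a sketch: Cookie, Ptr and ClassId come from the allocator, with Ptr pointing just past a header-sized reservation):

  Chunk::UnpackedHeader Header = {};
  Header.ClassId = ClassId & Chunk::ClassIdMask;
  Header.State = Chunk::State::Allocated;
  Chunk::storeHeader(Cookie, Ptr, &Header); // Checksums, packs, stores atomically.

  Chunk::UnpackedHeader Loaded;
  Chunk::loadHeader(Cookie, Ptr, &Loaded); // Reports corruption on bad checksum.

  Chunk::UnpackedHeader NewHeader = Loaded;
  NewHeader.State = Chunk::State::Quarantined;
  // Reports a race if another thread updated the header since the load.
  Chunk::compareExchangeHeader(Cookie, Ptr, &NewHeader, &Loaded);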
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/combined.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/combined.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/combined.h (revision 351984)
@@ -0,0 +1,557 @@
+//===-- combined.h ----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_COMBINED_H_
+#define SCUDO_COMBINED_H_
+
+#include "chunk.h"
+#include "common.h"
+#include "flags.h"
+#include "flags_parser.h"
+#include "interface.h"
+#include "local_cache.h"
+#include "quarantine.h"
+#include "report.h"
+#include "secondary.h"
+#include "tsd.h"
+
+namespace scudo {
+
+template <class Params> class Allocator {
+public:
+ using PrimaryT = typename Params::Primary;
+ using CacheT = typename PrimaryT::CacheT;
+ typedef Allocator<Params> ThisT;
+ typedef typename Params::template TSDRegistryT<ThisT> TSDRegistryT;
+
+ struct QuarantineCallback {
+ explicit QuarantineCallback(ThisT &Instance, CacheT &LocalCache)
+ : Allocator(Instance), Cache(LocalCache) {}
+
+ // Chunk recycling function, returns a quarantined chunk to the backend,
+ // first making sure it hasn't been tampered with.
+ void recycle(void *Ptr) {
+ Chunk::UnpackedHeader Header;
+ Chunk::loadHeader(Allocator.Cookie, Ptr, &Header);
+ if (UNLIKELY(Header.State != Chunk::State::Quarantined))
+ reportInvalidChunkState(AllocatorAction::Recycling, Ptr);
+
+ Chunk::UnpackedHeader NewHeader = Header;
+ NewHeader.State = Chunk::State::Available;
+ Chunk::compareExchangeHeader(Allocator.Cookie, Ptr, &NewHeader, &Header);
+
+ void *BlockBegin = Allocator::getBlockBegin(Ptr, &NewHeader);
+ const uptr ClassId = Header.ClassId;
+ if (ClassId)
+ Cache.deallocate(ClassId, BlockBegin);
+ else
+ Allocator.Secondary.deallocate(BlockBegin);
+ }
+
+ // We take a shortcut when allocating a quarantine batch by working with the
+ // appropriate class ID instead of using Size. The compiler should optimize
+ // the class ID computation and work with the associated cache directly.
+ void *allocate(UNUSED uptr Size) {
+ const uptr QuarantineClassId = SizeClassMap::getClassIdBySize(
+ sizeof(QuarantineBatch) + Chunk::getHeaderSize());
+ void *Ptr = Cache.allocate(QuarantineClassId);
+ // Quarantine batch allocation failure is fatal.
+ if (UNLIKELY(!Ptr))
+ reportOutOfMemory(SizeClassMap::getSizeByClassId(QuarantineClassId));
+
+ Ptr = reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) +
+ Chunk::getHeaderSize());
+ Chunk::UnpackedHeader Header = {};
+ Header.ClassId = QuarantineClassId & Chunk::ClassIdMask;
+ Header.SizeOrUnusedBytes = sizeof(QuarantineBatch);
+ Header.State = Chunk::State::Allocated;
+ Chunk::storeHeader(Allocator.Cookie, Ptr, &Header);
+
+ return Ptr;
+ }
+
+ void deallocate(void *Ptr) {
+ const uptr QuarantineClassId = SizeClassMap::getClassIdBySize(
+ sizeof(QuarantineBatch) + Chunk::getHeaderSize());
+ Chunk::UnpackedHeader Header;
+ Chunk::loadHeader(Allocator.Cookie, Ptr, &Header);
+
+ if (UNLIKELY(Header.State != Chunk::State::Allocated))
+ reportInvalidChunkState(AllocatorAction::Deallocating, Ptr);
+ DCHECK_EQ(Header.ClassId, QuarantineClassId);
+ DCHECK_EQ(Header.Offset, 0);
+ DCHECK_EQ(Header.SizeOrUnusedBytes, sizeof(QuarantineBatch));
+
+ Chunk::UnpackedHeader NewHeader = Header;
+ NewHeader.State = Chunk::State::Available;
+ Chunk::compareExchangeHeader(Allocator.Cookie, Ptr, &NewHeader, &Header);
+ Cache.deallocate(QuarantineClassId,
+ reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) -
+ Chunk::getHeaderSize()));
+ }
+
+ private:
+ ThisT &Allocator;
+ CacheT &Cache;
+ };
+
+ typedef GlobalQuarantine<QuarantineCallback, void> QuarantineT;
+ typedef typename QuarantineT::CacheT QuarantineCacheT;
+
+ void initLinkerInitialized() {
+ performSanityChecks();
+
+ // Check if hardware CRC32 is supported in the binary and by the platform,
+ // if so, opt for the CRC32 hardware version of the checksum.
+ if (&computeHardwareCRC32 && hasHardwareCRC32())
+ HashAlgorithm = Checksum::HardwareCRC32;
+
+ if (UNLIKELY(!getRandom(&Cookie, sizeof(Cookie))))
+ Cookie = static_cast<u32>(getMonotonicTime() ^
+ (reinterpret_cast<uptr>(this) >> 4));
+
+ initFlags();
+ reportUnrecognizedFlags();
+
+ // Store some flags locally.
+ Options.MayReturnNull = getFlags()->may_return_null;
+ Options.ZeroContents = getFlags()->zero_contents;
+ Options.DeallocTypeMismatch = getFlags()->dealloc_type_mismatch;
+ Options.DeleteSizeMismatch = getFlags()->delete_size_mismatch;
+ Options.QuarantineMaxChunkSize = getFlags()->quarantine_max_chunk_size;
+
+ Stats.initLinkerInitialized();
+ Primary.initLinkerInitialized(getFlags()->release_to_os_interval_ms);
+ Secondary.initLinkerInitialized(&Stats);
+
+ Quarantine.init(getFlags()->quarantine_size_kb << 10,
+ getFlags()->thread_local_quarantine_size_kb << 10);
+ }
+
+ void reset() { memset(this, 0, sizeof(*this)); }
+
+ void unmapTestOnly() {
+ TSDRegistry.unmapTestOnly();
+ Primary.unmapTestOnly();
+ }
+
+ TSDRegistryT *getTSDRegistry() { return &TSDRegistry; }
+
+ void initCache(CacheT *Cache) { Cache->init(&Stats, &Primary); }
+
+ // Release the resources used by a TSD, which involves:
+ // - draining the local quarantine cache to the global quarantine;
+ // - releasing the cached pointers back to the Primary;
+ // - unlinking the local stats from the global ones (destroying the cache does
+ // the last two items).
+ void commitBack(TSD<ThisT> *TSD) {
+ Quarantine.drain(&TSD->QuarantineCache,
+ QuarantineCallback(*this, TSD->Cache));
+ TSD->Cache.destroy(&Stats);
+ }
+
+ NOINLINE void *allocate(uptr Size, Chunk::Origin Origin,
+ uptr Alignment = MinAlignment,
+ bool ZeroContents = false) {
+ initThreadMaybe();
+
+ if (UNLIKELY(Alignment > MaxAlignment)) {
+ if (Options.MayReturnNull)
+ return nullptr;
+ reportAlignmentTooBig(Alignment, MaxAlignment);
+ }
+ if (UNLIKELY(Alignment < MinAlignment))
+ Alignment = MinAlignment;
+
+ // If the requested size happens to be 0 (more common than you might think),
+ // allocate 1 byte on top of the header. Then add the extra bytes required
+ // to fulfill the alignment requirements: we allocate enough to be sure that
+ // there will be an address in the block that will satisfy the alignment.
+ const uptr NeededSize =
+ Chunk::getHeaderSize() + roundUpTo(Size ? Size : 1, MinAlignment) +
+ ((Alignment > MinAlignment) ? (Alignment - Chunk::getHeaderSize()) : 0);
+
+ // Takes care of extravagantly large sizes as well as integer overflows.
+ if (UNLIKELY(Size >= MaxAllowedMallocSize ||
+ NeededSize >= MaxAllowedMallocSize)) {
+ if (Options.MayReturnNull)
+ return nullptr;
+ reportAllocationSizeTooBig(Size, NeededSize, MaxAllowedMallocSize);
+ }
+
+ void *Block;
+ uptr ClassId;
+ uptr BlockEnd = 0;
+ if (PrimaryT::canAllocate(NeededSize)) {
+ ClassId = SizeClassMap::getClassIdBySize(NeededSize);
+ bool UnlockRequired;
+ auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
+ Block = TSD->Cache.allocate(ClassId);
+ if (UnlockRequired)
+ TSD->unlock();
+ } else {
+ ClassId = 0;
+ Block = Secondary.allocate(NeededSize, Alignment, &BlockEnd);
+ }
+
+ if (UNLIKELY(!Block)) {
+ if (Options.MayReturnNull)
+ return nullptr;
+ reportOutOfMemory(NeededSize);
+ }
+
+ // We only need to zero the contents for Primary backed allocations.
+ if ((ZeroContents || Options.ZeroContents) && ClassId)
+ memset(Block, 0, PrimaryT::getSizeByClassId(ClassId));
+
+ Chunk::UnpackedHeader Header = {};
+ uptr UserPtr = reinterpret_cast<uptr>(Block) + Chunk::getHeaderSize();
+ // The following condition isn't necessarily "UNLIKELY".
+ if (!isAligned(UserPtr, Alignment)) {
+ const uptr AlignedUserPtr = roundUpTo(UserPtr, Alignment);
+ const uptr Offset = AlignedUserPtr - UserPtr;
+ Header.Offset = (Offset >> MinAlignmentLog) & Chunk::OffsetMask;
+ DCHECK_GT(Offset, 2 * sizeof(u32));
+ // The BlockMarker has no security purpose, but is specifically meant for
+ // the chunk iteration function that can be used in debugging situations.
+ // It is the only situation where we have to locate the start of a chunk
+ // based on its block address.
+ reinterpret_cast<u32 *>(Block)[0] = BlockMarker;
+ reinterpret_cast<u32 *>(Block)[1] = static_cast<u32>(Offset);
+ UserPtr = AlignedUserPtr;
+ }
+ Header.State = Chunk::State::Allocated;
+ Header.Origin = Origin & Chunk::OriginMask;
+ if (ClassId) {
+ Header.ClassId = ClassId & Chunk::ClassIdMask;
+ Header.SizeOrUnusedBytes = Size & Chunk::SizeOrUnusedBytesMask;
+ } else {
+ Header.SizeOrUnusedBytes =
+ (BlockEnd - (UserPtr + Size)) & Chunk::SizeOrUnusedBytesMask;
+ }
+ void *Ptr = reinterpret_cast<void *>(UserPtr);
+ Chunk::storeHeader(Cookie, Ptr, &Header);
+
+ if (&__scudo_allocate_hook)
+ __scudo_allocate_hook(Ptr, Size);
+
+ return Ptr;
+ }
+
+ NOINLINE void deallocate(void *Ptr, Chunk::Origin Origin, uptr DeleteSize = 0,
+ UNUSED uptr Alignment = MinAlignment) {
+ // For a deallocation, we only ensure minimal initialization, meaning thread
+ // local data will be left uninitialized for now (when using ELF TLS). The
+ // fallback cache will be used instead. This is a workaround for a situation
+ // where the only heap operation performed in a thread would be a free past
+ // the TLS destructors, ending up in initialized thread specific data never
+ // being destroyed properly. Any other heap operation will do a full init.
+ initThreadMaybe(/*MinimalInit=*/true);
+
+ if (&__scudo_deallocate_hook)
+ __scudo_deallocate_hook(Ptr);
+
+ if (UNLIKELY(!Ptr))
+ return;
+ if (UNLIKELY(!isAligned(reinterpret_cast<uptr>(Ptr), MinAlignment)))
+ reportMisalignedPointer(AllocatorAction::Deallocating, Ptr);
+
+ Chunk::UnpackedHeader Header;
+ Chunk::loadHeader(Cookie, Ptr, &Header);
+
+ if (UNLIKELY(Header.State != Chunk::State::Allocated))
+ reportInvalidChunkState(AllocatorAction::Deallocating, Ptr);
+ if (Options.DeallocTypeMismatch) {
+ if (Header.Origin != Origin) {
+        // With the exception of memalign'd chunks, which can still be free'd.
+ if (UNLIKELY(Header.Origin != Chunk::Origin::Memalign ||
+ Origin != Chunk::Origin::Malloc))
+ reportDeallocTypeMismatch(AllocatorAction::Deallocating, Ptr,
+ Header.Origin, Origin);
+ }
+ }
+
+ const uptr Size = getSize(Ptr, &Header);
+ if (DeleteSize && Options.DeleteSizeMismatch) {
+ if (UNLIKELY(DeleteSize != Size))
+ reportDeleteSizeMismatch(Ptr, DeleteSize, Size);
+ }
+
+ quarantineOrDeallocateChunk(Ptr, &Header, Size);
+ }
+
+ void *reallocate(void *OldPtr, uptr NewSize, uptr Alignment = MinAlignment) {
+ initThreadMaybe();
+
+ // The following cases are handled by the C wrappers.
+ DCHECK_NE(OldPtr, nullptr);
+ DCHECK_NE(NewSize, 0);
+
+ if (UNLIKELY(!isAligned(reinterpret_cast<uptr>(OldPtr), MinAlignment)))
+ reportMisalignedPointer(AllocatorAction::Reallocating, OldPtr);
+
+ Chunk::UnpackedHeader OldHeader;
+ Chunk::loadHeader(Cookie, OldPtr, &OldHeader);
+
+ if (UNLIKELY(OldHeader.State != Chunk::State::Allocated))
+ reportInvalidChunkState(AllocatorAction::Reallocating, OldPtr);
+
+ // Pointer has to be allocated with a malloc-type function. Some
+ // applications think that it is OK to realloc a memalign'ed pointer, which
+ // will trigger this check. It really isn't.
+ if (Options.DeallocTypeMismatch) {
+ if (UNLIKELY(OldHeader.Origin != Chunk::Origin::Malloc))
+ reportDeallocTypeMismatch(AllocatorAction::Reallocating, OldPtr,
+ OldHeader.Origin, Chunk::Origin::Malloc);
+ }
+
+ const uptr OldSize = getSize(OldPtr, &OldHeader);
+ // If the new size is identical to the old one, or lower but within an
+ // acceptable range, we just keep the old chunk, and update its header.
+ if (NewSize == OldSize)
+ return OldPtr;
+ if (NewSize < OldSize) {
+ const uptr Delta = OldSize - NewSize;
+ if (Delta < (SizeClassMap::MaxSize / 2)) {
+ Chunk::UnpackedHeader NewHeader = OldHeader;
+ NewHeader.SizeOrUnusedBytes =
+ (OldHeader.ClassId ? NewHeader.SizeOrUnusedBytes - Delta
+ : NewHeader.SizeOrUnusedBytes + Delta) &
+ Chunk::SizeOrUnusedBytesMask;
+ Chunk::compareExchangeHeader(Cookie, OldPtr, &NewHeader, &OldHeader);
+ return OldPtr;
+ }
+ }
+
+ // Otherwise we allocate a new one, and deallocate the old one. Some
+ // allocators will allocate an even larger chunk (by a fixed factor) to
+ // allow for potential further in-place realloc. The gains of such a trick
+ // are currently unclear.
+ void *NewPtr = allocate(NewSize, Chunk::Origin::Malloc, Alignment);
+ if (NewPtr) {
+ memcpy(NewPtr, OldPtr, Min(NewSize, OldSize));
+ quarantineOrDeallocateChunk(OldPtr, &OldHeader, OldSize);
+ }
+ return NewPtr;
+ }
+
+ // TODO(kostyak): while this locks the Primary & Secondary, it still allows
+ // pointers to be fetched from the TSD. We ultimately want to
+ // lock the registry as well. For now, it's good enough.
+ void disable() {
+ initThreadMaybe();
+ Primary.disable();
+ Secondary.disable();
+ }
+
+ void enable() {
+ initThreadMaybe();
+ Secondary.enable();
+ Primary.enable();
+ }
+
+ void printStats() {
+ disable();
+ Primary.printStats();
+ Secondary.printStats();
+ Quarantine.printStats();
+ enable();
+ }
+
+ void releaseToOS() { Primary.releaseToOS(); }
+
+ // Iterate over all chunks and call a callback for all busy chunks located
+ // within the provided memory range. Said callback must not use this allocator
+ // or a deadlock can ensue. This fits Android's malloc_iterate() needs.
+ void iterateOverChunks(uptr Base, uptr Size, iterate_callback Callback,
+ void *Arg) {
+ initThreadMaybe();
+ const uptr From = Base;
+ const uptr To = Base + Size;
+ auto Lambda = [this, From, To, Callback, Arg](uptr Block) {
+ if (Block < From || Block > To)
+ return;
+ uptr ChunkSize;
+ const uptr ChunkBase = getChunkFromBlock(Block, &ChunkSize);
+ if (ChunkBase != InvalidChunk)
+ Callback(ChunkBase, ChunkSize, Arg);
+ };
+ Primary.iterateOverBlocks(Lambda);
+ Secondary.iterateOverBlocks(Lambda);
+ }
+
+ bool canReturnNull() {
+ initThreadMaybe();
+ return Options.MayReturnNull;
+ }
+
+ // TODO(kostyak): implement this as a "backend" to mallopt.
+ bool setOption(UNUSED uptr Option, UNUSED uptr Value) { return false; }
+
+ // Return the usable size for a given chunk. Technically we lie, as we just
+ // report the actual size of a chunk. This is done to counteract code actively
+ // writing past the end of a chunk (like sqlite3) when the usable size allows
+ // for it, which then forces realloc to copy the usable size of a chunk as
+ // opposed to its actual size.
+ uptr getUsableSize(const void *Ptr) {
+ initThreadMaybe();
+ if (UNLIKELY(!Ptr))
+ return 0;
+ Chunk::UnpackedHeader Header;
+ Chunk::loadHeader(Cookie, Ptr, &Header);
+ // Getting the usable size of a chunk only makes sense if it's allocated.
+ if (UNLIKELY(Header.State != Chunk::State::Allocated))
+ reportInvalidChunkState(AllocatorAction::Sizing, const_cast<void *>(Ptr));
+ return getSize(Ptr, &Header);
+ }
+
+ void getStats(StatCounters S) {
+ initThreadMaybe();
+ Stats.get(S);
+ }
+
+private:
+ typedef MapAllocator SecondaryT;
+ typedef typename PrimaryT::SizeClassMap SizeClassMap;
+
+ static const uptr MinAlignmentLog = SCUDO_MIN_ALIGNMENT_LOG;
+ static const uptr MaxAlignmentLog = 24U; // 16 MB seems reasonable.
+ static const uptr MinAlignment = 1UL << MinAlignmentLog;
+ static const uptr MaxAlignment = 1UL << MaxAlignmentLog;
+ static const uptr MaxAllowedMallocSize =
+ FIRST_32_SECOND_64(1UL << 31, 1ULL << 40);
+
+ // Constants used by the chunk iteration mechanism.
+ static const u32 BlockMarker = 0x44554353U;
+ static const uptr InvalidChunk = ~static_cast<uptr>(0);
+
+ GlobalStats Stats;
+ TSDRegistryT TSDRegistry;
+ PrimaryT Primary;
+ SecondaryT Secondary;
+ QuarantineT Quarantine;
+
+ u32 Cookie;
+
+ struct {
+ u8 MayReturnNull : 1; // may_return_null
+ u8 ZeroContents : 1; // zero_contents
+ u8 DeallocTypeMismatch : 1; // dealloc_type_mismatch
+ u8 DeleteSizeMismatch : 1; // delete_size_mismatch
+ u32 QuarantineMaxChunkSize; // quarantine_max_chunk_size
+ } Options;
+
+ // The following might get optimized out by the compiler.
+ NOINLINE void performSanityChecks() {
+ // Verify that the header offset field can hold the maximum offset. In the
+ // case of the Secondary allocator, it takes care of alignment and the
+ // offset will always be small. In the case of the Primary, the worst case
+ // scenario happens in the last size class, when the backend allocation
+ // would already be aligned on the requested alignment, which would happen
+ // to be the maximum alignment that would fit in that size class. As a
+ // result, the maximum offset will be at most the maximum alignment for the
+ // last size class minus the header size, in multiples of MinAlignment.
+ Chunk::UnpackedHeader Header = {};
+ const uptr MaxPrimaryAlignment = 1UL << getMostSignificantSetBitIndex(
+ SizeClassMap::MaxSize - MinAlignment);
+ const uptr MaxOffset =
+ (MaxPrimaryAlignment - Chunk::getHeaderSize()) >> MinAlignmentLog;
+ Header.Offset = MaxOffset & Chunk::OffsetMask;
+ if (UNLIKELY(Header.Offset != MaxOffset))
+ reportSanityCheckError("offset");
+
+ // Verify that we can fit the maximum size or amount of unused bytes in the
+ // header. Given that the Secondary fits the allocation to a page, the worst
+ // case scenario happens in the Primary. It will depend on the second to
+ // last and last class sizes, as well as the dynamic base for the Primary.
+ // The following is an over-approximation that works for our needs.
+ const uptr MaxSizeOrUnusedBytes = SizeClassMap::MaxSize - 1;
+ Header.SizeOrUnusedBytes =
+ MaxSizeOrUnusedBytes & Chunk::SizeOrUnusedBytesMask;
+ if (UNLIKELY(Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes))
+ reportSanityCheckError("size (or unused bytes)");
+
+ const uptr LargestClassId = SizeClassMap::LargestClassId;
+ Header.ClassId = LargestClassId;
+ if (UNLIKELY(Header.ClassId != LargestClassId))
+ reportSanityCheckError("class ID");
+ }
+
+ static INLINE void *getBlockBegin(const void *Ptr,
+ Chunk::UnpackedHeader *Header) {
+ return reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) -
+ Chunk::getHeaderSize() -
+ (Header->Offset << MinAlignmentLog));
+ }
+
+ // Return the size of a chunk as requested during its allocation.
+ INLINE uptr getSize(const void *Ptr, Chunk::UnpackedHeader *Header) {
+ const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes;
+ if (Header->ClassId)
+ return SizeOrUnusedBytes;
+ return SecondaryT::getBlockEnd(getBlockBegin(Ptr, Header)) -
+ reinterpret_cast<uptr>(Ptr) - SizeOrUnusedBytes;
+ }
+
+ ALWAYS_INLINE void initThreadMaybe(bool MinimalInit = false) {
+ TSDRegistry.initThreadMaybe(this, MinimalInit);
+ }
+
+ void quarantineOrDeallocateChunk(void *Ptr, Chunk::UnpackedHeader *Header,
+ uptr Size) {
+ Chunk::UnpackedHeader NewHeader = *Header;
+    // If the quarantine is disabled, or if the actual size of the chunk is 0
+    // or larger than the maximum allowed, we return the chunk directly to the
+    // backend.
+ const bool BypassQuarantine = !Quarantine.getCacheSize() || !Size ||
+ (Size > Options.QuarantineMaxChunkSize);
+ if (BypassQuarantine) {
+ NewHeader.State = Chunk::State::Available;
+ Chunk::compareExchangeHeader(Cookie, Ptr, &NewHeader, Header);
+ void *BlockBegin = getBlockBegin(Ptr, &NewHeader);
+ const uptr ClassId = NewHeader.ClassId;
+ if (ClassId) {
+ bool UnlockRequired;
+ auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
+ TSD->Cache.deallocate(ClassId, BlockBegin);
+ if (UnlockRequired)
+ TSD->unlock();
+ } else {
+ Secondary.deallocate(BlockBegin);
+ }
+ } else {
+ NewHeader.State = Chunk::State::Quarantined;
+ Chunk::compareExchangeHeader(Cookie, Ptr, &NewHeader, Header);
+ bool UnlockRequired;
+ auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
+ Quarantine.put(&TSD->QuarantineCache,
+ QuarantineCallback(*this, TSD->Cache), Ptr, Size);
+ if (UnlockRequired)
+ TSD->unlock();
+ }
+ }
+
+ // This only cares about valid busy chunks. This might change in the future.
+ uptr getChunkFromBlock(uptr Block, uptr *Size) {
+ u32 Offset = 0;
+ if (reinterpret_cast<u32 *>(Block)[0] == BlockMarker)
+ Offset = reinterpret_cast<u32 *>(Block)[1];
+ const uptr P = Block + Offset + Chunk::getHeaderSize();
+ const void *Ptr = reinterpret_cast<const void *>(P);
+ Chunk::UnpackedHeader Header;
+ if (!Chunk::isValid(Cookie, Ptr, &Header) ||
+ Header.State != Chunk::State::Allocated)
+ return InvalidChunk;
+ if (Size)
+ *Size = getSize(Ptr, &Header);
+ return P;
+ }
+};
+
+} // namespace scudo
+
+#endif // SCUDO_COMBINED_H_
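The C entry points (provided by wrappers elsewhere in the standalone tree) reduce to thin shims over this class. A minimal sketch, assuming a statically initialized instance (my_malloc and my_free are hypothetical names):

  static scudo::Allocator<scudo::Config> Instance; // initLinkerInitialized() must run first.

  void *my_malloc(size_t Size) {
    return Instance.allocate(Size, scudo::Chunk::Origin::Malloc);
  }

  void my_free(void *Ptr) {
    Instance.deallocate(Ptr, scudo::Chunk::Origin::Malloc);
  }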
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/common.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/common.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/common.cc (revision 351984)
@@ -0,0 +1,32 @@
+//===-- common.cc -----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "common.h"
+#include "atomic_helpers.h"
+
+namespace scudo {
+
+uptr PageSizeCached;
+uptr getPageSize();
+
+uptr getPageSizeSlow() {
+ PageSizeCached = getPageSize();
+ CHECK_NE(PageSizeCached, 0);
+ return PageSizeCached;
+}
+
+// Fatal internal map() or unmap() error (potentially OOM related).
+void NORETURN dieOnMapUnmapError(bool OutOfMemory) {
+ outputRaw("Scudo ERROR: internal map or unmap failure");
+ if (OutOfMemory)
+ outputRaw(" (OOM)");
+ outputRaw("\n");
+ die();
+}
+
+} // namespace scudo
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/common.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/common.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/common.h (revision 351984)
@@ -0,0 +1,176 @@
+//===-- common.h ------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_COMMON_H_
+#define SCUDO_COMMON_H_
+
+#include "internal_defs.h"
+
+#include "fuchsia.h"
+#include "linux.h"
+
+#include <stddef.h>
+#include <string.h>
+
+namespace scudo {
+
+template <class Dest, class Source> INLINE Dest bit_cast(const Source &S) {
+ COMPILER_CHECK(sizeof(Dest) == sizeof(Source));
+ Dest D;
+ memcpy(&D, &S, sizeof(D));
+ return D;
+}
+
+INLINE constexpr uptr roundUpTo(uptr X, uptr Boundary) {
+ return (X + Boundary - 1) & ~(Boundary - 1);
+}
+
+INLINE constexpr uptr roundDownTo(uptr X, uptr Boundary) {
+ return X & ~(Boundary - 1);
+}
+
+INLINE constexpr bool isAligned(uptr X, uptr Alignment) {
+ return (X & (Alignment - 1)) == 0;
+}
+
+template <class T> constexpr T Min(T A, T B) { return A < B ? A : B; }
+
+template <class T> constexpr T Max(T A, T B) { return A > B ? A : B; }
+
+template <class T> void Swap(T &A, T &B) {
+ T Tmp = A;
+ A = B;
+ B = Tmp;
+}
+
+INLINE bool isPowerOfTwo(uptr X) { return (X & (X - 1)) == 0; }
+
+INLINE uptr getMostSignificantSetBitIndex(uptr X) {
+ DCHECK_NE(X, 0U);
+ return SCUDO_WORDSIZE - 1U - static_cast<uptr>(__builtin_clzl(X));
+}
+
+INLINE uptr roundUpToPowerOfTwo(uptr Size) {
+ DCHECK(Size);
+ if (isPowerOfTwo(Size))
+ return Size;
+ const uptr Up = getMostSignificantSetBitIndex(Size);
+ DCHECK_LT(Size, (1UL << (Up + 1)));
+ DCHECK_GT(Size, (1UL << Up));
+ return 1UL << (Up + 1);
+}
+
+INLINE uptr getLeastSignificantSetBitIndex(uptr X) {
+ DCHECK_NE(X, 0U);
+ return static_cast<uptr>(__builtin_ctzl(X));
+}
+
+INLINE uptr getLog2(uptr X) {
+ DCHECK(isPowerOfTwo(X));
+ return getLeastSignificantSetBitIndex(X);
+}
+
+INLINE u32 getRandomU32(u32 *State) {
+ // ANSI C linear congruential PRNG (16-bit output).
+ // return (*State = *State * 1103515245 + 12345) >> 16;
+ // XorShift (32-bit output).
+ *State ^= *State << 13;
+ *State ^= *State >> 17;
+ *State ^= *State << 5;
+ return *State;
+}
+
+INLINE u32 getRandomModN(u32 *State, u32 N) {
+ return getRandomU32(State) % N; // [0, N)
+}
+
+template <typename T> INLINE void shuffle(T *A, u32 N, u32 *RandState) {
+ if (N <= 1)
+ return;
+ u32 State = *RandState;
+ for (u32 I = N - 1; I > 0; I--)
+ Swap(A[I], A[getRandomModN(&State, I + 1)]);
+ *RandState = State;
+}
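+
+// Illustrative sketch (not part of the upstream file): getRandomU32 is a
+// xorshift32 generator, so State must be seeded nonzero (0 is a fixed point),
+// and getRandomModN carries the usual modulo bias when N does not divide 2^32.
+//   u32 Seed = 42U; // any nonzero value
+//   uptr Array[8] = {0, 1, 2, 3, 4, 5, 6, 7};
+//   shuffle(Array, 8U, &Seed); // in-place Fisher-Yates permutation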
+
+// Hardware specific inlinable functions.
+
+INLINE void yieldProcessor(u8 Count) {
+#if defined(__i386__) || defined(__x86_64__)
+ __asm__ __volatile__("" ::: "memory");
+ for (u8 I = 0; I < Count; I++)
+ __asm__ __volatile__("pause");
+#elif defined(__aarch64__) || defined(__arm__)
+ __asm__ __volatile__("" ::: "memory");
+ for (u8 I = 0; I < Count; I++)
+ __asm__ __volatile__("yield");
+#endif
+ __asm__ __volatile__("" ::: "memory");
+}
+
+// Platform specific functions.
+
+extern uptr PageSizeCached;
+uptr getPageSizeSlow();
+INLINE uptr getPageSizeCached() {
+ // Bionic uses a hardcoded value.
+ if (SCUDO_ANDROID)
+ return 4096U;
+ if (LIKELY(PageSizeCached))
+ return PageSizeCached;
+ return getPageSizeSlow();
+}
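+
+// Illustrative sketch (not part of the upstream file): only the first call
+// pays for the platform query; later calls are a single load (or a constant
+// on Android). A hypothetical helper built on top of it:
+//   uptr roundToPages(uptr Size) {
+//     return roundUpTo(Size, getPageSizeCached());
+//   }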
+
+u32 getNumberOfCPUs();
+
+const char *getEnv(const char *Name);
+
+u64 getMonotonicTime();
+
+// Our randomness gathering function is limited to 256 bytes to ensure we get
+// as many bytes as requested and to avoid interruptions (on Linux).
+constexpr uptr MaxRandomLength = 256U;
+bool getRandom(void *Buffer, uptr Length, bool Blocking = false);
+
+// Platform memory mapping functions.
+
+#define MAP_ALLOWNOMEM (1U << 0)
+#define MAP_NOACCESS (1U << 1)
+#define MAP_RESIZABLE (1U << 2)
+
+// Our platform memory mapping use is restricted to 3 scenarios:
+// - reserve memory at a random address (MAP_NOACCESS);
+// - commit memory in a previously reserved space;
+// - commit memory at a random address.
+// As such, only a subset of parameter combinations is valid, which is checked
+// by the function implementation. The Data parameter allows passing opaque
+// platform specific data to the function.
+// On failure, dies unless the failure is OOM-related and MAP_ALLOWNOMEM was
+// specified, in which case it returns nullptr.
+void *map(void *Addr, uptr Size, const char *Name, uptr Flags = 0,
+ MapPlatformData *Data = nullptr);
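+
+// Illustrative sketch (not part of the upstream header) of the reserve-then-
+// commit scenario described above; the size, name and error handling are
+// assumptions:
+//   MapPlatformData Data = {};
+//   void *Reserved = map(nullptr, 1U << 20, "scudo:example", MAP_NOACCESS,
+//                        &Data);
+//   void *Committed = map(Reserved, getPageSizeCached(), "scudo:example", 0,
+//                         &Data);
+//   unmap(Reserved, 1U << 20, UNMAP_ALL, &Data);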
+
+// Indicates that we are getting rid of the whole mapping, which might have
+// further consequences on Data, depending on the platform.
+#define UNMAP_ALL (1U << 0)
+
+void unmap(void *Addr, uptr Size, uptr Flags = 0,
+ MapPlatformData *Data = nullptr);
+
+void releasePagesToOS(uptr BaseAddress, uptr Offset, uptr Size,
+ MapPlatformData *Data = nullptr);
+
+// Internal map & unmap fatal error. This must not call map().
+void NORETURN dieOnMapUnmapError(bool OutOfMemory = false);
+
+// Logging related functions.
+
+void setAbortMessage(const char *Message);
+
+} // namespace scudo
+
+#endif // SCUDO_COMMON_H_
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/crc32_hw.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/crc32_hw.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/crc32_hw.cc (revision 351984)
@@ -0,0 +1,19 @@
+//===-- crc32_hw.cc ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "checksum.h"
+
+namespace scudo {
+
+#if defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
+u32 computeHardwareCRC32(u32 Crc, uptr Data) {
+ return static_cast<u32>(CRC32_INTRINSIC(Crc, Data));
+}
+#endif // defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
+
+} // namespace scudo
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/flags.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/flags.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/flags.cc (revision 351984)
@@ -0,0 +1,57 @@
+//===-- flags.cc ------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "flags.h"
+#include "common.h"
+#include "flags_parser.h"
+#include "interface.h"
+
+namespace scudo {
+
+Flags *getFlags() {
+ static Flags F;
+ return &F;
+}
+
+void Flags::setDefaults() {
+#define SCUDO_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
+#include "flags.inc"
+#undef SCUDO_FLAG
+}
+
+void registerFlags(FlagParser *Parser, Flags *F) {
+#define SCUDO_FLAG(Type, Name, DefaultValue, Description) \
+ Parser->registerFlag(#Name, Description, FlagType::FT_##Type, \
+ reinterpret_cast<void *>(&F->Name));
+#include "flags.inc"
+#undef SCUDO_FLAG
+}
+
+static const char *getCompileDefinitionScudoDefaultOptions() {
+#ifdef SCUDO_DEFAULT_OPTIONS
+ return STRINGIFY(SCUDO_DEFAULT_OPTIONS);
+#else
+ return "";
+#endif
+}
+
+static const char *getScudoDefaultOptions() {
+ return (&__scudo_default_options) ? __scudo_default_options() : "";
+}
+
+void initFlags() {
+ Flags *F = getFlags();
+ F->setDefaults();
+ FlagParser Parser;
+ registerFlags(&Parser, F);
+ Parser.parseString(getCompileDefinitionScudoDefaultOptions());
+ Parser.parseString(getScudoDefaultOptions());
+ Parser.parseString(getEnv("SCUDO_OPTIONS"));
+}
+
+} // namespace scudo
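+
+// Illustrative sketch (not part of the upstream file): initFlags() parses the
+// three option sources in increasing priority order -- the compile-time
+// SCUDO_DEFAULT_OPTIONS define, the weak __scudo_default_options() callback,
+// then the SCUDO_OPTIONS environment variable -- with later parses overwriting
+// earlier values. A program could provide defaults with (flag names taken from
+// flags.inc):
+//   extern "C" const char *__scudo_default_options() {
+//     return "quarantine_size_kb=256:thread_local_quarantine_size_kb=64";
+//   }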
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/flags.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/flags.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/flags.h (revision 351984)
@@ -0,0 +1,30 @@
+//===-- flags.h -------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_FLAGS_H_
+#define SCUDO_FLAGS_H_
+
+#include "internal_defs.h"
+
+namespace scudo {
+
+struct Flags {
+#define SCUDO_FLAG(Type, Name, DefaultValue, Description) Type Name;
+#include "flags.inc"
+#undef SCUDO_FLAG
+ void setDefaults();
+};
+
+Flags *getFlags();
+void initFlags();
+class FlagParser;
+void registerFlags(FlagParser *Parser, Flags *F);
+
+} // namespace scudo
+
+#endif // SCUDO_FLAGS_H_
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/flags.inc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/flags.inc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/flags.inc (revision 351984)
@@ -0,0 +1,50 @@
+//===-- flags.inc -----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_FLAG
+#error "Define SCUDO_FLAG prior to including this file!"
+#endif
+
+SCUDO_FLAG(int, quarantine_size_kb, 0,
+ "Size (in kilobytes) of quarantine used to delay the actual "
+ "deallocation of chunks. Lower value may reduce memory usage but "
+ "decrease the effectiveness of the mitigation.")
+
+SCUDO_FLAG(int, thread_local_quarantine_size_kb, 0,
+ "Size (in kilobytes) of per-thread cache used to offload the global "
+ "quarantine. Lower value may reduce memory usage but might increase "
+ "the contention on the global quarantine.")
+
+SCUDO_FLAG(int, quarantine_max_chunk_size, 0,
+ "Size (in bytes) up to which chunks will be quarantined (if lower "
+ "than or equal to).")
+
+SCUDO_FLAG(bool, dealloc_type_mismatch, false,
+ "Terminate on a type mismatch in allocation-deallocation functions, "
+ "eg: malloc/delete, new/free, new/delete[], etc.")
+
+SCUDO_FLAG(bool, delete_size_mismatch, true,
+ "Terminate on a size mismatch between a sized-delete and the actual "
+ "size of a chunk (as provided to new/new[]).")
+
+SCUDO_FLAG(bool, zero_contents, false, "Zero chunk contents on allocation.")
+
+SCUDO_FLAG(int, rss_limit_mb, -1,
+ "Enforce an upper limit (in megabytes) to the process RSS. The "
+ "allocator will terminate or return NULL when allocations are "
+ "attempted past that limit (depending on may_return_null). Negative "
+ "values disable the feature.")
+
+SCUDO_FLAG(bool, may_return_null, true,
+ "Indicate whether the allocator should terminate instead of "
+ "returning NULL in otherwise non-fatal error scenarios, eg: OOM, "
+ "invalid allocation alignments, etc.")
+
+SCUDO_FLAG(int, release_to_os_interval_ms, 5000,
+ "Interval (in milliseconds) at which to attempt release of unused "
+ "memory to the OS. Negative values disable the feature.")
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/flags_parser.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/flags_parser.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/flags_parser.cc (revision 351984)
@@ -0,0 +1,164 @@
+//===-- flags_parser.cc -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "flags_parser.h"
+#include "common.h"
+#include "report.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+namespace scudo {
+
+class UnknownFlagsRegistry {
+ static const u32 MaxUnknownFlags = 16;
+ const char *UnknownFlagsNames[MaxUnknownFlags];
+ u32 NumberOfUnknownFlags;
+
+public:
+ void add(const char *Name) {
+ CHECK_LT(NumberOfUnknownFlags, MaxUnknownFlags);
+ UnknownFlagsNames[NumberOfUnknownFlags++] = Name;
+ }
+
+ void report() {
+ if (!NumberOfUnknownFlags)
+ return;
+ Printf("Scudo WARNING: found %d unrecognized flag(s):\n",
+ NumberOfUnknownFlags);
+ for (u32 I = 0; I < NumberOfUnknownFlags; ++I)
+ Printf(" %s\n", UnknownFlagsNames[I]);
+ NumberOfUnknownFlags = 0;
+ }
+};
+static UnknownFlagsRegistry UnknownFlags;
+
+void reportUnrecognizedFlags() { UnknownFlags.report(); }
+
+void FlagParser::printFlagDescriptions() {
+ Printf("Available flags for Scudo:\n");
+ for (u32 I = 0; I < NumberOfFlags; ++I)
+ Printf("\t%s\n\t\t- %s\n", Flags[I].Name, Flags[I].Desc);
+}
+
+static bool isSeparator(char C) {
+ return C == ' ' || C == ',' || C == ':' || C == '\n' || C == '\t' ||
+ C == '\r';
+}
+
+static bool isSeparatorOrNull(char C) { return !C || isSeparator(C); }
+
+void FlagParser::skipWhitespace() {
+ while (isSeparator(Buffer[Pos]))
+ ++Pos;
+}
+
+void FlagParser::parseFlag() {
+ const uptr NameStart = Pos;
+ while (Buffer[Pos] != '=' && !isSeparatorOrNull(Buffer[Pos]))
+ ++Pos;
+ if (Buffer[Pos] != '=')
+ reportError("expected '='");
+ const char *Name = Buffer + NameStart;
+ const uptr ValueStart = ++Pos;
+ const char *Value;
+ if (Buffer[Pos] == '\'' || Buffer[Pos] == '"') {
+ const char Quote = Buffer[Pos++];
+ while (Buffer[Pos] != 0 && Buffer[Pos] != Quote)
+ ++Pos;
+ if (Buffer[Pos] == 0)
+ reportError("unterminated string");
+ Value = Buffer + ValueStart + 1;
+ ++Pos; // consume the closing quote
+ } else {
+ while (!isSeparatorOrNull(Buffer[Pos]))
+ ++Pos;
+ Value = Buffer + ValueStart;
+ }
+ if (!runHandler(Name, Value))
+ reportError("flag parsing failed.");
+}
+
+void FlagParser::parseFlags() {
+ while (true) {
+ skipWhitespace();
+ if (Buffer[Pos] == 0)
+ break;
+ parseFlag();
+ }
+}
+
+void FlagParser::parseString(const char *S) {
+ if (!S)
+ return;
+ // Backup current parser state to allow nested parseString() calls.
+ const char *OldBuffer = Buffer;
+ const uptr OldPos = Pos;
+ Buffer = S;
+ Pos = 0;
+
+ parseFlags();
+
+ Buffer = OldBuffer;
+ Pos = OldPos;
+}
+
+INLINE bool parseBool(const char *Value, bool *b) {
+ if (strncmp(Value, "0", 1) == 0 || strncmp(Value, "no", 2) == 0 ||
+ strncmp(Value, "false", 5) == 0) {
+ *b = false;
+ return true;
+ }
+ if (strncmp(Value, "1", 1) == 0 || strncmp(Value, "yes", 3) == 0 ||
+ strncmp(Value, "true", 4) == 0) {
+ *b = true;
+ return true;
+ }
+ return false;
+}
+
+bool FlagParser::runHandler(const char *Name, const char *Value) {
+ for (u32 I = 0; I < NumberOfFlags; ++I) {
+ const uptr Len = strlen(Flags[I].Name);
+ if (strncmp(Name, Flags[I].Name, Len) != 0 || Name[Len] != '=')
+ continue;
+ bool Ok = false;
+ switch (Flags[I].Type) {
+ case FlagType::FT_bool:
+ Ok = parseBool(Value, reinterpret_cast<bool *>(Flags[I].Var));
+ if (!Ok)
+ reportInvalidFlag("bool", Value);
+ break;
+ case FlagType::FT_int:
+ char *ValueEnd;
+ *reinterpret_cast<int *>(Flags[I].Var) =
+ static_cast<int>(strtol(Value, &ValueEnd, 10));
+ Ok =
+ *ValueEnd == '"' || *ValueEnd == '\'' || isSeparatorOrNull(*ValueEnd);
+ if (!Ok)
+ reportInvalidFlag("int", Value);
+ break;
+ }
+ return Ok;
+ }
+ // Unrecognized flag. This is not a fatal error; we may print a warning later.
+ UnknownFlags.add(Name);
+ return true;
+}
+
+void FlagParser::registerFlag(const char *Name, const char *Desc, FlagType Type,
+ void *Var) {
+ CHECK_LT(NumberOfFlags, MaxFlags);
+ Flags[NumberOfFlags].Name = Name;
+ Flags[NumberOfFlags].Desc = Desc;
+ Flags[NumberOfFlags].Type = Type;
+ Flags[NumberOfFlags].Var = Var;
+ ++NumberOfFlags;
+}
+
+} // namespace scudo
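+
+// Illustrative sketch (not part of the upstream file) of direct parser usage
+// with hypothetical flags; Scudo itself only goes through initFlags():
+//   bool Verbose = false;
+//   int Limit = 0;
+//   FlagParser Parser;
+//   Parser.registerFlag("verbose", "Example flag", FlagType::FT_bool,
+//                       &Verbose);
+//   Parser.registerFlag("limit", "Example flag", FlagType::FT_int, &Limit);
+//   Parser.parseString("verbose=yes:limit=42"); // Verbose == true, Limit == 42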
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/flags_parser.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/flags_parser.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/flags_parser.h (revision 351984)
@@ -0,0 +1,55 @@
+//===-- flags_parser.h ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_FLAGS_PARSER_H_
+#define SCUDO_FLAGS_PARSER_H_
+
+#include "report.h"
+#include "string_utils.h"
+
+#include <stddef.h>
+
+namespace scudo {
+
+enum class FlagType : u8 {
+ FT_bool,
+ FT_int,
+};
+
+class FlagParser {
+public:
+ void registerFlag(const char *Name, const char *Desc, FlagType Type,
+ void *Var);
+ void parseString(const char *S);
+ void printFlagDescriptions();
+
+private:
+ static const u32 MaxFlags = 12;
+ struct Flag {
+ const char *Name;
+ const char *Desc;
+ FlagType Type;
+ void *Var;
+ } Flags[MaxFlags];
+
+ u32 NumberOfFlags = 0;
+ const char *Buffer = nullptr;
+ uptr Pos = 0;
+
+ void reportFatalError(const char *Error);
+ void skipWhitespace();
+ void parseFlags();
+ void parseFlag();
+ bool runHandler(const char *Name, const char *Value);
+};
+
+void reportUnrecognizedFlags();
+
+} // namespace scudo
+
+#endif // SCUDO_FLAGS_PARSER_H_
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/fuchsia.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/fuchsia.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/fuchsia.cc (revision 351984)
@@ -0,0 +1,189 @@
+//===-- fuchsia.cc ----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "platform.h"
+
+#if SCUDO_FUCHSIA
+
+#include "common.h"
+#include "mutex.h"
+#include "string_utils.h"
+
+#include <lib/sync/mutex.h> // for sync_mutex_t
+#include <limits.h> // for PAGE_SIZE
+#include <stdlib.h> // for getenv()
+#include <zircon/compiler.h>
+#include <zircon/sanitizer.h>
+#include <zircon/syscalls.h>
+
+namespace scudo {
+
+uptr getPageSize() { return PAGE_SIZE; }
+
+void NORETURN die() { __builtin_trap(); }
+
+// We zero-initialize the Data parameter of map(); make sure this is consistent
+// with ZX_HANDLE_INVALID.
+COMPILER_CHECK(ZX_HANDLE_INVALID == 0);
+
+static void *allocateVmar(uptr Size, MapPlatformData *Data, bool AllowNoMem) {
+ // Only scenario so far.
+ DCHECK(Data);
+ DCHECK_EQ(Data->Vmar, ZX_HANDLE_INVALID);
+
+ const zx_status_t Status = _zx_vmar_allocate(
+ _zx_vmar_root_self(),
+ ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE | ZX_VM_CAN_MAP_SPECIFIC, 0,
+ Size, &Data->Vmar, &Data->VmarBase);
+ if (Status != ZX_OK) {
+ if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
+ dieOnMapUnmapError(Status == ZX_ERR_NO_MEMORY);
+ return nullptr;
+ }
+ return reinterpret_cast<void *>(Data->VmarBase);
+}
+
+void *map(void *Addr, uptr Size, const char *Name, uptr Flags,
+ MapPlatformData *Data) {
+ DCHECK_EQ(Size % PAGE_SIZE, 0);
+ const bool AllowNoMem = !!(Flags & MAP_ALLOWNOMEM);
+
+ // For MAP_NOACCESS, just allocate a Vmar and return.
+ if (Flags & MAP_NOACCESS)
+ return allocateVmar(Size, Data, AllowNoMem);
+
+ const zx_handle_t Vmar = Data ? Data->Vmar : _zx_vmar_root_self();
+ CHECK_NE(Vmar, ZX_HANDLE_INVALID);
+
+ zx_status_t Status;
+ zx_handle_t Vmo;
+ uint64_t VmoSize = 0;
+ if (Data && Data->Vmo != ZX_HANDLE_INVALID) {
+ // If a Vmo was specified, it's a resize operation.
+ CHECK(Addr);
+ DCHECK(Flags & MAP_RESIZABLE);
+ Vmo = Data->Vmo;
+ VmoSize = Data->VmoSize;
+ Status = _zx_vmo_set_size(Vmo, VmoSize + Size);
+ if (Status != ZX_OK) {
+ if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
+ dieOnMapUnmapError(Status == ZX_ERR_NO_MEMORY);
+ return nullptr;
+ }
+ } else {
+ // Otherwise, create a Vmo and set its name.
+ Status = _zx_vmo_create(Size, ZX_VMO_RESIZABLE, &Vmo);
+ if (Status != ZX_OK) {
+ if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
+ dieOnMapUnmapError(Status == ZX_ERR_NO_MEMORY);
+ return nullptr;
+ }
+ _zx_object_set_property(Vmo, ZX_PROP_NAME, Name, strlen(Name));
+ }
+
+ uintptr_t P;
+ zx_vm_option_t MapFlags =
+ ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_ALLOW_FAULTS;
+ const uint64_t Offset =
+ Addr ? reinterpret_cast<uintptr_t>(Addr) - Data->VmarBase : 0;
+ if (Offset)
+ MapFlags |= ZX_VM_SPECIFIC;
+ Status = _zx_vmar_map(Vmar, MapFlags, Offset, Vmo, VmoSize, Size, &P);
+ // No need to track the Vmo if we don't intend to resize it; close it.
+ if (Flags & MAP_RESIZABLE) {
+ DCHECK(Data);
+ DCHECK_EQ(Data->Vmo, ZX_HANDLE_INVALID);
+ Data->Vmo = Vmo;
+ } else {
+ CHECK_EQ(_zx_handle_close(Vmo), ZX_OK);
+ }
+ if (Status != ZX_OK) {
+ if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
+ dieOnMapUnmapError(Status == ZX_ERR_NO_MEMORY);
+ return nullptr;
+ }
+ if (Data)
+ Data->VmoSize += Size;
+
+ return reinterpret_cast<void *>(P);
+}
+
+void unmap(void *Addr, uptr Size, uptr Flags, MapPlatformData *Data) {
+ if (Flags & UNMAP_ALL) {
+ DCHECK_NE(Data, nullptr);
+ const zx_handle_t Vmar = Data->Vmar;
+ DCHECK_NE(Vmar, _zx_vmar_root_self());
+ // Destroying the vmar effectively unmaps the whole mapping.
+ CHECK_EQ(_zx_vmar_destroy(Vmar), ZX_OK);
+ CHECK_EQ(_zx_handle_close(Vmar), ZX_OK);
+ } else {
+ const zx_handle_t Vmar = Data ? Data->Vmar : _zx_vmar_root_self();
+ const zx_status_t Status =
+ _zx_vmar_unmap(Vmar, reinterpret_cast<uintptr_t>(Addr), Size);
+ if (Status != ZX_OK)
+ dieOnMapUnmapError();
+ }
+ if (Data) {
+ if (Data->Vmo != ZX_HANDLE_INVALID)
+ CHECK_EQ(_zx_handle_close(Data->Vmo), ZX_OK);
+ memset(Data, 0, sizeof(*Data));
+ }
+}
+
+void releasePagesToOS(UNUSED uptr BaseAddress, uptr Offset, uptr Size,
+ MapPlatformData *Data) {
+ DCHECK(Data);
+ DCHECK_NE(Data->Vmar, ZX_HANDLE_INVALID);
+ DCHECK_NE(Data->Vmo, ZX_HANDLE_INVALID);
+ const zx_status_t Status =
+ _zx_vmo_op_range(Data->Vmo, ZX_VMO_OP_DECOMMIT, Offset, Size, NULL, 0);
+ CHECK_EQ(Status, ZX_OK);
+}
+
+const char *getEnv(const char *Name) { return getenv(Name); }
+
+// Note: we need to flag these methods with __TA_NO_THREAD_SAFETY_ANALYSIS
+// because the Fuchsia implementation of sync_mutex_t has clang thread safety
+// annotations. Were we to apply proper capability annotations to the top level
+// HybridMutex class itself, they would not be needed. As it stands, the
+// thread safety analysis thinks that we are locking the mutex and accidentally
+// leaving it locked on the way out.
+bool HybridMutex::tryLock() __TA_NO_THREAD_SAFETY_ANALYSIS {
+ // Size and alignment must be compatible between both types.
+ return sync_mutex_trylock(&M) == ZX_OK;
+}
+
+void HybridMutex::lockSlow() __TA_NO_THREAD_SAFETY_ANALYSIS {
+ sync_mutex_lock(&M);
+}
+
+void HybridMutex::unlock() __TA_NO_THREAD_SAFETY_ANALYSIS {
+ sync_mutex_unlock(&M);
+}
+
+u64 getMonotonicTime() { return _zx_clock_get_monotonic(); }
+
+u32 getNumberOfCPUs() { return _zx_system_get_num_cpus(); }
+
+bool getRandom(void *Buffer, uptr Length, bool Blocking) {
+ COMPILER_CHECK(MaxRandomLength <= ZX_CPRNG_DRAW_MAX_LEN);
+ if (!Buffer || !Length || Length > MaxRandomLength)
+ return false;
+ _zx_cprng_draw(Buffer, Length);
+ return true;
+}
+
+void outputRaw(const char *Buffer) {
+ __sanitizer_log_write(Buffer, strlen(Buffer));
+}
+
+void setAbortMessage(const char *Message) {}
+
+} // namespace scudo
+
+#endif // SCUDO_FUCHSIA
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/fuchsia.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/fuchsia.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/fuchsia.h (revision 351984)
@@ -0,0 +1,31 @@
+//===-- fuchsia.h -----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_FUCHSIA_H_
+#define SCUDO_FUCHSIA_H_
+
+#include "platform.h"
+
+#if SCUDO_FUCHSIA
+
+#include <zircon/process.h>
+
+namespace scudo {
+
+struct MapPlatformData {
+ zx_handle_t Vmar;
+ zx_handle_t Vmo;
+ uintptr_t VmarBase;
+ uint64_t VmoSize;
+};
+
+} // namespace scudo
+
+#endif // SCUDO_FUCHSIA
+
+#endif // SCUDO_FUCHSIA_H_
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/interface.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/interface.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/interface.h (revision 351984)
@@ -0,0 +1,29 @@
+//===-- interface.h ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_INTERFACE_H_
+#define SCUDO_INTERFACE_H_
+
+#include "internal_defs.h"
+
+extern "C" {
+
+WEAK INTERFACE const char *__scudo_default_options();
+
+// Post-allocation & pre-deallocation hooks.
+// They must be thread-safe and not use heap related functions.
+WEAK INTERFACE void __scudo_allocate_hook(void *ptr, size_t size);
+WEAK INTERFACE void __scudo_deallocate_hook(void *ptr);
+
+WEAK INTERFACE void __scudo_print_stats(void);
+
+typedef void (*iterate_callback)(uintptr_t base, size_t size, void *arg);
+
+} // extern "C"
+
+#endif // SCUDO_INTERFACE_H_
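+
+// Illustrative sketch (not part of the upstream header): a program can observe
+// heap activity by defining the weak hooks; per the constraints above they
+// must be thread-safe and must not call back into the heap:
+//   extern "C" void __scudo_allocate_hook(void *ptr, size_t size) {
+//     // Record (ptr, size) somewhere lock-free; no malloc/free in here.
+//   }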
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/internal_defs.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/internal_defs.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/internal_defs.h (revision 351984)
@@ -0,0 +1,135 @@
+//===-- internal_defs.h -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_INTERNAL_DEFS_H_
+#define SCUDO_INTERNAL_DEFS_H_
+
+#include "platform.h"
+
+#include <stdint.h>
+
+#ifndef SCUDO_DEBUG
+#define SCUDO_DEBUG 0
+#endif
+
+#define ARRAY_SIZE(A) (sizeof(A) / sizeof((A)[0]))
+
+// String related macros.
+
+#define STRINGIFY_(S) #S
+#define STRINGIFY(S) STRINGIFY_(S)
+#define CONCATENATE_(S, C) S##C
+#define CONCATENATE(S, C) CONCATENATE_(S, C)
+
+// Attributes & builtins related macros.
+
+#define INTERFACE __attribute__((visibility("default")))
+#define WEAK __attribute__((weak))
+#define INLINE inline
+#define ALWAYS_INLINE inline __attribute__((always_inline))
+#define ALIAS(X) __attribute__((alias(X)))
+// Please only use the ALIGNED macro before the type. Using ALIGNED after the
+// variable declaration is not portable.
+#define ALIGNED(X) __attribute__((aligned(X)))
+#define FORMAT(F, A) __attribute__((format(printf, F, A)))
+#define NOINLINE __attribute__((noinline))
+#define NORETURN __attribute__((noreturn))
+#define THREADLOCAL __thread
+#define LIKELY(X) __builtin_expect(!!(X), 1)
+#define UNLIKELY(X) __builtin_expect(!!(X), 0)
+#if defined(__i386__) || defined(__x86_64__)
+// __builtin_prefetch(X) generates prefetcht0 on x86
+#define PREFETCH(X) __asm__("prefetchnta (%0)" : : "r"(X))
+#else
+#define PREFETCH(X) __builtin_prefetch(X)
+#endif
+#define UNUSED __attribute__((unused))
+#define USED __attribute__((used))
+#define NOEXCEPT noexcept
+
+namespace scudo {
+
+typedef unsigned long uptr;
+typedef signed long sptr;
+typedef unsigned char u8;
+typedef unsigned short u16;
+typedef unsigned int u32;
+typedef unsigned long long u64;
+typedef signed char s8;
+typedef signed short s16;
+typedef signed int s32;
+typedef signed long long s64;
+
+// The following two functions have platform specific implementations.
+void outputRaw(const char *Buffer);
+void NORETURN die();
+
+#define RAW_CHECK_MSG(Expr, Msg) \
+ do { \
+ if (UNLIKELY(!(Expr))) { \
+ outputRaw(Msg); \
+ die(); \
+ } \
+ } while (false)
+
+#define RAW_CHECK(Expr) RAW_CHECK_MSG(Expr, #Expr)
+
+void NORETURN reportCheckFailed(const char *File, int Line,
+ const char *Condition, u64 Value1, u64 Value2);
+
+#define CHECK_IMPL(C1, Op, C2) \
+ do { \
+ u64 V1 = (u64)(C1); \
+ u64 V2 = (u64)(C2); \
+ if (UNLIKELY(!(V1 Op V2))) { \
+ reportCheckFailed(__FILE__, __LINE__, "(" #C1 ") " #Op " (" #C2 ")", V1, \
+ V2); \
+ die(); \
+ } \
+ } while (false)
+
+#define CHECK(A) CHECK_IMPL((A), !=, 0)
+#define CHECK_EQ(A, B) CHECK_IMPL((A), ==, (B))
+#define CHECK_NE(A, B) CHECK_IMPL((A), !=, (B))
+#define CHECK_LT(A, B) CHECK_IMPL((A), <, (B))
+#define CHECK_LE(A, B) CHECK_IMPL((A), <=, (B))
+#define CHECK_GT(A, B) CHECK_IMPL((A), >, (B))
+#define CHECK_GE(A, B) CHECK_IMPL((A), >=, (B))
+
+#if SCUDO_DEBUG
+#define DCHECK(A) CHECK(A)
+#define DCHECK_EQ(A, B) CHECK_EQ(A, B)
+#define DCHECK_NE(A, B) CHECK_NE(A, B)
+#define DCHECK_LT(A, B) CHECK_LT(A, B)
+#define DCHECK_LE(A, B) CHECK_LE(A, B)
+#define DCHECK_GT(A, B) CHECK_GT(A, B)
+#define DCHECK_GE(A, B) CHECK_GE(A, B)
+#else
+#define DCHECK(A)
+#define DCHECK_EQ(A, B)
+#define DCHECK_NE(A, B)
+#define DCHECK_LT(A, B)
+#define DCHECK_LE(A, B)
+#define DCHECK_GT(A, B)
+#define DCHECK_GE(A, B)
+#endif
+
+// The superfluous die() call effectively makes this macro NORETURN.
+#define UNREACHABLE(Msg) \
+ do { \
+ CHECK(0 && Msg); \
+ die(); \
+ } while (0)
+
+#define COMPILER_CHECK(Pred) static_assert(Pred, "")
+
+enum LinkerInitialized { LINKER_INITIALIZED = 0 };
+
+} // namespace scudo
+
+#endif // SCUDO_INTERNAL_DEFS_H_
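+
+// Illustrative sketch (not part of the upstream header): CHECK* macros are
+// always compiled in and die() via reportCheckFailed() on failure, while
+// DCHECK* macros compile away unless SCUDO_DEBUG is set. copyWords is
+// hypothetical:
+//   void copyWords(const u32 *Src, u32 *Dst, uptr Count) {
+//     CHECK_NE(Src, nullptr);  // always enforced
+//     DCHECK_LE(Count, 1024U); // debug-only sanity check
+//     for (uptr I = 0; I < Count; I++)
+//       Dst[I] = Src[I];
+//   }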
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/linux.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/linux.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/linux.cc (revision 351984)
@@ -0,0 +1,171 @@
+//===-- linux.cc ------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "platform.h"
+
+#if SCUDO_LINUX
+
+#include "common.h"
+#include "linux.h"
+#include "mutex.h"
+#include "string_utils.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <linux/futex.h>
+#include <sched.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <sys/syscall.h>
+#include <sys/time.h>
+#include <time.h>
+#include <unistd.h>
+
+#if SCUDO_ANDROID
+#include <sys/prctl.h>
+// Definitions of prctl arguments to set a vma name in Android kernels.
+#define ANDROID_PR_SET_VMA 0x53564d41
+#define ANDROID_PR_SET_VMA_ANON_NAME 0
+#endif
+
+namespace scudo {
+
+uptr getPageSize() { return static_cast<uptr>(sysconf(_SC_PAGESIZE)); }
+
+void NORETURN die() { abort(); }
+
+void *map(void *Addr, uptr Size, UNUSED const char *Name, uptr Flags,
+ UNUSED MapPlatformData *Data) {
+ int MmapFlags = MAP_PRIVATE | MAP_ANON;
+ int MmapProt;
+ if (Flags & MAP_NOACCESS) {
+ MmapFlags |= MAP_NORESERVE;
+ MmapProt = PROT_NONE;
+ } else {
+ MmapProt = PROT_READ | PROT_WRITE;
+ }
+ if (Addr) {
+ // Currently no scenario for a noaccess mapping with a fixed address.
+ DCHECK_EQ(Flags & MAP_NOACCESS, 0);
+ MmapFlags |= MAP_FIXED;
+ }
+ void *P = mmap(Addr, Size, MmapProt, MmapFlags, -1, 0);
+ if (P == MAP_FAILED) {
+ if (!(Flags & MAP_ALLOWNOMEM) || errno != ENOMEM)
+ dieOnMapUnmapError(errno == ENOMEM);
+ return nullptr;
+ }
+#if SCUDO_ANDROID
+ if (!(Flags & MAP_NOACCESS))
+ prctl(ANDROID_PR_SET_VMA, ANDROID_PR_SET_VMA_ANON_NAME, P, Size, Name);
+#endif
+ return P;
+}
+
+void unmap(void *Addr, uptr Size, UNUSED uptr Flags,
+ UNUSED MapPlatformData *Data) {
+ if (munmap(Addr, Size) != 0)
+ dieOnMapUnmapError();
+}
+
+void releasePagesToOS(uptr BaseAddress, uptr Offset, uptr Size,
+ UNUSED MapPlatformData *Data) {
+ void *Addr = reinterpret_cast<void *>(BaseAddress + Offset);
+ while (madvise(Addr, Size, MADV_DONTNEED) == -1 && errno == EAGAIN) {
+ }
+}
+
+// Calling getenv should be fine (c)(tm) at any time.
+const char *getEnv(const char *Name) { return getenv(Name); }
+
+namespace {
+enum State : u32 { Unlocked = 0, Locked = 1, Sleeping = 2 };
+}
+
+bool HybridMutex::tryLock() {
+ return atomic_compare_exchange(&M, Unlocked, Locked) == Unlocked;
+}
+
+// The following is based on https://akkadia.org/drepper/futex.pdf.
+void HybridMutex::lockSlow() {
+ u32 V = atomic_compare_exchange(&M, Unlocked, Locked);
+ if (V == Unlocked)
+ return;
+ if (V != Sleeping)
+ V = atomic_exchange(&M, Sleeping, memory_order_acquire);
+ while (V != Unlocked) {
+ syscall(SYS_futex, reinterpret_cast<uptr>(&M), FUTEX_WAIT_PRIVATE, Sleeping,
+ nullptr, nullptr, 0);
+ V = atomic_exchange(&M, Sleeping, memory_order_acquire);
+ }
+}
+
+void HybridMutex::unlock() {
+ if (atomic_fetch_sub(&M, 1U, memory_order_release) != Locked) {
+ atomic_store(&M, Unlocked, memory_order_release);
+ syscall(SYS_futex, reinterpret_cast<uptr>(&M), FUTEX_WAKE_PRIVATE, 1,
+ nullptr, nullptr, 0);
+ }
+}
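+
+// Illustrative walk-through (not part of the upstream file): M cycles through
+// Unlocked (0), Locked (1) and Sleeping (2). unlock()'s fetch_sub turns Locked
+// back into Unlocked; if the previous value was Sleeping instead, a waiter may
+// exist, so the mutex is force-stored to Unlocked and one waiter is woken.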
+
+u64 getMonotonicTime() {
+ timespec TS;
+ clock_gettime(CLOCK_MONOTONIC, &TS);
+ return static_cast<u64>(TS.tv_sec) * (1000ULL * 1000 * 1000) +
+ static_cast<u64>(TS.tv_nsec);
+}
+
+u32 getNumberOfCPUs() {
+ cpu_set_t CPUs;
+ CHECK_EQ(sched_getaffinity(0, sizeof(cpu_set_t), &CPUs), 0);
+ return static_cast<u32>(CPU_COUNT(&CPUs));
+}
+
+// Blocking is possibly unused if the getrandom block is not compiled in.
+bool getRandom(void *Buffer, uptr Length, UNUSED bool Blocking) {
+ if (!Buffer || !Length || Length > MaxRandomLength)
+ return false;
+ ssize_t ReadBytes;
+#if defined(SYS_getrandom)
+#if !defined(GRND_NONBLOCK)
+#define GRND_NONBLOCK 1
+#endif
+ // Up to 256 bytes, getrandom will not be interrupted.
+ ReadBytes =
+ syscall(SYS_getrandom, Buffer, Length, Blocking ? 0 : GRND_NONBLOCK);
+ if (ReadBytes == static_cast<ssize_t>(Length))
+ return true;
+#endif // defined(SYS_getrandom)
+ // Up to 256 bytes, a read off /dev/urandom will not be interrupted.
+ // Blocking is moot here, O_NONBLOCK has no effect when opening /dev/urandom.
+ const int FileDesc = open("/dev/urandom", O_RDONLY);
+ if (FileDesc == -1)
+ return false;
+ ReadBytes = read(FileDesc, Buffer, Length);
+ close(FileDesc);
+ return (ReadBytes == static_cast<ssize_t>(Length));
+}
+
+void outputRaw(const char *Buffer) {
+ static HybridMutex Mutex;
+ ScopedLock L(Mutex);
+ write(2, Buffer, strlen(Buffer));
+}
+
+extern "C" WEAK void android_set_abort_message(const char *);
+
+void setAbortMessage(const char *Message) {
+ if (&android_set_abort_message)
+ android_set_abort_message(Message);
+}
+
+} // namespace scudo
+
+#endif // SCUDO_LINUX
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/linux.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/linux.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/linux.h (revision 351984)
@@ -0,0 +1,70 @@
+//===-- linux.h -------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_LINUX_H_
+#define SCUDO_LINUX_H_
+
+#include "platform.h"
+
+#if SCUDO_LINUX
+
+namespace scudo {
+
+// MapPlatformData is unused on Linux; define it as a minimally sized structure.
+struct MapPlatformData {};
+
+#if SCUDO_ANDROID
+
+#if defined(__aarch64__)
+#define __get_tls() \
+ ({ \
+ void **__v; \
+ __asm__("mrs %0, tpidr_el0" : "=r"(__v)); \
+ __v; \
+ })
+#elif defined(__arm__)
+#define __get_tls() \
+ ({ \
+ void **__v; \
+ __asm__("mrc p15, 0, %0, c13, c0, 3" : "=r"(__v)); \
+ __v; \
+ })
+#elif defined(__i386__)
+#define __get_tls() \
+ ({ \
+ void **__v; \
+ __asm__("movl %%gs:0, %0" : "=r"(__v)); \
+ __v; \
+ })
+#elif defined(__x86_64__)
+#define __get_tls() \
+ ({ \
+ void **__v; \
+ __asm__("mov %%fs:0, %0" : "=r"(__v)); \
+ __v; \
+ })
+#else
+#error "Unsupported architecture."
+#endif
+
+// The Android Bionic team has allocated a TLS slot for sanitizers starting
+// with Q, given that Android currently doesn't support ELF TLS. It is used to
+// store sanitizer thread specific data.
+static const int TLS_SLOT_SANITIZER = 8; // TODO(kostyak): 6 for Q!!
+
+ALWAYS_INLINE uptr *getAndroidTlsPtr() {
+ return reinterpret_cast<uptr *>(&__get_tls()[TLS_SLOT_SANITIZER]);
+}
+
+#endif // SCUDO_ANDROID
+
+} // namespace scudo
+
+#endif // SCUDO_LINUX
+
+#endif // SCUDO_LINUX_H_
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/list.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/list.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/list.h (revision 351984)
@@ -0,0 +1,156 @@
+//===-- list.h --------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_LIST_H_
+#define SCUDO_LIST_H_
+
+#include "internal_defs.h"
+
+namespace scudo {
+
+// Intrusive POD singly-linked list.
+// An object with all zero fields should represent a valid empty list. clear()
+// should be called on all non-zero-initialized objects before use.
+template <class Item> struct IntrusiveList {
+ friend class Iterator;
+
+ void clear() {
+ First = Last = nullptr;
+ Size = 0;
+ }
+
+ bool empty() const { return Size == 0; }
+ uptr size() const { return Size; }
+
+ void push_back(Item *X) {
+ if (empty()) {
+ X->Next = nullptr;
+ First = Last = X;
+ Size = 1;
+ } else {
+ X->Next = nullptr;
+ Last->Next = X;
+ Last = X;
+ Size++;
+ }
+ }
+
+ void push_front(Item *X) {
+ if (empty()) {
+ X->Next = nullptr;
+ First = Last = X;
+ Size = 1;
+ } else {
+ X->Next = First;
+ First = X;
+ Size++;
+ }
+ }
+
+ void pop_front() {
+ DCHECK(!empty());
+ First = First->Next;
+ if (!First)
+ Last = nullptr;
+ Size--;
+ }
+
+ void extract(Item *Prev, Item *X) {
+ DCHECK(!empty());
+ DCHECK_NE(Prev, nullptr);
+ DCHECK_NE(X, nullptr);
+ DCHECK_EQ(Prev->Next, X);
+ Prev->Next = X->Next;
+ if (Last == X)
+ Last = Prev;
+ Size--;
+ }
+
+ Item *front() { return First; }
+ const Item *front() const { return First; }
+ Item *back() { return Last; }
+ const Item *back() const { return Last; }
+
+ void append_front(IntrusiveList<Item> *L) {
+ DCHECK_NE(this, L);
+ if (L->empty())
+ return;
+ if (empty()) {
+ *this = *L;
+ } else if (!L->empty()) {
+ L->Last->Next = First;
+ First = L->First;
+ Size += L->size();
+ }
+ L->clear();
+ }
+
+ void append_back(IntrusiveList<Item> *L) {
+ DCHECK_NE(this, L);
+ if (L->empty())
+ return;
+ if (empty()) {
+ *this = *L;
+ } else {
+ Last->Next = L->First;
+ Last = L->Last;
+ Size += L->size();
+ }
+ L->clear();
+ }
+
+ void checkConsistency() {
+ if (Size == 0) {
+ CHECK_EQ(First, 0);
+ CHECK_EQ(Last, 0);
+ } else {
+ uptr count = 0;
+ for (Item *I = First;; I = I->Next) {
+ count++;
+ if (I == Last)
+ break;
+ }
+ CHECK_EQ(size(), count);
+ CHECK_EQ(Last->Next, 0);
+ }
+ }
+
+ template <class ItemT> class IteratorBase {
+ public:
+ explicit IteratorBase(ItemT *CurrentItem) : Current(CurrentItem) {}
+ IteratorBase &operator++() {
+ Current = Current->Next;
+ return *this;
+ }
+ bool operator!=(IteratorBase Other) const {
+ return Current != Other.Current;
+ }
+ ItemT &operator*() { return *Current; }
+
+ private:
+ ItemT *Current;
+ };
+
+ typedef IteratorBase<Item> Iterator;
+ typedef IteratorBase<const Item> ConstIterator;
+
+ Iterator begin() { return Iterator(First); }
+ Iterator end() { return Iterator(nullptr); }
+
+ ConstIterator begin() const { return ConstIterator(First); }
+ ConstIterator end() const { return ConstIterator(nullptr); }
+
+private:
+ uptr Size;
+ Item *First;
+ Item *Last;
+};
+
+} // namespace scudo
+
+#endif // SCUDO_LIST_H_
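+
+// Illustrative sketch (not part of the upstream header): the list is
+// intrusive, so the item type itself carries the Next pointer, and stack
+// objects must be clear()ed before use. Node is a hypothetical item type:
+//   struct Node {
+//     Node *Next; // required by IntrusiveList
+//     uptr Value;
+//   };
+//   Node A = {nullptr, 1}, B = {nullptr, 2};
+//   IntrusiveList<Node> List;
+//   List.clear();        // not zero-initialized, so clear() first
+//   List.push_back(&A);
+//   List.push_front(&B); // iteration order is now B, A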
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/local_cache.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/local_cache.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/local_cache.h (revision 351984)
@@ -0,0 +1,181 @@
+//===-- local_cache.h -------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_LOCAL_CACHE_H_
+#define SCUDO_LOCAL_CACHE_H_
+
+#include "internal_defs.h"
+#include "report.h"
+#include "stats.h"
+
+namespace scudo {
+
+template <class SizeClassAllocator> struct SizeClassAllocatorLocalCache {
+ typedef typename SizeClassAllocator::SizeClassMap SizeClassMap;
+
+ struct TransferBatch {
+ static const u32 MaxNumCached = SizeClassMap::MaxNumCachedHint;
+ void setFromArray(void **Array, u32 N) {
+ DCHECK_LE(N, MaxNumCached);
+ for (u32 I = 0; I < N; I++)
+ Batch[I] = Array[I];
+ Count = N;
+ }
+ void clear() { Count = 0; }
+ void add(void *P) {
+ DCHECK_LT(Count, MaxNumCached);
+ Batch[Count++] = P;
+ }
+ void copyToArray(void **Array) const {
+ for (u32 I = 0; I < Count; I++)
+ Array[I] = Batch[I];
+ }
+ u32 getCount() const { return Count; }
+ void *get(u32 I) const {
+ DCHECK_LE(I, Count);
+ return Batch[I];
+ }
+ static u32 getMaxCached(uptr Size) {
+ return Min(MaxNumCached, SizeClassMap::getMaxCachedHint(Size));
+ }
+ TransferBatch *Next;
+
+ private:
+ u32 Count;
+ void *Batch[MaxNumCached];
+ };
+
+ void initLinkerInitialized(GlobalStats *S, SizeClassAllocator *A) {
+ Stats.initLinkerInitialized();
+ if (S)
+ S->link(&Stats);
+ Allocator = A;
+ }
+
+ void init(GlobalStats *S, SizeClassAllocator *A) {
+ memset(this, 0, sizeof(*this));
+ initLinkerInitialized(S, A);
+ }
+
+ void destroy(GlobalStats *S) {
+ drain();
+ if (S)
+ S->unlink(&Stats);
+ }
+
+ void *allocate(uptr ClassId) {
+ CHECK_LT(ClassId, NumClasses);
+ PerClass *C = &PerClassArray[ClassId];
+ if (C->Count == 0) {
+ if (UNLIKELY(!refill(C, ClassId)))
+ return nullptr;
+ DCHECK_GT(C->Count, 0);
+ }
+ // We read ClassSize first before accessing Chunks because it's adjacent to
+ // Count, while Chunks might be further off (depending on Count). That keeps
+ // the memory accesses in close quarters.
+ const uptr ClassSize = C->ClassSize;
+ void *P = C->Chunks[--C->Count];
+ // The jury is still out as to whether any kind of PREFETCH here increases
+ // performance. It definitely decreases performance on Android though.
+ // if (!SCUDO_ANDROID) PREFETCH(P);
+ Stats.add(StatAllocated, ClassSize);
+ return P;
+ }
+
+ void deallocate(uptr ClassId, void *P) {
+ CHECK_LT(ClassId, NumClasses);
+ PerClass *C = &PerClassArray[ClassId];
+ // We still have to initialize the cache in the event that the first heap
+ // operation in a thread is a deallocation.
+ initCacheMaybe(C);
+ if (C->Count == C->MaxCount)
+ drain(C, ClassId);
+ // See comment in allocate() about memory accesses.
+ const uptr ClassSize = C->ClassSize;
+ C->Chunks[C->Count++] = P;
+ Stats.sub(StatAllocated, ClassSize);
+ }
+
+ void drain() {
+ for (uptr I = 0; I < NumClasses; I++) {
+ PerClass *C = &PerClassArray[I];
+ while (C->Count > 0)
+ drain(C, I);
+ }
+ }
+
+ TransferBatch *createBatch(uptr ClassId, void *B) {
+ if (ClassId != SizeClassMap::BatchClassId)
+ B = allocate(SizeClassMap::BatchClassId);
+ return reinterpret_cast<TransferBatch *>(B);
+ }
+
+ LocalStats &getStats() { return Stats; }
+
+private:
+ static const uptr NumClasses = SizeClassMap::NumClasses;
+ struct PerClass {
+ u32 Count;
+ u32 MaxCount;
+ uptr ClassSize;
+ void *Chunks[2 * TransferBatch::MaxNumCached];
+ };
+ PerClass PerClassArray[NumClasses];
+ LocalStats Stats;
+ SizeClassAllocator *Allocator;
+
+ ALWAYS_INLINE void initCacheMaybe(PerClass *C) {
+ if (LIKELY(C->MaxCount))
+ return;
+ initCache();
+ DCHECK_NE(C->MaxCount, 0U);
+ }
+
+ NOINLINE void initCache() {
+ for (uptr I = 0; I < NumClasses; I++) {
+ PerClass *P = &PerClassArray[I];
+ const uptr Size = SizeClassAllocator::getSizeByClassId(I);
+ P->MaxCount = 2 * TransferBatch::getMaxCached(Size);
+ P->ClassSize = Size;
+ }
+ }
+
+ void destroyBatch(uptr ClassId, void *B) {
+ if (ClassId != SizeClassMap::BatchClassId)
+ deallocate(SizeClassMap::BatchClassId, B);
+ }
+
+ NOINLINE bool refill(PerClass *C, uptr ClassId) {
+ initCacheMaybe(C);
+ TransferBatch *B = Allocator->popBatch(this, ClassId);
+ if (UNLIKELY(!B))
+ return false;
+ DCHECK_GT(B->getCount(), 0);
+ B->copyToArray(C->Chunks);
+ C->Count = B->getCount();
+ destroyBatch(ClassId, B);
+ return true;
+ }
+
+ NOINLINE void drain(PerClass *C, uptr ClassId) {
+ const u32 Count = Min(C->MaxCount / 2, C->Count);
+ const uptr FirstIndexToDrain = C->Count - Count;
+ TransferBatch *B = createBatch(ClassId, C->Chunks[FirstIndexToDrain]);
+ if (UNLIKELY(!B))
+ reportOutOfMemory(
+ SizeClassAllocator::getSizeByClassId(SizeClassMap::BatchClassId));
+ B->setFromArray(&C->Chunks[FirstIndexToDrain], Count);
+ C->Count -= Count;
+ Allocator->pushBatch(ClassId, B);
+ }
+};
+
+} // namespace scudo
+
+#endif // SCUDO_LOCAL_CACHE_H_
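+
+// Illustrative sketch (not part of the upstream header) of the cache flow:
+// allocate() pops from the per-class array, refill()ing from the shared
+// allocator when empty; deallocate() pushes and drain()s half the array back
+// once MaxCount is reached. Primary, Stats and ClassId are hypothetical:
+//   SizeClassAllocatorLocalCache<Primary> Cache;
+//   Cache.init(&Stats, &Allocator); // GlobalStats *, Primary * (assumptions)
+//   void *P = Cache.allocate(ClassId);
+//   Cache.deallocate(ClassId, P);
+//   Cache.destroy(&Stats);          // drains remaining cached blocks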
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/mutex.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/mutex.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/mutex.h (revision 351984)
@@ -0,0 +1,73 @@
+//===-- mutex.h -------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_MUTEX_H_
+#define SCUDO_MUTEX_H_
+
+#include "atomic_helpers.h"
+#include "common.h"
+
+#include <string.h>
+
+#if SCUDO_FUCHSIA
+#include <lib/sync/mutex.h> // for sync_mutex_t
+#endif
+
+namespace scudo {
+
+class HybridMutex {
+public:
+ void init() { memset(this, 0, sizeof(*this)); }
+ bool tryLock();
+ NOINLINE void lock() {
+ if (tryLock())
+ return;
+ // The compiler may try to fully unroll the loop, ending up in a
+ // NumberOfTries*NumberOfYields block of pauses mixed with tryLocks. This
+ // is large, ugly and unneeded; a compact loop is better for our purpose
+ // here. Use a pragma to tell the compiler not to unroll the loop.
+#ifdef __clang__
+#pragma nounroll
+#endif
+ for (u8 I = 0U; I < NumberOfTries; I++) {
+ yieldProcessor(NumberOfYields);
+ if (tryLock())
+ return;
+ }
+ lockSlow();
+ }
+ void unlock();
+
+private:
+ static constexpr u8 NumberOfTries = 10U;
+ static constexpr u8 NumberOfYields = 10U;
+
+#if SCUDO_LINUX
+ atomic_u32 M;
+#elif SCUDO_FUCHSIA
+ sync_mutex_t M;
+#endif
+
+ void lockSlow();
+};
+
+class ScopedLock {
+public:
+ explicit ScopedLock(HybridMutex &M) : Mutex(M) { Mutex.lock(); }
+ ~ScopedLock() { Mutex.unlock(); }
+
+private:
+ HybridMutex &Mutex;
+
+ ScopedLock(const ScopedLock &) = delete;
+ void operator=(const ScopedLock &) = delete;
+};
+
+} // namespace scudo
+
+#endif // SCUDO_MUTEX_H_
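+
+// Illustrative sketch (not part of the upstream header): lock() spins for up
+// to NumberOfTries rounds of yieldProcessor() before blocking in lockSlow();
+// ScopedLock is the RAII wrapper used throughout the allocator:
+//   HybridMutex Mutex; // zero-initialized, or init()-ed when reused
+//   uptr Counter = 0;
+//   void increment() {
+//     ScopedLock L(Mutex); // unlock() runs on scope exit
+//     Counter++;
+//   }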
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/platform.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/platform.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/platform.h (revision 351984)
@@ -0,0 +1,70 @@
+//===-- platform.h ----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_PLATFORM_H_
+#define SCUDO_PLATFORM_H_
+
+#if defined(__linux__)
+#define SCUDO_LINUX 1
+#else
+#define SCUDO_LINUX 0
+#endif
+
+#if defined(__ANDROID__)
+#define SCUDO_ANDROID 1
+#else
+#define SCUDO_ANDROID 0
+#endif
+
+#if defined(__Fuchsia__)
+#define SCUDO_FUCHSIA 1
+#else
+#define SCUDO_FUCHSIA 0
+#endif
+
+#if __LP64__
+#define SCUDO_WORDSIZE 64U
+#else
+#define SCUDO_WORDSIZE 32U
+#endif
+
+#if SCUDO_WORDSIZE == 64U
+#define FIRST_32_SECOND_64(a, b) (b)
+#else
+#define FIRST_32_SECOND_64(a, b) (a)
+#endif
+
+#ifndef SCUDO_CAN_USE_PRIMARY64
+#define SCUDO_CAN_USE_PRIMARY64 (SCUDO_WORDSIZE == 64U)
+#endif
+
+#ifndef SCUDO_MIN_ALIGNMENT_LOG
+// We force malloc-type functions to be aligned to std::max_align_t, but there
+// is no reason why the minimum alignment for all other functions can't be 8
+// bytes. Except obviously for applications making incorrect assumptions.
+// TODO(kostyak): define SCUDO_MIN_ALIGNMENT_LOG 3
+#define SCUDO_MIN_ALIGNMENT_LOG FIRST_32_SECOND_64(3, 4)
+#endif
+
+#if defined(__aarch64__)
+#define SCUDO_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 48)
+#else
+#define SCUDO_MMAP_RANGE_SIZE FIRST_32_SECOND_64(1ULL << 32, 1ULL << 47)
+#endif
+
+// Older gcc versions have issues aligning to a constexpr, and require an
+// integer.
+// See https://gcc.gnu.org/bugzilla/show_bug.cgi?id=56859 among others.
+#if defined(__powerpc__) || defined(__powerpc64__)
+#define SCUDO_CACHE_LINE_SIZE 128
+#else
+#define SCUDO_CACHE_LINE_SIZE 64
+#endif
+
+#define SCUDO_POINTER_FORMAT_LENGTH FIRST_32_SECOND_64(8, 12)
+
+#endif // SCUDO_PLATFORM_H_
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/primary32.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/primary32.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/primary32.h (revision 351984)
@@ -0,0 +1,401 @@
+//===-- primary32.h ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_PRIMARY32_H_
+#define SCUDO_PRIMARY32_H_
+
+#include "bytemap.h"
+#include "common.h"
+#include "list.h"
+#include "local_cache.h"
+#include "release.h"
+#include "report.h"
+#include "stats.h"
+#include "string_utils.h"
+
+namespace scudo {
+
+// SizeClassAllocator32 is an allocator for 32- or 64-bit address spaces.
+//
+// It maps Regions of 2^RegionSizeLog bytes aligned on a 2^RegionSizeLog-byte
+// boundary, and keeps a bytemap of the mappable address space to track the
+// size class each region is associated with.
+//
+// Mapped regions are split into equally sized Blocks according to the size
+// class they belong to, and the associated pointers are shuffled to prevent any
+// predictable address pattern (the predictability increases with the block
+// size).
+//
+// Regions for size class 0 are special and used to hold TransferBatches, which
+// allow transferring arrays of pointers from the global size class freelist to
+// the thread specific freelist for said class, and back.
+//
+// Memory used by this allocator is never unmapped but can be partially
+// reclaimed if the platform allows for it.
+
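+// Illustrative sketch (not part of the upstream header): with, say,
+// RegionSizeLog == 20 (1 MB regions), a block's region index is its address
+// shifted right by RegionSizeLog, and the bytemap entry at that index holds
+// the class id the region serves:
+//   computeRegionId(0x12345678) == (0x12345678 >> 20) == 0x123
+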
+template <class SizeClassMapT, uptr RegionSizeLog> class SizeClassAllocator32 {
+public:
+ typedef SizeClassMapT SizeClassMap;
+ // Regions should be large enough to hold the largest Block.
+ COMPILER_CHECK((1UL << RegionSizeLog) >= SizeClassMap::MaxSize);
+ typedef SizeClassAllocator32<SizeClassMapT, RegionSizeLog> ThisT;
+ typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
+ typedef typename CacheT::TransferBatch TransferBatch;
+
+ static uptr getSizeByClassId(uptr ClassId) {
+ return (ClassId == SizeClassMap::BatchClassId)
+ ? sizeof(TransferBatch)
+ : SizeClassMap::getSizeByClassId(ClassId);
+ }
+
+ static bool canAllocate(uptr Size) { return Size <= SizeClassMap::MaxSize; }
+
+ void initLinkerInitialized(s32 ReleaseToOsInterval) {
+ if (SCUDO_FUCHSIA)
+ reportError("SizeClassAllocator32 is not supported on Fuchsia");
+
+ PossibleRegions.initLinkerInitialized();
+ MinRegionIndex = NumRegions; // MaxRegionIndex is already initialized to 0.
+
+ u32 Seed;
+ if (UNLIKELY(!getRandom(reinterpret_cast<void *>(&Seed), sizeof(Seed))))
+ Seed =
+ static_cast<u32>(getMonotonicTime() ^
+ (reinterpret_cast<uptr>(SizeClassInfoArray) >> 6));
+ const uptr PageSize = getPageSizeCached();
+ for (uptr I = 0; I < NumClasses; I++) {
+ SizeClassInfo *Sci = getSizeClassInfo(I);
+ Sci->RandState = getRandomU32(&Seed);
+ // See comment in the 64-bit primary about releasing smaller size classes.
+ Sci->CanRelease = (ReleaseToOsInterval > 0) &&
+ (I != SizeClassMap::BatchClassId) &&
+ (getSizeByClassId(I) >= (PageSize / 32));
+ }
+ ReleaseToOsIntervalMs = ReleaseToOsInterval;
+ }
+ void init(s32 ReleaseToOsInterval) {
+ memset(this, 0, sizeof(*this));
+ initLinkerInitialized(ReleaseToOsInterval);
+ }
+
+ void unmapTestOnly() {
+ while (NumberOfStashedRegions > 0)
+ unmap(reinterpret_cast<void *>(RegionsStash[--NumberOfStashedRegions]),
+ RegionSize);
+ // TODO(kostyak): unmap the TransferBatch regions as well.
+ for (uptr I = 0; I < NumRegions; I++)
+ if (PossibleRegions[I])
+ unmap(reinterpret_cast<void *>(I * RegionSize), RegionSize);
+ PossibleRegions.unmapTestOnly();
+ }
+
+ TransferBatch *popBatch(CacheT *C, uptr ClassId) {
+ DCHECK_LT(ClassId, NumClasses);
+ SizeClassInfo *Sci = getSizeClassInfo(ClassId);
+ ScopedLock L(Sci->Mutex);
+ TransferBatch *B = Sci->FreeList.front();
+ if (B)
+ Sci->FreeList.pop_front();
+ else {
+ B = populateFreeList(C, ClassId, Sci);
+ if (UNLIKELY(!B))
+ return nullptr;
+ }
+ DCHECK_GT(B->getCount(), 0);
+ Sci->Stats.PoppedBlocks += B->getCount();
+ return B;
+ }
+
+ void pushBatch(uptr ClassId, TransferBatch *B) {
+ DCHECK_LT(ClassId, NumClasses);
+ DCHECK_GT(B->getCount(), 0);
+ SizeClassInfo *Sci = getSizeClassInfo(ClassId);
+ ScopedLock L(Sci->Mutex);
+ Sci->FreeList.push_front(B);
+ Sci->Stats.PushedBlocks += B->getCount();
+ if (Sci->CanRelease)
+ releaseToOSMaybe(Sci, ClassId);
+ }
+
+ void disable() {
+ for (uptr I = 0; I < NumClasses; I++)
+ getSizeClassInfo(I)->Mutex.lock();
+ }
+
+ void enable() {
+ for (sptr I = static_cast<sptr>(NumClasses) - 1; I >= 0; I--)
+ getSizeClassInfo(I)->Mutex.unlock();
+ }
+
+ template <typename F> void iterateOverBlocks(F Callback) {
+ for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++)
+ if (PossibleRegions[I]) {
+ const uptr BlockSize = getSizeByClassId(PossibleRegions[I]);
+ const uptr From = I * RegionSize;
+ const uptr To = From + (RegionSize / BlockSize) * BlockSize;
+ for (uptr Block = From; Block < To; Block += BlockSize)
+ Callback(Block);
+ }
+ }
+
+ void printStats() {
+ // TODO(kostyak): get the RSS per region.
+ uptr TotalMapped = 0;
+ uptr PoppedBlocks = 0;
+ uptr PushedBlocks = 0;
+ for (uptr I = 0; I < NumClasses; I++) {
+ SizeClassInfo *Sci = getSizeClassInfo(I);
+ TotalMapped += Sci->AllocatedUser;
+ PoppedBlocks += Sci->Stats.PoppedBlocks;
+ PushedBlocks += Sci->Stats.PushedBlocks;
+ }
+ Printf("Stats: SizeClassAllocator32: %zuM mapped in %zu allocations; "
+ "remains %zu\n",
+ TotalMapped >> 20, PoppedBlocks, PoppedBlocks - PushedBlocks);
+ for (uptr I = 0; I < NumClasses; I++)
+ printStats(I, 0);
+ }
+
+ void releaseToOS() {
+ for (uptr I = 0; I < NumClasses; I++) {
+ if (I == SizeClassMap::BatchClassId)
+ continue;
+ SizeClassInfo *Sci = getSizeClassInfo(I);
+ ScopedLock L(Sci->Mutex);
+ releaseToOSMaybe(Sci, I, /*Force=*/true);
+ }
+ }
+
+private:
+ static const uptr NumClasses = SizeClassMap::NumClasses;
+ static const uptr RegionSize = 1UL << RegionSizeLog;
+ static const uptr NumRegions = SCUDO_MMAP_RANGE_SIZE >> RegionSizeLog;
+#if SCUDO_WORDSIZE == 32U
+ typedef FlatByteMap<NumRegions> ByteMap;
+#else
+ typedef TwoLevelByteMap<(NumRegions >> 12), 1UL << 12> ByteMap;
+#endif
+
+ struct SizeClassStats {
+ uptr PoppedBlocks;
+ uptr PushedBlocks;
+ };
+
+ struct ReleaseToOsInfo {
+ uptr PushedBlocksAtLastRelease;
+ uptr RangesReleased;
+ uptr LastReleasedBytes;
+ u64 LastReleaseAtNs;
+ };
+
+ struct ALIGNED(SCUDO_CACHE_LINE_SIZE) SizeClassInfo {
+ HybridMutex Mutex;
+ IntrusiveList<TransferBatch> FreeList;
+ SizeClassStats Stats;
+ bool CanRelease;
+ u32 RandState;
+ uptr AllocatedUser;
+ ReleaseToOsInfo ReleaseInfo;
+ };
+ COMPILER_CHECK(sizeof(SizeClassInfo) % SCUDO_CACHE_LINE_SIZE == 0);
+
+ uptr computeRegionId(uptr Mem) {
+ const uptr Id = Mem >> RegionSizeLog;
+ CHECK_LT(Id, NumRegions);
+ return Id;
+ }
+
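+  // Illustrative sketch of the slow path below, assuming RegionSizeLog == 20
+  // (1MB regions): map 2MB so that a 1MB-aligned region necessarily fits. If
+  // the returned base happens to be aligned already, stash the trailing 1MB
+  // for a future request (or trim it if the stash is full); otherwise, round
+  // the base up to the next 1MB boundary and trim the excess at both ends.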
+ uptr allocateRegionSlow() {
+ uptr MapSize = 2 * RegionSize;
+ const uptr MapBase = reinterpret_cast<uptr>(
+ map(nullptr, MapSize, "scudo:primary", MAP_ALLOWNOMEM));
+ if (UNLIKELY(!MapBase))
+ return 0;
+ const uptr MapEnd = MapBase + MapSize;
+ uptr Region = MapBase;
+ if (isAligned(Region, RegionSize)) {
+ ScopedLock L(RegionsStashMutex);
+ if (NumberOfStashedRegions < MaxStashedRegions)
+ RegionsStash[NumberOfStashedRegions++] = MapBase + RegionSize;
+ else
+ MapSize = RegionSize;
+ } else {
+ Region = roundUpTo(MapBase, RegionSize);
+ unmap(reinterpret_cast<void *>(MapBase), Region - MapBase);
+ MapSize = RegionSize;
+ }
+ const uptr End = Region + MapSize;
+ if (End != MapEnd)
+ unmap(reinterpret_cast<void *>(End), MapEnd - End);
+ return Region;
+ }
+
+ uptr allocateRegion(uptr ClassId) {
+ DCHECK_LT(ClassId, NumClasses);
+ uptr Region = 0;
+ {
+ ScopedLock L(RegionsStashMutex);
+ if (NumberOfStashedRegions > 0)
+ Region = RegionsStash[--NumberOfStashedRegions];
+ }
+ if (!Region)
+ Region = allocateRegionSlow();
+ if (LIKELY(Region)) {
+ if (ClassId) {
+ const uptr RegionIndex = computeRegionId(Region);
+ if (RegionIndex < MinRegionIndex)
+ MinRegionIndex = RegionIndex;
+ if (RegionIndex > MaxRegionIndex)
+ MaxRegionIndex = RegionIndex;
+ PossibleRegions.set(RegionIndex, static_cast<u8>(ClassId));
+ }
+ }
+ return Region;
+ }
+
+ SizeClassInfo *getSizeClassInfo(uptr ClassId) {
+ DCHECK_LT(ClassId, NumClasses);
+ return &SizeClassInfoArray[ClassId];
+ }
+
+ bool populateBatches(CacheT *C, SizeClassInfo *Sci, uptr ClassId,
+ TransferBatch **CurrentBatch, u32 MaxCount,
+ void **PointersArray, u32 Count) {
+ if (ClassId != SizeClassMap::BatchClassId)
+ shuffle(PointersArray, Count, &Sci->RandState);
+ TransferBatch *B = *CurrentBatch;
+ for (uptr I = 0; I < Count; I++) {
+ if (B && B->getCount() == MaxCount) {
+ Sci->FreeList.push_back(B);
+ B = nullptr;
+ }
+ if (!B) {
+ B = C->createBatch(ClassId, PointersArray[I]);
+ if (UNLIKELY(!B))
+ return false;
+ B->clear();
+ }
+ B->add(PointersArray[I]);
+ }
+ *CurrentBatch = B;
+ return true;
+ }
+
+ NOINLINE TransferBatch *populateFreeList(CacheT *C, uptr ClassId,
+ SizeClassInfo *Sci) {
+ const uptr Region = allocateRegion(ClassId);
+ if (UNLIKELY(!Region))
+ return nullptr;
+ C->getStats().add(StatMapped, RegionSize);
+ const uptr Size = getSizeByClassId(ClassId);
+ const u32 MaxCount = TransferBatch::getMaxCached(Size);
+ DCHECK_GT(MaxCount, 0);
+ const uptr NumberOfBlocks = RegionSize / Size;
+ DCHECK_GT(NumberOfBlocks, 0);
+ TransferBatch *B = nullptr;
+ constexpr uptr ShuffleArraySize = 48;
+ void *ShuffleArray[ShuffleArraySize];
+ u32 Count = 0;
+ const uptr AllocatedUser = NumberOfBlocks * Size;
+ for (uptr I = Region; I < Region + AllocatedUser; I += Size) {
+ ShuffleArray[Count++] = reinterpret_cast<void *>(I);
+ if (Count == ShuffleArraySize) {
+ if (UNLIKELY(!populateBatches(C, Sci, ClassId, &B, MaxCount,
+ ShuffleArray, Count)))
+ return nullptr;
+ Count = 0;
+ }
+ }
+ if (Count) {
+ if (UNLIKELY(!populateBatches(C, Sci, ClassId, &B, MaxCount, ShuffleArray,
+ Count)))
+ return nullptr;
+ }
+ DCHECK(B);
+ DCHECK_GT(B->getCount(), 0);
+ Sci->AllocatedUser += AllocatedUser;
+ if (Sci->CanRelease)
+ Sci->ReleaseInfo.LastReleaseAtNs = getMonotonicTime();
+ return B;
+ }
+
+ void printStats(uptr ClassId, uptr Rss) {
+ SizeClassInfo *Sci = getSizeClassInfo(ClassId);
+ if (Sci->AllocatedUser == 0)
+ return;
+ const uptr InUse = Sci->Stats.PoppedBlocks - Sci->Stats.PushedBlocks;
+ const uptr AvailableChunks = Sci->AllocatedUser / getSizeByClassId(ClassId);
+ Printf(" %02zu (%6zu): mapped: %6zuK popped: %7zu pushed: %7zu inuse: %6zu"
+ " avail: %6zu rss: %6zuK\n",
+ ClassId, getSizeByClassId(ClassId), Sci->AllocatedUser >> 10,
+ Sci->Stats.PoppedBlocks, Sci->Stats.PushedBlocks, InUse,
+ AvailableChunks, Rss >> 10);
+ }
+
+ NOINLINE void releaseToOSMaybe(SizeClassInfo *Sci, uptr ClassId,
+ bool Force = false) {
+ const uptr BlockSize = getSizeByClassId(ClassId);
+ const uptr PageSize = getPageSizeCached();
+
+ CHECK_GE(Sci->Stats.PoppedBlocks, Sci->Stats.PushedBlocks);
+ const uptr N = Sci->Stats.PoppedBlocks - Sci->Stats.PushedBlocks;
+ if (N * BlockSize < PageSize)
+ return; // No chance to release anything.
+ if ((Sci->Stats.PushedBlocks - Sci->ReleaseInfo.PushedBlocksAtLastRelease) *
+ BlockSize <
+ PageSize) {
+ return; // Nothing new to release.
+ }
+
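+    // Note that the interval is in milliseconds while LastReleaseAtNs and
+    // getMonotonicTime() are in nanoseconds, hence the 10^6 factor below.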
+ if (!Force) {
+ const s32 IntervalMs = ReleaseToOsIntervalMs;
+ if (IntervalMs < 0)
+ return;
+ if (Sci->ReleaseInfo.LastReleaseAtNs + IntervalMs * 1000000ULL >
+ getMonotonicTime()) {
+ return; // Memory was returned recently.
+ }
+ }
+
+ // TODO(kostyak): currently not ideal as we loop over all regions and
+ // iterate multiple times over the same freelist if a ClassId spans multiple
+ // regions. But it will have to do for now.
+ for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++) {
+ if (PossibleRegions[I] == ClassId) {
+ ReleaseRecorder Recorder(I * RegionSize);
+ releaseFreeMemoryToOS(&Sci->FreeList, I * RegionSize,
+ RegionSize / PageSize, BlockSize, &Recorder);
+ if (Recorder.getReleasedRangesCount() > 0) {
+ Sci->ReleaseInfo.PushedBlocksAtLastRelease = Sci->Stats.PushedBlocks;
+ Sci->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
+ Sci->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
+ }
+ }
+ }
+ Sci->ReleaseInfo.LastReleaseAtNs = getMonotonicTime();
+ }
+
+ SizeClassInfo SizeClassInfoArray[NumClasses];
+
+ ByteMap PossibleRegions;
+ // Keep track of the lowest & highest regions allocated to avoid looping
+ // through the whole NumRegions.
+ uptr MinRegionIndex;
+ uptr MaxRegionIndex;
+ s32 ReleaseToOsIntervalMs;
+ // Unless several threads request regions simultaneously from different size
+ // classes, the stash rarely contains more than 1 entry.
+ static constexpr uptr MaxStashedRegions = 4;
+ HybridMutex RegionsStashMutex;
+ uptr NumberOfStashedRegions;
+ uptr RegionsStash[MaxStashedRegions];
+};
+
+} // namespace scudo
+
+#endif // SCUDO_PRIMARY32_H_
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/primary64.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/primary64.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/primary64.h (revision 351984)
@@ -0,0 +1,381 @@
+//===-- primary64.h ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_PRIMARY64_H_
+#define SCUDO_PRIMARY64_H_
+
+#include "bytemap.h"
+#include "common.h"
+#include "list.h"
+#include "local_cache.h"
+#include "release.h"
+#include "stats.h"
+#include "string_utils.h"
+
+namespace scudo {
+
+// SizeClassAllocator64 is an allocator tuned for 64-bit address space.
+//
+// It starts by reserving NumClasses * 2^RegionSizeLog bytes, equally divided
+// into Regions, specific to each size class. Note that the base of that
+// mapping is random (based on the platform specific map() capabilities), and
+// that each Region actually starts at a random offset from its base.
+//
+// Regions are mapped incrementally on demand to fulfill allocation requests,
+// those mappings being split into equally sized Blocks based on the size class
+// they belong to. The Blocks created are shuffled to prevent predictable
+// address patterns (the predictability increases with the size of the Blocks).
+//
+// The 1st Region (for size class 0) holds the TransferBatches. This is a
+// structure used to transfer arrays of available pointers from the size class
+// freelist to the thread specific freelist, and back.
+//
+// The memory used by this allocator is never unmapped, but can be partially
+// released if the platform allows for it.
+
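+// A minimal usage sketch (hypothetical template parameters, for illustration
+// only; the actual configuration lives with the combined allocator):
+//
+//   typedef SizeClassAllocator64<DefaultSizeClassMap, 24U> Primary;
+//   Primary Allocator;
+//   Allocator.init(/*ReleaseToOsInterval=*/1000); // In milliseconds.
+//   // Blocks are then popped/pushed in batches through a
+//   // SizeClassAllocatorLocalCache bound to this Primary.
+//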
+template <class SizeClassMapT, uptr RegionSizeLog> class SizeClassAllocator64 {
+public:
+ typedef SizeClassMapT SizeClassMap;
+ typedef SizeClassAllocator64<SizeClassMap, RegionSizeLog> ThisT;
+ typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
+ typedef typename CacheT::TransferBatch TransferBatch;
+
+ static uptr getSizeByClassId(uptr ClassId) {
+ return (ClassId == SizeClassMap::BatchClassId)
+ ? sizeof(TransferBatch)
+ : SizeClassMap::getSizeByClassId(ClassId);
+ }
+
+ static bool canAllocate(uptr Size) { return Size <= SizeClassMap::MaxSize; }
+
+ void initLinkerInitialized(s32 ReleaseToOsInterval) {
+ // Reserve the space required for the Primary.
+ PrimaryBase = reinterpret_cast<uptr>(
+ map(nullptr, PrimarySize, "scudo:primary", MAP_NOACCESS, &Data));
+
+ RegionInfoArray = reinterpret_cast<RegionInfo *>(
+ map(nullptr, sizeof(RegionInfo) * NumClasses, "scudo:regioninfo"));
+ DCHECK_EQ(reinterpret_cast<uptr>(RegionInfoArray) % SCUDO_CACHE_LINE_SIZE,
+ 0);
+
+ u32 Seed;
+ if (UNLIKELY(!getRandom(reinterpret_cast<void *>(&Seed), sizeof(Seed))))
+ Seed = static_cast<u32>(getMonotonicTime() ^ (PrimaryBase >> 12));
+ const uptr PageSize = getPageSizeCached();
+ for (uptr I = 0; I < NumClasses; I++) {
+ RegionInfo *Region = getRegionInfo(I);
+      // The actual start of a region is offset by a random number of pages.
+ Region->RegionBeg =
+ getRegionBaseByClassId(I) + (getRandomModN(&Seed, 16) + 1) * PageSize;
+      // Releasing smaller size classes doesn't necessarily yield a meaningful
+      // RSS impact: there are more blocks per page, their placement is
+      // randomized, and thus pages are less likely to be entirely empty. On
+      // top of this, attempting to release them requires more iterations and
+      // memory accesses, which ends up being fairly costly. The current lower
+      // limit is mostly arbitrary and based on empirical observations.
+ // TODO(kostyak): make the lower limit a runtime option
+ Region->CanRelease = (ReleaseToOsInterval > 0) &&
+ (I != SizeClassMap::BatchClassId) &&
+ (getSizeByClassId(I) >= (PageSize / 32));
+ Region->RandState = getRandomU32(&Seed);
+ }
+ ReleaseToOsIntervalMs = ReleaseToOsInterval;
+ }
+ void init(s32 ReleaseToOsInterval) {
+ memset(this, 0, sizeof(*this));
+ initLinkerInitialized(ReleaseToOsInterval);
+ }
+
+ void unmapTestOnly() {
+ unmap(reinterpret_cast<void *>(PrimaryBase), PrimarySize, UNMAP_ALL, &Data);
+ unmap(reinterpret_cast<void *>(RegionInfoArray),
+ sizeof(RegionInfo) * NumClasses);
+ }
+
+ TransferBatch *popBatch(CacheT *C, uptr ClassId) {
+ DCHECK_LT(ClassId, NumClasses);
+ RegionInfo *Region = getRegionInfo(ClassId);
+ ScopedLock L(Region->Mutex);
+ TransferBatch *B = Region->FreeList.front();
+ if (B)
+ Region->FreeList.pop_front();
+ else {
+ B = populateFreeList(C, ClassId, Region);
+ if (UNLIKELY(!B))
+ return nullptr;
+ }
+ DCHECK_GT(B->getCount(), 0);
+ Region->Stats.PoppedBlocks += B->getCount();
+ return B;
+ }
+
+ void pushBatch(uptr ClassId, TransferBatch *B) {
+ DCHECK_GT(B->getCount(), 0);
+ RegionInfo *Region = getRegionInfo(ClassId);
+ ScopedLock L(Region->Mutex);
+ Region->FreeList.push_front(B);
+ Region->Stats.PushedBlocks += B->getCount();
+ if (Region->CanRelease)
+ releaseToOSMaybe(Region, ClassId);
+ }
+
+ void disable() {
+ for (uptr I = 0; I < NumClasses; I++)
+ getRegionInfo(I)->Mutex.lock();
+ }
+
+ void enable() {
+ for (sptr I = static_cast<sptr>(NumClasses) - 1; I >= 0; I--)
+ getRegionInfo(I)->Mutex.unlock();
+ }
+
+ template <typename F> void iterateOverBlocks(F Callback) const {
+ for (uptr I = 1; I < NumClasses; I++) {
+ const RegionInfo *Region = getRegionInfo(I);
+ const uptr BlockSize = getSizeByClassId(I);
+ const uptr From = Region->RegionBeg;
+ const uptr To = From + Region->AllocatedUser;
+ for (uptr Block = From; Block < To; Block += BlockSize)
+ Callback(Block);
+ }
+ }
+
+ void printStats() const {
+ // TODO(kostyak): get the RSS per region.
+ uptr TotalMapped = 0;
+ uptr PoppedBlocks = 0;
+ uptr PushedBlocks = 0;
+ for (uptr I = 0; I < NumClasses; I++) {
+ RegionInfo *Region = getRegionInfo(I);
+ if (Region->MappedUser)
+ TotalMapped += Region->MappedUser;
+ PoppedBlocks += Region->Stats.PoppedBlocks;
+ PushedBlocks += Region->Stats.PushedBlocks;
+ }
+ Printf("Stats: Primary64: %zuM mapped (%zuM rss) in %zu allocations; "
+ "remains %zu\n",
+           TotalMapped >> 20, static_cast<uptr>(0), PoppedBlocks,
+           PoppedBlocks - PushedBlocks);
+
+ for (uptr I = 0; I < NumClasses; I++)
+ printStats(I, 0);
+ }
+
+ void releaseToOS() {
+ for (uptr I = 0; I < NumClasses; I++) {
+ if (I == SizeClassMap::BatchClassId)
+ continue;
+ RegionInfo *Region = getRegionInfo(I);
+ ScopedLock L(Region->Mutex);
+ releaseToOSMaybe(Region, I, /*Force=*/true);
+ }
+ }
+
+private:
+ static const uptr RegionSize = 1UL << RegionSizeLog;
+ static const uptr NumClasses = SizeClassMap::NumClasses;
+ static const uptr PrimarySize = RegionSize * NumClasses;
+
+ // Call map for user memory with at least this size.
+ static const uptr MapSizeIncrement = 1UL << 16;
+
+ struct RegionStats {
+ uptr PoppedBlocks;
+ uptr PushedBlocks;
+ };
+
+ struct ReleaseToOsInfo {
+ uptr PushedBlocksAtLastRelease;
+ uptr RangesReleased;
+ uptr LastReleasedBytes;
+ u64 LastReleaseAtNs;
+ };
+
+ struct ALIGNED(SCUDO_CACHE_LINE_SIZE) RegionInfo {
+ HybridMutex Mutex;
+ IntrusiveList<TransferBatch> FreeList;
+ RegionStats Stats;
+ bool CanRelease;
+ bool Exhausted;
+ u32 RandState;
+ uptr RegionBeg;
+ uptr MappedUser; // Bytes mapped for user memory.
+ uptr AllocatedUser; // Bytes allocated for user memory.
+ MapPlatformData Data;
+ ReleaseToOsInfo ReleaseInfo;
+ };
+ COMPILER_CHECK(sizeof(RegionInfo) % SCUDO_CACHE_LINE_SIZE == 0);
+
+ uptr PrimaryBase;
+ RegionInfo *RegionInfoArray;
+ MapPlatformData Data;
+ s32 ReleaseToOsIntervalMs;
+
+ RegionInfo *getRegionInfo(uptr ClassId) const {
+ DCHECK_LT(ClassId, NumClasses);
+ return &RegionInfoArray[ClassId];
+ }
+
+ uptr getRegionBaseByClassId(uptr ClassId) const {
+ return PrimaryBase + (ClassId << RegionSizeLog);
+ }
+
+ bool populateBatches(CacheT *C, RegionInfo *Region, uptr ClassId,
+ TransferBatch **CurrentBatch, u32 MaxCount,
+ void **PointersArray, u32 Count) {
+    // No need to shuffle the blocks of the batch size class.
+ if (ClassId != SizeClassMap::BatchClassId)
+ shuffle(PointersArray, Count, &Region->RandState);
+ TransferBatch *B = *CurrentBatch;
+ for (uptr I = 0; I < Count; I++) {
+ if (B && B->getCount() == MaxCount) {
+ Region->FreeList.push_back(B);
+ B = nullptr;
+ }
+ if (!B) {
+ B = C->createBatch(ClassId, PointersArray[I]);
+ if (UNLIKELY(!B))
+ return false;
+ B->clear();
+ }
+ B->add(PointersArray[I]);
+ }
+ *CurrentBatch = B;
+ return true;
+ }
+
+ NOINLINE TransferBatch *populateFreeList(CacheT *C, uptr ClassId,
+ RegionInfo *Region) {
+ const uptr Size = getSizeByClassId(ClassId);
+ const u32 MaxCount = TransferBatch::getMaxCached(Size);
+
+ const uptr RegionBeg = Region->RegionBeg;
+ const uptr MappedUser = Region->MappedUser;
+ const uptr TotalUserBytes = Region->AllocatedUser + MaxCount * Size;
+ // Map more space for blocks, if necessary.
+ if (LIKELY(TotalUserBytes > MappedUser)) {
+ // Do the mmap for the user memory.
+ const uptr UserMapSize =
+ roundUpTo(TotalUserBytes - MappedUser, MapSizeIncrement);
+ const uptr RegionBase = RegionBeg - getRegionBaseByClassId(ClassId);
+ if (UNLIKELY(RegionBase + MappedUser + UserMapSize > RegionSize)) {
+ if (!Region->Exhausted) {
+ Region->Exhausted = true;
+ printStats();
+ Printf(
+ "Scudo OOM: The process has Exhausted %zuM for size class %zu.\n",
+ RegionSize >> 20, Size);
+ }
+ return nullptr;
+ }
+ if (MappedUser == 0)
+ Region->Data = Data;
+ if (UNLIKELY(!map(reinterpret_cast<void *>(RegionBeg + MappedUser),
+ UserMapSize, "scudo:primary",
+ MAP_ALLOWNOMEM | MAP_RESIZABLE, &Region->Data)))
+ return nullptr;
+ Region->MappedUser += UserMapSize;
+ C->getStats().add(StatMapped, UserMapSize);
+ }
+
+ const uptr NumberOfBlocks = Min(
+ 8UL * MaxCount, (Region->MappedUser - Region->AllocatedUser) / Size);
+ DCHECK_GT(NumberOfBlocks, 0);
+
+ TransferBatch *B = nullptr;
+ constexpr uptr ShuffleArraySize = 48;
+ void *ShuffleArray[ShuffleArraySize];
+ u32 Count = 0;
+ const uptr P = RegionBeg + Region->AllocatedUser;
+ const uptr AllocatedUser = NumberOfBlocks * Size;
+ for (uptr I = P; I < P + AllocatedUser; I += Size) {
+ ShuffleArray[Count++] = reinterpret_cast<void *>(I);
+ if (Count == ShuffleArraySize) {
+ if (UNLIKELY(!populateBatches(C, Region, ClassId, &B, MaxCount,
+ ShuffleArray, Count)))
+ return nullptr;
+ Count = 0;
+ }
+ }
+ if (Count) {
+ if (UNLIKELY(!populateBatches(C, Region, ClassId, &B, MaxCount,
+ ShuffleArray, Count)))
+ return nullptr;
+ }
+ DCHECK(B);
+ CHECK_GT(B->getCount(), 0);
+
+ Region->AllocatedUser += AllocatedUser;
+ Region->Exhausted = false;
+ if (Region->CanRelease)
+ Region->ReleaseInfo.LastReleaseAtNs = getMonotonicTime();
+
+ return B;
+ }
+
+ void printStats(uptr ClassId, uptr Rss) const {
+ RegionInfo *Region = getRegionInfo(ClassId);
+ if (Region->MappedUser == 0)
+ return;
+ const uptr InUse = Region->Stats.PoppedBlocks - Region->Stats.PushedBlocks;
+ const uptr AvailableChunks =
+ Region->AllocatedUser / getSizeByClassId(ClassId);
+ Printf("%s %02zu (%6zu): mapped: %6zuK popped: %7zu pushed: %7zu inuse: "
+ "%6zu avail: %6zu rss: %6zuK releases: %6zu last released: %6zuK "
+ "region: 0x%zx (0x%zx)\n",
+ Region->Exhausted ? "F" : " ", ClassId, getSizeByClassId(ClassId),
+ Region->MappedUser >> 10, Region->Stats.PoppedBlocks,
+ Region->Stats.PushedBlocks, InUse, AvailableChunks, Rss >> 10,
+ Region->ReleaseInfo.RangesReleased,
+ Region->ReleaseInfo.LastReleasedBytes >> 10, Region->RegionBeg,
+ getRegionBaseByClassId(ClassId));
+ }
+
+ NOINLINE void releaseToOSMaybe(RegionInfo *Region, uptr ClassId,
+ bool Force = false) {
+ const uptr BlockSize = getSizeByClassId(ClassId);
+ const uptr PageSize = getPageSizeCached();
+
+ CHECK_GE(Region->Stats.PoppedBlocks, Region->Stats.PushedBlocks);
+ const uptr N = Region->Stats.PoppedBlocks - Region->Stats.PushedBlocks;
+ if (N * BlockSize < PageSize)
+ return; // No chance to release anything.
+ if ((Region->Stats.PushedBlocks -
+ Region->ReleaseInfo.PushedBlocksAtLastRelease) *
+ BlockSize <
+ PageSize) {
+ return; // Nothing new to release.
+ }
+
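+    // As in the 32-bit primary, IntervalMs is in milliseconds while
+    // LastReleaseAtNs and getMonotonicTime() are in nanoseconds, hence the
+    // 10^6 factor below.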
+ if (!Force) {
+ const s32 IntervalMs = ReleaseToOsIntervalMs;
+ if (IntervalMs < 0)
+ return;
+ if (Region->ReleaseInfo.LastReleaseAtNs + IntervalMs * 1000000ULL >
+ getMonotonicTime()) {
+ return; // Memory was returned recently.
+ }
+ }
+
+ ReleaseRecorder Recorder(Region->RegionBeg, &Region->Data);
+ releaseFreeMemoryToOS(&Region->FreeList, Region->RegionBeg,
+ roundUpTo(Region->AllocatedUser, PageSize) / PageSize,
+ BlockSize, &Recorder);
+
+ if (Recorder.getReleasedRangesCount() > 0) {
+ Region->ReleaseInfo.PushedBlocksAtLastRelease =
+ Region->Stats.PushedBlocks;
+ Region->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
+ Region->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
+ }
+ Region->ReleaseInfo.LastReleaseAtNs = getMonotonicTime();
+ }
+};
+
+} // namespace scudo
+
+#endif // SCUDO_PRIMARY64_H_
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/quarantine.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/quarantine.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/quarantine.h (revision 351984)
@@ -0,0 +1,289 @@
+//===-- quarantine.h --------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_QUARANTINE_H_
+#define SCUDO_QUARANTINE_H_
+
+#include "list.h"
+#include "mutex.h"
+#include "string_utils.h"
+
+namespace scudo {
+
+struct QuarantineBatch {
+  // With the following count, a batch (and the header that protects it)
+  // occupies 4096 bytes on 32-bit platforms, and 8192 bytes on 64-bit ones.
+ static const u32 MaxCount = 1019;
+ QuarantineBatch *Next;
+ uptr Size;
+ u32 Count;
+ void *Batch[MaxCount];
+
+ void init(void *Ptr, uptr Size) {
+ Count = 1;
+ Batch[0] = Ptr;
+    this->Size = Size + sizeof(QuarantineBatch); // Account for the batch itself.
+ }
+
+ // The total size of quarantined nodes recorded in this batch.
+ uptr getQuarantinedSize() const { return Size - sizeof(QuarantineBatch); }
+
+ void push_back(void *Ptr, uptr Size) {
+ DCHECK_LT(Count, MaxCount);
+ Batch[Count++] = Ptr;
+ this->Size += Size;
+ }
+
+ bool canMerge(const QuarantineBatch *const From) const {
+ return Count + From->Count <= MaxCount;
+ }
+
+ void merge(QuarantineBatch *const From) {
+ DCHECK_LE(Count + From->Count, MaxCount);
+ DCHECK_GE(Size, sizeof(QuarantineBatch));
+
+ for (uptr I = 0; I < From->Count; ++I)
+ Batch[Count + I] = From->Batch[I];
+ Count += From->Count;
+ Size += From->getQuarantinedSize();
+
+ From->Count = 0;
+ From->Size = sizeof(QuarantineBatch);
+ }
+
+ void shuffle(u32 State) { ::scudo::shuffle(Batch, Count, &State); }
+};
+
+COMPILER_CHECK(sizeof(QuarantineBatch) <= (1U << 13)); // 8Kb.
+
+// Per-thread cache of memory blocks.
+template <typename Callback> class QuarantineCache {
+public:
+ void initLinkerInitialized() {}
+ void init() {
+ memset(this, 0, sizeof(*this));
+ initLinkerInitialized();
+ }
+
+ // Total memory used, including internal accounting.
+ uptr getSize() const { return atomic_load_relaxed(&Size); }
+ // Memory used for internal accounting.
+ uptr getOverheadSize() const { return List.size() * sizeof(QuarantineBatch); }
+
+ void enqueue(Callback Cb, void *Ptr, uptr Size) {
+ if (List.empty() || List.back()->Count == QuarantineBatch::MaxCount) {
+ QuarantineBatch *B =
+ reinterpret_cast<QuarantineBatch *>(Cb.allocate(sizeof(*B)));
+ DCHECK(B);
+ B->init(Ptr, Size);
+ enqueueBatch(B);
+ } else {
+ List.back()->push_back(Ptr, Size);
+ addToSize(Size);
+ }
+ }
+
+ void transfer(QuarantineCache *From) {
+ List.append_back(&From->List);
+ addToSize(From->getSize());
+ atomic_store_relaxed(&From->Size, 0);
+ }
+
+ void enqueueBatch(QuarantineBatch *B) {
+ List.push_back(B);
+ addToSize(B->Size);
+ }
+
+ QuarantineBatch *dequeueBatch() {
+ if (List.empty())
+ return nullptr;
+ QuarantineBatch *B = List.front();
+ List.pop_front();
+ subFromSize(B->Size);
+ return B;
+ }
+
+ void mergeBatches(QuarantineCache *ToDeallocate) {
+ uptr ExtractedSize = 0;
+ QuarantineBatch *Current = List.front();
+ while (Current && Current->Next) {
+ if (Current->canMerge(Current->Next)) {
+ QuarantineBatch *Extracted = Current->Next;
+ // Move all the chunks into the current batch.
+ Current->merge(Extracted);
+ DCHECK_EQ(Extracted->Count, 0);
+ DCHECK_EQ(Extracted->Size, sizeof(QuarantineBatch));
+        // Remove the next batch from the list and account for its size.
+        List.extract(Current, Extracted);
+        ExtractedSize += Extracted->Size;
+        // Add it to the deallocation list.
+ ToDeallocate->enqueueBatch(Extracted);
+ } else {
+ Current = Current->Next;
+ }
+ }
+ subFromSize(ExtractedSize);
+ }
+
+ void printStats() const {
+ uptr BatchCount = 0;
+ uptr TotalOverheadBytes = 0;
+ uptr TotalBytes = 0;
+ uptr TotalQuarantineChunks = 0;
+ for (const QuarantineBatch &Batch : List) {
+ BatchCount++;
+ TotalBytes += Batch.Size;
+ TotalOverheadBytes += Batch.Size - Batch.getQuarantinedSize();
+ TotalQuarantineChunks += Batch.Count;
+ }
+ const uptr QuarantineChunksCapacity =
+ BatchCount * QuarantineBatch::MaxCount;
+ const uptr ChunksUsagePercent =
+ (QuarantineChunksCapacity == 0)
+ ? 0
+ : TotalQuarantineChunks * 100 / QuarantineChunksCapacity;
+ const uptr TotalQuarantinedBytes = TotalBytes - TotalOverheadBytes;
+ const uptr MemoryOverheadPercent =
+ (TotalQuarantinedBytes == 0)
+ ? 0
+ : TotalOverheadBytes * 100 / TotalQuarantinedBytes;
+ Printf("Global quarantine stats: batches: %zd; bytes: %zd (user: %zd); "
+ "chunks: %zd (capacity: %zd); %zd%% chunks used; %zd%% memory "
+ "overhead\n",
+ BatchCount, TotalBytes, TotalQuarantinedBytes, TotalQuarantineChunks,
+ QuarantineChunksCapacity, ChunksUsagePercent, MemoryOverheadPercent);
+ }
+
+private:
+ IntrusiveList<QuarantineBatch> List;
+ atomic_uptr Size;
+
+ void addToSize(uptr add) { atomic_store_relaxed(&Size, getSize() + add); }
+ void subFromSize(uptr sub) { atomic_store_relaxed(&Size, getSize() - sub); }
+};
+
+// The callback interface is:
+// void Callback::recycle(Node *Ptr);
+// void *Callback::allocate(uptr Size);
+// void Callback::deallocate(void *Ptr);
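+//
+// A minimal hypothetical Callback sketch (for illustration only; a real
+// implementation would route these into the allocator proper):
+//
+//   struct QuarantineCallback {
+//     void recycle(Node *Ptr);    // Return the chunk to its size class.
+//     void *allocate(uptr Size);  // Backing memory for QuarantineBatches.
+//     void deallocate(void *Ptr); // Release a drained QuarantineBatch.
+//   };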
+template <typename Callback, typename Node> class GlobalQuarantine {
+public:
+ typedef QuarantineCache<Callback> CacheT;
+
+ void initLinkerInitialized(uptr Size, uptr CacheSize) {
+ // Thread local quarantine size can be zero only when global quarantine size
+ // is zero (it allows us to perform just one atomic read per put() call).
+ CHECK((Size == 0 && CacheSize == 0) || CacheSize != 0);
+
+ atomic_store_relaxed(&MaxSize, Size);
+ atomic_store_relaxed(&MinSize, Size / 10 * 9); // 90% of max size.
+ atomic_store_relaxed(&MaxCacheSize, CacheSize);
+
+ Cache.initLinkerInitialized();
+ }
+ void init(uptr Size, uptr CacheSize) {
+ memset(this, 0, sizeof(*this));
+ initLinkerInitialized(Size, CacheSize);
+ }
+
+ uptr getMaxSize() const { return atomic_load_relaxed(&MaxSize); }
+ uptr getCacheSize() const { return atomic_load_relaxed(&MaxCacheSize); }
+
+ void put(CacheT *C, Callback Cb, Node *Ptr, uptr Size) {
+ C->enqueue(Cb, Ptr, Size);
+ if (C->getSize() > getCacheSize())
+ drain(C, Cb);
+ }
+
+ void NOINLINE drain(CacheT *C, Callback Cb) {
+ {
+ ScopedLock L(CacheMutex);
+ Cache.transfer(C);
+ }
+    if (Cache.getSize() > getMaxSize() && RecycleMutex.tryLock())
+ recycle(atomic_load_relaxed(&MinSize), Cb);
+ }
+
+ void NOINLINE drainAndRecycle(CacheT *C, Callback Cb) {
+ {
+ ScopedLock L(CacheMutex);
+ Cache.transfer(C);
+ }
+    RecycleMutex.lock();
+ recycle(0, Cb);
+ }
+
+ void printStats() const {
+    // It assumes that the world is stopped, just like the allocator's printStats.
+ Printf("Quarantine limits: global: %zdM; thread local: %zdK\n",
+ getMaxSize() >> 20, getCacheSize() >> 10);
+ Cache.printStats();
+ }
+
+private:
+ // Read-only data.
+ alignas(SCUDO_CACHE_LINE_SIZE) HybridMutex CacheMutex;
+ CacheT Cache;
+  alignas(SCUDO_CACHE_LINE_SIZE) HybridMutex RecycleMutex;
+ atomic_uptr MinSize;
+ atomic_uptr MaxSize;
+ alignas(SCUDO_CACHE_LINE_SIZE) atomic_uptr MaxCacheSize;
+
+ void NOINLINE recycle(uptr MinSize, Callback Cb) {
+ CacheT Tmp;
+ Tmp.init();
+ {
+ ScopedLock L(CacheMutex);
+      // Go over the batches and merge partially filled ones to save some
+      // memory. Otherwise the batches themselves (since the memory they use
+      // is counted against the quarantine limit) can outweigh the actual
+      // user's quarantined chunks, which diminishes the purpose of the
+      // quarantine.
+ const uptr CacheSize = Cache.getSize();
+ const uptr OverheadSize = Cache.getOverheadSize();
+ DCHECK_GE(CacheSize, OverheadSize);
+      // Do the merge only when the overhead exceeds this predefined limit
+      // (which might require some tuning). It saves us a merge attempt when
+      // the batch list is unlikely to contain batches suitable for merging.
+      // With a threshold of 100%, the merge triggers once the overhead
+      // exceeds half of the total cache size.
+ constexpr uptr OverheadThresholdPercents = 100;
+ if (CacheSize > OverheadSize &&
+ OverheadSize * (100 + OverheadThresholdPercents) >
+ CacheSize * OverheadThresholdPercents) {
+ Cache.mergeBatches(&Tmp);
+ }
+ // Extract enough chunks from the quarantine to get below the max
+ // quarantine size and leave some leeway for the newly quarantined chunks.
+ while (Cache.getSize() > MinSize)
+ Tmp.enqueueBatch(Cache.dequeueBatch());
+ }
+    RecycleMutex.unlock();
+ doRecycle(&Tmp, Cb);
+ }
+
+ void NOINLINE doRecycle(CacheT *C, Callback Cb) {
+ while (QuarantineBatch *B = C->dequeueBatch()) {
+ const u32 Seed = static_cast<u32>(
+ (reinterpret_cast<uptr>(B) ^ reinterpret_cast<uptr>(C)) >> 4);
+ B->shuffle(Seed);
+ constexpr uptr NumberOfPrefetch = 8UL;
+ CHECK(NumberOfPrefetch <= ARRAY_SIZE(B->Batch));
+ for (uptr I = 0; I < NumberOfPrefetch; I++)
+ PREFETCH(B->Batch[I]);
+ for (uptr I = 0, Count = B->Count; I < Count; I++) {
+ if (I + NumberOfPrefetch < Count)
+ PREFETCH(B->Batch[I + NumberOfPrefetch]);
+ Cb.recycle(reinterpret_cast<Node *>(B->Batch[I]));
+ }
+ Cb.deallocate(B);
+ }
+ }
+};
+
+} // namespace scudo
+
+#endif // SCUDO_QUARANTINE_H_
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/release.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/release.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/release.h (revision 351984)
@@ -0,0 +1,262 @@
+//===-- release.h -----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_RELEASE_H_
+#define SCUDO_RELEASE_H_
+
+#include "common.h"
+#include "list.h"
+
+namespace scudo {
+
+class ReleaseRecorder {
+public:
+ ReleaseRecorder(uptr BaseAddress, MapPlatformData *Data = nullptr)
+ : BaseAddress(BaseAddress), Data(Data) {}
+
+ uptr getReleasedRangesCount() const { return ReleasedRangesCount; }
+
+ uptr getReleasedBytes() const { return ReleasedBytes; }
+
+ // Releases [From, To) range of pages back to OS.
+ void releasePageRangeToOS(uptr From, uptr To) {
+ const uptr Size = To - From;
+ releasePagesToOS(BaseAddress, From, Size, Data);
+ ReleasedRangesCount++;
+ ReleasedBytes += Size;
+ }
+
+private:
+ uptr ReleasedRangesCount = 0;
+ uptr ReleasedBytes = 0;
+ uptr BaseAddress = 0;
+ MapPlatformData *Data = nullptr;
+};
+
+// A packed array of Counters. Each counter occupies 2^N bits, enough to store
+// the counter's MaxValue. The constructor will try to allocate the required
+// Buffer via map() and the caller is expected to check whether the
+// initialization was successful by checking the isAllocated() result. For
+// performance's sake, none of the accessors check the validity of the
+// arguments; it is assumed that Index is always in the [0, N) range and that
+// the value is never incremented past MaxValue.
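+//
+// Illustrative math, assuming a 64-bit word: with MaxValue == 3, counters are
+// rounded up to 2 bits each (CounterSizeBits == 2), so a single Buffer word
+// packs 32 counters (PackingRatio == 32, PackingRatioLog == 5); counter I then
+// lives in Buffer[I >> 5] at bit offset (I & 31) << 1.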
+class PackedCounterArray {
+public:
+ PackedCounterArray(uptr NumCounters, uptr MaxValue) : N(NumCounters) {
+ CHECK_GT(NumCounters, 0);
+ CHECK_GT(MaxValue, 0);
+ constexpr uptr MaxCounterBits = sizeof(*Buffer) * 8UL;
+    // Rounding the counter storage size up to a power of two allows for using
+    // bit shifts when calculating a particular counter's Index and offset.
+ const uptr CounterSizeBits =
+ roundUpToPowerOfTwo(getMostSignificantSetBitIndex(MaxValue) + 1);
+ CHECK_LE(CounterSizeBits, MaxCounterBits);
+ CounterSizeBitsLog = getLog2(CounterSizeBits);
+ CounterMask = ~(static_cast<uptr>(0)) >> (MaxCounterBits - CounterSizeBits);
+
+ const uptr PackingRatio = MaxCounterBits >> CounterSizeBitsLog;
+ CHECK_GT(PackingRatio, 0);
+ PackingRatioLog = getLog2(PackingRatio);
+ BitOffsetMask = PackingRatio - 1;
+
+ BufferSize = (roundUpTo(N, static_cast<uptr>(1U) << PackingRatioLog) >>
+ PackingRatioLog) *
+ sizeof(*Buffer);
+ Buffer = reinterpret_cast<uptr *>(
+ map(nullptr, BufferSize, "scudo:counters", MAP_ALLOWNOMEM));
+ }
+ ~PackedCounterArray() {
+ if (isAllocated())
+ unmap(reinterpret_cast<void *>(Buffer), BufferSize);
+ }
+
+ bool isAllocated() const { return !!Buffer; }
+
+ uptr getCount() const { return N; }
+
+ uptr get(uptr I) const {
+ DCHECK_LT(I, N);
+ const uptr Index = I >> PackingRatioLog;
+ const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
+ return (Buffer[Index] >> BitOffset) & CounterMask;
+ }
+
+ void inc(uptr I) const {
+ DCHECK_LT(get(I), CounterMask);
+ const uptr Index = I >> PackingRatioLog;
+ const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
+ DCHECK_LT(BitOffset, SCUDO_WORDSIZE);
+ Buffer[Index] += static_cast<uptr>(1U) << BitOffset;
+ }
+
+ void incRange(uptr From, uptr To) const {
+ DCHECK_LE(From, To);
+ for (uptr I = From; I <= To; I++)
+ inc(I);
+ }
+
+ uptr getBufferSize() const { return BufferSize; }
+
+private:
+ const uptr N;
+ uptr CounterSizeBitsLog;
+ uptr CounterMask;
+ uptr PackingRatioLog;
+ uptr BitOffsetMask;
+
+ uptr BufferSize;
+ uptr *Buffer;
+};
+
+template <class ReleaseRecorderT> class FreePagesRangeTracker {
+public:
+ explicit FreePagesRangeTracker(ReleaseRecorderT *Recorder)
+ : Recorder(Recorder), PageSizeLog(getLog2(getPageSizeCached())) {}
+
+ void processNextPage(bool Freed) {
+ if (Freed) {
+ if (!InRange) {
+ CurrentRangeStatePage = CurrentPage;
+ InRange = true;
+ }
+ } else {
+ closeOpenedRange();
+ }
+ CurrentPage++;
+ }
+
+ void finish() { closeOpenedRange(); }
+
+private:
+ void closeOpenedRange() {
+ if (InRange) {
+ Recorder->releasePageRangeToOS((CurrentRangeStatePage << PageSizeLog),
+ (CurrentPage << PageSizeLog));
+ InRange = false;
+ }
+ }
+
+ ReleaseRecorderT *const Recorder;
+ const uptr PageSizeLog;
+ bool InRange = false;
+ uptr CurrentPage = 0;
+ uptr CurrentRangeStatePage = 0;
+};
+
+template <class TransferBatchT, class ReleaseRecorderT>
+NOINLINE void
+releaseFreeMemoryToOS(const IntrusiveList<TransferBatchT> *FreeList, uptr Base,
+ uptr AllocatedPagesCount, uptr BlockSize,
+ ReleaseRecorderT *Recorder) {
+ const uptr PageSize = getPageSizeCached();
+
+ // Figure out the number of chunks per page and whether we can take a fast
+ // path (the number of chunks per page is the same for all pages).
+ uptr FullPagesBlockCountMax;
+ bool SameBlockCountPerPage;
+ if (BlockSize <= PageSize) {
+ if (PageSize % BlockSize == 0) {
+      // Same number of chunks per page, no crossovers.
+ FullPagesBlockCountMax = PageSize / BlockSize;
+ SameBlockCountPerPage = true;
+ } else if (BlockSize % (PageSize % BlockSize) == 0) {
+      // Some chunks cross page boundaries, which means that a page contains
+      // one or two partial chunks, but all pages contain the same number of
+      // chunks.
+ FullPagesBlockCountMax = PageSize / BlockSize + 1;
+ SameBlockCountPerPage = true;
+ } else {
+      // Some chunks cross page boundaries, which means that a page contains
+      // one or two partial chunks.
+ FullPagesBlockCountMax = PageSize / BlockSize + 2;
+ SameBlockCountPerPage = false;
+ }
+ } else {
+ if (BlockSize % PageSize == 0) {
+      // One chunk covers multiple pages, no crossovers.
+ FullPagesBlockCountMax = 1;
+ SameBlockCountPerPage = true;
+ } else {
+      // One chunk covers multiple pages, and some chunks cross page
+      // boundaries. Some pages contain one chunk, some contain two.
+ FullPagesBlockCountMax = 2;
+ SameBlockCountPerPage = false;
+ }
+ }
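+  // Worked example (illustrative): with 4096-byte pages and BlockSize == 96,
+  // PageSize % BlockSize == 64 and BlockSize % 64 != 0, so we fall into the
+  // last sub-page case above: FullPagesBlockCountMax == 4096 / 96 + 2 == 44,
+  // and the block count varies from page to page (SameBlockCountPerPage is
+  // false).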
+
+ PackedCounterArray Counters(AllocatedPagesCount, FullPagesBlockCountMax);
+ if (!Counters.isAllocated())
+ return;
+
+ const uptr PageSizeLog = getLog2(PageSize);
+ const uptr End = Base + AllocatedPagesCount * PageSize;
+
+ // Iterate over free chunks and count how many free chunks affect each
+ // allocated page.
+ if (BlockSize <= PageSize && PageSize % BlockSize == 0) {
+ // Each chunk affects one page only.
+ for (auto It = FreeList->begin(); It != FreeList->end(); ++It) {
+ for (u32 I = 0; I < (*It).getCount(); I++) {
+ const uptr P = reinterpret_cast<uptr>((*It).get(I));
+ if (P >= Base && P < End)
+ Counters.inc((P - Base) >> PageSizeLog);
+ }
+ }
+ } else {
+ // In all other cases chunks might affect more than one page.
+ for (auto It = FreeList->begin(); It != FreeList->end(); ++It) {
+ for (u32 I = 0; I < (*It).getCount(); I++) {
+ const uptr P = reinterpret_cast<uptr>((*It).get(I));
+ if (P >= Base && P < End)
+ Counters.incRange((P - Base) >> PageSizeLog,
+ (P - Base + BlockSize - 1) >> PageSizeLog);
+ }
+ }
+ }
+
+  // Iterate over the pages, detecting ranges of pages whose chunk counters
+  // equal the expected number of chunks for that particular page.
+ FreePagesRangeTracker<ReleaseRecorderT> RangeTracker(Recorder);
+ if (SameBlockCountPerPage) {
+ // Fast path, every page has the same number of chunks affecting it.
+ for (uptr I = 0; I < Counters.getCount(); I++)
+ RangeTracker.processNextPage(Counters.get(I) == FullPagesBlockCountMax);
+ } else {
+    // Slow path: go through the pages, keeping count of how many chunks
+    // affect each page.
+ const uptr Pn = BlockSize < PageSize ? PageSize / BlockSize : 1;
+ const uptr Pnc = Pn * BlockSize;
+ // The idea is to increment the current page pointer by the first chunk
+ // size, middle portion size (the portion of the page covered by chunks
+ // except the first and the last one) and then the last chunk size, adding
+ // up the number of chunks on the current page and checking on every step
+ // whether the page boundary was crossed.
+ uptr PrevPageBoundary = 0;
+ uptr CurrentBoundary = 0;
+ for (uptr I = 0; I < Counters.getCount(); I++) {
+ const uptr PageBoundary = PrevPageBoundary + PageSize;
+ uptr BlocksPerPage = Pn;
+ if (CurrentBoundary < PageBoundary) {
+ if (CurrentBoundary > PrevPageBoundary)
+ BlocksPerPage++;
+ CurrentBoundary += Pnc;
+ if (CurrentBoundary < PageBoundary) {
+ BlocksPerPage++;
+ CurrentBoundary += BlockSize;
+ }
+ }
+ PrevPageBoundary = PageBoundary;
+
+ RangeTracker.processNextPage(Counters.get(I) == BlocksPerPage);
+ }
+ }
+ RangeTracker.finish();
+}
+
+} // namespace scudo
+
+#endif // SCUDO_RELEASE_H_
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/report.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/report.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/report.cc (revision 351984)
@@ -0,0 +1,192 @@
+//===-- report.cc -----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "report.h"
+
+#include "atomic_helpers.h"
+#include "string_utils.h"
+
+#include <stdarg.h>
+
+namespace scudo {
+
+class ScopedErrorReport {
+public:
+ ScopedErrorReport() : Message(512) { Message.append("Scudo ERROR: "); }
+ void append(const char *Format, ...) {
+ va_list Args;
+ va_start(Args, Format);
+ Message.append(Format, Args);
+ va_end(Args);
+ }
+ NORETURN ~ScopedErrorReport() {
+ outputRaw(Message.data());
+ setAbortMessage(Message.data());
+ die();
+ }
+
+private:
+ ScopedString Message;
+};
+
+INLINE void NORETURN trap() { __builtin_trap(); }
+
+// This could potentially be called recursively if a CHECK fails in the reports.
+void NORETURN reportCheckFailed(const char *File, int Line,
+ const char *Condition, u64 Value1, u64 Value2) {
+ static atomic_u32 NumberOfCalls;
+ if (atomic_fetch_add(&NumberOfCalls, 1, memory_order_relaxed) > 2) {
+ // TODO(kostyak): maybe sleep here?
+ trap();
+ }
+ ScopedErrorReport Report;
+ Report.append("CHECK failed @ %s:%d %s (%llu, %llu)\n", File, Line, Condition,
+ Value1, Value2);
+}
+
+// Generic string fatal error message.
+void NORETURN reportError(const char *Message) {
+ ScopedErrorReport Report;
+ Report.append("%s\n", Message);
+}
+
+void NORETURN reportInvalidFlag(const char *FlagType, const char *Value) {
+ ScopedErrorReport Report;
+ Report.append("invalid value for %s option: '%s'\n", FlagType, Value);
+}
+
+// The checksum of a chunk header is invalid. This could be caused by an
+// {over,under}write of the header, or a pointer that is not an actual chunk.
+void NORETURN reportHeaderCorruption(void *Ptr) {
+ ScopedErrorReport Report;
+ Report.append("corrupted chunk header at address %p\n", Ptr);
+}
+
+// Two threads have attempted to modify a chunk header at the same time. This
+// is symptomatic of a race condition in the application code, or a general
+// lack of proper locking.
+void NORETURN reportHeaderRace(void *Ptr) {
+ ScopedErrorReport Report;
+ Report.append("race on chunk header at address %p\n", Ptr);
+}
+
+// The allocator was compiled with parameters that conflict with field size
+// requirements.
+void NORETURN reportSanityCheckError(const char *Field) {
+ ScopedErrorReport Report;
+ Report.append("maximum possible %s doesn't fit in header\n", Field);
+}
+
+// We enforce a maximum alignment, to keep fields smaller and generally prevent
+// integer overflows, or unexpected corner cases.
+void NORETURN reportAlignmentTooBig(uptr Alignment, uptr MaxAlignment) {
+ ScopedErrorReport Report;
+ Report.append("invalid allocation alignment: %zu exceeds maximum supported "
+ "alignment of %zu\n",
+ Alignment, MaxAlignment);
+}
+
+// See above, we also enforce a maximum size.
+void NORETURN reportAllocationSizeTooBig(uptr UserSize, uptr TotalSize,
+ uptr MaxSize) {
+ ScopedErrorReport Report;
+ Report.append("requested allocation size %zu (%zu after adjustments) exceeds "
+ "maximum supported size of %zu\n",
+ UserSize, TotalSize, MaxSize);
+}
+
+void NORETURN reportOutOfMemory(uptr RequestedSize) {
+ ScopedErrorReport Report;
+ Report.append("out of memory trying to allocate %zu bytes\n", RequestedSize);
+}
+
+static const char *stringifyAction(AllocatorAction Action) {
+ switch (Action) {
+ case AllocatorAction::Recycling:
+ return "recycling";
+ case AllocatorAction::Deallocating:
+ return "deallocating";
+ case AllocatorAction::Reallocating:
+ return "reallocating";
+ case AllocatorAction::Sizing:
+ return "sizing";
+ }
+ return "<invalid action>";
+}
+
+// The chunk is not in a state congruent with the operation we want to perform.
+// This is usually the case with a double-free or a realloc of a freed pointer.
+void NORETURN reportInvalidChunkState(AllocatorAction Action, void *Ptr) {
+ ScopedErrorReport Report;
+ Report.append("invalid chunk state when %s address %p\n",
+ stringifyAction(Action), Ptr);
+}
+
+void NORETURN reportMisalignedPointer(AllocatorAction Action, void *Ptr) {
+ ScopedErrorReport Report;
+ Report.append("misaligned pointer when %s address %p\n",
+ stringifyAction(Action), Ptr);
+}
+
+// The deallocation function used is at odds with the one used to allocate the
+// chunk (e.g., new[]/delete or malloc/delete, and so on).
+void NORETURN reportDeallocTypeMismatch(AllocatorAction Action, void *Ptr,
+ u8 TypeA, u8 TypeB) {
+ ScopedErrorReport Report;
+ Report.append("allocation type mismatch when %s address %p (%d vs %d)\n",
+ stringifyAction(Action), Ptr, TypeA, TypeB);
+}
+
+// The size specified to the delete operator does not match the one that was
+// passed to new when allocating the chunk.
+void NORETURN reportDeleteSizeMismatch(void *Ptr, uptr Size,
+ uptr ExpectedSize) {
+ ScopedErrorReport Report;
+ Report.append(
+ "invalid sized delete when deallocating address %p (%zu vs %zu)\n", Ptr,
+ Size, ExpectedSize);
+}
+
+void NORETURN reportAlignmentNotPowerOfTwo(uptr Alignment) {
+ ScopedErrorReport Report;
+ Report.append(
+ "invalid allocation alignment: %zu, alignment must be a power of two\n",
+ Alignment);
+}
+
+void NORETURN reportCallocOverflow(uptr Count, uptr Size) {
+ ScopedErrorReport Report;
+ Report.append("calloc parameters overflow: count * size (%zu * %zu) cannot "
+ "be represented with type size_t\n",
+ Count, Size);
+}
+
+void NORETURN reportInvalidPosixMemalignAlignment(uptr Alignment) {
+ ScopedErrorReport Report;
+ Report.append(
+ "invalid alignment requested in posix_memalign: %zu, alignment must be a "
+ "power of two and a multiple of sizeof(void *) == %zu\n",
+ Alignment, sizeof(void *));
+}
+
+void NORETURN reportPvallocOverflow(uptr Size) {
+ ScopedErrorReport Report;
+ Report.append("pvalloc parameters overflow: size %zu rounded up to system "
+ "page size %zu cannot be represented in type size_t\n",
+ Size, getPageSizeCached());
+}
+
+void NORETURN reportInvalidAlignedAllocAlignment(uptr Alignment, uptr Size) {
+ ScopedErrorReport Report;
+ Report.append("invalid alignment requested in aligned_alloc: %zu, alignment "
+ "must be a power of two and the requested size %zu must be a "
+ "multiple of alignment\n",
+ Alignment, Size);
+}
+
+} // namespace scudo
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/report.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/report.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/report.h (revision 351984)
@@ -0,0 +1,57 @@
+//===-- report.h ------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_REPORT_H_
+#define SCUDO_REPORT_H_
+
+#include "internal_defs.h"
+
+namespace scudo {
+
+// Reports are *fatal* unless stated otherwise.
+
+// Generic error.
+void NORETURN reportError(const char *Message);
+
+// Flags related errors.
+void NORETURN reportInvalidFlag(const char *FlagType, const char *Value);
+
+// Chunk header related errors.
+void NORETURN reportHeaderCorruption(void *Ptr);
+void NORETURN reportHeaderRace(void *Ptr);
+
+// Sanity checks related error.
+void NORETURN reportSanityCheckError(const char *Field);
+
+// Combined allocator errors.
+void NORETURN reportAlignmentTooBig(uptr Alignment, uptr MaxAlignment);
+void NORETURN reportAllocationSizeTooBig(uptr UserSize, uptr TotalSize,
+ uptr MaxSize);
+void NORETURN reportOutOfMemory(uptr RequestedSize);
+enum class AllocatorAction : u8 {
+ Recycling,
+ Deallocating,
+ Reallocating,
+ Sizing,
+};
+void NORETURN reportInvalidChunkState(AllocatorAction Action, void *Ptr);
+void NORETURN reportMisalignedPointer(AllocatorAction Action, void *Ptr);
+void NORETURN reportDeallocTypeMismatch(AllocatorAction Action, void *Ptr,
+ u8 TypeA, u8 TypeB);
+void NORETURN reportDeleteSizeMismatch(void *Ptr, uptr Size, uptr ExpectedSize);
+
+// C wrappers errors.
+void NORETURN reportAlignmentNotPowerOfTwo(uptr Alignment);
+void NORETURN reportInvalidPosixMemalignAlignment(uptr Alignment);
+void NORETURN reportCallocOverflow(uptr Count, uptr Size);
+void NORETURN reportPvallocOverflow(uptr Size);
+void NORETURN reportInvalidAlignedAllocAlignment(uptr Alignment, uptr Size);
+
+} // namespace scudo
+
+#endif // SCUDO_REPORT_H_
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/secondary.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/secondary.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/secondary.cc (revision 351984)
@@ -0,0 +1,136 @@
+//===-- secondary.cc --------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "secondary.h"
+
+#include "string_utils.h"
+
+namespace scudo {
+
+// As with the Primary, the size passed to this function includes any desired
+// alignment, so that the frontend can align the user allocation. The hint
+// parameter allows us to unmap spurious memory when dealing with larger
+// (greater than a page) alignments on 32-bit platforms.
+// Due to the scarcity of address space on those platforms, requesting an
+// allocation from the Secondary with a large alignment would end up wasting
+// VA space (even though we are not committing the whole thing), hence the
+// need to trim off some of the reserved space.
+// For allocations requested with an alignment greater than or equal to a page,
+// the committed memory will amount to something close to Size - AlignmentHint
+// (pending rounding and headers).
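+// Worked example (illustrative): with 4KB pages and AlignmentHint == 64KB,
+// CommitBase is moved to one page before the next 64KB boundary past
+// MapBase + PageSize, leaving room for the header below the aligned user
+// address; on 32-bit platforms, the pages skipped at the front and the excess
+// pages at the back are then unmapped, keeping only the committed span plus
+// the two guard pages reserved.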
+void *MapAllocator::allocate(uptr Size, uptr AlignmentHint, uptr *BlockEnd) {
+ DCHECK_GT(Size, AlignmentHint);
+ const uptr PageSize = getPageSizeCached();
+ const uptr MapSize =
+ roundUpTo(Size + LargeBlock::getHeaderSize(), PageSize) + 2 * PageSize;
+ MapPlatformData Data = {};
+ uptr MapBase =
+ reinterpret_cast<uptr>(map(nullptr, MapSize, "scudo:secondary",
+ MAP_NOACCESS | MAP_ALLOWNOMEM, &Data));
+ if (!MapBase)
+ return nullptr;
+ uptr CommitBase = MapBase + PageSize;
+ uptr MapEnd = MapBase + MapSize;
+
+ // In the unlikely event of alignments larger than a page, adjust the amount
+ // of memory we want to commit, and trim the extra memory.
+ if (AlignmentHint >= PageSize) {
+    // For alignments greater than or equal to a page, the user pointer (e.g.,
+    // the pointer returned by the C or C++ allocation APIs) ends up on a page
+    // boundary, and our headers will live in the preceding page.
+ CommitBase = roundUpTo(MapBase + PageSize + 1, AlignmentHint) - PageSize;
+ const uptr NewMapBase = CommitBase - PageSize;
+ DCHECK_GE(NewMapBase, MapBase);
+ // We only trim the extra memory on 32-bit platforms: 64-bit platforms
+ // are less constrained memory wise, and that saves us two syscalls.
+ if (SCUDO_WORDSIZE == 32U && NewMapBase != MapBase) {
+ unmap(reinterpret_cast<void *>(MapBase), NewMapBase - MapBase, 0, &Data);
+ MapBase = NewMapBase;
+ }
+ const uptr NewMapEnd = CommitBase + PageSize +
+ roundUpTo((Size - AlignmentHint), PageSize) +
+ PageSize;
+ DCHECK_LE(NewMapEnd, MapEnd);
+ if (SCUDO_WORDSIZE == 32U && NewMapEnd != MapEnd) {
+ unmap(reinterpret_cast<void *>(NewMapEnd), MapEnd - NewMapEnd, 0, &Data);
+ MapEnd = NewMapEnd;
+ }
+ }
+
+ const uptr CommitSize = MapEnd - PageSize - CommitBase;
+ const uptr Ptr =
+ reinterpret_cast<uptr>(map(reinterpret_cast<void *>(CommitBase),
+ CommitSize, "scudo:secondary", 0, &Data));
+ LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(Ptr);
+ H->MapBase = MapBase;
+ H->MapSize = MapEnd - MapBase;
+ H->BlockEnd = CommitBase + CommitSize;
+ H->Data = Data;
+ {
+ ScopedLock L(Mutex);
+ if (!Tail) {
+ Tail = H;
+ } else {
+ Tail->Next = H;
+ H->Prev = Tail;
+ Tail = H;
+ }
+ AllocatedBytes += CommitSize;
+ if (LargestSize < CommitSize)
+ LargestSize = CommitSize;
+ NumberOfAllocs++;
+ Stats.add(StatAllocated, CommitSize);
+ Stats.add(StatMapped, H->MapSize);
+ }
+ if (BlockEnd)
+ *BlockEnd = CommitBase + CommitSize;
+ return reinterpret_cast<void *>(Ptr + LargeBlock::getHeaderSize());
+}
+
+void MapAllocator::deallocate(void *Ptr) {
+ LargeBlock::Header *H = LargeBlock::getHeader(Ptr);
+ {
+ ScopedLock L(Mutex);
+ LargeBlock::Header *Prev = H->Prev;
+ LargeBlock::Header *Next = H->Next;
+ if (Prev) {
+ CHECK_EQ(Prev->Next, H);
+ Prev->Next = Next;
+ }
+ if (Next) {
+ CHECK_EQ(Next->Prev, H);
+ Next->Prev = Prev;
+ }
+ if (Tail == H) {
+ CHECK(!Next);
+ Tail = Prev;
+ } else {
+ CHECK(Next);
+ }
+ const uptr CommitSize = H->BlockEnd - reinterpret_cast<uptr>(H);
+ FreedBytes += CommitSize;
+ NumberOfFrees++;
+ Stats.sub(StatAllocated, CommitSize);
+ Stats.sub(StatMapped, H->MapSize);
+ }
+ void *Addr = reinterpret_cast<void *>(H->MapBase);
+ const uptr Size = H->MapSize;
+ MapPlatformData Data;
+ Data = H->Data;
+ unmap(Addr, Size, UNMAP_ALL, &Data);
+}
+
+void MapAllocator::printStats() const {
+ Printf("Stats: MapAllocator: allocated %zd times (%zdK), freed %zd times "
+ "(%zdK), remains %zd (%zdK) max %zdM\n",
+ NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees, FreedBytes >> 10,
+ NumberOfAllocs - NumberOfFrees, (AllocatedBytes - FreedBytes) >> 10,
+ LargestSize >> 20);
+}
+
+} // namespace scudo
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/secondary.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/secondary.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/secondary.h (revision 351984)
@@ -0,0 +1,97 @@
+//===-- secondary.h ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_SECONDARY_H_
+#define SCUDO_SECONDARY_H_
+
+#include "common.h"
+#include "mutex.h"
+#include "stats.h"
+
+namespace scudo {
+
+// This allocator wraps the platform allocation primitives, and as such is on
+// the slower side and should preferably be used for larger sized allocations.
+// Blocks allocated will be preceded and followed by a guard page, and hold
+// their own header that is not checksummed: the guard pages and the Combined
+// allocator's header should be enough for our purpose.
+
+namespace LargeBlock {
+
+struct Header {
+ LargeBlock::Header *Prev;
+ LargeBlock::Header *Next;
+ uptr BlockEnd;
+ uptr MapBase;
+ uptr MapSize;
+ MapPlatformData Data;
+};
+
+constexpr uptr getHeaderSize() {
+ return roundUpTo(sizeof(Header), 1U << SCUDO_MIN_ALIGNMENT_LOG);
+}
+
+static Header *getHeader(uptr Ptr) {
+ return reinterpret_cast<Header *>(Ptr - getHeaderSize());
+}
+
+static Header *getHeader(const void *Ptr) {
+ return getHeader(reinterpret_cast<uptr>(Ptr));
+}
+
+} // namespace LargeBlock
+
+class MapAllocator {
+public:
+ void initLinkerInitialized(GlobalStats *S) {
+ Stats.initLinkerInitialized();
+ if (S)
+ S->link(&Stats);
+ }
+ void init(GlobalStats *S) {
+ memset(this, 0, sizeof(*this));
+ initLinkerInitialized(S);
+ }
+
+ void *allocate(uptr Size, uptr AlignmentHint = 0, uptr *BlockEnd = nullptr);
+
+ void deallocate(void *Ptr);
+
+ static uptr getBlockEnd(void *Ptr) {
+ return LargeBlock::getHeader(Ptr)->BlockEnd;
+ }
+
+ static uptr getBlockSize(void *Ptr) {
+ return getBlockEnd(Ptr) - reinterpret_cast<uptr>(Ptr);
+ }
+
+ void printStats() const;
+
+ void disable() { Mutex.lock(); }
+
+ void enable() { Mutex.unlock(); }
+
+ template <typename F> void iterateOverBlocks(F Callback) const {
+ for (LargeBlock::Header *H = Tail; H != nullptr; H = H->Prev)
+ Callback(reinterpret_cast<uptr>(H) + LargeBlock::getHeaderSize());
+ }
+
+private:
+ HybridMutex Mutex;
+ LargeBlock::Header *Tail;
+ uptr AllocatedBytes;
+ uptr FreedBytes;
+ uptr LargestSize;
+ u32 NumberOfAllocs;
+ u32 NumberOfFrees;
+ LocalStats Stats;
+};
+
+} // namespace scudo
+
+#endif // SCUDO_SECONDARY_H_
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/size_class_map.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/size_class_map.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/size_class_map.h (revision 351984)
@@ -0,0 +1,149 @@
+//===-- size_class_map.h ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_SIZE_CLASS_MAP_H_
+#define SCUDO_SIZE_CLASS_MAP_H_
+
+#include "common.h"
+#include "string_utils.h"
+
+namespace scudo {
+
+// SizeClassMap maps allocation sizes into size classes and back, in an
+// efficient table-free manner.
+//
+// Class 0 is a special class that doesn't abide by the same rules as other
+// classes. The allocator uses it to hold batches.
+//
+// The other sizes are controlled by the template parameters:
+// - MinSizeLog: defines the first class as 2^MinSizeLog bytes.
+// - MaxSizeLog: defines the last class as 2^MaxSizeLog bytes.
+// - MidSizeLog: classes increase with step 2^MinSizeLog from 2^MinSizeLog to
+// 2^MidSizeLog bytes.
+// - NumBits: the number of non-zero bits in sizes after 2^MidSizeLog.
+//   e.g., with NumBits==3 all size classes after 2^MidSizeLog look like
+//   0b1xx0..0 (where x is either 0 or 1).
+//
+// This class also gives a hint to a thread-caching allocator about the number
+// of chunks that can be cached per thread:
+// - MaxNumCachedHint is a hint for the max number of chunks cached per class.
+// - 2^MaxBytesCachedLog is the max number of bytes cached per class.
+
+template <u8 NumBits, u8 MinSizeLog, u8 MidSizeLog, u8 MaxSizeLog,
+ u32 MaxNumCachedHintT, u8 MaxBytesCachedLog>
+class SizeClassMap {
+ static const uptr MinSize = 1UL << MinSizeLog;
+ static const uptr MidSize = 1UL << MidSizeLog;
+ static const uptr MidClass = MidSize / MinSize;
+ static const u8 S = NumBits - 1;
+ static const uptr M = (1UL << S) - 1;
+
+public:
+ static const u32 MaxNumCachedHint = MaxNumCachedHintT;
+
+ static const uptr MaxSize = 1UL << MaxSizeLog;
+ static const uptr NumClasses =
+ MidClass + ((MaxSizeLog - MidSizeLog) << S) + 1;
+ COMPILER_CHECK(NumClasses <= 256);
+ static const uptr LargestClassId = NumClasses - 1;
+ static const uptr BatchClassId = 0;
+
+ static uptr getSizeByClassId(uptr ClassId) {
+ DCHECK_NE(ClassId, BatchClassId);
+ if (ClassId <= MidClass)
+ return ClassId << MinSizeLog;
+ ClassId -= MidClass;
+ const uptr T = MidSize << (ClassId >> S);
+ return T + (T >> S) * (ClassId & M);
+ }
+
+ static uptr getClassIdBySize(uptr Size) {
+ DCHECK_LE(Size, MaxSize);
+ if (Size <= MidSize)
+ return (Size + MinSize - 1) >> MinSizeLog;
+ const uptr L = getMostSignificantSetBitIndex(Size);
+ const uptr HBits = (Size >> (L - S)) & M;
+ const uptr LBits = Size & ((1UL << (L - S)) - 1);
+ const uptr L1 = L - MidSizeLog;
+ return MidClass + (L1 << S) + HBits + (LBits > 0);
+ }
+
+ static u32 getMaxCachedHint(uptr Size) {
+ DCHECK_LE(Size, MaxSize);
+ DCHECK_NE(Size, 0);
+ u32 N;
+ // Force a 32-bit division if the template parameters allow for it.
+ if (MaxBytesCachedLog > 31 || MaxSizeLog > 31)
+ N = static_cast<u32>((1UL << MaxBytesCachedLog) / Size);
+ else
+ N = (1U << MaxBytesCachedLog) / static_cast<u32>(Size);
+ return Max(1U, Min(MaxNumCachedHint, N));
+ }
+
+ static void print() {
+ uptr PrevS = 0;
+ uptr TotalCached = 0;
+ for (uptr I = 0; I < NumClasses; I++) {
+ if (I == BatchClassId)
+ continue;
+ const uptr S = getSizeByClassId(I);
+ if (S >= MidSize / 2 && (S & (S - 1)) == 0)
+ Printf("\n");
+ const uptr D = S - PrevS;
+ const uptr P = PrevS ? (D * 100 / PrevS) : 0;
+ const uptr L = S ? getMostSignificantSetBitIndex(S) : 0;
+ const uptr Cached = getMaxCachedHint(S) * S;
+ Printf(
+ "C%02zu => S: %zu diff: +%zu %02zu%% L %zu Cached: %zu %zu; id %zu\n",
+ I, getSizeByClassId(I), D, P, L, getMaxCachedHint(S), Cached,
+ getClassIdBySize(S));
+ TotalCached += Cached;
+ PrevS = S;
+ }
+ Printf("Total Cached: %zu\n", TotalCached);
+ }
+
+ static void validate() {
+ for (uptr C = 0; C < NumClasses; C++) {
+ if (C == BatchClassId)
+ continue;
+ const uptr S = getSizeByClassId(C);
+ CHECK_NE(S, 0U);
+ CHECK_EQ(getClassIdBySize(S), C);
+ if (C < LargestClassId)
+ CHECK_EQ(getClassIdBySize(S + 1), C + 1);
+ CHECK_EQ(getClassIdBySize(S - 1), C);
+ CHECK_GT(getSizeByClassId(C), getSizeByClassId(C - 1));
+ }
+ // Do not perform the loop if the maximum size is too large.
+ if (MaxSizeLog > 19)
+ return;
+ for (uptr S = 1; S <= MaxSize; S++) {
+ const uptr C = getClassIdBySize(S);
+ CHECK_LT(C, NumClasses);
+ CHECK_GE(getSizeByClassId(C), S);
+ if (C > 0)
+ CHECK_LT(getSizeByClassId(C - 1), S);
+ }
+ }
+};
+
+typedef SizeClassMap<3, 5, 8, 17, 8, 10> DefaultSizeClassMap;
+
+// TODO(kostyak): further tune class maps for Android & Fuchsia.
+#if SCUDO_WORDSIZE == 64U
+typedef SizeClassMap<3, 5, 8, 15, 8, 10> SvelteSizeClassMap;
+typedef SizeClassMap<3, 5, 8, 16, 14, 12> AndroidSizeClassMap;
+#else
+typedef SizeClassMap<3, 4, 7, 15, 8, 10> SvelteSizeClassMap;
+typedef SizeClassMap<3, 4, 7, 16, 14, 12> AndroidSizeClassMap;
+#endif
+
+} // namespace scudo
+
+#endif // SCUDO_SIZE_CLASS_MAP_H_
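
To make the table-free mapping concrete: with the DefaultSizeClassMap
parameters above (NumBits=3, MinSizeLog=5, MidSizeLog=8), classes step by 32
bytes up to 256, after which each power-of-two interval is split into
2^(NumBits - 1) = 4 classes. A standalone re-derivation of getSizeByClassId
with standard types, offered as a sketch rather than the canonical code:

  #include <cassert>
  #include <cstdint>

  constexpr uint64_t MinSizeLog = 5, MidSizeLog = 8;  // DefaultSizeClassMap
  constexpr uint64_t S = 3 - 1;                       // NumBits - 1
  constexpr uint64_t M = (1ULL << S) - 1;
  constexpr uint64_t MinSize = 1ULL << MinSizeLog;    // 32
  constexpr uint64_t MidSize = 1ULL << MidSizeLog;    // 256
  constexpr uint64_t MidClass = MidSize / MinSize;    // 8

  uint64_t getSizeByClassId(uint64_t ClassId) {
    if (ClassId <= MidClass)
      return ClassId << MinSizeLog;                   // 32, 64, ..., 256
    ClassId -= MidClass;
    const uint64_t T = MidSize << (ClassId >> S);     // power-of-two base
    return T + (T >> S) * (ClassId & M);              // plus 0..3 quarter-steps
  }

  int main() {
    assert(getSizeByClassId(1) == 32);    // first class: 2^MinSizeLog
    assert(getSizeByClassId(8) == 256);   // MidClass: 2^MidSizeLog
    assert(getSizeByClassId(9) == 320);   // 256 + 64, i.e. 0b101000000
    assert(getSizeByClassId(12) == 512);  // next power of two
    return 0;
  }
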
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/stats.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/stats.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/stats.h (revision 351984)
@@ -0,0 +1,105 @@
+//===-- stats.h -------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_STATS_H_
+#define SCUDO_STATS_H_
+
+#include "atomic_helpers.h"
+#include "mutex.h"
+
+#include <string.h>
+
+namespace scudo {
+
+// Memory allocator statistics
+enum StatType { StatAllocated, StatMapped, StatCount };
+
+typedef uptr StatCounters[StatCount];
+
+// Per-thread stats, living in the per-thread cache. We use atomics so that the
+// numbers themselves are consistent. But we don't use atomic_{add|sub} or a
+// lock, because those are expensive operations, and we only need the stats
+// to be "somewhat" correct: e.g. if we call GlobalStats::get while a thread is
+// LocalStats::add'ing, this is OK, we will still get a meaningful number.
+class LocalStats {
+public:
+ void initLinkerInitialized() {}
+ void init() { memset(this, 0, sizeof(*this)); }
+
+ void add(StatType I, uptr V) {
+ V += atomic_load_relaxed(&StatsArray[I]);
+ atomic_store_relaxed(&StatsArray[I], V);
+ }
+
+ void sub(StatType I, uptr V) {
+ V = atomic_load_relaxed(&StatsArray[I]) - V;
+ atomic_store_relaxed(&StatsArray[I], V);
+ }
+
+ void set(StatType I, uptr V) { atomic_store_relaxed(&StatsArray[I], V); }
+
+ uptr get(StatType I) const { return atomic_load_relaxed(&StatsArray[I]); }
+
+private:
+ friend class GlobalStats;
+ atomic_uptr StatsArray[StatCount];
+ LocalStats *Next;
+ LocalStats *Prev;
+};
+
+// Global stats, used for aggregation and querying.
+class GlobalStats : public LocalStats {
+public:
+ void initLinkerInitialized() {
+ Next = this;
+ Prev = this;
+ }
+ void init() {
+ memset(this, 0, sizeof(*this));
+ initLinkerInitialized();
+ }
+
+ void link(LocalStats *S) {
+ ScopedLock L(Mutex);
+ S->Next = Next;
+ S->Prev = this;
+ Next->Prev = S;
+ Next = S;
+ }
+
+ void unlink(LocalStats *S) {
+ ScopedLock L(Mutex);
+ S->Prev->Next = S->Next;
+ S->Next->Prev = S->Prev;
+ for (uptr I = 0; I < StatCount; I++)
+ add(static_cast<StatType>(I), S->get(static_cast<StatType>(I)));
+ }
+
+ void get(uptr *S) const {
+ memset(S, 0, StatCount * sizeof(uptr));
+ ScopedLock L(Mutex);
+ const LocalStats *Stats = this;
+ for (;;) {
+ for (uptr I = 0; I < StatCount; I++)
+ S[I] += Stats->get(static_cast<StatType>(I));
+ Stats = Stats->Next;
+ if (Stats == this)
+ break;
+ }
+ // All stats must be non-negative.
+ for (uptr I = 0; I < StatCount; I++)
+ S[I] = static_cast<sptr>(S[I]) >= 0 ? S[I] : 0;
+ }
+
+private:
+ mutable HybridMutex Mutex;
+};
+
+} // namespace scudo
+
+#endif // SCUDO_STATS_H_
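
The contract in the comment above (relaxed per-thread counters, aggregation
by walking an intrusive circular list) fits in a few lines of standard C++.
The names and the std::atomic types below are stand-ins for scudo's helpers,
so treat this as a sketch of the scheme, not the actual implementation:

  #include <atomic>
  #include <cassert>
  #include <cstdint>

  enum StatType { StatAllocated, StatMapped, StatCount };

  struct Node {
    std::atomic<uint64_t> Stats[StatCount] = {};
    Node *Next = this;  // circular list, anchored at the head node
    Node *Prev = this;

    // Racy load-then-store, as in LocalStats::add: lost updates are
    // tolerated in exchange for avoiding an atomic RMW on the hot path.
    void add(StatType I, uint64_t V) {
      Stats[I].store(Stats[I].load(std::memory_order_relaxed) + V,
                     std::memory_order_relaxed);
    }
  };

  void link(Node *Head, Node *N) {  // insert N right after the head
    N->Next = Head->Next;
    N->Prev = Head;
    Head->Next->Prev = N;
    Head->Next = N;
  }

  uint64_t total(Node *Head, StatType I) {  // walk the ring and sum
    uint64_t Sum = 0;
    Node *Cur = Head;
    do {
      Sum += Cur->Stats[I].load(std::memory_order_relaxed);
      Cur = Cur->Next;
    } while (Cur != Head);
    return Sum;
  }

  int main() {
    Node Head, A, B;
    link(&Head, &A);
    link(&Head, &B);
    A.add(StatAllocated, 64);
    B.add(StatAllocated, 128);
    assert(total(&Head, StatAllocated) == 192);
    return 0;
  }
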
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/string_utils.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/string_utils.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/string_utils.cc (revision 351984)
@@ -0,0 +1,236 @@
+//===-- string_utils.cc -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "string_utils.h"
+#include "common.h"
+
+#include <ctype.h>
+#include <stdarg.h>
+#include <string.h>
+
+namespace scudo {
+
+static int appendChar(char **Buffer, const char *BufferEnd, char C) {
+ if (*Buffer < BufferEnd) {
+ **Buffer = C;
+ (*Buffer)++;
+ }
+ return 1;
+}
+
+// Appends a number in the given Base to the buffer. If its length is less than
+// |MinNumberLength|, it is padded with leading zeroes or spaces, depending
+// on the value of |PadWithZero|.
+static int appendNumber(char **Buffer, const char *BufferEnd, u64 AbsoluteValue,
+ u8 Base, u8 MinNumberLength, bool PadWithZero,
+ bool Negative, bool Upper) {
+ constexpr uptr MaxLen = 30;
+ RAW_CHECK(Base == 10 || Base == 16);
+ RAW_CHECK(Base == 10 || !Negative);
+ RAW_CHECK(AbsoluteValue || !Negative);
+ RAW_CHECK(MinNumberLength < MaxLen);
+ int Res = 0;
+ if (Negative && MinNumberLength)
+ --MinNumberLength;
+ if (Negative && PadWithZero)
+ Res += appendChar(Buffer, BufferEnd, '-');
+ uptr NumBuffer[MaxLen];
+ int Pos = 0;
+ do {
+ RAW_CHECK_MSG(static_cast<uptr>(Pos) < MaxLen,
+ "appendNumber buffer overflow");
+ NumBuffer[Pos++] = AbsoluteValue % Base;
+ AbsoluteValue /= Base;
+ } while (AbsoluteValue > 0);
+ if (Pos < MinNumberLength) {
+ memset(&NumBuffer[Pos], 0,
+ sizeof(NumBuffer[0]) * static_cast<uptr>(MinNumberLength - Pos));
+ Pos = MinNumberLength;
+ }
+ RAW_CHECK(Pos > 0);
+ Pos--;
+ for (; Pos >= 0 && NumBuffer[Pos] == 0; Pos--) {
+ char c = (PadWithZero || Pos == 0) ? '0' : ' ';
+ Res += appendChar(Buffer, BufferEnd, c);
+ }
+ if (Negative && !PadWithZero)
+ Res += appendChar(Buffer, BufferEnd, '-');
+ for (; Pos >= 0; Pos--) {
+ char Digit = static_cast<char>(NumBuffer[Pos]);
+ Digit = static_cast<char>((Digit < 10) ? '0' + Digit
+ : (Upper ? 'A' : 'a') + Digit - 10);
+ Res += appendChar(Buffer, BufferEnd, Digit);
+ }
+ return Res;
+}
+
+static int appendUnsigned(char **Buffer, const char *BufferEnd, u64 Num,
+ u8 Base, u8 MinNumberLength, bool PadWithZero,
+ bool Upper) {
+ return appendNumber(Buffer, BufferEnd, Num, Base, MinNumberLength,
+ PadWithZero, /*Negative=*/false, Upper);
+}
+
+static int appendSignedDecimal(char **Buffer, const char *BufferEnd, s64 Num,
+ u8 MinNumberLength, bool PadWithZero) {
+ const bool Negative = (Num < 0);
+ return appendNumber(Buffer, BufferEnd,
+ static_cast<u64>(Negative ? -Num : Num), 10,
+ MinNumberLength, PadWithZero, Negative,
+ /*Upper=*/false);
+}
+
+// Use the fact that explicitly requesting 0 Width (%0s) results in UB and
+// interpret Width == 0 as "no Width requested":
+// Width == 0 - no Width requested
+// Width < 0 - left-justify S and pad it to -Width chars, if necessary
+// Width > 0 - right-justify S, not implemented yet
+static int appendString(char **Buffer, const char *BufferEnd, int Width,
+ int MaxChars, const char *S) {
+ if (!S)
+ S = "<null>";
+ int Res = 0;
+ for (; *S; S++) {
+ if (MaxChars >= 0 && Res >= MaxChars)
+ break;
+ Res += appendChar(Buffer, BufferEnd, *S);
+ }
+ // Only left-justified strings are supported.
+ while (Width < -Res)
+ Res += appendChar(Buffer, BufferEnd, ' ');
+ return Res;
+}
+
+static int appendPointer(char **Buffer, const char *BufferEnd, u64 ptr_value) {
+ int Res = 0;
+ Res += appendString(Buffer, BufferEnd, 0, -1, "0x");
+ Res += appendUnsigned(Buffer, BufferEnd, ptr_value, 16,
+ SCUDO_POINTER_FORMAT_LENGTH, /*PadWithZero=*/true,
+ /*Upper=*/false);
+ return Res;
+}
+
+int formatString(char *Buffer, uptr BufferLength, const char *Format,
+ va_list Args) {
+ UNUSED static const char *PrintfFormatsHelp =
+ "Supported formatString formats: %([0-9]*)?(z|ll)?{d,u,x,X}; %p; "
+ "%[-]([0-9]*)?(\\.\\*)?s; %c\n";
+ RAW_CHECK(Format);
+ RAW_CHECK(BufferLength > 0);
+ const char *BufferEnd = &Buffer[BufferLength - 1];
+ const char *Cur = Format;
+ int Res = 0;
+ for (; *Cur; Cur++) {
+ if (*Cur != '%') {
+ Res += appendChar(&Buffer, BufferEnd, *Cur);
+ continue;
+ }
+ Cur++;
+ const bool LeftJustified = *Cur == '-';
+ if (LeftJustified)
+ Cur++;
+ bool HaveWidth = (*Cur >= '0' && *Cur <= '9');
+ const bool PadWithZero = (*Cur == '0');
+ u8 Width = 0;
+ if (HaveWidth) {
+ while (*Cur >= '0' && *Cur <= '9')
+ Width = static_cast<u8>(Width * 10 + *Cur++ - '0');
+ }
+ const bool HavePrecision = (Cur[0] == '.' && Cur[1] == '*');
+ int Precision = -1;
+ if (HavePrecision) {
+ Cur += 2;
+ Precision = va_arg(Args, int);
+ }
+ const bool HaveZ = (*Cur == 'z');
+ Cur += HaveZ;
+ const bool HaveLL = !HaveZ && (Cur[0] == 'l' && Cur[1] == 'l');
+ Cur += HaveLL * 2;
+ s64 DVal;
+ u64 UVal;
+ const bool HaveLength = HaveZ || HaveLL;
+ const bool HaveFlags = HaveWidth || HaveLength;
+ // At the moment only %s supports precision and left-justification.
+ CHECK(!((Precision >= 0 || LeftJustified) && *Cur != 's'));
+ switch (*Cur) {
+ case 'd': {
+ DVal = HaveLL ? va_arg(Args, s64)
+ : HaveZ ? va_arg(Args, sptr) : va_arg(Args, int);
+ Res += appendSignedDecimal(&Buffer, BufferEnd, DVal, Width, PadWithZero);
+ break;
+ }
+ case 'u':
+ case 'x':
+ case 'X': {
+ UVal = HaveLL ? va_arg(Args, u64)
+ : HaveZ ? va_arg(Args, uptr) : va_arg(Args, unsigned);
+ const bool Upper = (*Cur == 'X');
+ Res += appendUnsigned(&Buffer, BufferEnd, UVal, (*Cur == 'u') ? 10 : 16,
+ Width, PadWithZero, Upper);
+ break;
+ }
+ case 'p': {
+ RAW_CHECK_MSG(!HaveFlags, PrintfFormatsHelp);
+ Res += appendPointer(&Buffer, BufferEnd, va_arg(Args, uptr));
+ break;
+ }
+ case 's': {
+ RAW_CHECK_MSG(!HaveLength, PrintfFormatsHelp);
+ // Only left-justified Width is supported.
+ CHECK(!HaveWidth || LeftJustified);
+ Res += appendString(&Buffer, BufferEnd, LeftJustified ? -Width : Width,
+ Precision, va_arg(Args, char *));
+ break;
+ }
+ case 'c': {
+ RAW_CHECK_MSG(!HaveFlags, PrintfFormatsHelp);
+ Res +=
+ appendChar(&Buffer, BufferEnd, static_cast<char>(va_arg(Args, int)));
+ break;
+ }
+ case '%': {
+ RAW_CHECK_MSG(!HaveFlags, PrintfFormatsHelp);
+ Res += appendChar(&Buffer, BufferEnd, '%');
+ break;
+ }
+ default: {
+ RAW_CHECK_MSG(false, PrintfFormatsHelp);
+ }
+ }
+ }
+ RAW_CHECK(Buffer <= BufferEnd);
+ appendChar(&Buffer, BufferEnd + 1, '\0');
+ return Res;
+}
+
+void ScopedString::append(const char *Format, va_list Args) {
+ CHECK_LT(Length, String.size());
+ formatString(String.data() + Length, String.size() - Length, Format, Args);
+ Length += strlen(String.data() + Length);
+ CHECK_LT(Length, String.size());
+}
+
+FORMAT(2, 3)
+void ScopedString::append(const char *Format, ...) {
+ va_list Args;
+ va_start(Args, Format);
+ append(Format, Args);
+ va_end(Args);
+}
+
+FORMAT(1, 2)
+void Printf(const char *Format, ...) {
+ va_list Args;
+ va_start(Args, Format);
+ ScopedString Msg(512);
+ Msg.append(Format, Args);
+ outputRaw(Msg.data());
+ va_end(Args);
+}
+
+} // namespace scudo
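
One detail worth calling out in the code above: appendChar always reports a
length of 1 but only stores while there is room, so formatString returns the
full untruncated length, snprintf-style. A tiny standalone check of that
idiom:

  #include <cassert>

  // Same contract as the appendChar above: count the character
  // unconditionally, store it only if the buffer has room left.
  static int appendChar(char **Buffer, const char *BufferEnd, char C) {
    if (*Buffer < BufferEnd) {
      **Buffer = C;
      (*Buffer)++;
    }
    return 1;
  }

  int main() {
    char Buf[4];
    char *Cur = Buf;
    const char *End = &Buf[sizeof(Buf) - 1];  // keep one byte for '\0'
    const char Src[5] = {'s', 'c', 'u', 'd', 'o'};
    int Len = 0;
    for (char C : Src)
      Len += appendChar(&Cur, End, C);
    *Cur = '\0';
    assert(Len == 5);                         // untruncated length reported
    assert(Buf[2] == 'u' && Buf[3] == '\0');  // but only "scu" was stored
    return 0;
  }
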
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/string_utils.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/string_utils.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/string_utils.h (revision 351984)
@@ -0,0 +1,42 @@
+//===-- string_utils.h ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_STRING_UTILS_H_
+#define SCUDO_STRING_UTILS_H_
+
+#include "internal_defs.h"
+#include "vector.h"
+
+#include <stdarg.h>
+
+namespace scudo {
+
+class ScopedString {
+public:
+ explicit ScopedString(uptr MaxLength) : String(MaxLength), Length(0) {
+ String[0] = '\0';
+ }
+ uptr length() { return Length; }
+ const char *data() { return String.data(); }
+ void clear() {
+ String[0] = '\0';
+ Length = 0;
+ }
+ void append(const char *Format, va_list Args);
+ void append(const char *Format, ...);
+
+private:
+ Vector<char> String;
+ uptr Length;
+};
+
+void Printf(const char *Format, ...);
+
+} // namespace scudo
+
+#endif // SCUDO_STRING_UTILS_H_
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/tsd.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/tsd.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/tsd.h (revision 351984)
@@ -0,0 +1,66 @@
+//===-- tsd.h ---------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_TSD_H_
+#define SCUDO_TSD_H_
+
+#include "atomic_helpers.h"
+#include "common.h"
+#include "mutex.h"
+
+#include <limits.h> // for PTHREAD_DESTRUCTOR_ITERATIONS
+
+// With some build setups, this might still not be defined.
+#ifndef PTHREAD_DESTRUCTOR_ITERATIONS
+#define PTHREAD_DESTRUCTOR_ITERATIONS 4
+#endif
+
+namespace scudo {
+
+template <class Allocator> struct ALIGNED(SCUDO_CACHE_LINE_SIZE) TSD {
+ typename Allocator::CacheT Cache;
+ typename Allocator::QuarantineCacheT QuarantineCache;
+ u8 DestructorIterations;
+
+ void initLinkerInitialized(Allocator *Instance) {
+ Instance->initCache(&Cache);
+ DestructorIterations = PTHREAD_DESTRUCTOR_ITERATIONS;
+ }
+ void init(Allocator *Instance) {
+ memset(this, 0, sizeof(*this));
+ initLinkerInitialized(Instance);
+ }
+
+ void commitBack(Allocator *Instance) { Instance->commitBack(this); }
+
+ INLINE bool tryLock() {
+ if (Mutex.tryLock()) {
+ atomic_store_relaxed(&Precedence, 0);
+ return true;
+ }
+ if (atomic_load_relaxed(&Precedence) == 0)
+ atomic_store_relaxed(
+ &Precedence,
+ static_cast<uptr>(getMonotonicTime() >> FIRST_32_SECOND_64(16, 0)));
+ return false;
+ }
+ INLINE void lock() {
+ atomic_store_relaxed(&Precedence, 0);
+ Mutex.lock();
+ }
+ INLINE void unlock() { Mutex.unlock(); }
+ INLINE uptr getPrecedence() { return atomic_load_relaxed(&Precedence); }
+
+private:
+ HybridMutex Mutex;
+ atomic_uptr Precedence;
+};
+
+} // namespace scudo
+
+#endif // SCUDO_TSD_H_
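
The Precedence field deserves a note: a failed tryLock stamps the TSD with
the time of first contention, and the shared registry's slow path (in
tsd_shared.h below) prefers the candidate with the lowest non-zero stamp. A
toy sketch of the mechanism, with a spinlock standing in for HybridMutex and
std::chrono for getMonotonicTime (both are assumptions, not the real scudo
primitives):

  #include <atomic>
  #include <cassert>
  #include <chrono>
  #include <cstdint>

  struct SpinMutex {
    std::atomic_flag Flag = ATOMIC_FLAG_INIT;
    bool tryLock() { return !Flag.test_and_set(std::memory_order_acquire); }
    void unlock() { Flag.clear(std::memory_order_release); }
  };

  struct MiniTSD {
    SpinMutex Mutex;
    std::atomic<uint64_t> Precedence{0};

    bool tryLock() {
      if (Mutex.tryLock()) {
        Precedence.store(0, std::memory_order_relaxed);  // contention over
        return true;
      }
      // Only record the first contention time, as in TSD::tryLock.
      if (Precedence.load(std::memory_order_relaxed) == 0)
        Precedence.store(
            static_cast<uint64_t>(
                std::chrono::steady_clock::now().time_since_epoch().count()),
            std::memory_order_relaxed);
      return false;
    }
  };

  int main() {
    MiniTSD T;
    assert(T.tryLock());               // uncontended: acquired, stamp cleared
    assert(!T.tryLock());              // contended: stamped instead
    assert(T.Precedence.load() != 0);  // slow path can now rank this TSD
    T.Mutex.unlock();
    return 0;
  }
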
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/tsd_exclusive.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/tsd_exclusive.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/tsd_exclusive.h (revision 351984)
@@ -0,0 +1,118 @@
+//===-- tsd_exclusive.h -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_TSD_EXCLUSIVE_H_
+#define SCUDO_TSD_EXCLUSIVE_H_
+
+#include "tsd.h"
+
+#include <pthread.h>
+
+namespace scudo {
+
+enum class ThreadState : u8 {
+ NotInitialized = 0,
+ Initialized,
+ TornDown,
+};
+
+template <class Allocator> void teardownThread(void *Ptr);
+
+template <class Allocator> struct TSDRegistryExT {
+ void initLinkerInitialized(Allocator *Instance) {
+ Instance->initLinkerInitialized();
+ CHECK_EQ(pthread_key_create(&PThreadKey, teardownThread<Allocator>), 0);
+ FallbackTSD = reinterpret_cast<TSD<Allocator> *>(
+ map(nullptr, sizeof(TSD<Allocator>), "scudo:tsd"));
+ FallbackTSD->initLinkerInitialized(Instance);
+ Initialized = true;
+ }
+ void init(Allocator *Instance) {
+ memset(this, 0, sizeof(*this));
+ initLinkerInitialized(Instance);
+ }
+
+ void unmapTestOnly() {
+ unmap(reinterpret_cast<void *>(FallbackTSD), sizeof(TSD<Allocator>));
+ }
+
+ ALWAYS_INLINE void initThreadMaybe(Allocator *Instance, bool MinimalInit) {
+ if (LIKELY(State != ThreadState::NotInitialized))
+ return;
+ initThread(Instance, MinimalInit);
+ }
+
+ ALWAYS_INLINE TSD<Allocator> *getTSDAndLock(bool *UnlockRequired) {
+ if (LIKELY(State == ThreadState::Initialized)) {
+ *UnlockRequired = false;
+ return &ThreadTSD;
+ }
+ DCHECK(FallbackTSD);
+ FallbackTSD->lock();
+ *UnlockRequired = true;
+ return FallbackTSD;
+ }
+
+private:
+ void initOnceMaybe(Allocator *Instance) {
+ ScopedLock L(Mutex);
+ if (Initialized)
+ return;
+ initLinkerInitialized(Instance); // Sets Initialized.
+ }
+
+ // Using minimal initialization allows for global initialization while keeping
+ // the thread-specific structure untouched. The fallback structure will be
+ // used instead.
+ NOINLINE void initThread(Allocator *Instance, bool MinimalInit) {
+ initOnceMaybe(Instance);
+ if (MinimalInit)
+ return;
+ CHECK_EQ(
+ pthread_setspecific(PThreadKey, reinterpret_cast<void *>(Instance)), 0);
+ ThreadTSD.initLinkerInitialized(Instance);
+ State = ThreadState::Initialized;
+ }
+
+ pthread_key_t PThreadKey;
+ bool Initialized;
+ TSD<Allocator> *FallbackTSD;
+ HybridMutex Mutex;
+ static THREADLOCAL ThreadState State;
+ static THREADLOCAL TSD<Allocator> ThreadTSD;
+
+ friend void teardownThread<Allocator>(void *Ptr);
+};
+
+template <class Allocator>
+THREADLOCAL TSD<Allocator> TSDRegistryExT<Allocator>::ThreadTSD;
+template <class Allocator>
+THREADLOCAL ThreadState TSDRegistryExT<Allocator>::State;
+
+template <class Allocator> void teardownThread(void *Ptr) {
+ typedef TSDRegistryExT<Allocator> TSDRegistryT;
+ Allocator *Instance = reinterpret_cast<Allocator *>(Ptr);
+ // The glibc POSIX thread-local-storage deallocation routine calls
+ // user-provided destructors in a loop of up to
+ // PTHREAD_DESTRUCTOR_ITERATIONS iterations. We want to be called last, since
+ // other destructors might call free and the like, so we defer until the last
+ // iteration before draining the quarantine and swallowing the cache.
+ if (TSDRegistryT::ThreadTSD.DestructorIterations > 1) {
+ TSDRegistryT::ThreadTSD.DestructorIterations--;
+ // If pthread_setspecific fails, we will go ahead with the teardown.
+ if (LIKELY(pthread_setspecific(Instance->getTSDRegistry()->PThreadKey,
+ Ptr) == 0))
+ return;
+ }
+ TSDRegistryT::ThreadTSD.commitBack(Instance);
+ TSDRegistryT::State = ThreadState::TornDown;
+}
+
+} // namespace scudo
+
+#endif // SCUDO_TSD_EXCLUSIVE_H_
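
The deferral trick in teardownThread generalizes: a pthread TLS destructor
that re-sets its key value is invoked again on the next destructor
iteration, up to PTHREAD_DESTRUCTOR_ITERATIONS times. A minimal
demonstration (it relies on the implementation honoring re-set values, as
glibc does, so consider it a sketch rather than a portable guarantee):

  #include <cassert>
  #include <pthread.h>

  static pthread_key_t Key;
  static int Remaining = 3;  // stand-in for DestructorIterations

  static void teardown(void *Ptr) {
    if (Remaining > 1) {
      Remaining--;
      // Re-setting the value makes the implementation call us again on the
      // next destructor iteration, postponing the real teardown.
      if (pthread_setspecific(Key, Ptr) == 0)
        return;
    }
    // The real teardown (commitBack in the scudo case) would happen here.
  }

  static void *threadMain(void *) {
    pthread_setspecific(Key, reinterpret_cast<void *>(1));
    return nullptr;
  }

  int main() {
    pthread_key_create(&Key, teardown);
    pthread_t T;
    pthread_create(&T, nullptr, threadMain, nullptr);
    pthread_join(T, nullptr);
    assert(Remaining == 1);  // deferred twice before actually running
    return 0;
  }
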
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/tsd_shared.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/tsd_shared.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/tsd_shared.h (revision 351984)
@@ -0,0 +1,169 @@
+//===-- tsd_shared.h --------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_TSD_SHARED_H_
+#define SCUDO_TSD_SHARED_H_
+
+#include "linux.h" // for getAndroidTlsPtr()
+#include "tsd.h"
+
+#include <pthread.h>
+
+namespace scudo {
+
+template <class Allocator, u32 MaxTSDCount> struct TSDRegistrySharedT {
+ void initLinkerInitialized(Allocator *Instance) {
+ Instance->initLinkerInitialized();
+ CHECK_EQ(pthread_key_create(&PThreadKey, nullptr), 0); // For non-TLS
+ NumberOfTSDs = Min(Max(1U, getNumberOfCPUs()), MaxTSDCount);
+ TSDs = reinterpret_cast<TSD<Allocator> *>(
+ map(nullptr, sizeof(TSD<Allocator>) * NumberOfTSDs, "scudo:tsd"));
+ for (u32 I = 0; I < NumberOfTSDs; I++)
+ TSDs[I].initLinkerInitialized(Instance);
+ // Compute all the coprimes of NumberOfTSDs. This will be used to walk the
+ // array of TSDs in a random order. For details, see:
+ // https://lemire.me/blog/2017/09/18/visiting-all-values-in-an-array-exactly-once-in-random-order/
+ for (u32 I = 0; I < NumberOfTSDs; I++) {
+ u32 A = I + 1;
+ u32 B = NumberOfTSDs;
+ // Find the GCD of I + 1 and NumberOfTSDs. If it is 1, they are coprime.
+ while (B != 0) {
+ const u32 T = A;
+ A = B;
+ B = T % B;
+ }
+ if (A == 1)
+ CoPrimes[NumberOfCoPrimes++] = I + 1;
+ }
+ Initialized = true;
+ }
+ void init(Allocator *Instance) {
+ memset(this, 0, sizeof(*this));
+ initLinkerInitialized(Instance);
+ }
+
+ void unmapTestOnly() {
+ unmap(reinterpret_cast<void *>(TSDs),
+ sizeof(TSD<Allocator>) * NumberOfTSDs);
+ }
+
+ ALWAYS_INLINE void initThreadMaybe(Allocator *Instance,
+ UNUSED bool MinimalInit) {
+ if (LIKELY(getCurrentTSD()))
+ return;
+ initThread(Instance);
+ }
+
+ ALWAYS_INLINE TSD<Allocator> *getTSDAndLock(bool *UnlockRequired) {
+ TSD<Allocator> *TSD = getCurrentTSD();
+ DCHECK(TSD);
+ *UnlockRequired = true;
+ // Try to lock the currently associated context.
+ if (TSD->tryLock())
+ return TSD;
+ // If that fails, go down the slow path.
+ return getTSDAndLockSlow(TSD);
+ }
+
+private:
+ ALWAYS_INLINE void setCurrentTSD(TSD<Allocator> *CurrentTSD) {
+#if SCUDO_ANDROID
+ *getAndroidTlsPtr() = reinterpret_cast<uptr>(CurrentTSD);
+#elif SCUDO_LINUX
+ ThreadTSD = CurrentTSD;
+#else
+ CHECK_EQ(
+ pthread_setspecific(PThreadKey, reinterpret_cast<void *>(CurrentTSD)),
+ 0);
+#endif
+ }
+
+ ALWAYS_INLINE TSD<Allocator> *getCurrentTSD() {
+#if SCUDO_ANDROID
+ return reinterpret_cast<TSD<Allocator> *>(*getAndroidTlsPtr());
+#elif SCUDO_LINUX
+ return ThreadTSD;
+#else
+ return reinterpret_cast<TSD<Allocator> *>(pthread_getspecific(PThreadKey));
+#endif
+ }
+
+ void initOnceMaybe(Allocator *Instance) {
+ ScopedLock L(Mutex);
+ if (Initialized)
+ return;
+ initLinkerInitialized(Instance); // Sets Initialized.
+ }
+
+ NOINLINE void initThread(Allocator *Instance) {
+ initOnceMaybe(Instance);
+ // Initial context assignment is done in a plain round-robin fashion.
+ const u32 Index = atomic_fetch_add(&CurrentIndex, 1U, memory_order_relaxed);
+ setCurrentTSD(&TSDs[Index % NumberOfTSDs]);
+ }
+
+ NOINLINE TSD<Allocator> *getTSDAndLockSlow(TSD<Allocator> *CurrentTSD) {
+ if (MaxTSDCount > 1U && NumberOfTSDs > 1U) {
+ // Use the Precedence of the current TSD as our random seed. Since we are
+ // in the slow path, it means that tryLock failed, and as a result it's
+ // very likely that said Precedence is non-zero.
+ u32 RandState = static_cast<u32>(CurrentTSD->getPrecedence());
+ const u32 R = getRandomU32(&RandState);
+ const u32 Inc = CoPrimes[R % NumberOfCoPrimes];
+ u32 Index = R % NumberOfTSDs;
+ uptr LowestPrecedence = UINTPTR_MAX;
+ TSD<Allocator> *CandidateTSD = nullptr;
+ // Go randomly through at most 4 contexts and find a candidate.
+ for (u32 I = 0; I < Min(4U, NumberOfTSDs); I++) {
+ if (TSDs[Index].tryLock()) {
+ setCurrentTSD(&TSDs[Index]);
+ return &TSDs[Index];
+ }
+ const uptr Precedence = TSDs[Index].getPrecedence();
+ // A 0 precedence here means another thread just locked this TSD.
+ if (Precedence && Precedence < LowestPrecedence) {
+ CandidateTSD = &TSDs[Index];
+ LowestPrecedence = Precedence;
+ }
+ Index += Inc;
+ if (Index >= NumberOfTSDs)
+ Index -= NumberOfTSDs;
+ }
+ if (CandidateTSD) {
+ CandidateTSD->lock();
+ setCurrentTSD(CandidateTSD);
+ return CandidateTSD;
+ }
+ }
+ // Last resort, stick with the current one.
+ CurrentTSD->lock();
+ return CurrentTSD;
+ }
+
+ pthread_key_t PThreadKey;
+ atomic_u32 CurrentIndex;
+ u32 NumberOfTSDs;
+ TSD<Allocator> *TSDs;
+ u32 NumberOfCoPrimes;
+ u32 CoPrimes[MaxTSDCount];
+ bool Initialized;
+ HybridMutex Mutex;
+#if SCUDO_LINUX && !SCUDO_ANDROID
+ static THREADLOCAL TSD<Allocator> *ThreadTSD;
+#endif
+};
+
+#if SCUDO_LINUX && !SCUDO_ANDROID
+template <class Allocator, u32 MaxTSDCount>
+THREADLOCAL TSD<Allocator>
+ *TSDRegistrySharedT<Allocator, MaxTSDCount>::ThreadTSD;
+#endif
+
+} // namespace scudo
+
+#endif // SCUDO_TSD_SHARED_H_
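
The coprime machinery above rests on a standard number-theory fact: stepping
through an array of size N with an increment coprime with N visits every
slot exactly once before cycling, which is what makes the slow-path walk a
cheap pseudo-random permutation. A quick standalone check of that property,
using the same GCD loop as initLinkerInitialized:

  #include <cassert>
  #include <cstdint>

  static uint32_t gcd(uint32_t A, uint32_t B) {
    while (B != 0) {
      const uint32_t T = A;
      A = B;
      B = T % B;
    }
    return A;
  }

  int main() {
    const uint32_t N = 6;
    for (uint32_t Inc = 1; Inc <= N; Inc++) {
      if (gcd(Inc, N) != 1)
        continue;  // only coprime increments give a full cycle
      bool Seen[N] = {};
      uint32_t Index = 3;  // arbitrary starting slot
      for (uint32_t K = 0; K < N; K++) {
        Seen[Index] = true;
        Index += Inc;
        if (Index >= N)
          Index -= N;  // same wrap-around as getTSDAndLockSlow (Inc <= N)
      }
      for (bool B : Seen)
        assert(B);  // every slot visited exactly once
    }
    return 0;
  }
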
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/vector.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/vector.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/vector.h (revision 351984)
@@ -0,0 +1,118 @@
+//===-- vector.h ------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_VECTOR_H_
+#define SCUDO_VECTOR_H_
+
+#include "common.h"
+
+#include <string.h>
+
+namespace scudo {
+
+// A low-level vector based on map(). May incur a significant memory overhead
+// for small vectors. The current implementation supports only POD types.
+template <typename T> class VectorNoCtor {
+public:
+ void init(uptr InitialCapacity) {
+ CapacityBytes = 0;
+ Size = 0;
+ Data = nullptr;
+ reserve(InitialCapacity);
+ }
+ void destroy() {
+ if (Data)
+ unmap(Data, CapacityBytes);
+ }
+ T &operator[](uptr I) {
+ DCHECK_LT(I, Size);
+ return Data[I];
+ }
+ const T &operator[](uptr I) const {
+ DCHECK_LT(I, Size);
+ return Data[I];
+ }
+ void push_back(const T &Element) {
+ DCHECK_LE(Size, capacity());
+ if (Size == capacity()) {
+ const uptr NewCapacity = roundUpToPowerOfTwo(Size + 1);
+ reallocate(NewCapacity);
+ }
+ memcpy(&Data[Size++], &Element, sizeof(T));
+ }
+ T &back() {
+ DCHECK_GT(Size, 0);
+ return Data[Size - 1];
+ }
+ void pop_back() {
+ DCHECK_GT(Size, 0);
+ Size--;
+ }
+ uptr size() const { return Size; }
+ const T *data() const { return Data; }
+ T *data() { return Data; }
+ uptr capacity() const { return CapacityBytes / sizeof(T); }
+ void reserve(uptr NewSize) {
+ // Never downsize internal buffer.
+ if (NewSize > capacity())
+ reallocate(NewSize);
+ }
+ void resize(uptr NewSize) {
+ if (NewSize > Size) {
+ reserve(NewSize);
+ memset(&Data[Size], 0, sizeof(T) * (NewSize - Size));
+ }
+ Size = NewSize;
+ }
+
+ void clear() { Size = 0; }
+ bool empty() const { return size() == 0; }
+
+ const T *begin() const { return data(); }
+ T *begin() { return data(); }
+ const T *end() const { return data() + size(); }
+ T *end() { return data() + size(); }
+
+private:
+ void reallocate(uptr NewCapacity) {
+ DCHECK_GT(NewCapacity, 0);
+ DCHECK_LE(Size, NewCapacity);
+ const uptr NewCapacityBytes =
+ roundUpTo(NewCapacity * sizeof(T), getPageSizeCached());
+ T *NewData = (T *)map(nullptr, NewCapacityBytes, "scudo:vector");
+ if (Data) {
+ memcpy(NewData, Data, Size * sizeof(T));
+ unmap(Data, CapacityBytes);
+ }
+ Data = NewData;
+ CapacityBytes = NewCapacityBytes;
+ }
+
+ T *Data;
+ uptr CapacityBytes;
+ uptr Size;
+};
+
+template <typename T> class Vector : public VectorNoCtor<T> {
+public:
+ Vector() { VectorNoCtor<T>::init(1); }
+ explicit Vector(uptr Count) {
+ VectorNoCtor<T>::init(Count);
+ this->resize(Count);
+ }
+ ~Vector() { VectorNoCtor<T>::destroy(); }
+ // Disallow copies and moves.
+ Vector(const Vector &) = delete;
+ Vector &operator=(const Vector &) = delete;
+ Vector(Vector &&) = delete;
+ Vector &operator=(Vector &&) = delete;
+};
+
+} // namespace scudo
+
+#endif // SCUDO_VECTOR_H_
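
A small note on the growth arithmetic in push_back and reallocate: capacity
grows to the next power of two in elements, and the byte capacity is rounded
up to whole pages. A sketch of that arithmetic under an assumed 4 KiB page
size (the helpers are re-derived here, not scudo's own):

  #include <cassert>
  #include <cstdint>

  static uint64_t roundUpToPowerOfTwo(uint64_t X) {
    uint64_t P = 1;
    while (P < X)
      P <<= 1;
    return P;
  }
  static uint64_t roundUpTo(uint64_t X, uint64_t B) {
    return (X + B - 1) / B * B;
  }

  int main() {
    const uint64_t PageSize = 4096;  // assumed for illustration
    const uint64_t ElemSize = 8;
    // A full one-page vector of 512 8-byte elements grows to two pages on
    // the next push_back: roundUpToPowerOfTwo(513) = 1024 elements.
    assert(roundUpTo(roundUpToPowerOfTwo(512 + 1) * ElemSize, PageSize) ==
           2 * PageSize);
    return 0;
  }
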
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/wrappers_c.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/wrappers_c.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/wrappers_c.cc (revision 351984)
@@ -0,0 +1,39 @@
+//===-- wrappers_c.cc -------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "platform.h"
+
+// Skip this compilation unit if compiled as part of Bionic.
+#if !SCUDO_ANDROID || !_BIONIC
+
+#include "allocator_config.h"
+#include "wrappers_c.h"
+#include "wrappers_c_checks.h"
+
+#include <stdint.h>
+#include <stdio.h>
+
+static scudo::Allocator<scudo::Config> Allocator;
+// Pointer to the static allocator so that the C++ wrappers can access it.
+// Technically we could have a completely separate heap for C & C++, but in
+// reality the amount of cross-pollination between the two is staggering.
+scudo::Allocator<scudo::Config> *AllocatorPtr = &Allocator;
+
+extern "C" {
+
+#define SCUDO_PREFIX(name) name
+#define SCUDO_ALLOCATOR Allocator
+#include "wrappers_c.inc"
+#undef SCUDO_ALLOCATOR
+#undef SCUDO_PREFIX
+
+INTERFACE void __scudo_print_stats(void) { Allocator.printStats(); }
+
+} // extern "C"
+
+#endif // !SCUDO_ANDROID || !_BIONIC
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/wrappers_c.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/wrappers_c.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/wrappers_c.h (revision 351984)
@@ -0,0 +1,52 @@
+//===-- wrappers_c.h --------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_WRAPPERS_C_H_
+#define SCUDO_WRAPPERS_C_H_
+
+#include "platform.h"
+#include "stats.h"
+
+// Bionic's struct mallinfo consists of size_t (mallinfo(3) uses int).
+#if SCUDO_ANDROID
+typedef size_t __scudo_mallinfo_data_t;
+#else
+typedef int __scudo_mallinfo_data_t;
+#endif
+
+struct __scudo_mallinfo {
+ __scudo_mallinfo_data_t arena;
+ __scudo_mallinfo_data_t ordblks;
+ __scudo_mallinfo_data_t smblks;
+ __scudo_mallinfo_data_t hblks;
+ __scudo_mallinfo_data_t hblkhd;
+ __scudo_mallinfo_data_t usmblks;
+ __scudo_mallinfo_data_t fsmblks;
+ __scudo_mallinfo_data_t uordblks;
+ __scudo_mallinfo_data_t fordblks;
+ __scudo_mallinfo_data_t keepcost;
+};
+
+// Android sometimes includes malloc.h no matter what, which leads to
+// conflicting return types for mallinfo() if we use our own structure. So if
+// struct mallinfo is declared (#define courtesy of malloc.h), use it directly.
+#if STRUCT_MALLINFO_DECLARED
+#define SCUDO_MALLINFO mallinfo
+#else
+#define SCUDO_MALLINFO __scudo_mallinfo
+#endif
+
+#ifndef M_DECAY_TIME
+#define M_DECAY_TIME -100
+#endif
+
+#ifndef M_PURGE
+#define M_PURGE -101
+#endif
+
+#endif // SCUDO_WRAPPERS_C_H_
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/wrappers_c.inc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/wrappers_c.inc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/wrappers_c.inc (revision 351984)
@@ -0,0 +1,176 @@
+//===-- wrappers_c.inc ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_PREFIX
+#error "Define SCUDO_PREFIX prior to including this file!"
+#endif
+
+// malloc-type functions have to return pointers aligned to std::max_align_t.
+// This is distinct from (1U << SCUDO_MIN_ALIGNMENT_LOG), since C++ new-type
+// functions do not have to abide by the same requirement.
+#ifndef SCUDO_MALLOC_ALIGNMENT
+#define SCUDO_MALLOC_ALIGNMENT FIRST_32_SECOND_64(8U, 16U)
+#endif
+
+INTERFACE WEAK void *SCUDO_PREFIX(calloc)(size_t nmemb, size_t size) {
+ scudo::uptr Product;
+ if (UNLIKELY(scudo::checkForCallocOverflow(size, nmemb, &Product))) {
+ if (SCUDO_ALLOCATOR.canReturnNull()) {
+ errno = ENOMEM;
+ return nullptr;
+ }
+ scudo::reportCallocOverflow(nmemb, size);
+ }
+ return scudo::setErrnoOnNull(SCUDO_ALLOCATOR.allocate(
+ Product, scudo::Chunk::Origin::Malloc, SCUDO_MALLOC_ALIGNMENT, true));
+}
+
+INTERFACE WEAK void SCUDO_PREFIX(free)(void *ptr) {
+ SCUDO_ALLOCATOR.deallocate(ptr, scudo::Chunk::Origin::Malloc);
+}
+
+INTERFACE WEAK struct SCUDO_MALLINFO SCUDO_PREFIX(mallinfo)(void) {
+ struct SCUDO_MALLINFO Info = {};
+ scudo::StatCounters Stats;
+ SCUDO_ALLOCATOR.getStats(Stats);
+ Info.uordblks =
+ static_cast<__scudo_mallinfo_data_t>(Stats[scudo::StatAllocated]);
+ return Info;
+}
+
+INTERFACE WEAK void *SCUDO_PREFIX(malloc)(size_t size) {
+ return scudo::setErrnoOnNull(SCUDO_ALLOCATOR.allocate(
+ size, scudo::Chunk::Origin::Malloc, SCUDO_MALLOC_ALIGNMENT));
+}
+
+#if SCUDO_ANDROID
+INTERFACE WEAK size_t SCUDO_PREFIX(malloc_usable_size)(const void *ptr) {
+#else
+INTERFACE WEAK size_t SCUDO_PREFIX(malloc_usable_size)(void *ptr) {
+#endif
+ return SCUDO_ALLOCATOR.getUsableSize(ptr);
+}
+
+INTERFACE WEAK void *SCUDO_PREFIX(memalign)(size_t alignment, size_t size) {
+ // Android rounds up the alignment to a power of two if it isn't one.
+ if (SCUDO_ANDROID) {
+ if (UNLIKELY(!alignment)) {
+ alignment = 1U;
+ } else {
+ if (UNLIKELY(!scudo::isPowerOfTwo(alignment)))
+ alignment = scudo::roundUpToPowerOfTwo(alignment);
+ }
+ } else {
+ if (UNLIKELY(!scudo::isPowerOfTwo(alignment))) {
+ if (SCUDO_ALLOCATOR.canReturnNull()) {
+ errno = EINVAL;
+ return nullptr;
+ }
+ scudo::reportAlignmentNotPowerOfTwo(alignment);
+ }
+ }
+ return SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Memalign,
+ alignment);
+}
+
+INTERFACE WEAK int SCUDO_PREFIX(posix_memalign)(void **memptr, size_t alignment,
+ size_t size) {
+ if (UNLIKELY(scudo::checkPosixMemalignAlignment(alignment))) {
+ if (!SCUDO_ALLOCATOR.canReturnNull())
+ scudo::reportInvalidPosixMemalignAlignment(alignment);
+ return EINVAL;
+ }
+ void *Ptr =
+ SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Memalign, alignment);
+ if (UNLIKELY(!Ptr))
+ return ENOMEM;
+ *memptr = Ptr;
+ return 0;
+}
+
+INTERFACE WEAK void *SCUDO_PREFIX(pvalloc)(size_t size) {
+ const scudo::uptr PageSize = scudo::getPageSizeCached();
+ if (UNLIKELY(scudo::checkForPvallocOverflow(size, PageSize))) {
+ if (SCUDO_ALLOCATOR.canReturnNull()) {
+ errno = ENOMEM;
+ return nullptr;
+ }
+ scudo::reportPvallocOverflow(size);
+ }
+ // pvalloc(0) should allocate one page.
+ return scudo::setErrnoOnNull(SCUDO_ALLOCATOR.allocate(
+ size ? scudo::roundUpTo(size, PageSize) : PageSize,
+ scudo::Chunk::Origin::Memalign, PageSize));
+}
+
+INTERFACE WEAK void *SCUDO_PREFIX(realloc)(void *ptr, size_t size) {
+ if (!ptr)
+ return scudo::setErrnoOnNull(SCUDO_ALLOCATOR.allocate(
+ size, scudo::Chunk::Origin::Malloc, SCUDO_MALLOC_ALIGNMENT));
+ if (size == 0) {
+ SCUDO_ALLOCATOR.deallocate(ptr, scudo::Chunk::Origin::Malloc);
+ return nullptr;
+ }
+ return scudo::setErrnoOnNull(
+ SCUDO_ALLOCATOR.reallocate(ptr, size, SCUDO_MALLOC_ALIGNMENT));
+}
+
+INTERFACE WEAK void *SCUDO_PREFIX(valloc)(size_t size) {
+ return scudo::setErrnoOnNull(SCUDO_ALLOCATOR.allocate(
+ size, scudo::Chunk::Origin::Memalign, scudo::getPageSizeCached()));
+}
+
+// Bionic wants a function named PREFIX_iterate and not PREFIX_malloc_iterate,
+// which is somewhat inconsistent with the rest; work around that.
+#if SCUDO_ANDROID && _BIONIC
+#define SCUDO_ITERATE iterate
+#else
+#define SCUDO_ITERATE malloc_iterate
+#endif
+
+INTERFACE WEAK int SCUDO_PREFIX(SCUDO_ITERATE)(
+ uintptr_t base, size_t size,
+ void (*callback)(uintptr_t base, size_t size, void *arg), void *arg) {
+ SCUDO_ALLOCATOR.iterateOverChunks(base, size, callback, arg);
+ return 0;
+}
+
+INTERFACE WEAK void SCUDO_PREFIX(malloc_disable)() {
+ SCUDO_ALLOCATOR.disable();
+}
+
+INTERFACE WEAK void SCUDO_PREFIX(malloc_enable)() { SCUDO_ALLOCATOR.enable(); }
+
+INTERFACE WEAK int SCUDO_PREFIX(mallopt)(int param, UNUSED int value) {
+ if (param == M_DECAY_TIME) {
+ // TODO(kostyak): set release_to_os_interval_ms accordingly.
+ return 1;
+ } else if (param == M_PURGE) {
+ SCUDO_ALLOCATOR.releaseToOS();
+ return 1;
+ }
+ return 0;
+}
+
+INTERFACE WEAK void *SCUDO_PREFIX(aligned_alloc)(size_t alignment,
+ size_t size) {
+ if (UNLIKELY(scudo::checkAlignedAllocAlignmentAndSize(alignment, size))) {
+ if (SCUDO_ALLOCATOR.canReturnNull()) {
+ errno = EINVAL;
+ return nullptr;
+ }
+ scudo::reportInvalidAlignedAllocAlignment(alignment, size);
+ }
+ return scudo::setErrnoOnNull(
+ SCUDO_ALLOCATOR.allocate(size, scudo::Chunk::Origin::Malloc, alignment));
+}
+
+INTERFACE WEAK int SCUDO_PREFIX(malloc_info)(int, FILE *) {
+ errno = ENOTSUP;
+ return -1;
+}
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/wrappers_c_bionic.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/wrappers_c_bionic.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/wrappers_c_bionic.cc (revision 351984)
@@ -0,0 +1,49 @@
+//===-- wrappers_c_bionic.cc ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "platform.h"
+
+// This is only used when compiled as part of Bionic.
+#if SCUDO_ANDROID && _BIONIC
+
+#include "allocator_config.h"
+#include "wrappers_c.h"
+#include "wrappers_c_checks.h"
+
+#include <stdint.h>
+#include <stdio.h>
+
+static scudo::Allocator<scudo::AndroidConfig> Allocator;
+static scudo::Allocator<scudo::AndroidSvelteConfig> SvelteAllocator;
+
+extern "C" {
+
+// Regular MallocDispatch definitions.
+#define SCUDO_PREFIX(name) CONCATENATE(scudo_, name)
+#define SCUDO_ALLOCATOR Allocator
+#include "wrappers_c.inc"
+#undef SCUDO_ALLOCATOR
+#undef SCUDO_PREFIX
+
+// Svelte MallocDispatch definitions.
+#define SCUDO_PREFIX(name) CONCATENATE(scudo_svelte_, name)
+#define SCUDO_ALLOCATOR SvelteAllocator
+#include "wrappers_c.inc"
+#undef SCUDO_ALLOCATOR
+#undef SCUDO_PREFIX
+
+// The following is the only function that will end up initializing both
+// allocators, which will result in a slight increase in memory footprint.
+INTERFACE void __scudo_print_stats(void) {
+ Allocator.printStats();
+ SvelteAllocator.printStats();
+}
+
+} // extern "C"
+
+#endif // SCUDO_ANDROID && _BIONIC
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/wrappers_c_checks.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/wrappers_c_checks.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/wrappers_c_checks.h (revision 351984)
@@ -0,0 +1,67 @@
+//===-- wrappers_c_checks.h -------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_CHECKS_H_
+#define SCUDO_CHECKS_H_
+
+#include "common.h"
+
+#include <errno.h>
+
+#ifndef __has_builtin
+#define __has_builtin(X) 0
+#endif
+
+namespace scudo {
+
+// Common errno-setting logic shared by almost all Scudo C wrappers.
+INLINE void *setErrnoOnNull(void *Ptr) {
+ if (UNLIKELY(!Ptr))
+ errno = ENOMEM;
+ return Ptr;
+}
+
+// Checks return true on failure.
+
+// Checks aligned_alloc() parameters, verifies that the alignment is a power of
+// two and that the size is a multiple of alignment.
+INLINE bool checkAlignedAllocAlignmentAndSize(uptr Alignment, uptr Size) {
+ return Alignment == 0 || !isPowerOfTwo(Alignment) ||
+ !isAligned(Size, Alignment);
+}
+
+// Checks posix_memalign() parameters, verifies that alignment is a power of two
+// and a multiple of sizeof(void *).
+INLINE bool checkPosixMemalignAlignment(uptr Alignment) {
+ return Alignment == 0 || !isPowerOfTwo(Alignment) ||
+ !isAligned(Alignment, sizeof(void *));
+}
+
+// Returns true if calloc(Size, N) overflows in the Size*N calculation. Use a
+// builtin supported by recent clang & GCC if it exists, otherwise fall back to
+// a costly division.
+INLINE bool checkForCallocOverflow(uptr Size, uptr N, uptr *Product) {
+#if __has_builtin(__builtin_umull_overflow)
+ return __builtin_umull_overflow(Size, N, Product);
+#else
+ *Product = Size * N;
+ if (!Size)
+ return false;
+ return (*Product / Size) != N;
+#endif
+}
+
+// Returns true if the size passed to pvalloc overflows when rounded to the next
+// multiple of PageSize.
+INLINE bool checkForPvallocOverflow(uptr Size, uptr PageSize) {
+ return roundUpTo(Size, PageSize) < Size;
+}
+
+} // namespace scudo
+
+#endif // SCUDO_CHECKS_H_
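
The division fallback in checkForCallocOverflow is worth spelling out:
unsigned multiplication wraps, so the check recovers the intended product by
dividing back and comparing against N. A standalone equivalent of that
fallback path (fixed to 64-bit types for illustration):

  #include <cassert>
  #include <cstdint>

  static bool callocOverflows(uint64_t Size, uint64_t N, uint64_t *Product) {
    *Product = Size * N;  // well-defined wrap-around for unsigned types
    if (Size == 0)
      return false;       // 0 * N never overflows, and avoids dividing by 0
    return (*Product / Size) != N;
  }

  int main() {
    uint64_t P;
    assert(!callocOverflows(16, 4, &P) && P == 64);
    assert(callocOverflows(UINT64_MAX / 2, 3, &P));  // wraps past 2^64
    assert(!callocOverflows(0, UINT64_MAX, &P) && P == 0);
    return 0;
  }
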
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/wrappers_cpp.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/wrappers_cpp.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/scudo/standalone/wrappers_cpp.cc (revision 351984)
@@ -0,0 +1,107 @@
+//===-- wrappers_cpp.cc -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "platform.h"
+
+// Skip this compilation unit if compiled as part of Bionic.
+#if !SCUDO_ANDROID || !_BIONIC
+
+#include "allocator_config.h"
+
+#include <stdint.h>
+
+extern scudo::Allocator<scudo::Config> *AllocatorPtr;
+
+namespace std {
+struct nothrow_t {};
+enum class align_val_t : size_t {};
+} // namespace std
+
+INTERFACE WEAK void *operator new(size_t size) {
+ return AllocatorPtr->allocate(size, scudo::Chunk::Origin::New);
+}
+INTERFACE WEAK void *operator new[](size_t size) {
+ return AllocatorPtr->allocate(size, scudo::Chunk::Origin::NewArray);
+}
+INTERFACE WEAK void *operator new(size_t size,
+ std::nothrow_t const &) NOEXCEPT {
+ return AllocatorPtr->allocate(size, scudo::Chunk::Origin::New);
+}
+INTERFACE WEAK void *operator new[](size_t size,
+ std::nothrow_t const &) NOEXCEPT {
+ return AllocatorPtr->allocate(size, scudo::Chunk::Origin::NewArray);
+}
+INTERFACE WEAK void *operator new(size_t size, std::align_val_t align) {
+ return AllocatorPtr->allocate(size, scudo::Chunk::Origin::New,
+ static_cast<scudo::uptr>(align));
+}
+INTERFACE WEAK void *operator new[](size_t size, std::align_val_t align) {
+ return AllocatorPtr->allocate(size, scudo::Chunk::Origin::NewArray,
+ static_cast<scudo::uptr>(align));
+}
+INTERFACE WEAK void *operator new(size_t size, std::align_val_t align,
+ std::nothrow_t const &) NOEXCEPT {
+ return AllocatorPtr->allocate(size, scudo::Chunk::Origin::New,
+ static_cast<scudo::uptr>(align));
+}
+INTERFACE WEAK void *operator new[](size_t size, std::align_val_t align,
+ std::nothrow_t const &) NOEXCEPT {
+ return AllocatorPtr->allocate(size, scudo::Chunk::Origin::NewArray,
+ static_cast<scudo::uptr>(align));
+}
+
+INTERFACE WEAK void operator delete(void *ptr)NOEXCEPT {
+ AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::New);
+}
+INTERFACE WEAK void operator delete[](void *ptr) NOEXCEPT {
+ AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::NewArray);
+}
+INTERFACE WEAK void operator delete(void *ptr, std::nothrow_t const &)NOEXCEPT {
+ AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::New);
+}
+INTERFACE WEAK void operator delete[](void *ptr,
+ std::nothrow_t const &) NOEXCEPT {
+ AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::NewArray);
+}
+INTERFACE WEAK void operator delete(void *ptr, size_t size)NOEXCEPT {
+ AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::New, size);
+}
+INTERFACE WEAK void operator delete[](void *ptr, size_t size) NOEXCEPT {
+ AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::NewArray, size);
+}
+INTERFACE WEAK void operator delete(void *ptr, std::align_val_t align)NOEXCEPT {
+ AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::New, 0,
+ static_cast<scudo::uptr>(align));
+}
+INTERFACE WEAK void operator delete[](void *ptr,
+ std::align_val_t align) NOEXCEPT {
+ AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::NewArray, 0,
+ static_cast<scudo::uptr>(align));
+}
+INTERFACE WEAK void operator delete(void *ptr, std::align_val_t align,
+ std::nothrow_t const &)NOEXCEPT {
+ AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::New, 0,
+ static_cast<scudo::uptr>(align));
+}
+INTERFACE WEAK void operator delete[](void *ptr, std::align_val_t align,
+ std::nothrow_t const &) NOEXCEPT {
+ AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::NewArray, 0,
+ static_cast<scudo::uptr>(align));
+}
+INTERFACE WEAK void operator delete(void *ptr, size_t size,
+ std::align_val_t align)NOEXCEPT {
+ AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::New, size,
+ static_cast<scudo::uptr>(align));
+}
+INTERFACE WEAK void operator delete[](void *ptr, size_t size,
+ std::align_val_t align) NOEXCEPT {
+ AllocatorPtr->deallocate(ptr, scudo::Chunk::Origin::NewArray, size,
+ static_cast<scudo::uptr>(align));
+}
+
+#endif // !SCUDO_ANDROID || !_BIONIC
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/stats/stats.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/stats/stats.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/stats/stats.cc (revision 351984)
@@ -0,0 +1,136 @@
+//===-- stats.cc ----------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Sanitizer statistics gathering. Manages statistics for a process and is
+// responsible for writing the report file.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_file.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#if SANITIZER_POSIX
+#include "sanitizer_common/sanitizer_posix.h"
+#endif
+#include "sanitizer_common/sanitizer_symbolizer.h"
+#include "stats/stats.h"
+#if SANITIZER_POSIX
+#include <signal.h>
+#endif
+
+using namespace __sanitizer;
+
+namespace {
+
+InternalMmapVectorNoCtor<StatModule **> modules;
+StaticSpinMutex modules_mutex;
+
+fd_t stats_fd;
+
+void WriteLE(fd_t fd, uptr val) {
+ char chars[sizeof(uptr)];
+ for (unsigned i = 0; i != sizeof(uptr); ++i) {
+ chars[i] = val >> (i * 8);
+ }
+ WriteToFile(fd, chars, sizeof(uptr));
+}
+
+void OpenStatsFile(const char *path_env) {
+ InternalMmapVector<char> path(kMaxPathLength);
+ SubstituteForFlagValue(path_env, path.data(), kMaxPathLength);
+
+ error_t err;
+ stats_fd = OpenFile(path.data(), WrOnly, &err);
+ if (stats_fd == kInvalidFd) {
+ Report("stats: failed to open %s for writing (reason: %d)\n", path.data(),
+ err);
+ return;
+ }
+ char sizeof_uptr = sizeof(uptr);
+ WriteToFile(stats_fd, &sizeof_uptr, 1);
+}
+
+void WriteModuleReport(StatModule **smodp) {
+ CHECK(smodp);
+ const char *path_env = GetEnv("SANITIZER_STATS_PATH");
+ if (!path_env || stats_fd == kInvalidFd)
+ return;
+ if (!stats_fd)
+ OpenStatsFile(path_env);
+ const LoadedModule *mod = Symbolizer::GetOrInit()->FindModuleForAddress(
+ reinterpret_cast<uptr>(smodp));
+ WriteToFile(stats_fd, mod->full_name(),
+ internal_strlen(mod->full_name()) + 1);
+ for (StatModule *smod = *smodp; smod; smod = smod->next) {
+ for (u32 i = 0; i != smod->size; ++i) {
+ StatInfo *s = &smod->infos[i];
+ if (!s->addr)
+ continue;
+ WriteLE(stats_fd, s->addr - mod->base_address());
+ WriteLE(stats_fd, s->data);
+ }
+ }
+ WriteLE(stats_fd, 0);
+ WriteLE(stats_fd, 0);
+}
+
+} // namespace
+
+extern "C"
+SANITIZER_INTERFACE_ATTRIBUTE
+unsigned __sanitizer_stats_register(StatModule **mod) {
+ SpinMutexLock l(&modules_mutex);
+ modules.push_back(mod);
+ return modules.size() - 1;
+}
+
+extern "C"
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_stats_unregister(unsigned index) {
+ SpinMutexLock l(&modules_mutex);
+ WriteModuleReport(modules[index]);
+ modules[index] = 0;
+}
+
+namespace {
+
+void WriteFullReport() {
+ SpinMutexLock l(&modules_mutex);
+ for (StatModule **mod : modules) {
+ if (!mod)
+ continue;
+ WriteModuleReport(mod);
+ }
+ if (stats_fd != 0 && stats_fd != kInvalidFd) {
+ CloseFile(stats_fd);
+ stats_fd = kInvalidFd;
+ }
+}
+
+#if SANITIZER_POSIX
+void USR2Handler(int sig) {
+ WriteFullReport();
+}
+#endif
+
+struct WriteReportOnExitOrSignal {
+ WriteReportOnExitOrSignal() {
+#if SANITIZER_POSIX
+ struct sigaction sigact;
+ internal_memset(&sigact, 0, sizeof(sigact));
+ sigact.sa_handler = USR2Handler;
+ internal_sigaction(SIGUSR2, &sigact, nullptr);
+#endif
+ }
+
+ ~WriteReportOnExitOrSignal() {
+ WriteFullReport();
+ }
+} wr;
+
+} // namespace
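
WriteLE above serializes each word least-significant byte first regardless
of host endianness, which is what keeps the report file format portable
across targets. An equivalent standalone check:

  #include <cassert>
  #include <cstdint>

  static void writeLE(unsigned char *Out, uint64_t Val) {
    for (unsigned I = 0; I != sizeof(Val); ++I)
      Out[I] = static_cast<unsigned char>(Val >> (I * 8));
  }

  int main() {
    unsigned char Buf[8];
    writeLE(Buf, 0x0102030405060708ull);
    assert(Buf[0] == 0x08 && Buf[7] == 0x01);  // LSB first, MSB last
    return 0;
  }
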
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/stats/stats.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/stats/stats.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/stats/stats.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/stats/stats.h (revision 351984)
@@ -0,0 +1,42 @@
+//===-- stats.h -------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Data definitions for sanitizer statistics gathering.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_STATS_STATS_H
+#define SANITIZER_STATS_STATS_H
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+
+namespace __sanitizer {
+
+// Number of bits in data that are used for the sanitizer kind. Needs to match
+// llvm::kSanitizerStatKindBits in
+// llvm/include/llvm/Transforms/Utils/SanitizerStats.h
+enum { kKindBits = 3 };
+
+struct StatInfo {
+ uptr addr;
+ uptr data;
+};
+
+struct StatModule {
+ StatModule *next;
+ u32 size;
+ StatInfo infos[1];
+};
+
+inline uptr CountFromData(uptr data) {
+ return data & ((1ull << (sizeof(uptr) * 8 - kKindBits)) - 1);
+}
+
+}
+
+#endif
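
CountFromData implies a layout worth stating explicitly: the top kKindBits
of each data word hold the sanitizer kind and the remaining bits hold the
counter. A quick check of that packing for a 64-bit word (the example kind
and count values are arbitrary):

  #include <cassert>
  #include <cstdint>

  enum { kKindBits = 3 };

  static uint64_t countFromData(uint64_t Data) {
    return Data & ((1ull << (64 - kKindBits)) - 1);
  }

  int main() {
    const uint64_t Kind = 5;  // fits in kKindBits
    const uint64_t Count = 1234;
    const uint64_t Data = (Kind << (64 - kKindBits)) | Count;
    assert(countFromData(Data) == Count);  // kind stripped, count kept
    assert((Data >> (64 - kKindBits)) == Kind);
    return 0;
  }
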
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/stats/stats.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/stats/stats_client.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/stats/stats_client.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/stats/stats_client.cc (revision 351984)
@@ -0,0 +1,83 @@
+//===-- stats_client.cc ---------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Sanitizer statistics gathering. Manages statistics for a module (executable
+// or DSO) and registers statistics with the process.
+//
+// This is linked into each individual module and cannot directly use functions
+// declared in sanitizer_common.
+//
+//===----------------------------------------------------------------------===//
+
+#ifdef _WIN32
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#else
+#include <dlfcn.h>
+#endif
+#include <stdint.h>
+#include <stdio.h>
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "stats/stats.h"
+
+using namespace __sanitizer;
+
+namespace {
+
+void *LookupSymbolFromMain(const char *name) {
+#ifdef _WIN32
+ return reinterpret_cast<void *>(GetProcAddress(GetModuleHandle(0), name));
+#else
+ return dlsym(RTLD_DEFAULT, name);
+#endif
+}
+
+StatModule *list;
+
+struct RegisterSanStats {
+ unsigned module_id;
+
+ RegisterSanStats() {
+ typedef unsigned (*reg_func_t)(StatModule **);
+ reg_func_t reg_func = reinterpret_cast<reg_func_t>(
+ LookupSymbolFromMain("__sanitizer_stats_register"));
+ if (reg_func)
+ module_id = reg_func(&list);
+ }
+
+ ~RegisterSanStats() {
+ typedef void (*unreg_func_t)(unsigned);
+ unreg_func_t unreg_func = reinterpret_cast<unreg_func_t>(
+ LookupSymbolFromMain("__sanitizer_stats_unregister"));
+ if (unreg_func)
+ unreg_func(module_id);
+ }
+} reg;
+
+}
+
+extern "C" void __sanitizer_stat_init(StatModule *mod) {
+ mod->next = list;
+ list = mod;
+}
+
+extern "C" void __sanitizer_stat_report(StatInfo *s) {
+ s->addr = GET_CALLER_PC();
+#if defined(_WIN64) && !defined(__clang__)
+ uptr old_data = InterlockedIncrement64(reinterpret_cast<LONG64 *>(&s->data));
+#elif defined(_WIN32) && !defined(__clang__)
+ uptr old_data = InterlockedIncrement(&s->data);
+#else
+ uptr old_data = __sync_fetch_and_add(&s->data, 1);
+#endif
+
+ // Overflow check.
+ if (CountFromData(old_data + 1) == 0)
+ Trap();
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/stats/stats_client.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
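
Putting the two files together: each instrumented module carries a static RegisterSanStats object that hands its StatModule chain to __sanitizer_stats_register in the main executable, and __sanitizer_stat_report bumps a per-call-site counter, trapping if the counter field wraps into the kind bits. A sketch of how a process-wide consumer might walk a registered chain (DumpModules is hypothetical; the struct layout is from stats.h above):

    #include <cstdint>
    #include <cstdio>

    typedef uintptr_t uptr;
    typedef uint32_t u32;
    enum { kKindBits = 3 };

    struct StatInfo { uptr addr; uptr data; };
    struct StatModule { StatModule *next; u32 size; StatInfo infos[1]; };

    uptr CountFromData(uptr data) {
      return data & ((1ull << (sizeof(uptr) * 8 - kKindBits)) - 1);
    }

    // Hypothetical consumer: walk every registered module's call sites.
    void DumpModules(StatModule *list) {
      for (StatModule *mod = list; mod; mod = mod->next)
        for (u32 i = 0; i < mod->size; ++i)
          std::printf("pc=%p count=%zu\n", (void *)mod->infos[i].addr,
                      (size_t)CountFromData(mod->infos[i].data));
    }

    int main() {
      StatInfo si = {0x401000, 7};
      StatModule m = {nullptr, 1, {si}};
      DumpModules(&m);
    }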
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_checks.inc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_checks.inc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_checks.inc (revision 351984)
@@ -0,0 +1,59 @@
+//===-- ubsan_checks.inc ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// List of checks handled by UBSan runtime.
+//
+//===----------------------------------------------------------------------===//
+#ifndef UBSAN_CHECK
+# error "Define UBSAN_CHECK prior to including this file!"
+#endif
+
+// UBSAN_CHECK(Name, SummaryKind, FSanitizeFlagName)
+// SummaryKind and FSanitizeFlagName should be string literals.
+
+UBSAN_CHECK(GenericUB, "undefined-behavior", "undefined")
+UBSAN_CHECK(NullPointerUse, "null-pointer-use", "null")
+UBSAN_CHECK(PointerOverflow, "pointer-overflow", "pointer-overflow")
+UBSAN_CHECK(MisalignedPointerUse, "misaligned-pointer-use", "alignment")
+UBSAN_CHECK(AlignmentAssumption, "alignment-assumption", "alignment")
+UBSAN_CHECK(InsufficientObjectSize, "insufficient-object-size", "object-size")
+UBSAN_CHECK(SignedIntegerOverflow, "signed-integer-overflow",
+ "signed-integer-overflow")
+UBSAN_CHECK(UnsignedIntegerOverflow, "unsigned-integer-overflow",
+ "unsigned-integer-overflow")
+UBSAN_CHECK(IntegerDivideByZero, "integer-divide-by-zero",
+ "integer-divide-by-zero")
+UBSAN_CHECK(FloatDivideByZero, "float-divide-by-zero", "float-divide-by-zero")
+UBSAN_CHECK(InvalidBuiltin, "invalid-builtin-use", "invalid-builtin-use")
+UBSAN_CHECK(ImplicitUnsignedIntegerTruncation,
+ "implicit-unsigned-integer-truncation",
+ "implicit-unsigned-integer-truncation")
+UBSAN_CHECK(ImplicitSignedIntegerTruncation,
+ "implicit-signed-integer-truncation",
+ "implicit-signed-integer-truncation")
+UBSAN_CHECK(ImplicitIntegerSignChange,
+ "implicit-integer-sign-change",
+ "implicit-integer-sign-change")
+UBSAN_CHECK(ImplicitSignedIntegerTruncationOrSignChange,
+ "implicit-signed-integer-truncation-or-sign-change",
+ "implicit-signed-integer-truncation,implicit-integer-sign-change")
+UBSAN_CHECK(InvalidShiftBase, "invalid-shift-base", "shift-base")
+UBSAN_CHECK(InvalidShiftExponent, "invalid-shift-exponent", "shift-exponent")
+UBSAN_CHECK(OutOfBoundsIndex, "out-of-bounds-index", "bounds")
+UBSAN_CHECK(UnreachableCall, "unreachable-call", "unreachable")
+UBSAN_CHECK(MissingReturn, "missing-return", "return")
+UBSAN_CHECK(NonPositiveVLAIndex, "non-positive-vla-index", "vla-bound")
+UBSAN_CHECK(FloatCastOverflow, "float-cast-overflow", "float-cast-overflow")
+UBSAN_CHECK(InvalidBoolLoad, "invalid-bool-load", "bool")
+UBSAN_CHECK(InvalidEnumLoad, "invalid-enum-load", "enum")
+UBSAN_CHECK(FunctionTypeMismatch, "function-type-mismatch", "function")
+UBSAN_CHECK(InvalidNullReturn, "invalid-null-return",
+ "returns-nonnull-attribute")
+UBSAN_CHECK(InvalidNullArgument, "invalid-null-argument", "nonnull-attribute")
+UBSAN_CHECK(DynamicTypeMismatch, "dynamic-type-mismatch", "vptr")
+UBSAN_CHECK(CFIBadType, "cfi-bad-type", "cfi")
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_checks.inc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
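
ubsan_checks.inc is an X-macro table: each includer defines UBSAN_CHECK to stamp out whatever it needs (the ErrorType enum in ubsan_diag.h, the summary and flag-name string tables in ubsan_diag.cc, the suppression-type list). The same trick condensed into one self-contained file, with the table held in a macro instead of an include, as a sketch:

    #include <cstdio>

    // Stand-in for ubsan_checks.inc: a tiny two-entry table.
    #define MY_CHECKS(X)                                   \
      X(GenericUB, "undefined-behavior", "undefined")      \
      X(NullPointerUse, "null-pointer-use", "null")

    // Expansion 1: an enum of check kinds.
    #define AS_ENUM(Name, Summary, Flag) Name,
    enum class ErrorType { MY_CHECKS(AS_ENUM) };
    #undef AS_ENUM

    // Expansion 2: a parallel table of summary strings.
    #define AS_SUMMARY(Name, Summary, Flag) Summary,
    static const char *Summaries[] = { MY_CHECKS(AS_SUMMARY) };
    #undef AS_SUMMARY

    int main() {
      std::printf("%s\n", Summaries[(int)ErrorType::NullPointerUse]);
      // prints: null-pointer-use
    }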
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_diag.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_diag.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_diag.cc (revision 351984)
@@ -0,0 +1,443 @@
+//===-- ubsan_diag.cc -----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Diagnostic reporting for the UBSan runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ubsan_platform.h"
+#if CAN_SANITIZE_UB
+#include "ubsan_diag.h"
+#include "ubsan_init.h"
+#include "ubsan_flags.h"
+#include "ubsan_monitor.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_report_decorator.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "sanitizer_common/sanitizer_stacktrace_printer.h"
+#include "sanitizer_common/sanitizer_suppressions.h"
+#include "sanitizer_common/sanitizer_symbolizer.h"
+#include <stdio.h>
+
+using namespace __ubsan;
+
+// UBSan is combined with runtimes that already provide this functionality
+// (e.g., ASan) as well as runtimes that lack it (e.g., scudo). We tried to
+// use weak linkage to resolve this issue, but that is not portable and
+// breaks on Windows.
+// TODO(yln): This is a temporary workaround. GetStackTrace functions will be
+// removed in the future.
+void ubsan_GetStackTrace(BufferedStackTrace *stack, uptr max_depth,
+ uptr pc, uptr bp, void *context, bool fast) {
+ uptr top = 0;
+ uptr bottom = 0;
+ if (StackTrace::WillUseFastUnwind(fast)) {
+ GetThreadStackTopAndBottom(false, &top, &bottom);
+ stack->Unwind(max_depth, pc, bp, nullptr, top, bottom, true);
+ } else
+ stack->Unwind(max_depth, pc, bp, context, 0, 0, false);
+}
+
+static void MaybePrintStackTrace(uptr pc, uptr bp) {
+ // We assume that flags are already parsed, as the UBSan runtime
+ // will already have been entered by the time we print the first
+ // diagnostic message.
+ if (!flags()->print_stacktrace)
+ return;
+
+ BufferedStackTrace stack;
+ ubsan_GetStackTrace(&stack, kStackTraceMax, pc, bp, nullptr,
+ common_flags()->fast_unwind_on_fatal);
+ stack.Print();
+}
+
+static const char *ConvertTypeToString(ErrorType Type) {
+ switch (Type) {
+#define UBSAN_CHECK(Name, SummaryKind, FSanitizeFlagName) \
+ case ErrorType::Name: \
+ return SummaryKind;
+#include "ubsan_checks.inc"
+#undef UBSAN_CHECK
+ }
+ UNREACHABLE("unknown ErrorType!");
+}
+
+static const char *ConvertTypeToFlagName(ErrorType Type) {
+ switch (Type) {
+#define UBSAN_CHECK(Name, SummaryKind, FSanitizeFlagName) \
+ case ErrorType::Name: \
+ return FSanitizeFlagName;
+#include "ubsan_checks.inc"
+#undef UBSAN_CHECK
+ }
+ UNREACHABLE("unknown ErrorType!");
+}
+
+static void MaybeReportErrorSummary(Location Loc, ErrorType Type) {
+ if (!common_flags()->print_summary)
+ return;
+ if (!flags()->report_error_type)
+ Type = ErrorType::GenericUB;
+ const char *ErrorKind = ConvertTypeToString(Type);
+ if (Loc.isSourceLocation()) {
+ SourceLocation SLoc = Loc.getSourceLocation();
+ if (!SLoc.isInvalid()) {
+ AddressInfo AI;
+ AI.file = internal_strdup(SLoc.getFilename());
+ AI.line = SLoc.getLine();
+ AI.column = SLoc.getColumn();
+ AI.function = internal_strdup(""); // Avoid printing ?? as function name.
+ ReportErrorSummary(ErrorKind, AI, GetSanititizerToolName());
+ AI.Clear();
+ return;
+ }
+ } else if (Loc.isSymbolizedStack()) {
+ const AddressInfo &AI = Loc.getSymbolizedStack()->info;
+ ReportErrorSummary(ErrorKind, AI, GetSanititizerToolName());
+ return;
+ }
+ ReportErrorSummary(ErrorKind, GetSanititizerToolName());
+}
+
+namespace {
+class Decorator : public SanitizerCommonDecorator {
+ public:
+ Decorator() : SanitizerCommonDecorator() {}
+ const char *Highlight() const { return Green(); }
+ const char *Note() const { return Black(); }
+};
+}
+
+SymbolizedStack *__ubsan::getSymbolizedLocation(uptr PC) {
+ InitAsStandaloneIfNecessary();
+ return Symbolizer::GetOrInit()->SymbolizePC(PC);
+}
+
+Diag &Diag::operator<<(const TypeDescriptor &V) {
+ return AddArg(V.getTypeName());
+}
+
+Diag &Diag::operator<<(const Value &V) {
+ if (V.getType().isSignedIntegerTy())
+ AddArg(V.getSIntValue());
+ else if (V.getType().isUnsignedIntegerTy())
+ AddArg(V.getUIntValue());
+ else if (V.getType().isFloatTy())
+ AddArg(V.getFloatValue());
+ else
+ AddArg("<unknown>");
+ return *this;
+}
+
+/// Hexadecimal printing for numbers too large for Printf to handle directly.
+static void RenderHex(InternalScopedString *Buffer, UIntMax Val) {
+#if HAVE_INT128_T
+ Buffer->append("0x%08x%08x%08x%08x", (unsigned int)(Val >> 96),
+ (unsigned int)(Val >> 64), (unsigned int)(Val >> 32),
+ (unsigned int)(Val));
+#else
+ UNREACHABLE("long long smaller than 64 bits?");
+#endif
+}
+
+static void RenderLocation(InternalScopedString *Buffer, Location Loc) {
+ switch (Loc.getKind()) {
+ case Location::LK_Source: {
+ SourceLocation SLoc = Loc.getSourceLocation();
+ if (SLoc.isInvalid())
+ Buffer->append("<unknown>");
+ else
+ RenderSourceLocation(Buffer, SLoc.getFilename(), SLoc.getLine(),
+ SLoc.getColumn(), common_flags()->symbolize_vs_style,
+ common_flags()->strip_path_prefix);
+ return;
+ }
+ case Location::LK_Memory:
+ Buffer->append("%p", Loc.getMemoryLocation());
+ return;
+ case Location::LK_Symbolized: {
+ const AddressInfo &Info = Loc.getSymbolizedStack()->info;
+ if (Info.file)
+ RenderSourceLocation(Buffer, Info.file, Info.line, Info.column,
+ common_flags()->symbolize_vs_style,
+ common_flags()->strip_path_prefix);
+ else if (Info.module)
+ RenderModuleLocation(Buffer, Info.module, Info.module_offset,
+ Info.module_arch, common_flags()->strip_path_prefix);
+ else
+ Buffer->append("%p", Info.address);
+ return;
+ }
+ case Location::LK_Null:
+ Buffer->append("<unknown>");
+ return;
+ }
+}
+
+static void RenderText(InternalScopedString *Buffer, const char *Message,
+ const Diag::Arg *Args) {
+ for (const char *Msg = Message; *Msg; ++Msg) {
+ if (*Msg != '%') {
+ Buffer->append("%c", *Msg);
+ continue;
+ }
+ const Diag::Arg &A = Args[*++Msg - '0'];
+ switch (A.Kind) {
+ case Diag::AK_String:
+ Buffer->append("%s", A.String);
+ break;
+ case Diag::AK_TypeName: {
+ if (SANITIZER_WINDOWS)
+ // The Windows implementation demangles names early.
+ Buffer->append("'%s'", A.String);
+ else
+ Buffer->append("'%s'", Symbolizer::GetOrInit()->Demangle(A.String));
+ break;
+ }
+ case Diag::AK_SInt:
+ // 'long long' is guaranteed to be at least 64 bits wide.
+ if (A.SInt >= INT64_MIN && A.SInt <= INT64_MAX)
+ Buffer->append("%lld", (long long)A.SInt);
+ else
+ RenderHex(Buffer, A.SInt);
+ break;
+ case Diag::AK_UInt:
+ if (A.UInt <= UINT64_MAX)
+ Buffer->append("%llu", (unsigned long long)A.UInt);
+ else
+ RenderHex(Buffer, A.UInt);
+ break;
+ case Diag::AK_Float: {
+ // FIXME: Support floating-point formatting in sanitizer_common's
+ // printf, and stop using snprintf here.
+ char FloatBuffer[32];
+#if SANITIZER_WINDOWS
+ sprintf_s(FloatBuffer, sizeof(FloatBuffer), "%Lg", (long double)A.Float);
+#else
+ snprintf(FloatBuffer, sizeof(FloatBuffer), "%Lg", (long double)A.Float);
+#endif
+ Buffer->append("%s", FloatBuffer);
+ break;
+ }
+ case Diag::AK_Pointer:
+ Buffer->append("%p", A.Pointer);
+ break;
+ }
+ }
+}
+
+/// Find the earliest-starting range in Ranges which ends after Loc.
+static Range *upperBound(MemoryLocation Loc, Range *Ranges,
+ unsigned NumRanges) {
+ Range *Best = 0;
+ for (unsigned I = 0; I != NumRanges; ++I)
+ if (Ranges[I].getEnd().getMemoryLocation() > Loc &&
+ (!Best ||
+ Best->getStart().getMemoryLocation() >
+ Ranges[I].getStart().getMemoryLocation()))
+ Best = &Ranges[I];
+ return Best;
+}
+
+static inline uptr subtractNoOverflow(uptr LHS, uptr RHS) {
+ return (LHS < RHS) ? 0 : LHS - RHS;
+}
+
+static inline uptr addNoOverflow(uptr LHS, uptr RHS) {
+ const uptr Limit = (uptr)-1;
+ return (LHS > Limit - RHS) ? Limit : LHS + RHS;
+}
+
+/// Render a snippet of the address space near a location.
+static void PrintMemorySnippet(const Decorator &Decor, MemoryLocation Loc,
+ Range *Ranges, unsigned NumRanges,
+ const Diag::Arg *Args) {
+ // Show at least the 8 bytes surrounding Loc.
+ const unsigned MinBytesNearLoc = 4;
+ MemoryLocation Min = subtractNoOverflow(Loc, MinBytesNearLoc);
+ MemoryLocation Max = addNoOverflow(Loc, MinBytesNearLoc);
+ MemoryLocation OrigMin = Min;
+ for (unsigned I = 0; I < NumRanges; ++I) {
+ Min = __sanitizer::Min(Ranges[I].getStart().getMemoryLocation(), Min);
+ Max = __sanitizer::Max(Ranges[I].getEnd().getMemoryLocation(), Max);
+ }
+
+ // If we have too many interesting bytes, prefer to show bytes after Loc.
+ const unsigned BytesToShow = 32;
+ if (Max - Min > BytesToShow)
+ Min = __sanitizer::Min(Max - BytesToShow, OrigMin);
+ Max = addNoOverflow(Min, BytesToShow);
+
+ if (!IsAccessibleMemoryRange(Min, Max - Min)) {
+ Printf("<memory cannot be printed>\n");
+ return;
+ }
+
+ // Emit data.
+ InternalScopedString Buffer(1024);
+ for (uptr P = Min; P != Max; ++P) {
+ unsigned char C = *reinterpret_cast<const unsigned char*>(P);
+ Buffer.append("%s%02x", (P % 8 == 0) ? " " : " ", C);
+ }
+ Buffer.append("\n");
+
+ // Emit highlights.
+ Buffer.append(Decor.Highlight());
+ Range *InRange = upperBound(Min, Ranges, NumRanges);
+ for (uptr P = Min; P != Max; ++P) {
+ char Pad = ' ', Byte = ' ';
+ if (InRange && InRange->getEnd().getMemoryLocation() == P)
+ InRange = upperBound(P, Ranges, NumRanges);
+ if (!InRange && P > Loc)
+ break;
+ if (InRange && InRange->getStart().getMemoryLocation() < P)
+ Pad = '~';
+ if (InRange && InRange->getStart().getMemoryLocation() <= P)
+ Byte = '~';
+ if (P % 8 == 0)
+ Buffer.append("%c", Pad);
+ Buffer.append("%c", Pad);
+ Buffer.append("%c", P == Loc ? '^' : Byte);
+ Buffer.append("%c", Byte);
+ }
+ Buffer.append("%s\n", Decor.Default());
+
+ // Go over the line again, and print names for the ranges.
+ InRange = 0;
+ unsigned Spaces = 0;
+ for (uptr P = Min; P != Max; ++P) {
+ if (!InRange || InRange->getEnd().getMemoryLocation() == P)
+ InRange = upperBound(P, Ranges, NumRanges);
+ if (!InRange)
+ break;
+
+ Spaces += (P % 8) == 0 ? 2 : 1;
+
+ if (InRange && InRange->getStart().getMemoryLocation() == P) {
+ while (Spaces--)
+ Buffer.append(" ");
+ RenderText(&Buffer, InRange->getText(), Args);
+ Buffer.append("\n");
+ // FIXME: We only support naming one range for now!
+ break;
+ }
+
+ Spaces += 2;
+ }
+
+ Printf("%s", Buffer.data());
+ // FIXME: Print names for anything we can identify within the line:
+ //
+ // * If we can identify the memory itself as belonging to a particular
+ // global, stack variable, or dynamic allocation, then do so.
+ //
+ // * If we have a pointer-size, pointer-aligned range highlighted,
+ // determine whether the value of that range is a pointer to an
+ // entity which we can name, and if so, print that name.
+ //
+ // This needs an external symbolizer, or (preferably) ASan instrumentation.
+}
+
+Diag::~Diag() {
+ // All diagnostics should be printed under report mutex.
+ ScopedReport::CheckLocked();
+ Decorator Decor;
+ InternalScopedString Buffer(1024);
+
+ // Prepare a report that a monitor process can inspect.
+ if (Level == DL_Error) {
+ RenderText(&Buffer, Message, Args);
+ UndefinedBehaviorReport UBR{ConvertTypeToString(ET), Loc, Buffer};
+ Buffer.clear();
+ }
+
+ Buffer.append(Decor.Bold());
+ RenderLocation(&Buffer, Loc);
+ Buffer.append(":");
+
+ switch (Level) {
+ case DL_Error:
+ Buffer.append("%s runtime error: %s%s", Decor.Warning(), Decor.Default(),
+ Decor.Bold());
+ break;
+
+ case DL_Note:
+ Buffer.append("%s note: %s", Decor.Note(), Decor.Default());
+ break;
+ }
+
+ RenderText(&Buffer, Message, Args);
+
+ Buffer.append("%s\n", Decor.Default());
+ Printf("%s", Buffer.data());
+
+ if (Loc.isMemoryLocation())
+ PrintMemorySnippet(Decor, Loc.getMemoryLocation(), Ranges, NumRanges, Args);
+}
+
+ScopedReport::Initializer::Initializer() { InitAsStandaloneIfNecessary(); }
+
+ScopedReport::ScopedReport(ReportOptions Opts, Location SummaryLoc,
+ ErrorType Type)
+ : Opts(Opts), SummaryLoc(SummaryLoc), Type(Type) {}
+
+ScopedReport::~ScopedReport() {
+ MaybePrintStackTrace(Opts.pc, Opts.bp);
+ MaybeReportErrorSummary(SummaryLoc, Type);
+ if (flags()->halt_on_error)
+ Die();
+}
+
+ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)];
+static SuppressionContext *suppression_ctx = nullptr;
+static const char kVptrCheck[] = "vptr_check";
+static const char *kSuppressionTypes[] = {
+#define UBSAN_CHECK(Name, SummaryKind, FSanitizeFlagName) FSanitizeFlagName,
+#include "ubsan_checks.inc"
+#undef UBSAN_CHECK
+ kVptrCheck,
+};
+
+void __ubsan::InitializeSuppressions() {
+ CHECK_EQ(nullptr, suppression_ctx);
+ suppression_ctx = new (suppression_placeholder) // NOLINT
+ SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
+ suppression_ctx->ParseFromFile(flags()->suppressions);
+}
+
+bool __ubsan::IsVptrCheckSuppressed(const char *TypeName) {
+ InitAsStandaloneIfNecessary();
+ CHECK(suppression_ctx);
+ Suppression *s;
+ return suppression_ctx->Match(TypeName, kVptrCheck, &s);
+}
+
+bool __ubsan::IsPCSuppressed(ErrorType ET, uptr PC, const char *Filename) {
+ InitAsStandaloneIfNecessary();
+ CHECK(suppression_ctx);
+ const char *SuppType = ConvertTypeToFlagName(ET);
+ // Fast path: don't symbolize PC if there is no suppressions for given UB
+ // type.
+ if (!suppression_ctx->HasSuppressionType(SuppType))
+ return false;
+ Suppression *s = nullptr;
+ // Suppress by file name known to runtime.
+ if (Filename != nullptr && suppression_ctx->Match(Filename, SuppType, &s))
+ return true;
+ // Suppress by module name.
+ if (const char *Module = Symbolizer::GetOrInit()->GetModuleNameForPc(PC)) {
+ if (suppression_ctx->Match(Module, SuppType, &s))
+ return true;
+ }
+ // Suppress by function or source file name from debug info.
+ SymbolizedStackHolder Stack(Symbolizer::GetOrInit()->SymbolizePC(PC));
+ const AddressInfo &AI = Stack.get()->info;
+ return suppression_ctx->Match(AI.function, SuppType, &s) ||
+ suppression_ctx->Match(AI.file, SuppType, &s);
+}
+
+#endif // CAN_SANITIZE_UB
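
RenderText above implements a tiny positional formatter: a literal message with %0..%9 placeholders is expanded from a side array of typed Diag::Arg values. A minimal standalone version of the same scheme, with plain strings in place of the typed argument union:

    #include <cstdio>
    #include <string>

    // Expand %0..%9 placeholders from a side table, as RenderText does
    // with Diag::Arg (strings only here, for brevity).
    static std::string RenderText(const char *Message, const char *Args[]) {
      std::string Out;
      for (const char *Msg = Message; *Msg; ++Msg) {
        if (*Msg != '%') { Out += *Msg; continue; }
        Out += Args[*++Msg - '0'];  // '%' is always followed by a digit
      }
      return Out;
    }

    int main() {
      const char *Args[] = {"load of", "int"};
      std::puts(RenderText("%0 null pointer of type %1", Args).c_str());
      // prints: load of null pointer of type int
    }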
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_diag.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_diag.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_diag.h (revision 351984)
@@ -0,0 +1,266 @@
+//===-- ubsan_diag.h --------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Diagnostics emission for Clang's undefined behavior sanitizer.
+//
+//===----------------------------------------------------------------------===//
+#ifndef UBSAN_DIAG_H
+#define UBSAN_DIAG_H
+
+#include "ubsan_value.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "sanitizer_common/sanitizer_symbolizer.h"
+
+namespace __ubsan {
+
+class SymbolizedStackHolder {
+ SymbolizedStack *Stack;
+
+ void clear() {
+ if (Stack)
+ Stack->ClearAll();
+ }
+
+public:
+ explicit SymbolizedStackHolder(SymbolizedStack *Stack = nullptr)
+ : Stack(Stack) {}
+ ~SymbolizedStackHolder() { clear(); }
+ void reset(SymbolizedStack *S) {
+ if (Stack != S)
+ clear();
+ Stack = S;
+ }
+ const SymbolizedStack *get() const { return Stack; }
+};
+
+SymbolizedStack *getSymbolizedLocation(uptr PC);
+
+inline SymbolizedStack *getCallerLocation(uptr CallerPC) {
+ CHECK(CallerPC);
+ uptr PC = StackTrace::GetPreviousInstructionPc(CallerPC);
+ return getSymbolizedLocation(PC);
+}
+
+/// A location of some data within the program's address space.
+typedef uptr MemoryLocation;
+
+/// \brief Location at which a diagnostic can be emitted. Either a
+/// SourceLocation, a MemoryLocation, or a SymbolizedStack.
+class Location {
+public:
+ enum LocationKind { LK_Null, LK_Source, LK_Memory, LK_Symbolized };
+
+private:
+ LocationKind Kind;
+ // FIXME: In C++11, wrap these in an anonymous union.
+ SourceLocation SourceLoc;
+ MemoryLocation MemoryLoc;
+ const SymbolizedStack *SymbolizedLoc; // Not owned.
+
+public:
+ Location() : Kind(LK_Null) {}
+ Location(SourceLocation Loc) :
+ Kind(LK_Source), SourceLoc(Loc) {}
+ Location(MemoryLocation Loc) :
+ Kind(LK_Memory), MemoryLoc(Loc) {}
+ // The SymbolizedStackHolder must outlive the Location object.
+ Location(const SymbolizedStackHolder &Stack) :
+ Kind(LK_Symbolized), SymbolizedLoc(Stack.get()) {}
+
+ LocationKind getKind() const { return Kind; }
+
+ bool isSourceLocation() const { return Kind == LK_Source; }
+ bool isMemoryLocation() const { return Kind == LK_Memory; }
+ bool isSymbolizedStack() const { return Kind == LK_Symbolized; }
+
+ SourceLocation getSourceLocation() const {
+ CHECK(isSourceLocation());
+ return SourceLoc;
+ }
+ MemoryLocation getMemoryLocation() const {
+ CHECK(isMemoryLocation());
+ return MemoryLoc;
+ }
+ const SymbolizedStack *getSymbolizedStack() const {
+ CHECK(isSymbolizedStack());
+ return SymbolizedLoc;
+ }
+};
+
+/// A diagnostic severity level.
+enum DiagLevel {
+ DL_Error, ///< An error.
+ DL_Note ///< A note, attached to a prior diagnostic.
+};
+
+/// \brief Annotation for a range of locations in a diagnostic.
+class Range {
+ Location Start, End;
+ const char *Text;
+
+public:
+ Range() : Start(), End(), Text() {}
+ Range(MemoryLocation Start, MemoryLocation End, const char *Text)
+ : Start(Start), End(End), Text(Text) {}
+ Location getStart() const { return Start; }
+ Location getEnd() const { return End; }
+ const char *getText() const { return Text; }
+};
+
+/// \brief A C++ type name. Really just a strong typedef for 'const char*'.
+class TypeName {
+ const char *Name;
+public:
+ TypeName(const char *Name) : Name(Name) {}
+ const char *getName() const { return Name; }
+};
+
+enum class ErrorType {
+#define UBSAN_CHECK(Name, SummaryKind, FSanitizeFlagName) Name,
+#include "ubsan_checks.inc"
+#undef UBSAN_CHECK
+};
+
+/// \brief Representation of an in-flight diagnostic.
+///
+/// Temporary \c Diag instances are created by the handler routines to
+/// accumulate arguments for a diagnostic. The destructor emits the diagnostic
+/// message.
+class Diag {
+ /// The location at which the problem occurred.
+ Location Loc;
+
+ /// The diagnostic level.
+ DiagLevel Level;
+
+ /// The error type.
+ ErrorType ET;
+
+ /// The message which will be emitted, with %0, %1, ... placeholders for
+ /// arguments.
+ const char *Message;
+
+public:
+ /// Kinds of arguments, corresponding to members of \c Arg's union.
+ enum ArgKind {
+ AK_String, ///< A string argument, displayed as-is.
+ AK_TypeName,///< A C++ type name, possibly demangled before display.
+ AK_UInt, ///< An unsigned integer argument.
+ AK_SInt, ///< A signed integer argument.
+ AK_Float, ///< A floating-point argument.
+ AK_Pointer ///< A pointer argument, displayed in hexadecimal.
+ };
+
+ /// An individual diagnostic message argument.
+ struct Arg {
+ Arg() {}
+ Arg(const char *String) : Kind(AK_String), String(String) {}
+ Arg(TypeName TN) : Kind(AK_TypeName), String(TN.getName()) {}
+ Arg(UIntMax UInt) : Kind(AK_UInt), UInt(UInt) {}
+ Arg(SIntMax SInt) : Kind(AK_SInt), SInt(SInt) {}
+ Arg(FloatMax Float) : Kind(AK_Float), Float(Float) {}
+ Arg(const void *Pointer) : Kind(AK_Pointer), Pointer(Pointer) {}
+
+ ArgKind Kind;
+ union {
+ const char *String;
+ UIntMax UInt;
+ SIntMax SInt;
+ FloatMax Float;
+ const void *Pointer;
+ };
+ };
+
+private:
+ static const unsigned MaxArgs = 8;
+ static const unsigned MaxRanges = 1;
+
+ /// The arguments which have been added to this diagnostic so far.
+ Arg Args[MaxArgs];
+ unsigned NumArgs;
+
+ /// The ranges which have been added to this diagnostic so far.
+ Range Ranges[MaxRanges];
+ unsigned NumRanges;
+
+ Diag &AddArg(Arg A) {
+ CHECK(NumArgs != MaxArgs);
+ Args[NumArgs++] = A;
+ return *this;
+ }
+
+ Diag &AddRange(Range A) {
+ CHECK(NumRanges != MaxRanges);
+ Ranges[NumRanges++] = A;
+ return *this;
+ }
+
+ /// \c Diag objects are not copyable.
+ Diag(const Diag &); // NOT IMPLEMENTED
+ Diag &operator=(const Diag &);
+
+public:
+ Diag(Location Loc, DiagLevel Level, ErrorType ET, const char *Message)
+ : Loc(Loc), Level(Level), ET(ET), Message(Message), NumArgs(0),
+ NumRanges(0) {}
+ ~Diag();
+
+ Diag &operator<<(const char *Str) { return AddArg(Str); }
+ Diag &operator<<(TypeName TN) { return AddArg(TN); }
+ Diag &operator<<(unsigned long long V) { return AddArg(UIntMax(V)); }
+ Diag &operator<<(const void *V) { return AddArg(V); }
+ Diag &operator<<(const TypeDescriptor &V);
+ Diag &operator<<(const Value &V);
+ Diag &operator<<(const Range &R) { return AddRange(R); }
+};
+
+struct ReportOptions {
+ // If FromUnrecoverableHandler is specified, the UBSan runtime handler is
+ // not expected to return.
+ bool FromUnrecoverableHandler;
+ /// pc/bp are used to unwind the stack trace.
+ uptr pc;
+ uptr bp;
+};
+
+bool ignoreReport(SourceLocation SLoc, ReportOptions Opts, ErrorType ET);
+
+#define GET_REPORT_OPTIONS(unrecoverable_handler) \
+ GET_CALLER_PC_BP; \
+ ReportOptions Opts = {unrecoverable_handler, pc, bp}
+
+/// \brief Instantiate this class before printing diagnostics in the error
+/// report. This class ensures that reports from different threads and from
+/// different sanitizers won't be mixed.
+class ScopedReport {
+ struct Initializer {
+ Initializer();
+ };
+ Initializer initializer_;
+ ScopedErrorReportLock report_lock_;
+
+ ReportOptions Opts;
+ Location SummaryLoc;
+ ErrorType Type;
+
+public:
+ ScopedReport(ReportOptions Opts, Location SummaryLoc, ErrorType Type);
+ ~ScopedReport();
+
+ static void CheckLocked() { ScopedErrorReportLock::CheckLocked(); }
+};
+
+void InitializeSuppressions();
+bool IsVptrCheckSuppressed(const char *TypeName);
+// Sometimes the UBSan runtime can learn the filename from handler arguments,
+// even if debug info is missing.
+bool IsPCSuppressed(ErrorType ET, uptr PC, const char *Filename);
+
+} // namespace __ubsan
+
+#endif // UBSAN_DIAG_H
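
The handlers in ubsan_handlers.cc (further below) use these classes in a fixed shape: acquire a location, classify the failure as an ErrorType, bail out via ignoreReport, then construct a ScopedReport and stream a Diag. A condensed sketch of that shape, assuming the ubsan_diag.h API above; it compiles only inside the runtime tree, and handleSomethingImpl is a made-up name:

    #include "ubsan_diag.h"
    using namespace __ubsan;

    static void handleSomethingImpl(SourceLocation SLoc, ReportOptions Opts) {
      // Real handlers first call Data->Loc.acquire() to claim the report.
      ErrorType ET = ErrorType::GenericUB;  // classify the failure first
      if (ignoreReport(SLoc, Opts, ET))     // dedup + suppression check
        return;
      ScopedReport R(Opts, SLoc, ET);       // report lock, summary, halt_on_error
      Diag(SLoc, DL_Error, ET, "something went wrong with %0") << "an argument";
    }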
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_diag_standalone.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_diag_standalone.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_diag_standalone.cc (revision 351984)
@@ -0,0 +1,40 @@
+//===-- ubsan_diag_standalone.cc ------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Diagnostic reporting for the standalone UBSan runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ubsan_platform.h"
+#if CAN_SANITIZE_UB
+#include "ubsan_diag.h"
+
+using namespace __ubsan;
+
+void __sanitizer::BufferedStackTrace::UnwindImpl(
+ uptr pc, uptr bp, void *context, bool request_fast, u32 max_depth) {
+ uptr top = 0;
+ uptr bottom = 0;
+ if (StackTrace::WillUseFastUnwind(request_fast)) {
+ GetThreadStackTopAndBottom(false, &top, &bottom);
+ Unwind(max_depth, pc, bp, nullptr, top, bottom, true);
+ } else
+ Unwind(max_depth, pc, bp, context, 0, 0, false);
+}
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE
+void __sanitizer_print_stack_trace() {
+ GET_CURRENT_PC_BP;
+ BufferedStackTrace stack;
+ stack.Unwind(pc, bp, nullptr, common_flags()->fast_unwind_on_fatal);
+ stack.Print();
+}
+} // extern "C"
+
+#endif // CAN_SANITIZE_UB
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_diag_standalone.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
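
__sanitizer_print_stack_trace above is a public entry point: any program linked against the standalone UBSan runtime can call it to dump its own stack. A minimal caller, as a sketch (the extern declaration mirrors the definition above; build with -fsanitize=undefined so the runtime is linked in):

    extern "C" void __sanitizer_print_stack_trace();

    void deep() { __sanitizer_print_stack_trace(); }  // prints frames up to main
    int main() { deep(); }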
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_flags.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_flags.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_flags.cc (revision 351984)
@@ -0,0 +1,85 @@
+//===-- ubsan_flags.cc ----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Runtime flags for UndefinedBehaviorSanitizer.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ubsan_platform.h"
+#if CAN_SANITIZE_UB
+#include "ubsan_flags.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_flag_parser.h"
+
+#include <stdlib.h>
+
+namespace __ubsan {
+
+const char *MaybeCallUbsanDefaultOptions() {
+ return (&__ubsan_default_options) ? __ubsan_default_options() : "";
+}
+
+static const char *GetFlag(const char *flag) {
+ // We cannot call getenv() from inside a preinit array initializer, so use
+ // the sanitizer's own GetEnv() when a preinit array may be in use.
+ if (SANITIZER_CAN_USE_PREINIT_ARRAY) {
+ return GetEnv(flag);
+ } else {
+ return getenv(flag);
+ }
+}
+
+Flags ubsan_flags;
+
+void Flags::SetDefaults() {
+#define UBSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
+#include "ubsan_flags.inc"
+#undef UBSAN_FLAG
+}
+
+void RegisterUbsanFlags(FlagParser *parser, Flags *f) {
+#define UBSAN_FLAG(Type, Name, DefaultValue, Description) \
+ RegisterFlag(parser, #Name, Description, &f->Name);
+#include "ubsan_flags.inc"
+#undef UBSAN_FLAG
+}
+
+void InitializeFlags() {
+ SetCommonFlagsDefaults();
+ {
+ CommonFlags cf;
+ cf.CopyFrom(*common_flags());
+ cf.print_summary = false;
+ cf.external_symbolizer_path = GetFlag("UBSAN_SYMBOLIZER_PATH");
+ OverrideCommonFlags(cf);
+ }
+
+ Flags *f = flags();
+ f->SetDefaults();
+
+ FlagParser parser;
+ RegisterCommonFlags(&parser);
+ RegisterUbsanFlags(&parser, f);
+
+ // Override from user-specified string.
+ parser.ParseString(MaybeCallUbsanDefaultOptions());
+ // Override from environment variable.
+ parser.ParseStringFromEnv("UBSAN_OPTIONS");
+ InitializeCommonFlags();
+ if (Verbosity()) ReportUnrecognizedFlags();
+
+ if (common_flags()->help) parser.PrintFlagDescriptions();
+}
+
+} // namespace __ubsan
+
+SANITIZER_INTERFACE_WEAK_DEF(const char *, __ubsan_default_options, void) {
+ return "";
+}
+
+#endif // CAN_SANITIZE_UB
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_flags.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_flags.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_flags.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_flags.h (revision 351984)
@@ -0,0 +1,48 @@
+//===-- ubsan_flags.h -------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Runtime flags for UndefinedBehaviorSanitizer.
+//
+//===----------------------------------------------------------------------===//
+#ifndef UBSAN_FLAGS_H
+#define UBSAN_FLAGS_H
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+
+namespace __sanitizer {
+class FlagParser;
+}
+
+namespace __ubsan {
+
+struct Flags {
+#define UBSAN_FLAG(Type, Name, DefaultValue, Description) Type Name;
+#include "ubsan_flags.inc"
+#undef UBSAN_FLAG
+
+ void SetDefaults();
+};
+
+extern Flags ubsan_flags;
+inline Flags *flags() { return &ubsan_flags; }
+
+void InitializeFlags();
+void RegisterUbsanFlags(FlagParser *parser, Flags *f);
+
+const char *MaybeCallUbsanDefaultOptions();
+
+} // namespace __ubsan
+
+extern "C" {
+// Users may provide their own implementation of __ubsan_default_options to
+// override the default flag values.
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+const char *__ubsan_default_options();
+} // extern "C"
+
+#endif // UBSAN_FLAGS_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_flags.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
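
As the comment above says, a program can bake in its own defaults by defining __ubsan_default_options; InitializeFlags() in ubsan_flags.cc parses that string before UBSAN_OPTIONS, so the environment still wins. A small sketch (the flag names come from ubsan_flags.inc below):

    // Link this into any binary built with -fsanitize=undefined.
    extern "C" const char *__ubsan_default_options() {
      return "print_stacktrace=1:report_error_type=1";
    }

    int main(int argc, char **) {
      int x = 0x7fffffff;
      return x + argc;  // signed overflow when argc >= 1: UBSan reports it
    }

Running such a binary with UBSAN_OPTIONS=print_stacktrace=0 would still override the baked-in value, since the environment is parsed last.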
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_flags.inc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_flags.inc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_flags.inc (revision 351984)
@@ -0,0 +1,28 @@
+//===-- ubsan_flags.inc -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// UBSan runtime flags.
+//
+//===----------------------------------------------------------------------===//
+#ifndef UBSAN_FLAG
+# error "Define UBSAN_FLAG prior to including this file!"
+#endif
+
+// UBSAN_FLAG(Type, Name, DefaultValue, Description)
+// See COMMON_FLAG in sanitizer_flags.inc for more details.
+
+UBSAN_FLAG(bool, halt_on_error, false,
+ "Crash the program after printing the first error report")
+UBSAN_FLAG(bool, print_stacktrace, false,
+ "Include full stacktrace into an error report")
+UBSAN_FLAG(const char *, suppressions, "", "Suppressions file name.")
+UBSAN_FLAG(bool, report_error_type, false,
+ "Print specific error type instead of 'undefined-behavior' in summary.")
+UBSAN_FLAG(bool, silence_unsigned_overflow, false,
+ "Do not print non-fatal error reports for unsigned integer overflow. "
+ "Used to provide fuzzing signal without blowing up logs.")
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_flags.inc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_handlers.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_handlers.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_handlers.cc (revision 351984)
@@ -0,0 +1,824 @@
+//===-- ubsan_handlers.cc -------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Error logging entry points for the UBSan runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ubsan_platform.h"
+#if CAN_SANITIZE_UB
+#include "ubsan_handlers.h"
+#include "ubsan_diag.h"
+#include "ubsan_flags.h"
+#include "ubsan_monitor.h"
+
+#include "sanitizer_common/sanitizer_common.h"
+
+using namespace __sanitizer;
+using namespace __ubsan;
+
+namespace __ubsan {
+bool ignoreReport(SourceLocation SLoc, ReportOptions Opts, ErrorType ET) {
+ // We are not allowed to skip the error report: if we are in an
+ // unrecoverable handler, we have to terminate the program right now, and
+ // therefore have to print some diagnostic.
+ //
+ // Even if the source location is disabled, it doesn't mean that we have
+ // already reported an error to the user: some concurrently running thread
+ // could have acquired it, but not yet printed the report.
+ if (Opts.FromUnrecoverableHandler)
+ return false;
+ return SLoc.isDisabled() || IsPCSuppressed(ET, Opts.pc, SLoc.getFilename());
+}
+
+const char *TypeCheckKinds[] = {
+ "load of", "store to", "reference binding to", "member access within",
+ "member call on", "constructor call on", "downcast of", "downcast of",
+ "upcast of", "cast to virtual base of", "_Nonnull binding to",
+ "dynamic operation on"};
+}
+
+static void handleTypeMismatchImpl(TypeMismatchData *Data, ValueHandle Pointer,
+ ReportOptions Opts) {
+ Location Loc = Data->Loc.acquire();
+
+ uptr Alignment = (uptr)1 << Data->LogAlignment;
+ ErrorType ET;
+ if (!Pointer)
+ ET = ErrorType::NullPointerUse;
+ else if (Pointer & (Alignment - 1))
+ ET = ErrorType::MisalignedPointerUse;
+ else
+ ET = ErrorType::InsufficientObjectSize;
+
+ // Use the SourceLocation from Data to track deduplication, even if it's
+ // invalid.
+ if (ignoreReport(Loc.getSourceLocation(), Opts, ET))
+ return;
+
+ SymbolizedStackHolder FallbackLoc;
+ if (Data->Loc.isInvalid()) {
+ FallbackLoc.reset(getCallerLocation(Opts.pc));
+ Loc = FallbackLoc;
+ }
+
+ ScopedReport R(Opts, Loc, ET);
+
+ switch (ET) {
+ case ErrorType::NullPointerUse:
+ Diag(Loc, DL_Error, ET, "%0 null pointer of type %1")
+ << TypeCheckKinds[Data->TypeCheckKind] << Data->Type;
+ break;
+ case ErrorType::MisalignedPointerUse:
+ Diag(Loc, DL_Error, ET, "%0 misaligned address %1 for type %3, "
+ "which requires %2 byte alignment")
+ << TypeCheckKinds[Data->TypeCheckKind] << (void *)Pointer << Alignment
+ << Data->Type;
+ break;
+ case ErrorType::InsufficientObjectSize:
+ Diag(Loc, DL_Error, ET, "%0 address %1 with insufficient space "
+ "for an object of type %2")
+ << TypeCheckKinds[Data->TypeCheckKind] << (void *)Pointer << Data->Type;
+ break;
+ default:
+ UNREACHABLE("unexpected error type!");
+ }
+
+ if (Pointer)
+ Diag(Pointer, DL_Note, ET, "pointer points here");
+}
+
+void __ubsan::__ubsan_handle_type_mismatch_v1(TypeMismatchData *Data,
+ ValueHandle Pointer) {
+ GET_REPORT_OPTIONS(false);
+ handleTypeMismatchImpl(Data, Pointer, Opts);
+}
+void __ubsan::__ubsan_handle_type_mismatch_v1_abort(TypeMismatchData *Data,
+ ValueHandle Pointer) {
+ GET_REPORT_OPTIONS(true);
+ handleTypeMismatchImpl(Data, Pointer, Opts);
+ Die();
+}
+
+static void handleAlignmentAssumptionImpl(AlignmentAssumptionData *Data,
+ ValueHandle Pointer,
+ ValueHandle Alignment,
+ ValueHandle Offset,
+ ReportOptions Opts) {
+ Location Loc = Data->Loc.acquire();
+ SourceLocation AssumptionLoc = Data->AssumptionLoc.acquire();
+
+ ErrorType ET = ErrorType::AlignmentAssumption;
+
+ if (ignoreReport(Loc.getSourceLocation(), Opts, ET))
+ return;
+
+ ScopedReport R(Opts, Loc, ET);
+
+ uptr RealPointer = Pointer - Offset;
+ uptr LSB = LeastSignificantSetBitIndex(RealPointer);
+ uptr ActualAlignment = uptr(1) << LSB;
+
+ uptr Mask = Alignment - 1;
+ uptr MisAlignmentOffset = RealPointer & Mask;
+
+ if (!Offset) {
+ Diag(Loc, DL_Error, ET,
+ "assumption of %0 byte alignment for pointer of type %1 failed")
+ << Alignment << Data->Type;
+ } else {
+ Diag(Loc, DL_Error, ET,
+ "assumption of %0 byte alignment (with offset of %1 byte) for pointer "
+ "of type %2 failed")
+ << Alignment << Offset << Data->Type;
+ }
+
+ if (!AssumptionLoc.isInvalid())
+ Diag(AssumptionLoc, DL_Note, ET, "alignment assumption was specified here");
+
+ Diag(RealPointer, DL_Note, ET,
+ "%0address is %1 aligned, misalignment offset is %2 bytes")
+ << (Offset ? "offset " : "") << ActualAlignment << MisAlignmentOffset;
+}
+
+void __ubsan::__ubsan_handle_alignment_assumption(AlignmentAssumptionData *Data,
+ ValueHandle Pointer,
+ ValueHandle Alignment,
+ ValueHandle Offset) {
+ GET_REPORT_OPTIONS(false);
+ handleAlignmentAssumptionImpl(Data, Pointer, Alignment, Offset, Opts);
+}
+void __ubsan::__ubsan_handle_alignment_assumption_abort(
+ AlignmentAssumptionData *Data, ValueHandle Pointer, ValueHandle Alignment,
+ ValueHandle Offset) {
+ GET_REPORT_OPTIONS(true);
+ handleAlignmentAssumptionImpl(Data, Pointer, Alignment, Offset, Opts);
+ Die();
+}
+
+/// \brief Common diagnostic emission for various forms of integer overflow.
+template <typename T>
+static void handleIntegerOverflowImpl(OverflowData *Data, ValueHandle LHS,
+ const char *Operator, T RHS,
+ ReportOptions Opts) {
+ SourceLocation Loc = Data->Loc.acquire();
+ bool IsSigned = Data->Type.isSignedIntegerTy();
+ ErrorType ET = IsSigned ? ErrorType::SignedIntegerOverflow
+ : ErrorType::UnsignedIntegerOverflow;
+
+ if (ignoreReport(Loc, Opts, ET))
+ return;
+
+ // If this is an unsigned overflow in non-fatal mode, potentially ignore it.
+ if (!IsSigned && !Opts.FromUnrecoverableHandler &&
+ flags()->silence_unsigned_overflow)
+ return;
+
+ ScopedReport R(Opts, Loc, ET);
+
+ Diag(Loc, DL_Error, ET, "%0 integer overflow: "
+ "%1 %2 %3 cannot be represented in type %4")
+ << (IsSigned ? "signed" : "unsigned") << Value(Data->Type, LHS)
+ << Operator << RHS << Data->Type;
+}
+
+#define UBSAN_OVERFLOW_HANDLER(handler_name, op, unrecoverable) \
+ void __ubsan::handler_name(OverflowData *Data, ValueHandle LHS, \
+ ValueHandle RHS) { \
+ GET_REPORT_OPTIONS(unrecoverable); \
+ handleIntegerOverflowImpl(Data, LHS, op, Value(Data->Type, RHS), Opts); \
+ if (unrecoverable) \
+ Die(); \
+ }
+
+UBSAN_OVERFLOW_HANDLER(__ubsan_handle_add_overflow, "+", false)
+UBSAN_OVERFLOW_HANDLER(__ubsan_handle_add_overflow_abort, "+", true)
+UBSAN_OVERFLOW_HANDLER(__ubsan_handle_sub_overflow, "-", false)
+UBSAN_OVERFLOW_HANDLER(__ubsan_handle_sub_overflow_abort, "-", true)
+UBSAN_OVERFLOW_HANDLER(__ubsan_handle_mul_overflow, "*", false)
+UBSAN_OVERFLOW_HANDLER(__ubsan_handle_mul_overflow_abort, "*", true)
+
+static void handleNegateOverflowImpl(OverflowData *Data, ValueHandle OldVal,
+ ReportOptions Opts) {
+ SourceLocation Loc = Data->Loc.acquire();
+ bool IsSigned = Data->Type.isSignedIntegerTy();
+ ErrorType ET = IsSigned ? ErrorType::SignedIntegerOverflow
+ : ErrorType::UnsignedIntegerOverflow;
+
+ if (ignoreReport(Loc, Opts, ET))
+ return;
+
+ if (!IsSigned && flags()->silence_unsigned_overflow)
+ return;
+
+ ScopedReport R(Opts, Loc, ET);
+
+ if (IsSigned)
+ Diag(Loc, DL_Error, ET,
+ "negation of %0 cannot be represented in type %1; "
+ "cast to an unsigned type to negate this value to itself")
+ << Value(Data->Type, OldVal) << Data->Type;
+ else
+ Diag(Loc, DL_Error, ET, "negation of %0 cannot be represented in type %1")
+ << Value(Data->Type, OldVal) << Data->Type;
+}
+
+void __ubsan::__ubsan_handle_negate_overflow(OverflowData *Data,
+ ValueHandle OldVal) {
+ GET_REPORT_OPTIONS(false);
+ handleNegateOverflowImpl(Data, OldVal, Opts);
+}
+void __ubsan::__ubsan_handle_negate_overflow_abort(OverflowData *Data,
+ ValueHandle OldVal) {
+ GET_REPORT_OPTIONS(true);
+ handleNegateOverflowImpl(Data, OldVal, Opts);
+ Die();
+}
+
+static void handleDivremOverflowImpl(OverflowData *Data, ValueHandle LHS,
+ ValueHandle RHS, ReportOptions Opts) {
+ SourceLocation Loc = Data->Loc.acquire();
+ Value LHSVal(Data->Type, LHS);
+ Value RHSVal(Data->Type, RHS);
+
+ ErrorType ET;
+ if (RHSVal.isMinusOne())
+ ET = ErrorType::SignedIntegerOverflow;
+ else if (Data->Type.isIntegerTy())
+ ET = ErrorType::IntegerDivideByZero;
+ else
+ ET = ErrorType::FloatDivideByZero;
+
+ if (ignoreReport(Loc, Opts, ET))
+ return;
+
+ ScopedReport R(Opts, Loc, ET);
+
+ switch (ET) {
+ case ErrorType::SignedIntegerOverflow:
+ Diag(Loc, DL_Error, ET,
+ "division of %0 by -1 cannot be represented in type %1")
+ << LHSVal << Data->Type;
+ break;
+ default:
+ Diag(Loc, DL_Error, ET, "division by zero");
+ break;
+ }
+}
+
+void __ubsan::__ubsan_handle_divrem_overflow(OverflowData *Data,
+ ValueHandle LHS, ValueHandle RHS) {
+ GET_REPORT_OPTIONS(false);
+ handleDivremOverflowImpl(Data, LHS, RHS, Opts);
+}
+void __ubsan::__ubsan_handle_divrem_overflow_abort(OverflowData *Data,
+ ValueHandle LHS,
+ ValueHandle RHS) {
+ GET_REPORT_OPTIONS(true);
+ handleDivremOverflowImpl(Data, LHS, RHS, Opts);
+ Die();
+}
+
+static void handleShiftOutOfBoundsImpl(ShiftOutOfBoundsData *Data,
+ ValueHandle LHS, ValueHandle RHS,
+ ReportOptions Opts) {
+ SourceLocation Loc = Data->Loc.acquire();
+ Value LHSVal(Data->LHSType, LHS);
+ Value RHSVal(Data->RHSType, RHS);
+
+ ErrorType ET;
+ if (RHSVal.isNegative() ||
+ RHSVal.getPositiveIntValue() >= Data->LHSType.getIntegerBitWidth())
+ ET = ErrorType::InvalidShiftExponent;
+ else
+ ET = ErrorType::InvalidShiftBase;
+
+ if (ignoreReport(Loc, Opts, ET))
+ return;
+
+ ScopedReport R(Opts, Loc, ET);
+
+ if (ET == ErrorType::InvalidShiftExponent) {
+ if (RHSVal.isNegative())
+ Diag(Loc, DL_Error, ET, "shift exponent %0 is negative") << RHSVal;
+ else
+ Diag(Loc, DL_Error, ET,
+ "shift exponent %0 is too large for %1-bit type %2")
+ << RHSVal << Data->LHSType.getIntegerBitWidth() << Data->LHSType;
+ } else {
+ if (LHSVal.isNegative())
+ Diag(Loc, DL_Error, ET, "left shift of negative value %0") << LHSVal;
+ else
+ Diag(Loc, DL_Error, ET,
+ "left shift of %0 by %1 places cannot be represented in type %2")
+ << LHSVal << RHSVal << Data->LHSType;
+ }
+}
+
+void __ubsan::__ubsan_handle_shift_out_of_bounds(ShiftOutOfBoundsData *Data,
+ ValueHandle LHS,
+ ValueHandle RHS) {
+ GET_REPORT_OPTIONS(false);
+ handleShiftOutOfBoundsImpl(Data, LHS, RHS, Opts);
+}
+void __ubsan::__ubsan_handle_shift_out_of_bounds_abort(
+ ShiftOutOfBoundsData *Data,
+ ValueHandle LHS,
+ ValueHandle RHS) {
+ GET_REPORT_OPTIONS(true);
+ handleShiftOutOfBoundsImpl(Data, LHS, RHS, Opts);
+ Die();
+}
+
+static void handleOutOfBoundsImpl(OutOfBoundsData *Data, ValueHandle Index,
+ ReportOptions Opts) {
+ SourceLocation Loc = Data->Loc.acquire();
+ ErrorType ET = ErrorType::OutOfBoundsIndex;
+
+ if (ignoreReport(Loc, Opts, ET))
+ return;
+
+ ScopedReport R(Opts, Loc, ET);
+
+ Value IndexVal(Data->IndexType, Index);
+ Diag(Loc, DL_Error, ET, "index %0 out of bounds for type %1")
+ << IndexVal << Data->ArrayType;
+}
+
+void __ubsan::__ubsan_handle_out_of_bounds(OutOfBoundsData *Data,
+ ValueHandle Index) {
+ GET_REPORT_OPTIONS(false);
+ handleOutOfBoundsImpl(Data, Index, Opts);
+}
+void __ubsan::__ubsan_handle_out_of_bounds_abort(OutOfBoundsData *Data,
+ ValueHandle Index) {
+ GET_REPORT_OPTIONS(true);
+ handleOutOfBoundsImpl(Data, Index, Opts);
+ Die();
+}
+
+static void handleBuiltinUnreachableImpl(UnreachableData *Data,
+ ReportOptions Opts) {
+ ErrorType ET = ErrorType::UnreachableCall;
+ ScopedReport R(Opts, Data->Loc, ET);
+ Diag(Data->Loc, DL_Error, ET,
+ "execution reached an unreachable program point");
+}
+
+void __ubsan::__ubsan_handle_builtin_unreachable(UnreachableData *Data) {
+ GET_REPORT_OPTIONS(true);
+ handleBuiltinUnreachableImpl(Data, Opts);
+ Die();
+}
+
+static void handleMissingReturnImpl(UnreachableData *Data, ReportOptions Opts) {
+ ErrorType ET = ErrorType::MissingReturn;
+ ScopedReport R(Opts, Data->Loc, ET);
+ Diag(Data->Loc, DL_Error, ET,
+ "execution reached the end of a value-returning function "
+ "without returning a value");
+}
+
+void __ubsan::__ubsan_handle_missing_return(UnreachableData *Data) {
+ GET_REPORT_OPTIONS(true);
+ handleMissingReturnImpl(Data, Opts);
+ Die();
+}
+
+static void handleVLABoundNotPositive(VLABoundData *Data, ValueHandle Bound,
+ ReportOptions Opts) {
+ SourceLocation Loc = Data->Loc.acquire();
+ ErrorType ET = ErrorType::NonPositiveVLAIndex;
+
+ if (ignoreReport(Loc, Opts, ET))
+ return;
+
+ ScopedReport R(Opts, Loc, ET);
+
+ Diag(Loc, DL_Error, ET, "variable length array bound evaluates to "
+ "non-positive value %0")
+ << Value(Data->Type, Bound);
+}
+
+void __ubsan::__ubsan_handle_vla_bound_not_positive(VLABoundData *Data,
+ ValueHandle Bound) {
+ GET_REPORT_OPTIONS(false);
+ handleVLABoundNotPositive(Data, Bound, Opts);
+}
+void __ubsan::__ubsan_handle_vla_bound_not_positive_abort(VLABoundData *Data,
+ ValueHandle Bound) {
+ GET_REPORT_OPTIONS(true);
+ handleVLABoundNotPositive(Data, Bound, Opts);
+ Die();
+}
+
+static bool looksLikeFloatCastOverflowDataV1(void *Data) {
+ // The first field is either a pointer to a filename or a pointer to a
+ // TypeDescriptor.
+ u8 *FilenameOrTypeDescriptor;
+ internal_memcpy(&FilenameOrTypeDescriptor, Data,
+ sizeof(FilenameOrTypeDescriptor));
+
+ // Heuristic: For float_cast_overflow, the TypeKind will be either TK_Integer
+ // (0x0), TK_Float (0x1) or TK_Unknown (0xff). If both types are known, the
+ // sum of the first two bytes will be 0 or 1 (for BE or LE). If this were a
+ // filename, the sum of two printable characters would never be that small.
+ // Otherwise, if one of the bytes is 0xff, this is most likely a TK_Unknown
+ // type descriptor.
+ u16 MaybeFromTypeKind =
+ FilenameOrTypeDescriptor[0] + FilenameOrTypeDescriptor[1];
+ return MaybeFromTypeKind < 2 || FilenameOrTypeDescriptor[0] == 0xff ||
+ FilenameOrTypeDescriptor[1] == 0xff;
+}
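
To see the heuristic's byte arithmetic concretely: a TypeDescriptor begins with a u16 TypeKind (TK_Integer = 0x0000, TK_Float = 0x0001, TK_Unknown = 0xffff, per ubsan_value.h, which is not part of this diff), so the first two bytes sum to 0 or 1 in either byte order, or one of them is 0xff; two printable filename characters each have a value of at least 0x20, so their sum is at least 0x40 and neither byte is 0xff:

    #include <cstdio>

    int main() {
      unsigned char FloatKind[2] = {0x01, 0x00};  // little-endian TK_Float
      unsigned char Filename[2] = {'/', 'u'};     // start of a path string
      std::printf("%d %d\n", FloatKind[0] + FloatKind[1],
                  Filename[0] + Filename[1]);  // prints: 1 164
    }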
+
+static void handleFloatCastOverflow(void *DataPtr, ValueHandle From,
+ ReportOptions Opts) {
+ SymbolizedStackHolder CallerLoc;
+ Location Loc;
+ const TypeDescriptor *FromType, *ToType;
+ ErrorType ET = ErrorType::FloatCastOverflow;
+
+ if (looksLikeFloatCastOverflowDataV1(DataPtr)) {
+ auto Data = reinterpret_cast<FloatCastOverflowData *>(DataPtr);
+ CallerLoc.reset(getCallerLocation(Opts.pc));
+ Loc = CallerLoc;
+ FromType = &Data->FromType;
+ ToType = &Data->ToType;
+ } else {
+ auto Data = reinterpret_cast<FloatCastOverflowDataV2 *>(DataPtr);
+ SourceLocation SLoc = Data->Loc.acquire();
+ if (ignoreReport(SLoc, Opts, ET))
+ return;
+ Loc = SLoc;
+ FromType = &Data->FromType;
+ ToType = &Data->ToType;
+ }
+
+ ScopedReport R(Opts, Loc, ET);
+
+ Diag(Loc, DL_Error, ET,
+ "%0 is outside the range of representable values of type %2")
+ << Value(*FromType, From) << *FromType << *ToType;
+}
+
+void __ubsan::__ubsan_handle_float_cast_overflow(void *Data, ValueHandle From) {
+ GET_REPORT_OPTIONS(false);
+ handleFloatCastOverflow(Data, From, Opts);
+}
+void __ubsan::__ubsan_handle_float_cast_overflow_abort(void *Data,
+ ValueHandle From) {
+ GET_REPORT_OPTIONS(true);
+ handleFloatCastOverflow(Data, From, Opts);
+ Die();
+}
+
+static void handleLoadInvalidValue(InvalidValueData *Data, ValueHandle Val,
+ ReportOptions Opts) {
+ SourceLocation Loc = Data->Loc.acquire();
+ // This check could be more precise if we used different handlers for
+ // -fsanitize=bool and -fsanitize=enum.
+ bool IsBool = (0 == internal_strcmp(Data->Type.getTypeName(), "'bool'")) ||
+ (0 == internal_strncmp(Data->Type.getTypeName(), "'BOOL'", 6));
+ ErrorType ET =
+ IsBool ? ErrorType::InvalidBoolLoad : ErrorType::InvalidEnumLoad;
+
+ if (ignoreReport(Loc, Opts, ET))
+ return;
+
+ ScopedReport R(Opts, Loc, ET);
+
+ Diag(Loc, DL_Error, ET,
+ "load of value %0, which is not a valid value for type %1")
+ << Value(Data->Type, Val) << Data->Type;
+}
+
+void __ubsan::__ubsan_handle_load_invalid_value(InvalidValueData *Data,
+ ValueHandle Val) {
+ GET_REPORT_OPTIONS(false);
+ handleLoadInvalidValue(Data, Val, Opts);
+}
+void __ubsan::__ubsan_handle_load_invalid_value_abort(InvalidValueData *Data,
+ ValueHandle Val) {
+ GET_REPORT_OPTIONS(true);
+ handleLoadInvalidValue(Data, Val, Opts);
+ Die();
+}
+
+static void handleImplicitConversion(ImplicitConversionData *Data,
+ ReportOptions Opts, ValueHandle Src,
+ ValueHandle Dst) {
+ SourceLocation Loc = Data->Loc.acquire();
+ ErrorType ET = ErrorType::GenericUB;
+
+ const TypeDescriptor &SrcTy = Data->FromType;
+ const TypeDescriptor &DstTy = Data->ToType;
+
+ bool SrcSigned = SrcTy.isSignedIntegerTy();
+ bool DstSigned = DstTy.isSignedIntegerTy();
+
+ switch (Data->Kind) {
+ case ICCK_IntegerTruncation: { // Legacy, no longer used.
+ // Let's figure out what it should be as per the new types, and upgrade.
+ // If both types are unsigned, then it's an unsigned truncation.
+ // Else, it is a signed truncation.
+ if (!SrcSigned && !DstSigned) {
+ ET = ErrorType::ImplicitUnsignedIntegerTruncation;
+ } else {
+ ET = ErrorType::ImplicitSignedIntegerTruncation;
+ }
+ break;
+ }
+ case ICCK_UnsignedIntegerTruncation:
+ ET = ErrorType::ImplicitUnsignedIntegerTruncation;
+ break;
+ case ICCK_SignedIntegerTruncation:
+ ET = ErrorType::ImplicitSignedIntegerTruncation;
+ break;
+ case ICCK_IntegerSignChange:
+ ET = ErrorType::ImplicitIntegerSignChange;
+ break;
+ case ICCK_SignedIntegerTruncationOrSignChange:
+ ET = ErrorType::ImplicitSignedIntegerTruncationOrSignChange;
+ break;
+ }
+
+ if (ignoreReport(Loc, Opts, ET))
+ return;
+
+ ScopedReport R(Opts, Loc, ET);
+
+ // FIXME: is it possible to dump the values as hex with fixed width?
+
+ Diag(Loc, DL_Error, ET,
+ "implicit conversion from type %0 of value %1 (%2-bit, %3signed) to "
+ "type %4 changed the value to %5 (%6-bit, %7signed)")
+ << SrcTy << Value(SrcTy, Src) << SrcTy.getIntegerBitWidth()
+ << (SrcSigned ? "" : "un") << DstTy << Value(DstTy, Dst)
+ << DstTy.getIntegerBitWidth() << (DstSigned ? "" : "un");
+}
+
+void __ubsan::__ubsan_handle_implicit_conversion(ImplicitConversionData *Data,
+ ValueHandle Src,
+ ValueHandle Dst) {
+ GET_REPORT_OPTIONS(false);
+ handleImplicitConversion(Data, Opts, Src, Dst);
+}
+void __ubsan::__ubsan_handle_implicit_conversion_abort(
+ ImplicitConversionData *Data, ValueHandle Src, ValueHandle Dst) {
+ GET_REPORT_OPTIONS(true);
+ handleImplicitConversion(Data, Opts, Src, Dst);
+ Die();
+}
+
+static void handleInvalidBuiltin(InvalidBuiltinData *Data, ReportOptions Opts) {
+ SourceLocation Loc = Data->Loc.acquire();
+ ErrorType ET = ErrorType::InvalidBuiltin;
+
+ if (ignoreReport(Loc, Opts, ET))
+ return;
+
+ ScopedReport R(Opts, Loc, ET);
+
+ Diag(Loc, DL_Error, ET,
+ "passing zero to %0, which is not a valid argument")
+ << ((Data->Kind == BCK_CTZPassedZero) ? "ctz()" : "clz()");
+}
+
+void __ubsan::__ubsan_handle_invalid_builtin(InvalidBuiltinData *Data) {
+ GET_REPORT_OPTIONS(true);
+ handleInvalidBuiltin(Data, Opts);
+}
+void __ubsan::__ubsan_handle_invalid_builtin_abort(InvalidBuiltinData *Data) {
+ GET_REPORT_OPTIONS(true);
+ handleInvalidBuiltin(Data, Opts);
+ Die();
+}
+
+static void handleNonNullReturn(NonNullReturnData *Data, SourceLocation *LocPtr,
+ ReportOptions Opts, bool IsAttr) {
+ if (!LocPtr)
+ UNREACHABLE("source location pointer is null!");
+
+ SourceLocation Loc = LocPtr->acquire();
+ ErrorType ET = ErrorType::InvalidNullReturn;
+
+ if (ignoreReport(Loc, Opts, ET))
+ return;
+
+ ScopedReport R(Opts, Loc, ET);
+
+ Diag(Loc, DL_Error, ET,
+ "null pointer returned from function declared to never return null");
+ if (!Data->AttrLoc.isInvalid())
+ Diag(Data->AttrLoc, DL_Note, ET, "%0 specified here")
+ << (IsAttr ? "returns_nonnull attribute"
+ : "_Nonnull return type annotation");
+}
+
+void __ubsan::__ubsan_handle_nonnull_return_v1(NonNullReturnData *Data,
+ SourceLocation *LocPtr) {
+ GET_REPORT_OPTIONS(false);
+ handleNonNullReturn(Data, LocPtr, Opts, true);
+}
+
+void __ubsan::__ubsan_handle_nonnull_return_v1_abort(NonNullReturnData *Data,
+ SourceLocation *LocPtr) {
+ GET_REPORT_OPTIONS(true);
+ handleNonNullReturn(Data, LocPtr, Opts, true);
+ Die();
+}
+
+void __ubsan::__ubsan_handle_nullability_return_v1(NonNullReturnData *Data,
+ SourceLocation *LocPtr) {
+ GET_REPORT_OPTIONS(false);
+ handleNonNullReturn(Data, LocPtr, Opts, false);
+}
+
+void __ubsan::__ubsan_handle_nullability_return_v1_abort(
+ NonNullReturnData *Data, SourceLocation *LocPtr) {
+ GET_REPORT_OPTIONS(true);
+ handleNonNullReturn(Data, LocPtr, Opts, false);
+ Die();
+}
+
+static void handleNonNullArg(NonNullArgData *Data, ReportOptions Opts,
+ bool IsAttr) {
+ SourceLocation Loc = Data->Loc.acquire();
+ ErrorType ET = ErrorType::InvalidNullArgument;
+
+ if (ignoreReport(Loc, Opts, ET))
+ return;
+
+ ScopedReport R(Opts, Loc, ET);
+
+ Diag(Loc, DL_Error, ET,
+ "null pointer passed as argument %0, which is declared to "
+ "never be null")
+ << Data->ArgIndex;
+ if (!Data->AttrLoc.isInvalid())
+ Diag(Data->AttrLoc, DL_Note, ET, "%0 specified here")
+ << (IsAttr ? "nonnull attribute" : "_Nonnull type annotation");
+}
+
+void __ubsan::__ubsan_handle_nonnull_arg(NonNullArgData *Data) {
+ GET_REPORT_OPTIONS(false);
+ handleNonNullArg(Data, Opts, true);
+}
+
+void __ubsan::__ubsan_handle_nonnull_arg_abort(NonNullArgData *Data) {
+ GET_REPORT_OPTIONS(true);
+ handleNonNullArg(Data, Opts, true);
+ Die();
+}
+
+void __ubsan::__ubsan_handle_nullability_arg(NonNullArgData *Data) {
+ GET_REPORT_OPTIONS(false);
+ handleNonNullArg(Data, Opts, false);
+}
+
+void __ubsan::__ubsan_handle_nullability_arg_abort(NonNullArgData *Data) {
+ GET_REPORT_OPTIONS(true);
+ handleNonNullArg(Data, Opts, false);
+ Die();
+}
+
+static void handlePointerOverflowImpl(PointerOverflowData *Data,
+ ValueHandle Base,
+ ValueHandle Result,
+ ReportOptions Opts) {
+ SourceLocation Loc = Data->Loc.acquire();
+ ErrorType ET = ErrorType::PointerOverflow;
+
+ if (ignoreReport(Loc, Opts, ET))
+ return;
+
+ ScopedReport R(Opts, Loc, ET);
+
+ if ((sptr(Base) >= 0) == (sptr(Result) >= 0)) {
+ if (Base > Result)
+ Diag(Loc, DL_Error, ET,
+ "addition of unsigned offset to %0 overflowed to %1")
+ << (void *)Base << (void *)Result;
+ else
+ Diag(Loc, DL_Error, ET,
+ "subtraction of unsigned offset from %0 overflowed to %1")
+ << (void *)Base << (void *)Result;
+ } else {
+ Diag(Loc, DL_Error, ET,
+ "pointer index expression with base %0 overflowed to %1")
+ << (void *)Base << (void *)Result;
+ }
+}
+
+void __ubsan::__ubsan_handle_pointer_overflow(PointerOverflowData *Data,
+ ValueHandle Base,
+ ValueHandle Result) {
+ GET_REPORT_OPTIONS(false);
+ handlePointerOverflowImpl(Data, Base, Result, Opts);
+}
+
+void __ubsan::__ubsan_handle_pointer_overflow_abort(PointerOverflowData *Data,
+ ValueHandle Base,
+ ValueHandle Result) {
+ GET_REPORT_OPTIONS(true);
+ handlePointerOverflowImpl(Data, Base, Result, Opts);
+ Die();
+}
+
+static void handleCFIBadIcall(CFICheckFailData *Data, ValueHandle Function,
+ ReportOptions Opts) {
+ if (Data->CheckKind != CFITCK_ICall && Data->CheckKind != CFITCK_NVMFCall)
+ Die();
+
+ SourceLocation Loc = Data->Loc.acquire();
+ ErrorType ET = ErrorType::CFIBadType;
+
+ if (ignoreReport(Loc, Opts, ET))
+ return;
+
+ ScopedReport R(Opts, Loc, ET);
+
+ const char *CheckKindStr = Data->CheckKind == CFITCK_NVMFCall
+ ? "non-virtual pointer to member function call"
+ : "indirect function call";
+ Diag(Loc, DL_Error, ET,
+ "control flow integrity check for type %0 failed during %1")
+ << Data->Type << CheckKindStr;
+
+ SymbolizedStackHolder FLoc(getSymbolizedLocation(Function));
+ const char *FName = FLoc.get()->info.function;
+ if (!FName)
+ FName = "(unknown)";
+ Diag(FLoc, DL_Note, ET, "%0 defined here") << FName;
+
+ // If the failure involved different DSOs for the check location and icall
+ // target, report the DSO names.
+ const char *DstModule = FLoc.get()->info.module;
+ if (!DstModule)
+ DstModule = "(unknown)";
+
+ const char *SrcModule = Symbolizer::GetOrInit()->GetModuleNameForPc(Opts.pc);
+ if (!SrcModule)
+ SrcModule = "(unknown)";
+
+ if (internal_strcmp(SrcModule, DstModule))
+ Diag(Loc, DL_Note, ET,
+ "check failed in %0, destination function located in %1")
+ << SrcModule << DstModule;
+}
+
+namespace __ubsan {
+
+#ifdef UBSAN_CAN_USE_CXXABI
+
+#ifdef _WIN32
+
+extern "C" void __ubsan_handle_cfi_bad_type_default(CFICheckFailData *Data,
+ ValueHandle Vtable,
+ bool ValidVtable,
+ ReportOptions Opts) {
+ Die();
+}
+
+WIN_WEAK_ALIAS(__ubsan_handle_cfi_bad_type, __ubsan_handle_cfi_bad_type_default)
+#else
+SANITIZER_WEAK_ATTRIBUTE
+#endif
+void __ubsan_handle_cfi_bad_type(CFICheckFailData *Data, ValueHandle Vtable,
+ bool ValidVtable, ReportOptions Opts);
+
+#else
+void __ubsan_handle_cfi_bad_type(CFICheckFailData *Data, ValueHandle Vtable,
+ bool ValidVtable, ReportOptions Opts) {
+ Die();
+}
+#endif
+
+} // namespace __ubsan
+
+void __ubsan::__ubsan_handle_cfi_check_fail(CFICheckFailData *Data,
+ ValueHandle Value,
+ uptr ValidVtable) {
+ GET_REPORT_OPTIONS(false);
+ if (Data->CheckKind == CFITCK_ICall || Data->CheckKind == CFITCK_NVMFCall)
+ handleCFIBadIcall(Data, Value, Opts);
+ else
+ __ubsan_handle_cfi_bad_type(Data, Value, ValidVtable, Opts);
+}
+
+void __ubsan::__ubsan_handle_cfi_check_fail_abort(CFICheckFailData *Data,
+ ValueHandle Value,
+ uptr ValidVtable) {
+ GET_REPORT_OPTIONS(true);
+ if (Data->CheckKind == CFITCK_ICall || Data->CheckKind == CFITCK_NVMFCall)
+ handleCFIBadIcall(Data, Value, Opts);
+ else
+ __ubsan_handle_cfi_bad_type(Data, Value, ValidVtable, Opts);
+ Die();
+}
+
+#endif // CAN_SANITIZE_UB
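
For a concrete trigger of the implicit-conversion path above, the following minimal sketch (assuming a clang recent enough to support -fsanitize=implicit-conversion; the file name demo.cc is illustrative) makes the runtime call __ubsan_handle_implicit_conversion with ICCK_UnsignedIntegerTruncation:

// demo.cc -- build with: clang++ -fsanitize=implicit-conversion demo.cc
#include <cstdio>

int main() {
  unsigned int Big = 0x12345678;
  // Narrowing store: the instrumentation compares the value before and after
  // the conversion; they differ, so the recoverable handler fires and reports
  // the 32-bit -> 8-bit unsigned truncation (0x12345678 becomes 0x78).
  unsigned char Small = Big;
  std::printf("%u\n", (unsigned)Small);
  return 0;
}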
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_handlers.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_handlers.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_handlers.h (revision 351984)
@@ -0,0 +1,228 @@
+//===-- ubsan_handlers.h ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Entry points to the runtime library for Clang's undefined behavior sanitizer.
+//
+//===----------------------------------------------------------------------===//
+#ifndef UBSAN_HANDLERS_H
+#define UBSAN_HANDLERS_H
+
+#include "ubsan_value.h"
+
+namespace __ubsan {
+
+struct TypeMismatchData {
+ SourceLocation Loc;
+ const TypeDescriptor &Type;
+ unsigned char LogAlignment;
+ unsigned char TypeCheckKind;
+};
+
+#define UNRECOVERABLE(checkname, ...) \
+ extern "C" SANITIZER_INTERFACE_ATTRIBUTE NORETURN \
+ void __ubsan_handle_ ## checkname( __VA_ARGS__ );
+
+#define RECOVERABLE(checkname, ...) \
+ extern "C" SANITIZER_INTERFACE_ATTRIBUTE \
+ void __ubsan_handle_ ## checkname( __VA_ARGS__ ); \
+ extern "C" SANITIZER_INTERFACE_ATTRIBUTE NORETURN \
+ void __ubsan_handle_ ## checkname ## _abort( __VA_ARGS__ );
+
+/// \brief Handle a runtime type check failure, caused by either a misaligned
+/// pointer, a null pointer, or a pointer to insufficient storage for the
+/// type.
+RECOVERABLE(type_mismatch_v1, TypeMismatchData *Data, ValueHandle Pointer)
+
+struct AlignmentAssumptionData {
+ SourceLocation Loc;
+ SourceLocation AssumptionLoc;
+ const TypeDescriptor &Type;
+};
+
+/// \brief Handle a runtime alignment assumption check failure,
+/// caused by a misaligned pointer.
+RECOVERABLE(alignment_assumption, AlignmentAssumptionData *Data,
+ ValueHandle Pointer, ValueHandle Alignment, ValueHandle Offset)
+
+struct OverflowData {
+ SourceLocation Loc;
+ const TypeDescriptor &Type;
+};
+
+/// \brief Handle an integer addition overflow.
+RECOVERABLE(add_overflow, OverflowData *Data, ValueHandle LHS, ValueHandle RHS)
+
+/// \brief Handle an integer subtraction overflow.
+RECOVERABLE(sub_overflow, OverflowData *Data, ValueHandle LHS, ValueHandle RHS)
+
+/// \brief Handle an integer multiplication overflow.
+RECOVERABLE(mul_overflow, OverflowData *Data, ValueHandle LHS, ValueHandle RHS)
+
+/// \brief Handle a signed integer overflow for a unary negate operator.
+RECOVERABLE(negate_overflow, OverflowData *Data, ValueHandle OldVal)
+
+/// \brief Handle an INT_MIN/-1 overflow or division by zero.
+RECOVERABLE(divrem_overflow, OverflowData *Data,
+ ValueHandle LHS, ValueHandle RHS)
+
+struct ShiftOutOfBoundsData {
+ SourceLocation Loc;
+ const TypeDescriptor &LHSType;
+ const TypeDescriptor &RHSType;
+};
+
+/// \brief Handle a shift where the RHS is out of bounds or a left shift where
+/// the LHS is negative or overflows.
+RECOVERABLE(shift_out_of_bounds, ShiftOutOfBoundsData *Data,
+ ValueHandle LHS, ValueHandle RHS)
+
+struct OutOfBoundsData {
+ SourceLocation Loc;
+ const TypeDescriptor &ArrayType;
+ const TypeDescriptor &IndexType;
+};
+
+/// \brief Handle an array index out of bounds error.
+RECOVERABLE(out_of_bounds, OutOfBoundsData *Data, ValueHandle Index)
+
+struct UnreachableData {
+ SourceLocation Loc;
+};
+
+/// \brief Handle a __builtin_unreachable which is reached.
+UNRECOVERABLE(builtin_unreachable, UnreachableData *Data)
+/// \brief Handle reaching the end of a value-returning function.
+UNRECOVERABLE(missing_return, UnreachableData *Data)
+
+struct VLABoundData {
+ SourceLocation Loc;
+ const TypeDescriptor &Type;
+};
+
+/// \brief Handle a VLA with a non-positive bound.
+RECOVERABLE(vla_bound_not_positive, VLABoundData *Data, ValueHandle Bound)
+
+// Keeping this around for binary compatibility with (sanitized) programs
+// compiled with older compilers.
+struct FloatCastOverflowData {
+ const TypeDescriptor &FromType;
+ const TypeDescriptor &ToType;
+};
+
+struct FloatCastOverflowDataV2 {
+ SourceLocation Loc;
+ const TypeDescriptor &FromType;
+ const TypeDescriptor &ToType;
+};
+
+/// Handle overflow in a conversion to or from a floating-point type.
+/// void *Data is one of FloatCastOverflowData* or FloatCastOverflowDataV2*
+RECOVERABLE(float_cast_overflow, void *Data, ValueHandle From)
+
+struct InvalidValueData {
+ SourceLocation Loc;
+ const TypeDescriptor &Type;
+};
+
+/// \brief Handle a load of an invalid value for the type.
+RECOVERABLE(load_invalid_value, InvalidValueData *Data, ValueHandle Val)
+
+/// Known implicit conversion check kinds.
+/// Keep in sync with the enum of the same name in CGExprScalar.cpp
+enum ImplicitConversionCheckKind : unsigned char {
+ ICCK_IntegerTruncation = 0, // Legacy, was only used by clang 7.
+ ICCK_UnsignedIntegerTruncation = 1,
+ ICCK_SignedIntegerTruncation = 2,
+ ICCK_IntegerSignChange = 3,
+ ICCK_SignedIntegerTruncationOrSignChange = 4,
+};
+
+struct ImplicitConversionData {
+ SourceLocation Loc;
+ const TypeDescriptor &FromType;
+ const TypeDescriptor &ToType;
+ /* ImplicitConversionCheckKind */ unsigned char Kind;
+};
+
+/// \brief Handle an implicit conversion that changed the value.
+RECOVERABLE(implicit_conversion, ImplicitConversionData *Data, ValueHandle Src,
+ ValueHandle Dst)
+
+/// Known builtin check kinds.
+/// Keep in sync with the enum of the same name in CodeGenFunction.h
+enum BuiltinCheckKind : unsigned char {
+ BCK_CTZPassedZero,
+ BCK_CLZPassedZero,
+};
+
+struct InvalidBuiltinData {
+ SourceLocation Loc;
+ unsigned char Kind;
+};
+
+/// Handle a builtin called in an invalid way.
+RECOVERABLE(invalid_builtin, InvalidBuiltinData *Data)
+
+struct NonNullReturnData {
+ SourceLocation AttrLoc;
+};
+
+/// \brief Handle returning null from function with the returns_nonnull
+/// attribute, or a return type annotated with _Nonnull.
+RECOVERABLE(nonnull_return_v1, NonNullReturnData *Data, SourceLocation *Loc)
+RECOVERABLE(nullability_return_v1, NonNullReturnData *Data, SourceLocation *Loc)
+
+struct NonNullArgData {
+ SourceLocation Loc;
+ SourceLocation AttrLoc;
+ int ArgIndex;
+};
+
+/// \brief Handle passing null pointer to a function parameter with the nonnull
+/// attribute, or a _Nonnull type annotation.
+RECOVERABLE(nonnull_arg, NonNullArgData *Data)
+RECOVERABLE(nullability_arg, NonNullArgData *Data)
+
+struct PointerOverflowData {
+ SourceLocation Loc;
+};
+
+RECOVERABLE(pointer_overflow, PointerOverflowData *Data, ValueHandle Base,
+ ValueHandle Result)
+
+/// \brief Known CFI check kinds.
+/// Keep in sync with the enum of the same name in CodeGenFunction.h
+enum CFITypeCheckKind : unsigned char {
+ CFITCK_VCall,
+ CFITCK_NVCall,
+ CFITCK_DerivedCast,
+ CFITCK_UnrelatedCast,
+ CFITCK_ICall,
+ CFITCK_NVMFCall,
+ CFITCK_VMFCall,
+};
+
+struct CFICheckFailData {
+ CFITypeCheckKind CheckKind;
+ SourceLocation Loc;
+ const TypeDescriptor &Type;
+};
+
+/// \brief Handle control flow integrity failures.
+RECOVERABLE(cfi_check_fail, CFICheckFailData *Data, ValueHandle Function,
+ uptr VtableIsValid)
+
+struct ReportOptions;
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __ubsan_handle_cfi_bad_type(
+ CFICheckFailData *Data, ValueHandle Vtable, bool ValidVtable,
+ ReportOptions Opts);
+
+}
+
+#endif // UBSAN_HANDLERS_H
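
For orientation, RECOVERABLE is an X-macro that declares a recoverable and an aborting entry point per check; e.g. the add_overflow line above expands to (modulo whitespace):

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
void __ubsan_handle_add_overflow(OverflowData *Data, ValueHandle LHS,
                                 ValueHandle RHS);
extern "C" SANITIZER_INTERFACE_ATTRIBUTE NORETURN
void __ubsan_handle_add_overflow_abort(OverflowData *Data, ValueHandle LHS,
                                       ValueHandle RHS);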
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_handlers_cxx.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_handlers_cxx.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_handlers_cxx.cc (revision 351984)
@@ -0,0 +1,205 @@
+//===-- ubsan_handlers_cxx.cc ---------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Error logging entry points for the UBSan runtime, which are only used for C++
+// compilations. This file is permitted to use language features which require
+// linking against a C++ ABI library.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ubsan_platform.h"
+#if CAN_SANITIZE_UB
+#include "ubsan_handlers.h"
+#include "ubsan_handlers_cxx.h"
+#include "ubsan_diag.h"
+#include "ubsan_type_hash.h"
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_suppressions.h"
+
+using namespace __sanitizer;
+using namespace __ubsan;
+
+namespace __ubsan {
+ extern const char *TypeCheckKinds[];
+}
+
+// Returns true if UBSan has printed an error report.
+static bool HandleDynamicTypeCacheMiss(
+ DynamicTypeCacheMissData *Data, ValueHandle Pointer, ValueHandle Hash,
+ ReportOptions Opts) {
+ if (checkDynamicType((void*)Pointer, Data->TypeInfo, Hash))
+ // Just a cache miss. The type matches after all.
+ return false;
+
+ // Check if error report should be suppressed.
+ DynamicTypeInfo DTI = getDynamicTypeInfoFromObject((void*)Pointer);
+ if (DTI.isValid() && IsVptrCheckSuppressed(DTI.getMostDerivedTypeName()))
+ return false;
+
+ SourceLocation Loc = Data->Loc.acquire();
+ ErrorType ET = ErrorType::DynamicTypeMismatch;
+ if (ignoreReport(Loc, Opts, ET))
+ return false;
+
+ ScopedReport R(Opts, Loc, ET);
+
+ Diag(Loc, DL_Error, ET,
+ "%0 address %1 which does not point to an object of type %2")
+ << TypeCheckKinds[Data->TypeCheckKind] << (void*)Pointer << Data->Type;
+
+ // If possible, say what type it actually points to.
+ if (!DTI.isValid()) {
+ if (DTI.getOffset() < -VptrMaxOffsetToTop || DTI.getOffset() > VptrMaxOffsetToTop) {
+ Diag(Pointer, DL_Note, ET,
+ "object has a possibly invalid vptr: abs(offset to top) too big")
+ << TypeName(DTI.getMostDerivedTypeName())
+ << Range(Pointer, Pointer + sizeof(uptr), "possibly invalid vptr");
+ } else {
+ Diag(Pointer, DL_Note, ET, "object has invalid vptr")
+ << TypeName(DTI.getMostDerivedTypeName())
+ << Range(Pointer, Pointer + sizeof(uptr), "invalid vptr");
+ }
+ } else if (!DTI.getOffset())
+ Diag(Pointer, DL_Note, ET, "object is of type %0")
+ << TypeName(DTI.getMostDerivedTypeName())
+ << Range(Pointer, Pointer + sizeof(uptr), "vptr for %0");
+ else
+ // FIXME: Find the type at the specified offset, and include that
+ // in the note.
+ Diag(Pointer - DTI.getOffset(), DL_Note, ET,
+ "object is base class subobject at offset %0 within object of type %1")
+ << DTI.getOffset() << TypeName(DTI.getMostDerivedTypeName())
+ << TypeName(DTI.getSubobjectTypeName())
+ << Range(Pointer, Pointer + sizeof(uptr),
+ "vptr for %2 base class of %1");
+ return true;
+}
+
+void __ubsan::__ubsan_handle_dynamic_type_cache_miss(
+ DynamicTypeCacheMissData *Data, ValueHandle Pointer, ValueHandle Hash) {
+ GET_REPORT_OPTIONS(false);
+ HandleDynamicTypeCacheMiss(Data, Pointer, Hash, Opts);
+}
+void __ubsan::__ubsan_handle_dynamic_type_cache_miss_abort(
+ DynamicTypeCacheMissData *Data, ValueHandle Pointer, ValueHandle Hash) {
+ // Note: -fsanitize=vptr is always recoverable.
+ GET_REPORT_OPTIONS(false);
+ if (HandleDynamicTypeCacheMiss(Data, Pointer, Hash, Opts))
+ Die();
+}
+
+namespace __ubsan {
+void __ubsan_handle_cfi_bad_type(CFICheckFailData *Data, ValueHandle Vtable,
+ bool ValidVtable, ReportOptions Opts) {
+ SourceLocation Loc = Data->Loc.acquire();
+ ErrorType ET = ErrorType::CFIBadType;
+
+ if (ignoreReport(Loc, Opts, ET))
+ return;
+
+ ScopedReport R(Opts, Loc, ET);
+ DynamicTypeInfo DTI = ValidVtable
+ ? getDynamicTypeInfoFromVtable((void *)Vtable)
+ : DynamicTypeInfo(0, 0, 0);
+
+ const char *CheckKindStr;
+ switch (Data->CheckKind) {
+ case CFITCK_VCall:
+ CheckKindStr = "virtual call";
+ break;
+ case CFITCK_NVCall:
+ CheckKindStr = "non-virtual call";
+ break;
+ case CFITCK_DerivedCast:
+ CheckKindStr = "base-to-derived cast";
+ break;
+ case CFITCK_UnrelatedCast:
+ CheckKindStr = "cast to unrelated type";
+ break;
+ case CFITCK_VMFCall:
+ CheckKindStr = "virtual pointer to member function call";
+ break;
+ case CFITCK_ICall:
+ case CFITCK_NVMFCall:
+ Die();
+ }
+
+ Diag(Loc, DL_Error, ET,
+ "control flow integrity check for type %0 failed during "
+ "%1 (vtable address %2)")
+ << Data->Type << CheckKindStr << (void *)Vtable;
+
+ // If possible, say what type it actually points to.
+ if (!DTI.isValid())
+ Diag(Vtable, DL_Note, ET, "invalid vtable");
+ else
+ Diag(Vtable, DL_Note, ET, "vtable is of type %0")
+ << TypeName(DTI.getMostDerivedTypeName());
+
+ // If the failure involved different DSOs for the check location and vtable,
+ // report the DSO names.
+ const char *DstModule = Symbolizer::GetOrInit()->GetModuleNameForPc(Vtable);
+ if (!DstModule)
+ DstModule = "(unknown)";
+
+ const char *SrcModule = Symbolizer::GetOrInit()->GetModuleNameForPc(Opts.pc);
+ if (!SrcModule)
+ SrcModule = "(unknown)";
+
+ if (internal_strcmp(SrcModule, DstModule))
+ Diag(Loc, DL_Note, ET, "check failed in %0, vtable located in %1")
+ << SrcModule << DstModule;
+}
+
+static bool handleFunctionTypeMismatch(FunctionTypeMismatchData *Data,
+ ValueHandle Function,
+ ValueHandle calleeRTTI,
+ ValueHandle fnRTTI, ReportOptions Opts) {
+ if (checkTypeInfoEquality(reinterpret_cast<void *>(calleeRTTI),
+ reinterpret_cast<void *>(fnRTTI)))
+ return false;
+
+ SourceLocation CallLoc = Data->Loc.acquire();
+ ErrorType ET = ErrorType::FunctionTypeMismatch;
+
+ if (ignoreReport(CallLoc, Opts, ET))
+ return true;
+
+ ScopedReport R(Opts, CallLoc, ET);
+
+ SymbolizedStackHolder FLoc(getSymbolizedLocation(Function));
+ const char *FName = FLoc.get()->info.function;
+ if (!FName)
+ FName = "(unknown)";
+
+ Diag(CallLoc, DL_Error, ET,
+ "call to function %0 through pointer to incorrect function type %1")
+ << FName << Data->Type;
+ Diag(FLoc, DL_Note, ET, "%0 defined here") << FName;
+ return true;
+}
+
+void __ubsan_handle_function_type_mismatch_v1(FunctionTypeMismatchData *Data,
+ ValueHandle Function,
+ ValueHandle calleeRTTI,
+ ValueHandle fnRTTI) {
+ GET_REPORT_OPTIONS(false);
+ handleFunctionTypeMismatch(Data, Function, calleeRTTI, fnRTTI, Opts);
+}
+
+void __ubsan_handle_function_type_mismatch_v1_abort(
+ FunctionTypeMismatchData *Data, ValueHandle Function,
+ ValueHandle calleeRTTI, ValueHandle fnRTTI) {
+ GET_REPORT_OPTIONS(true);
+ if (handleFunctionTypeMismatch(Data, Function, calleeRTTI, fnRTTI, Opts))
+ Die();
+}
+} // namespace __ubsan
+
+#endif // CAN_SANITIZE_UB
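
As a minimal sketch of what drives HandleDynamicTypeCacheMiss (assuming clang++ with -fsanitize=vptr; RTTI must be enabled, which it is by default):

// demo.cc -- build with: clang++ -fsanitize=vptr demo.cc
struct A { virtual ~A() {} };
struct B : A { virtual void f() {} };

int main() {
  A a;
  // Instrumented downcast: checkDynamicType() misses the vptr cache,
  // HandleDynamicTypeCacheMiss() confirms &a has no B subobject and reports
  // "downcast of address ... which does not point to an object of type 'B'".
  B &b = static_cast<B &>(a);
  (void)b;
  return 0;
}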
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_handlers_cxx.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_handlers_cxx.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_handlers_cxx.h (revision 351984)
@@ -0,0 +1,54 @@
+//===-- ubsan_handlers_cxx.h ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Entry points to the runtime library for Clang's undefined behavior sanitizer,
+// for C++-specific checks. This code is not linked into C binaries.
+//
+//===----------------------------------------------------------------------===//
+#ifndef UBSAN_HANDLERS_CXX_H
+#define UBSAN_HANDLERS_CXX_H
+
+#include "ubsan_value.h"
+
+namespace __ubsan {
+
+struct DynamicTypeCacheMissData {
+ SourceLocation Loc;
+ const TypeDescriptor &Type;
+ void *TypeInfo;
+ unsigned char TypeCheckKind;
+};
+
+/// \brief Handle a runtime type check failure, caused by an incorrect vptr.
+/// When this handler is called, all we know is that the type was not in the
+/// cache; this does not necessarily imply the existence of a bug.
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+void __ubsan_handle_dynamic_type_cache_miss(
+ DynamicTypeCacheMissData *Data, ValueHandle Pointer, ValueHandle Hash);
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+void __ubsan_handle_dynamic_type_cache_miss_abort(
+ DynamicTypeCacheMissData *Data, ValueHandle Pointer, ValueHandle Hash);
+
+struct FunctionTypeMismatchData {
+ SourceLocation Loc;
+ const TypeDescriptor &Type;
+};
+
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE void
+__ubsan_handle_function_type_mismatch_v1(FunctionTypeMismatchData *Data,
+ ValueHandle Val,
+ ValueHandle calleeRTTI,
+ ValueHandle fnRTTI);
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE void
+__ubsan_handle_function_type_mismatch_v1_abort(FunctionTypeMismatchData *Data,
+ ValueHandle Val,
+ ValueHandle calleeRTTI,
+ ValueHandle fnRTTI);
+}
+
+#endif // UBSAN_HANDLERS_CXX_H
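
A hedged sketch of what reaches handleFunctionTypeMismatch (assuming clang++ with -fsanitize=function on an Itanium-ABI target, where the callee's RTTI is emitted alongside its entry point):

// demo.cc -- build with: clang++ -fsanitize=function demo.cc
void Callee(int) {}

int main() {
  // The instrumented indirect call compares the RTTI recorded for Callee
  // with the RTTI of the pointer's static type; they differ, so
  // __ubsan_handle_function_type_mismatch_v1 reports the call site.
  void (*Fp)(double) = reinterpret_cast<void (*)(double)>(Callee);
  Fp(1.0);
  return 0;
}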
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_init.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_init.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_init.cc (revision 351984)
@@ -0,0 +1,64 @@
+//===-- ubsan_init.cc -----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Initialization of UBSan runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ubsan_platform.h"
+#if CAN_SANITIZE_UB
+#include "ubsan_diag.h"
+#include "ubsan_init.h"
+#include "ubsan_flags.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "sanitizer_common/sanitizer_mutex.h"
+#include "sanitizer_common/sanitizer_symbolizer.h"
+
+using namespace __ubsan;
+
+const char *__ubsan::GetSanititizerToolName() {
+ return "UndefinedBehaviorSanitizer";
+}
+
+static bool ubsan_initialized;
+static StaticSpinMutex ubsan_init_mu;
+
+static void CommonInit() {
+ InitializeSuppressions();
+}
+
+static void CommonStandaloneInit() {
+ SanitizerToolName = GetSanititizerToolName();
+ CacheBinaryName();
+ InitializeFlags();
+ __sanitizer_set_report_path(common_flags()->log_path);
+ AndroidLogInit();
+ InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir);
+ CommonInit();
+}
+
+void __ubsan::InitAsStandalone() {
+ SpinMutexLock l(&ubsan_init_mu);
+ if (!ubsan_initialized) {
+ CommonStandaloneInit();
+ ubsan_initialized = true;
+ }
+}
+
+void __ubsan::InitAsStandaloneIfNecessary() { return InitAsStandalone(); }
+
+void __ubsan::InitAsPlugin() {
+ SpinMutexLock l(&ubsan_init_mu);
+ if (!ubsan_initialized) {
+ CommonInit();
+ ubsan_initialized = true;
+ }
+}
+
+#endif // CAN_SANITIZE_UB
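
CommonStandaloneInit exists because a standalone UBSan must do the process-wide setup (flags, report path, coverage) that a parent tool has already done; InitAsPlugin only installs suppressions. A sketch of a hypothetical host-tool call site (the function name HostToolInitialize is illustrative, not taken from the sources):

#include "ubsan/ubsan_init.h"
#include "ubsan/ubsan_platform.h"

static void HostToolInitialize() {
  // ... the host tool has already set SanitizerToolName, flags, logging ...
#if CAN_SANITIZE_UB
  __ubsan::InitAsPlugin();  // idempotent: guarded by ubsan_init_mu
#endif
}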
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_init.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_init.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_init.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_init.h (revision 351984)
@@ -0,0 +1,33 @@
+//===-- ubsan_init.h --------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Initialization function for UBSan runtime.
+//
+//===----------------------------------------------------------------------===//
+#ifndef UBSAN_INIT_H
+#define UBSAN_INIT_H
+
+namespace __ubsan {
+
+// Get the full tool name for UBSan.
+const char *GetSanititizerToolName();
+
+// Initialize UBSan as a standalone tool. Typically should be called early
+// during initialization.
+void InitAsStandalone();
+
+// Initialize UBSan as a standalone tool, if it hasn't been initialized before.
+void InitAsStandaloneIfNecessary();
+
+// Initializes UBSan as a plugin tool. This function should be called once
+// from "parent tool" (e.g. ASan) initialization.
+void InitAsPlugin();
+
+} // namespace __ubsan
+
+#endif // UBSAN_INIT_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_init.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_init_standalone.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_init_standalone.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_init_standalone.cc (revision 351984)
@@ -0,0 +1,33 @@
+//===-- ubsan_init_standalone.cc ------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Initialization of standalone UBSan runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ubsan_platform.h"
+#if !CAN_SANITIZE_UB
+# error "UBSan is not supported on this platform!"
+#endif
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "ubsan_init.h"
+#include "ubsan_signals_standalone.h"
+
+namespace __ubsan {
+
+class UbsanStandaloneInitializer {
+ public:
+ UbsanStandaloneInitializer() {
+ InitAsStandalone();
+ InitializeDeadlySignals();
+ }
+};
+static UbsanStandaloneInitializer ubsan_standalone_initializer;
+
+} // namespace __ubsan
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_init_standalone.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_init_standalone_preinit.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_init_standalone_preinit.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_init_standalone_preinit.cc (revision 351984)
@@ -0,0 +1,35 @@
+//===-- ubsan_init_standalone_preinit.cc ---------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Initialization of standalone UBSan runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ubsan_platform.h"
+#if !CAN_SANITIZE_UB
+#error "UBSan is not supported on this platform!"
+#endif
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "ubsan_init.h"
+#include "ubsan_signals_standalone.h"
+
+#if SANITIZER_CAN_USE_PREINIT_ARRAY
+
+namespace __ubsan {
+
+static void PreInitAsStandalone() {
+ InitAsStandalone();
+ InitializeDeadlySignals();
+}
+
+} // namespace __ubsan
+
+__attribute__((section(".preinit_array"), used)) void (*__local_ubsan_preinit)(
+ void) = __ubsan::PreInitAsStandalone;
+#endif // SANITIZER_CAN_USE_PREINIT_ARRAY
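
.preinit_array entries run before any ELF constructors, including the static UbsanStandaloneInitializer in ubsan_init_standalone.cc, so this path initializes the runtime as early as the loader allows. A standalone sketch of the ordering (ELF/glibc specific; .preinit_array is only honored in the main executable, not in shared objects):

// demo.cc -- prints "1", "2", "3" in that order on ELF/glibc
#include <cstdio>

static void PreInit() { std::puts("1: .preinit_array entry"); }

__attribute__((section(".preinit_array"), used))
static void (*preinit_entry)(void) = PreInit;

struct Ctor { Ctor() { std::puts("2: static constructor"); } };
static Ctor ctor;

int main() { std::puts("3: main"); }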
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_init_standalone_preinit.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_interface.inc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_interface.inc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_interface.inc (revision 351984)
@@ -0,0 +1,59 @@
+//===-- ubsan_interface.inc -----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// Ubsan interface list.
+//===----------------------------------------------------------------------===//
+INTERFACE_FUNCTION(__ubsan_handle_add_overflow)
+INTERFACE_FUNCTION(__ubsan_handle_add_overflow_abort)
+INTERFACE_FUNCTION(__ubsan_handle_alignment_assumption)
+INTERFACE_FUNCTION(__ubsan_handle_alignment_assumption_abort)
+INTERFACE_FUNCTION(__ubsan_handle_builtin_unreachable)
+INTERFACE_FUNCTION(__ubsan_handle_cfi_bad_type)
+INTERFACE_FUNCTION(__ubsan_handle_cfi_check_fail)
+INTERFACE_FUNCTION(__ubsan_handle_cfi_check_fail_abort)
+INTERFACE_FUNCTION(__ubsan_handle_divrem_overflow)
+INTERFACE_FUNCTION(__ubsan_handle_divrem_overflow_abort)
+INTERFACE_FUNCTION(__ubsan_handle_dynamic_type_cache_miss)
+INTERFACE_FUNCTION(__ubsan_handle_dynamic_type_cache_miss_abort)
+INTERFACE_FUNCTION(__ubsan_handle_float_cast_overflow)
+INTERFACE_FUNCTION(__ubsan_handle_float_cast_overflow_abort)
+INTERFACE_FUNCTION(__ubsan_handle_function_type_mismatch_v1)
+INTERFACE_FUNCTION(__ubsan_handle_function_type_mismatch_v1_abort)
+INTERFACE_FUNCTION(__ubsan_handle_implicit_conversion)
+INTERFACE_FUNCTION(__ubsan_handle_implicit_conversion_abort)
+INTERFACE_FUNCTION(__ubsan_handle_invalid_builtin)
+INTERFACE_FUNCTION(__ubsan_handle_invalid_builtin_abort)
+INTERFACE_FUNCTION(__ubsan_handle_load_invalid_value)
+INTERFACE_FUNCTION(__ubsan_handle_load_invalid_value_abort)
+INTERFACE_FUNCTION(__ubsan_handle_missing_return)
+INTERFACE_FUNCTION(__ubsan_handle_mul_overflow)
+INTERFACE_FUNCTION(__ubsan_handle_mul_overflow_abort)
+INTERFACE_FUNCTION(__ubsan_handle_negate_overflow)
+INTERFACE_FUNCTION(__ubsan_handle_negate_overflow_abort)
+INTERFACE_FUNCTION(__ubsan_handle_nonnull_arg)
+INTERFACE_FUNCTION(__ubsan_handle_nonnull_arg_abort)
+INTERFACE_FUNCTION(__ubsan_handle_nonnull_return_v1)
+INTERFACE_FUNCTION(__ubsan_handle_nonnull_return_v1_abort)
+INTERFACE_FUNCTION(__ubsan_handle_nullability_arg)
+INTERFACE_FUNCTION(__ubsan_handle_nullability_arg_abort)
+INTERFACE_FUNCTION(__ubsan_handle_nullability_return_v1)
+INTERFACE_FUNCTION(__ubsan_handle_nullability_return_v1_abort)
+INTERFACE_FUNCTION(__ubsan_handle_out_of_bounds)
+INTERFACE_FUNCTION(__ubsan_handle_out_of_bounds_abort)
+INTERFACE_FUNCTION(__ubsan_handle_pointer_overflow)
+INTERFACE_FUNCTION(__ubsan_handle_pointer_overflow_abort)
+INTERFACE_FUNCTION(__ubsan_handle_shift_out_of_bounds)
+INTERFACE_FUNCTION(__ubsan_handle_shift_out_of_bounds_abort)
+INTERFACE_FUNCTION(__ubsan_handle_sub_overflow)
+INTERFACE_FUNCTION(__ubsan_handle_sub_overflow_abort)
+INTERFACE_FUNCTION(__ubsan_handle_type_mismatch_v1)
+INTERFACE_FUNCTION(__ubsan_handle_type_mismatch_v1_abort)
+INTERFACE_FUNCTION(__ubsan_handle_vla_bound_not_positive)
+INTERFACE_FUNCTION(__ubsan_handle_vla_bound_not_positive_abort)
+INTERFACE_WEAK_FUNCTION(__ubsan_default_options)
+INTERFACE_FUNCTION(__ubsan_on_report)
+INTERFACE_FUNCTION(__ubsan_get_current_report_data)
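
The .inc file is an X-macro list: every consumer defines INTERFACE_FUNCTION and INTERFACE_WEAK_FUNCTION to whatever it needs (exporting, checking, or dumping the symbol) and then includes the file. A hypothetical consumer, as a sketch (the macro definitions below are illustrative, not the actual sanitizer_common ones):

#include <cstdio>

#define INTERFACE_FUNCTION(Name) std::puts(#Name);
#define INTERFACE_WEAK_FUNCTION(Name) std::puts(#Name " (weak)");

// Prints the name of every function the UBSan runtime exports.
void DumpUbsanInterface() {
#include "ubsan_interface.inc"
}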
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_interface.inc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_monitor.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_monitor.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_monitor.cc (revision 351984)
@@ -0,0 +1,75 @@
+//===-- ubsan_monitor.cc ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Hooks which allow a monitor process to inspect UBSan's diagnostics.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ubsan_monitor.h"
+
+using namespace __ubsan;
+
+UndefinedBehaviorReport::UndefinedBehaviorReport(const char *IssueKind,
+ Location &Loc,
+ InternalScopedString &Msg)
+ : IssueKind(IssueKind), Loc(Loc), Buffer(Msg.length() + 1) {
+ // We have the common sanitizer reporting lock, so it's safe to register a
+ // new UB report.
+ RegisterUndefinedBehaviorReport(this);
+
+ // Make a copy of the diagnostic.
+ Buffer.append("%s", Msg.data());
+
+ // Let the monitor know that a report is available.
+ __ubsan_on_report();
+}
+
+static UndefinedBehaviorReport *CurrentUBR;
+
+void __ubsan::RegisterUndefinedBehaviorReport(UndefinedBehaviorReport *UBR) {
+ CurrentUBR = UBR;
+}
+
+SANITIZER_WEAK_DEFAULT_IMPL
+void __ubsan::__ubsan_on_report(void) {}
+
+void __ubsan::__ubsan_get_current_report_data(const char **OutIssueKind,
+ const char **OutMessage,
+ const char **OutFilename,
+ unsigned *OutLine,
+ unsigned *OutCol,
+ char **OutMemoryAddr) {
+ if (!OutIssueKind || !OutMessage || !OutFilename || !OutLine || !OutCol ||
+ !OutMemoryAddr)
+ UNREACHABLE("Invalid arguments passed to __ubsan_get_current_report_data");
+
+ InternalScopedString &Buf = CurrentUBR->Buffer;
+
+  // Ensure that the diagnostic text doesn't start with a lowercase letter.
+ char FirstChar = Buf.data()[0];
+ if (FirstChar >= 'a' && FirstChar <= 'z')
+ Buf.data()[0] = FirstChar - 'a' + 'A';
+
+ *OutIssueKind = CurrentUBR->IssueKind;
+ *OutMessage = Buf.data();
+ if (!CurrentUBR->Loc.isSourceLocation()) {
+ *OutFilename = "<unknown>";
+ *OutLine = *OutCol = 0;
+ } else {
+ SourceLocation SL = CurrentUBR->Loc.getSourceLocation();
+ *OutFilename = SL.getFilename();
+ *OutLine = SL.getLine();
+ *OutCol = SL.getColumn();
+ }
+
+ if (CurrentUBR->Loc.isMemoryLocation())
+ *OutMemoryAddr = (char *)CurrentUBR->Loc.getMemoryLocation();
+ else
+ *OutMemoryAddr = nullptr;
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_monitor.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_monitor.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_monitor.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_monitor.h (revision 351984)
@@ -0,0 +1,48 @@
+//===-- ubsan_monitor.h -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Hooks which allow a monitor process to inspect UBSan's diagnostics.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef UBSAN_MONITOR_H
+#define UBSAN_MONITOR_H
+
+#include "ubsan_diag.h"
+#include "ubsan_value.h"
+
+namespace __ubsan {
+
+struct UndefinedBehaviorReport {
+ const char *IssueKind;
+ Location &Loc;
+ InternalScopedString Buffer;
+
+ UndefinedBehaviorReport(const char *IssueKind, Location &Loc,
+ InternalScopedString &Msg);
+};
+
+SANITIZER_INTERFACE_ATTRIBUTE void
+RegisterUndefinedBehaviorReport(UndefinedBehaviorReport *UBR);
+
+/// Called after a report is prepared. This serves to alert monitor processes
+/// that a UB report is available.
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __ubsan_on_report(void);
+
+/// Used by the monitor process to extract information from a UB report. The
+/// data is only available until the next time __ubsan_on_report is called. The
+/// caller is responsible for copying and preserving the data if needed.
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE void
+__ubsan_get_current_report_data(const char **OutIssueKind,
+ const char **OutMessage,
+ const char **OutFilename, unsigned *OutLine,
+ unsigned *OutCol, char **OutMemoryAddr);
+
+} // end namespace __ubsan
+
+#endif // UBSAN_MONITOR_H
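
A monitor overrides the weak __ubsan_on_report hook and copies the data out immediately, since the buffers are only valid until the next report. A minimal in-process sketch using only the interface declared above:

#include <cstdio>

extern "C" void __ubsan_get_current_report_data(
    const char **OutIssueKind, const char **OutMessage,
    const char **OutFilename, unsigned *OutLine, unsigned *OutCol,
    char **OutMemoryAddr);

// Strong definition; overrides the SANITIZER_WEAK_DEFAULT_IMPL no-op.
extern "C" void __ubsan_on_report(void) {
  const char *Kind, *Msg, *File;
  unsigned Line, Col;
  char *Addr;
  __ubsan_get_current_report_data(&Kind, &Msg, &File, &Line, &Col, &Addr);
  std::fprintf(stderr, "[monitor] %s: %s at %s:%u:%u\n", Kind, Msg, File,
               Line, Col);
}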
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_monitor.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_platform.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_platform.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_platform.h (revision 351984)
@@ -0,0 +1,25 @@
+//===-- ubsan_platform.h ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Defines the platforms on which UBSan is supported.
+//
+//===----------------------------------------------------------------------===//
+#ifndef UBSAN_PLATFORM_H
+#define UBSAN_PLATFORM_H
+
+// Other platforms should be easy to add, and probably work as-is.
+#if defined(__linux__) || defined(__FreeBSD__) || defined(__APPLE__) || \
+ defined(__NetBSD__) || defined(__OpenBSD__) || \
+ (defined(__sun__) && defined(__svr4__)) || \
+ defined(_WIN32) || defined(__Fuchsia__) || defined(__rtems__)
+# define CAN_SANITIZE_UB 1
+#else
+# define CAN_SANITIZE_UB 0
+#endif
+
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_platform.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_signals_standalone.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_signals_standalone.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_signals_standalone.cc (revision 351984)
@@ -0,0 +1,71 @@
+//===-- ubsan_signals_standalone.cc ----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Installs signal handlers and related interceptors for UBSan standalone.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ubsan_platform.h"
+#include "sanitizer_common/sanitizer_platform.h"
+#if CAN_SANITIZE_UB
+#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "ubsan_diag.h"
+#include "ubsan_init.h"
+
+// Interception of signals breaks too many things on Android.
+// * It requires that ubsan is the first dependency of the main executable for
+// the interceptors to work correctly. This complicates deployment, as it
+// prevents us from enabling ubsan on random platform modules independently.
+// * For this to work with ART VM, ubsan signal handler has to be set after the
+// debuggerd handler, but before the ART handler.
+// * Interceptors don't work at all when ubsan runtime is loaded late, ex. when
+// it is part of an APK that does not use wrap.sh method.
+#if SANITIZER_FUCHSIA || SANITIZER_ANDROID
+
+namespace __ubsan {
+void InitializeDeadlySignals() {}
+}
+
+#else
+
+#define COMMON_INTERCEPT_FUNCTION(name) INTERCEPT_FUNCTION(name)
+#include "sanitizer_common/sanitizer_signal_interceptors.inc"
+
+// TODO(yln): Temporary workaround. Will be removed.
+void ubsan_GetStackTrace(BufferedStackTrace *stack, uptr max_depth,
+ uptr pc, uptr bp, void *context, bool fast);
+
+namespace __ubsan {
+
+static void OnStackUnwind(const SignalContext &sig, const void *,
+ BufferedStackTrace *stack) {
+ ubsan_GetStackTrace(stack, kStackTraceMax, sig.pc, sig.bp, sig.context,
+ common_flags()->fast_unwind_on_fatal);
+}
+
+static void UBsanOnDeadlySignal(int signo, void *siginfo, void *context) {
+ HandleDeadlySignal(siginfo, context, GetTid(), &OnStackUnwind, nullptr);
+}
+
+static bool is_initialized = false;
+
+void InitializeDeadlySignals() {
+ if (is_initialized)
+ return;
+ is_initialized = true;
+ InitializeSignalInterceptors();
+ InstallDeadlySignalHandlers(&UBsanOnDeadlySignal);
+}
+
+} // namespace __ubsan
+
+#endif
+
+#endif // CAN_SANITIZE_UB
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_signals_standalone.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_signals_standalone.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_signals_standalone.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_signals_standalone.h (revision 351984)
@@ -0,0 +1,24 @@
+//===-- ubsan_signals_standalone.h -----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Installs signal handlers and related interceptors for UBSan standalone.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef UBSAN_SIGNALS_STANDALONE_H
+#define UBSAN_SIGNALS_STANDALONE_H
+
+namespace __ubsan {
+
+// Initializes signal handlers and interceptors.
+void InitializeDeadlySignals();
+
+} // namespace __ubsan
+
+#endif // UBSAN_SIGNALS_STANDALONE_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_signals_standalone.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_type_hash.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_type_hash.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_type_hash.cc (revision 351984)
@@ -0,0 +1,33 @@
+//===-- ubsan_type_hash.cc ------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementation of a hash table for fast checking of inheritance
+// relationships. This file is only linked into C++ compilations, and is
+// permitted to use language features which require a C++ ABI library.
+//
+// Most of the implementation lives in an ABI-specific source file
+// (ubsan_type_hash_{itanium,win}.cc).
+//
+//===----------------------------------------------------------------------===//
+
+#include "ubsan_platform.h"
+#if CAN_SANITIZE_UB
+#include "ubsan_type_hash.h"
+
+#include "sanitizer_common/sanitizer_common.h"
+
+/// A cache of recently-checked hashes. Mini hash table with "random" evictions.
+__ubsan::HashValue
+__ubsan::__ubsan_vptr_type_cache[__ubsan::VptrTypeCacheSize];
+
+__ubsan::DynamicTypeInfo __ubsan::getDynamicTypeInfoFromObject(void *Object) {
+ void *VtablePtr = *reinterpret_cast<void **>(Object);
+ return getDynamicTypeInfoFromVtable(VtablePtr);
+}
+
+#endif // CAN_SANITIZE_UB
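
The single dereference above encodes the Itanium-ABI layout assumption that the vptr occupies the first pointer-sized word of a polymorphic object. Spelled out as a sketch:

struct Poly { virtual ~Poly() {} int X; };

void *ReadVptr(Poly *P) {
  // First word of the object is the vptr; getDynamicTypeInfoFromVtable()
  // then decodes the metadata that sits in front of it.
  return *reinterpret_cast<void **>(P);
}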
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_type_hash.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_type_hash.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_type_hash.h (revision 351984)
@@ -0,0 +1,73 @@
+//===-- ubsan_type_hash.h ---------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Hashing of types for Clang's undefined behavior checker.
+//
+//===----------------------------------------------------------------------===//
+#ifndef UBSAN_TYPE_HASH_H
+#define UBSAN_TYPE_HASH_H
+
+#include "sanitizer_common/sanitizer_common.h"
+
+namespace __ubsan {
+
+typedef uptr HashValue;
+
+/// \brief Information about the dynamic type of an object (extracted from its
+/// vptr).
+class DynamicTypeInfo {
+ const char *MostDerivedTypeName;
+ sptr Offset;
+ const char *SubobjectTypeName;
+
+public:
+ DynamicTypeInfo(const char *MDTN, sptr Offset, const char *STN)
+ : MostDerivedTypeName(MDTN), Offset(Offset), SubobjectTypeName(STN) {}
+
+ /// Determine whether the object had a valid dynamic type.
+ bool isValid() const { return MostDerivedTypeName; }
+ /// Get the name of the most-derived type of the object.
+ const char *getMostDerivedTypeName() const { return MostDerivedTypeName; }
+ /// Get the offset from the most-derived type to this base class.
+ sptr getOffset() const { return Offset; }
+ /// Get the name of the most-derived type at the specified offset.
+ const char *getSubobjectTypeName() const { return SubobjectTypeName; }
+};
+
+/// \brief Get information about the dynamic type of an object.
+DynamicTypeInfo getDynamicTypeInfoFromObject(void *Object);
+
+/// \brief Get information about the dynamic type of an object from its vtable.
+DynamicTypeInfo getDynamicTypeInfoFromVtable(void *Vtable);
+
+/// \brief Check whether the dynamic type of \p Object has a \p Type subobject
+/// at offset 0.
+/// \return \c true if the type matches, \c false if not.
+bool checkDynamicType(void *Object, void *Type, HashValue Hash);
+
+const unsigned VptrTypeCacheSize = 128;
+
+/// A sanity check for the vtable: the offset to top must be reasonably small
+/// (by absolute value). This is a weak check for vtable corruption.
+const int VptrMaxOffsetToTop = 1<<20;
+
+/// \brief A cache of the results of checkDynamicType. \c checkDynamicType would
+/// return \c true (modulo hash collisions) if
+/// \code
+/// __ubsan_vptr_type_cache[Hash % VptrTypeCacheSize] == Hash
+/// \endcode
+extern "C" SANITIZER_INTERFACE_ATTRIBUTE
+HashValue __ubsan_vptr_type_cache[VptrTypeCacheSize];
+
+/// \brief Do whatever is required by the ABI to check for std::type_info
+/// equivalence beyond simple pointer comparison.
+bool checkTypeInfoEquality(const void *TypeInfo1, const void *TypeInfo2);
+
+} // namespace __ubsan
+
+#endif // UBSAN_TYPE_HASH_H
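
The cache comment above describes the fast path that instrumented code takes before calling into the runtime. Conceptually (a sketch, not the actual IR clang emits):

#include "ubsan_type_hash.h"

// Hash is computed at the check site from the object's vptr and the
// static type being checked against.
static bool VptrCheckFastPath(__ubsan::HashValue Hash) {
  // Hit: this (vptr, type) pair was validated recently; skip the runtime.
  // Miss: call __ubsan_handle_dynamic_type_cache_miss(Data, Pointer, Hash).
  return __ubsan::__ubsan_vptr_type_cache[Hash % __ubsan::VptrTypeCacheSize] ==
         Hash;
}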
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_type_hash_itanium.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_type_hash_itanium.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_type_hash_itanium.cc (revision 351984)
@@ -0,0 +1,268 @@
+//===-- ubsan_type_hash_itanium.cc ----------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementation of type hashing/lookup for Itanium C++ ABI.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#include "ubsan_platform.h"
+#if CAN_SANITIZE_UB && !SANITIZER_WINDOWS
+#include "ubsan_type_hash.h"
+
+#include "sanitizer_common/sanitizer_common.h"
+
+// The following are intended to be binary compatible with the definitions
+// given in the Itanium ABI. We make no attempt to be ODR-compatible with
+// those definitions, since existing ABI implementations aren't.
+
+namespace std {
+ class type_info {
+ public:
+ virtual ~type_info();
+
+ const char *__type_name;
+ };
+}
+
+namespace __cxxabiv1 {
+
+/// Type info for classes with no bases, and base class for type info for
+/// classes with bases.
+class __class_type_info : public std::type_info {
+ ~__class_type_info() override;
+};
+
+/// Type info for classes with simple single public inheritance.
+class __si_class_type_info : public __class_type_info {
+public:
+ ~__si_class_type_info() override;
+
+ const __class_type_info *__base_type;
+};
+
+class __base_class_type_info {
+public:
+ const __class_type_info *__base_type;
+ long __offset_flags;
+
+ enum __offset_flags_masks {
+ __virtual_mask = 0x1,
+ __public_mask = 0x2,
+ __offset_shift = 8
+ };
+};
+
+/// Type info for classes with multiple, virtual, or non-public inheritance.
+class __vmi_class_type_info : public __class_type_info {
+public:
+ ~__vmi_class_type_info() override;
+
+ unsigned int flags;
+ unsigned int base_count;
+ __base_class_type_info base_info[1];
+};
+
+}
+
+namespace abi = __cxxabiv1;
+
+using namespace __sanitizer;
+
+// We implement a simple two-level cache for type-checking results. For each
+// (vptr,type) pair, a hash is computed. This hash is assumed to be globally
+// unique; if it collides, we will get false negatives, but:
+// * such a collision would have to occur on the *first* bad access,
+// * the probability of such a collision is low (and for a 64-bit target, is
+// negligible), and
+// * the vptr, and thus the hash, can be affected by ASLR, so multiple runs
+// give better coverage.
+//
+// The first caching layer is a small hash table with no chaining; buckets are
+// reused as needed. The second caching layer is a large hash table with open
+// chaining. We can freely evict from either layer since this is just a cache.
+//
+// FIXME: Make these hash table accesses thread-safe. The races here are benign:
+// assuming the unsequenced loads and stores don't misbehave too badly,
+// the worst case is false negatives or poor cache behavior, not false
+// positives or crashes.
+
+/// Find a bucket to store the given hash value in.
+static __ubsan::HashValue *getTypeCacheHashTableBucket(__ubsan::HashValue V) {
+ static const unsigned HashTableSize = 65537;
+ static __ubsan::HashValue __ubsan_vptr_hash_set[HashTableSize];
+
+ unsigned First = (V & 65535) ^ 1;
+ unsigned Probe = First;
+ for (int Tries = 5; Tries; --Tries) {
+ if (!__ubsan_vptr_hash_set[Probe] || __ubsan_vptr_hash_set[Probe] == V)
+ return &__ubsan_vptr_hash_set[Probe];
+ Probe += ((V >> 16) & 65535) + 1;
+ if (Probe >= HashTableSize)
+ Probe -= HashTableSize;
+ }
+ // FIXME: Pick a random entry from the probe sequence to evict rather than
+ // just taking the first.
+ return &__ubsan_vptr_hash_set[First];
+}
+
+/// \brief Determine whether \p Derived has a \p Base base class subobject at
+/// offset \p Offset.
+static bool isDerivedFromAtOffset(const abi::__class_type_info *Derived,
+ const abi::__class_type_info *Base,
+ sptr Offset) {
+ if (Derived->__type_name == Base->__type_name ||
+ __ubsan::checkTypeInfoEquality(Derived, Base))
+ return Offset == 0;
+
+ if (const abi::__si_class_type_info *SI =
+ dynamic_cast<const abi::__si_class_type_info*>(Derived))
+ return isDerivedFromAtOffset(SI->__base_type, Base, Offset);
+
+ const abi::__vmi_class_type_info *VTI =
+ dynamic_cast<const abi::__vmi_class_type_info*>(Derived);
+ if (!VTI)
+ // No base class subobjects.
+ return false;
+
+ // Look for a base class which is derived from \p Base at the right offset.
+ for (unsigned int base = 0; base != VTI->base_count; ++base) {
+ // FIXME: Curtail the recursion if this base can't possibly contain the
+ // given offset.
+ sptr OffsetHere = VTI->base_info[base].__offset_flags >>
+ abi::__base_class_type_info::__offset_shift;
+ if (VTI->base_info[base].__offset_flags &
+ abi::__base_class_type_info::__virtual_mask)
+ // For now, just punt on virtual bases and say 'yes'.
+ // FIXME: OffsetHere is the offset in the vtable of the virtual base
+ // offset. Read the vbase offset out of the vtable and use it.
+ return true;
+ if (isDerivedFromAtOffset(VTI->base_info[base].__base_type,
+ Base, Offset - OffsetHere))
+ return true;
+ }
+
+ return false;
+}
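+// As an illustration (hypothetical types, typical LP64 Itanium layout): for
+// struct A { virtual ~A(); }; struct B { virtual ~B(); }; and
+// struct C : A, B {};, asking whether C is derived from B at offset 8
+// recurses into C's second base entry, whose __offset_flags encode offset 8,
+// and succeeds via the Offset == 0 case.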
+
+/// \brief Find the derived-most dynamic base class of \p Derived at offset
+/// \p Offset.
+static const abi::__class_type_info *findBaseAtOffset(
+ const abi::__class_type_info *Derived, sptr Offset) {
+ if (!Offset)
+ return Derived;
+
+ if (const abi::__si_class_type_info *SI =
+ dynamic_cast<const abi::__si_class_type_info*>(Derived))
+ return findBaseAtOffset(SI->__base_type, Offset);
+
+ const abi::__vmi_class_type_info *VTI =
+ dynamic_cast<const abi::__vmi_class_type_info*>(Derived);
+ if (!VTI)
+ // No base class subobjects.
+ return nullptr;
+
+ for (unsigned int base = 0; base != VTI->base_count; ++base) {
+ sptr OffsetHere = VTI->base_info[base].__offset_flags >>
+ abi::__base_class_type_info::__offset_shift;
+ if (VTI->base_info[base].__offset_flags &
+ abi::__base_class_type_info::__virtual_mask)
+ // FIXME: Can't handle virtual bases yet.
+ continue;
+ if (const abi::__class_type_info *Base =
+ findBaseAtOffset(VTI->base_info[base].__base_type,
+ Offset - OffsetHere))
+ return Base;
+ }
+
+ return nullptr;
+}
+
+namespace {
+
+struct VtablePrefix {
+ /// The offset from the vptr to the start of the most-derived object.
+ /// This will only be greater than zero in some virtual base class vtables
+ /// used during object construction and destruction, and will usually be
+ /// exactly zero.
+ sptr Offset;
+ /// The type_info object describing the most-derived class type.
+ std::type_info *TypeInfo;
+};
+VtablePrefix *getVtablePrefix(void *Vtable) {
+ VtablePrefix *Vptr = reinterpret_cast<VtablePrefix*>(Vtable);
+ VtablePrefix *Prefix = Vptr - 1;
+ if (!IsAccessibleMemoryRange((uptr)Prefix, sizeof(VtablePrefix)))
+ return nullptr;
+ if (!Prefix->TypeInfo)
+ // This can't possibly be a valid vtable.
+ return nullptr;
+ return Prefix;
+}
+
+}
+
+bool __ubsan::checkDynamicType(void *Object, void *Type, HashValue Hash) {
+ // A crash anywhere within this function probably means the vptr is corrupted.
+ // FIXME: Perform these checks more cautiously.
+
+ // Check whether this is something we've evicted from the cache.
+ HashValue *Bucket = getTypeCacheHashTableBucket(Hash);
+ if (*Bucket == Hash) {
+ __ubsan_vptr_type_cache[Hash % VptrTypeCacheSize] = Hash;
+ return true;
+ }
+
+ void *VtablePtr = *reinterpret_cast<void **>(Object);
+ VtablePrefix *Vtable = getVtablePrefix(VtablePtr);
+ if (!Vtable)
+ return false;
+ if (Vtable->Offset < -VptrMaxOffsetToTop || Vtable->Offset > VptrMaxOffsetToTop) {
+ // An offset that is too large or too small is a sign of vtable corruption.
+ return false;
+ }
+
+ // Check that this is actually a type_info object for a class type.
+ abi::__class_type_info *Derived =
+ dynamic_cast<abi::__class_type_info*>(Vtable->TypeInfo);
+ if (!Derived)
+ return false;
+
+ abi::__class_type_info *Base = (abi::__class_type_info*)Type;
+ if (!isDerivedFromAtOffset(Derived, Base, -Vtable->Offset))
+ return false;
+
+ // Success. Cache this result.
+ __ubsan_vptr_type_cache[Hash % VptrTypeCacheSize] = Hash;
+ *Bucket = Hash;
+ return true;
+}
+
+__ubsan::DynamicTypeInfo
+__ubsan::getDynamicTypeInfoFromVtable(void *VtablePtr) {
+ VtablePrefix *Vtable = getVtablePrefix(VtablePtr);
+ if (!Vtable)
+ return DynamicTypeInfo(nullptr, 0, nullptr);
+ if (Vtable->Offset < -VptrMaxOffsetToTop || Vtable->Offset > VptrMaxOffsetToTop)
+ return DynamicTypeInfo(nullptr, Vtable->Offset, nullptr);
+ const abi::__class_type_info *ObjectType = findBaseAtOffset(
+ static_cast<const abi::__class_type_info*>(Vtable->TypeInfo),
+ -Vtable->Offset);
+ return DynamicTypeInfo(Vtable->TypeInfo->__type_name, -Vtable->Offset,
+ ObjectType ? ObjectType->__type_name : "<unknown>");
+}
+
+bool __ubsan::checkTypeInfoEquality(const void *TypeInfo1,
+ const void *TypeInfo2) {
+ auto TI1 = static_cast<const std::type_info *>(TypeInfo1);
+ auto TI2 = static_cast<const std::type_info *>(TypeInfo2);
+ return SANITIZER_NON_UNIQUE_TYPEINFO && TI1->__type_name[0] != '*' &&
+ TI2->__type_name[0] != '*' &&
+ !internal_strcmp(TI1->__type_name, TI2->__type_name);
+}
+
+#endif // CAN_SANITIZE_UB && !SANITIZER_WINDOWS
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_type_hash_itanium.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_type_hash_win.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_type_hash_win.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_type_hash_win.cc (revision 351984)
@@ -0,0 +1,84 @@
+//===-- ubsan_type_hash_win.cc --------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementation of type hashing/lookup for Microsoft C++ ABI.
+//
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#include "ubsan_platform.h"
+#if CAN_SANITIZE_UB && SANITIZER_WINDOWS
+#include "ubsan_type_hash.h"
+
+#include "sanitizer_common/sanitizer_common.h"
+
+#include <typeinfo>
+
+struct CompleteObjectLocator {
+ int is_image_relative;
+ int offset_to_top;
+ int vfptr_offset;
+ int rtti_addr;
+ int chd_addr;
+ int obj_locator_addr;
+};
+
+struct CompleteObjectLocatorAbs {
+ int is_image_relative;
+ int offset_to_top;
+ int vfptr_offset;
+ std::type_info *rtti_addr;
+ void *chd_addr;
+ CompleteObjectLocator *obj_locator_addr;
+};
+
+bool __ubsan::checkDynamicType(void *Object, void *Type, HashValue Hash) {
+ // FIXME: Implement.
+ return false;
+}
+
+__ubsan::DynamicTypeInfo
+__ubsan::getDynamicTypeInfoFromVtable(void *VtablePtr) {
+ // The virtual table may not have a complete object locator if the object
+ // was compiled without RTTI (i.e. we might be reading from some other global
+ // laid out before the virtual table), so we need to carefully validate each
+ // pointer dereference and perform sanity checks.
+ CompleteObjectLocator **obj_locator_ptr =
+ ((CompleteObjectLocator**)VtablePtr)-1;
+ if (!IsAccessibleMemoryRange((uptr)obj_locator_ptr, sizeof(void*)))
+ return DynamicTypeInfo(0, 0, 0);
+
+ CompleteObjectLocator *obj_locator = *obj_locator_ptr;
+ if (!IsAccessibleMemoryRange((uptr)obj_locator,
+ sizeof(CompleteObjectLocator)))
+ return DynamicTypeInfo(0, 0, 0);
+
+ std::type_info *tinfo;
+ if (obj_locator->is_image_relative == 1) {
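+ // obj_locator_addr holds the locator's own image-relative offset, so
+ // subtracting it from the locator's address recovers the image base.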
+ char *image_base = ((char *)obj_locator) - obj_locator->obj_locator_addr;
+ tinfo = (std::type_info *)(image_base + obj_locator->rtti_addr);
+ } else if (obj_locator->is_image_relative == 0)
+ tinfo = ((CompleteObjectLocatorAbs *)obj_locator)->rtti_addr;
+ else
+ // Probably not a complete object locator.
+ return DynamicTypeInfo(0, 0, 0);
+
+ if (!IsAccessibleMemoryRange((uptr)tinfo, sizeof(std::type_info)))
+ return DynamicTypeInfo(0, 0, 0);
+
+ // Okay, this is probably a std::type_info. Request its name.
+ // FIXME: Implement a base class search like we do for Itanium.
+ return DynamicTypeInfo(tinfo->name(), obj_locator->offset_to_top,
+ "<unknown>");
+}
+
+bool __ubsan::checkTypeInfoEquality(const void *, const void *) {
+ return false;
+}
+
+#endif // CAN_SANITIZE_UB && SANITIZER_WINDOWS
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_type_hash_win.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_value.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_value.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_value.cc (revision 351984)
@@ -0,0 +1,112 @@
+//===-- ubsan_value.cc ----------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Representation of a runtime value, as marshaled from the generated code to
+// the ubsan runtime.
+//
+//===----------------------------------------------------------------------===//
+
+#include "ubsan_platform.h"
+#if CAN_SANITIZE_UB
+#include "ubsan_value.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_libc.h"
+
+using namespace __ubsan;
+
+SIntMax Value::getSIntValue() const {
+ CHECK(getType().isSignedIntegerTy());
+ if (isInlineInt()) {
+ // Val was zero-extended to ValueHandle. Sign-extend from original width
+ // to SIntMax.
+ const unsigned ExtraBits =
+ sizeof(SIntMax) * 8 - getType().getIntegerBitWidth();
+ return SIntMax(Val) << ExtraBits >> ExtraBits;
+ }
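+ // E.g., with a 64-bit SIntMax, an 8-bit value 0xff gives ExtraBits == 56,
+ // and the shift pair above sign-extends it to -1.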
+ if (getType().getIntegerBitWidth() == 64)
+ return *reinterpret_cast<s64*>(Val);
+#if HAVE_INT128_T
+ if (getType().getIntegerBitWidth() == 128)
+ return *reinterpret_cast<s128*>(Val);
+#else
+ if (getType().getIntegerBitWidth() == 128)
+ UNREACHABLE("libclang_rt.ubsan was built without __int128 support");
+#endif
+ UNREACHABLE("unexpected bit width");
+}
+
+UIntMax Value::getUIntValue() const {
+ CHECK(getType().isUnsignedIntegerTy());
+ if (isInlineInt())
+ return Val;
+ if (getType().getIntegerBitWidth() == 64)
+ return *reinterpret_cast<u64*>(Val);
+#if HAVE_INT128_T
+ if (getType().getIntegerBitWidth() == 128)
+ return *reinterpret_cast<u128*>(Val);
+#else
+ if (getType().getIntegerBitWidth() == 128)
+ UNREACHABLE("libclang_rt.ubsan was built without __int128 support");
+#endif
+ UNREACHABLE("unexpected bit width");
+}
+
+UIntMax Value::getPositiveIntValue() const {
+ if (getType().isUnsignedIntegerTy())
+ return getUIntValue();
+ SIntMax Val = getSIntValue();
+ CHECK(Val >= 0);
+ return Val;
+}
+
+/// Get the floating-point value of this object, extended to a long double.
+/// These are always passed by address (our calling convention doesn't allow
+/// them to be passed in floating-point registers, so this has little cost).
+FloatMax Value::getFloatValue() const {
+ CHECK(getType().isFloatTy());
+ if (isInlineFloat()) {
+ switch (getType().getFloatBitWidth()) {
+#if 0
+ // FIXME: OpenCL / NEON 'half' type. LLVM can't lower the conversion
+ // from '__fp16' to 'long double'.
+ case 16: {
+ __fp16 Value;
+ internal_memcpy(&Value, &Val, 4);
+ return Value;
+ }
+#endif
+ case 32: {
+ float Value;
+#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+ // For big endian the float value is in the last 4 bytes.
+ // On some targets we may only have 4 bytes so we count backwards from
+ // the end of Val to account for both the 32-bit and 64-bit cases.
+ internal_memcpy(&Value, ((const char*)(&Val + 1)) - 4, 4);
+#else
+ internal_memcpy(&Value, &Val, 4);
+#endif
+ return Value;
+ }
+ case 64: {
+ double Value;
+ internal_memcpy(&Value, &Val, 8);
+ return Value;
+ }
+ }
+ } else {
+ switch (getType().getFloatBitWidth()) {
+ case 64: return *reinterpret_cast<double*>(Val);
+ case 80: return *reinterpret_cast<long double*>(Val);
+ case 96: return *reinterpret_cast<long double*>(Val);
+ case 128: return *reinterpret_cast<long double*>(Val);
+ }
+ }
+ UNREACHABLE("unexpected floating point bit width");
+}
+
+#endif // CAN_SANITIZE_UB
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_value.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_value.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_value.h (revision 351984)
@@ -0,0 +1,196 @@
+//===-- ubsan_value.h -------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Representation of data which is passed from the compiler-generated calls into
+// the ubsan runtime.
+//
+//===----------------------------------------------------------------------===//
+#ifndef UBSAN_VALUE_H
+#define UBSAN_VALUE_H
+
+#include "sanitizer_common/sanitizer_atomic.h"
+#include "sanitizer_common/sanitizer_common.h"
+
+// FIXME: Move this out to a config header.
+#if __SIZEOF_INT128__
+__extension__ typedef __int128 s128;
+__extension__ typedef unsigned __int128 u128;
+#define HAVE_INT128_T 1
+#else
+#define HAVE_INT128_T 0
+#endif
+
+namespace __ubsan {
+
+/// \brief Largest integer types we support.
+#if HAVE_INT128_T
+typedef s128 SIntMax;
+typedef u128 UIntMax;
+#else
+typedef s64 SIntMax;
+typedef u64 UIntMax;
+#endif
+
+/// \brief Largest floating-point type we support.
+typedef long double FloatMax;
+
+/// \brief A description of a source location. This corresponds to Clang's
+/// \c PresumedLoc type.
+class SourceLocation {
+ const char *Filename;
+ u32 Line;
+ u32 Column;
+
+public:
+ SourceLocation() : Filename(), Line(), Column() {}
+ SourceLocation(const char *Filename, unsigned Line, unsigned Column)
+ : Filename(Filename), Line(Line), Column(Column) {}
+
+ /// \brief Determine whether the source location is known.
+ bool isInvalid() const { return !Filename; }
+
+ /// \brief Atomically acquire a copy, disabling original in-place.
+ /// Exactly one call to acquire() returns a copy that isn't disabled.
+ SourceLocation acquire() {
+ u32 OldColumn = __sanitizer::atomic_exchange(
+ (__sanitizer::atomic_uint32_t *)&Column, ~u32(0),
+ __sanitizer::memory_order_relaxed);
+ return SourceLocation(Filename, Line, OldColumn);
+ }
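+ // A sketch of the intended use in a handler that reports each source
+ // location at most once (the Data->Loc field is hypothetical):
+ // SourceLocation Loc = Data->Loc.acquire();
+ // if (Loc.isDisabled())
+ // return;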
+
+ /// \brief Determine if this Location has been disabled.
+ /// Disabled SourceLocations are invalid to use.
+ bool isDisabled() {
+ return Column == ~u32(0);
+ }
+
+ /// \brief Get the presumed filename for the source location.
+ const char *getFilename() const { return Filename; }
+ /// \brief Get the presumed line number.
+ unsigned getLine() const { return Line; }
+ /// \brief Get the column within the presumed line.
+ unsigned getColumn() const { return Column; }
+};
+
+
+/// \brief A description of a type.
+class TypeDescriptor {
+ /// A value from the \c Kind enumeration, specifying what flavor of type we
+ /// have.
+ u16 TypeKind;
+
+ /// A \c Type-specific value providing information which allows us to
+ /// interpret the meaning of a ValueHandle of this type.
+ u16 TypeInfo;
+
+ /// The name of the type follows, in a format suitable for including in
+ /// diagnostics.
+ char TypeName[1];
+
+public:
+ enum Kind {
+ /// An integer type. Lowest bit is 1 for a signed value, 0 for an unsigned
+ /// value. Remaining bits are log_2(bit width). The value representation is
+ /// the integer itself if it fits into a ValueHandle, and a pointer to the
+ /// integer otherwise.
+ TK_Integer = 0x0000,
+ /// A floating-point type. Low 16 bits are bit width. The value
+ /// representation is that of bitcasting the floating-point value to an
+ /// integer type.
+ TK_Float = 0x0001,
+ /// Any other type. The value representation is unspecified.
+ TK_Unknown = 0xffff
+ };
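+ // For example, a signed 32-bit integer is described by TK_Integer with
+ // TypeInfo == ((5 << 1) | 1): log2(32) == 5 in the upper bits, low bit set
+ // for signedness. A 'float' is TK_Float with TypeInfo == 32.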
+
+ const char *getTypeName() const { return TypeName; }
+
+ Kind getKind() const {
+ return static_cast<Kind>(TypeKind);
+ }
+
+ bool isIntegerTy() const { return getKind() == TK_Integer; }
+ bool isSignedIntegerTy() const {
+ return isIntegerTy() && (TypeInfo & 1);
+ }
+ bool isUnsignedIntegerTy() const {
+ return isIntegerTy() && !(TypeInfo & 1);
+ }
+ unsigned getIntegerBitWidth() const {
+ CHECK(isIntegerTy());
+ return 1 << (TypeInfo >> 1);
+ }
+
+ bool isFloatTy() const { return getKind() == TK_Float; }
+ unsigned getFloatBitWidth() const {
+ CHECK(isFloatTy());
+ return TypeInfo;
+ }
+};
+
+/// \brief An opaque handle to a value.
+typedef uptr ValueHandle;
+
+
+/// \brief Representation of an operand value provided by the instrumented code.
+///
+/// This is a combination of a TypeDescriptor (which is emitted as constant data
+/// as an operand to a handler function) and a ValueHandle (which is passed at
+/// runtime when a check failure occurs).
+class Value {
+ /// The type of the value.
+ const TypeDescriptor &Type;
+ /// The encoded value itself.
+ ValueHandle Val;
+
+ /// Is \c Val a (zero-extended) integer?
+ bool isInlineInt() const {
+ CHECK(getType().isIntegerTy());
+ const unsigned InlineBits = sizeof(ValueHandle) * 8;
+ const unsigned Bits = getType().getIntegerBitWidth();
+ return Bits <= InlineBits;
+ }
+
+ /// Is \c Val a (zero-extended) integer representation of a float?
+ bool isInlineFloat() const {
+ CHECK(getType().isFloatTy());
+ const unsigned InlineBits = sizeof(ValueHandle) * 8;
+ const unsigned Bits = getType().getFloatBitWidth();
+ return Bits <= InlineBits;
+ }
+
+public:
+ Value(const TypeDescriptor &Type, ValueHandle Val) : Type(Type), Val(Val) {}
+
+ const TypeDescriptor &getType() const { return Type; }
+
+ /// \brief Get this value as a signed integer.
+ SIntMax getSIntValue() const;
+
+ /// \brief Get this value as an unsigned integer.
+ UIntMax getUIntValue() const;
+
+ /// \brief Decode this value, which must be a positive or unsigned integer.
+ UIntMax getPositiveIntValue() const;
+
+ /// Is this an integer with value -1?
+ bool isMinusOne() const {
+ return getType().isSignedIntegerTy() && getSIntValue() == -1;
+ }
+
+ /// Is this a negative integer?
+ bool isNegative() const {
+ return getType().isSignedIntegerTy() && getSIntValue() < 0;
+ }
+
+ /// \brief Get this value as a floating-point quantity.
+ FloatMax getFloatValue() const;
+};
+
+} // namespace __ubsan
+
+#endif // UBSAN_VALUE_H
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_win_dll_thunk.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_win_dll_thunk.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_win_dll_thunk.cc (revision 351984)
@@ -0,0 +1,20 @@
+//===-- ubsan_win_dll_thunk.cc --------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines a family of thunks that should be statically linked into
+// the DLLs that have instrumentation, in order to delegate calls to the
+// shared runtime that lives in the main binary.
+// See https://github.com/google/sanitizers/issues/209 for the details.
+//===----------------------------------------------------------------------===//
+#ifdef SANITIZER_DLL_THUNK
+#include "sanitizer_common/sanitizer_win_dll_thunk.h"
+// Ubsan interface functions.
+#define INTERFACE_FUNCTION(Name) INTERCEPT_SANITIZER_FUNCTION(Name)
+#define INTERFACE_WEAK_FUNCTION(Name) INTERCEPT_SANITIZER_WEAK_FUNCTION(Name)
+#include "ubsan_interface.inc"
+#endif // SANITIZER_DLL_THUNK
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_win_dll_thunk.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_win_dynamic_runtime_thunk.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_win_dynamic_runtime_thunk.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_win_dynamic_runtime_thunk.cc (revision 351984)
@@ -0,0 +1,20 @@
+//===-- ubsan_win_dynamic_runtime_thunk.cc --------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines things that need to be present in application modules
+// in order to interact with Ubsan when it is included in a dll.
+//
+//===----------------------------------------------------------------------===//
+#ifdef SANITIZER_DYNAMIC_RUNTIME_THUNK
+#define SANITIZER_IMPORT_INTERFACE 1
+#include "sanitizer_common/sanitizer_win_defs.h"
+// Define weak alias for all weak functions imported from ubsan.
+#define INTERFACE_FUNCTION(Name)
+#define INTERFACE_WEAK_FUNCTION(Name) WIN_WEAK_IMPORT_DEF(Name)
+#include "ubsan_interface.inc"
+#endif // SANITIZER_DYNAMIC_RUNTIME_THUNK
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_win_dynamic_runtime_thunk.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_win_weak_interception.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_win_weak_interception.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_win_weak_interception.cc (revision 351984)
@@ -0,0 +1,23 @@
+//===-- ubsan_win_weak_interception.cc ------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+// This module should be included in Ubsan when it is implemented as a shared
+// library on Windows (dll), in order to delegate calls to weak functions to
+// the implementation in the main executable when a strong definition is
+// provided.
+//===----------------------------------------------------------------------===//
+#ifdef SANITIZER_DYNAMIC
+#include "sanitizer_common/sanitizer_win_weak_interception.h"
+#include "ubsan_flags.h"
+#include "ubsan_monitor.h"
+// Check if strong definitions for weak functions are present in the main
+// executable. If that is the case, override dll functions to point to strong
+// implementations.
+#define INTERFACE_FUNCTION(Name)
+#define INTERFACE_WEAK_FUNCTION(Name) INTERCEPT_SANITIZER_WEAK_FUNCTION(Name)
+#include "ubsan_interface.inc"
+#endif // SANITIZER_DYNAMIC
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan_win_weak_interception.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/weak_symbols.txt
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/weak_symbols.txt (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/weak_symbols.txt (revision 351984)
@@ -0,0 +1 @@
+___ubsan_default_options
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/weak_symbols.txt
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan.syms.extra
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan.syms.extra (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan/ubsan.syms.extra (revision 351984)
@@ -0,0 +1 @@
+__ubsan_*
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_AArch64.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_AArch64.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_AArch64.cc (revision 351984)
@@ -0,0 +1,127 @@
+//===-- xray_AArch64.cc -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+// Implementation of AArch64-specific routines (64-bit).
+//
+//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_common.h"
+#include "xray_defs.h"
+#include "xray_interface_internal.h"
+#include <atomic>
+#include <cassert>
+
+extern "C" void __clear_cache(void *start, void *end);
+
+namespace __xray {
+
+// The machine codes for some instructions used in runtime patching.
+enum class PatchOpcodes : uint32_t {
+ PO_StpX0X30SP_m16e = 0xA9BF7BE0, // STP X0, X30, [SP, #-16]!
+ PO_LdrW0_12 = 0x18000060, // LDR W0, #12
+ PO_LdrX16_12 = 0x58000070, // LDR X16, #12
+ PO_BlrX16 = 0xD63F0200, // BLR X16
+ PO_LdpX0X30SP_16 = 0xA8C17BE0, // LDP X0, X30, [SP], #16
+ PO_B32 = 0x14000008 // B #32
+};
+
+inline static bool patchSled(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled,
+ void (*TracingHook)()) XRAY_NEVER_INSTRUMENT {
+ // When |Enable| == true, we replace the following compile-time stub
+ // (sled):
+ //
+ // xray_sled_n:
+ // B #32
+ // 7 NOPs (24 bytes)
+ //
+ // With the following runtime patch:
+ //
+ // xray_sled_n:
+ // STP X0, X30, [SP, #-16]! ; PUSH {r0, lr}
+ // LDR W0, #12 ; W0 := function ID
+ // LDR X16,#12 ; X16 := address of the trampoline
+ // BLR X16
+ // ;DATA: 32 bits of function ID
+ // ;DATA: lower 32 bits of the address of the trampoline
+ // ;DATA: higher 32 bits of the address of the trampoline
+ // LDP X0, X30, [SP], #16 ; POP {r0, lr}
+ //
+ // Replacement of the first 4-byte instruction should be the last and atomic
+ // operation, so that the user code which reaches the sled concurrently
+ // either jumps over the whole sled, or executes the whole sled when the
+ // latter is ready.
+ //
+ // When |Enable| == false, we set the first instruction in the sled back to
+ // B #32
+
+ uint32_t *FirstAddress = reinterpret_cast<uint32_t *>(Sled.Address);
+ uint32_t *CurAddress = FirstAddress + 1;
+ if (Enable) {
+ *CurAddress = uint32_t(PatchOpcodes::PO_LdrW0_12);
+ CurAddress++;
+ *CurAddress = uint32_t(PatchOpcodes::PO_LdrX16_12);
+ CurAddress++;
+ *CurAddress = uint32_t(PatchOpcodes::PO_BlrX16);
+ CurAddress++;
+ *CurAddress = FuncId;
+ CurAddress++;
+ *reinterpret_cast<void (**)()>(CurAddress) = TracingHook;
+ CurAddress += 2;
+ *CurAddress = uint32_t(PatchOpcodes::PO_LdpX0X30SP_16);
+ CurAddress++;
+ std::atomic_store_explicit(
+ reinterpret_cast<std::atomic<uint32_t> *>(FirstAddress),
+ uint32_t(PatchOpcodes::PO_StpX0X30SP_m16e), std::memory_order_release);
+ } else {
+ std::atomic_store_explicit(
+ reinterpret_cast<std::atomic<uint32_t> *>(FirstAddress),
+ uint32_t(PatchOpcodes::PO_B32), std::memory_order_release);
+ }
+ __clear_cache(reinterpret_cast<char *>(FirstAddress),
+ reinterpret_cast<char *>(CurAddress));
+ return true;
+}
+
+bool patchFunctionEntry(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled,
+ void (*Trampoline)()) XRAY_NEVER_INSTRUMENT {
+ return patchSled(Enable, FuncId, Sled, Trampoline);
+}
+
+bool patchFunctionExit(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
+ return patchSled(Enable, FuncId, Sled, __xray_FunctionExit);
+}
+
+bool patchFunctionTailExit(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
+ return patchSled(Enable, FuncId, Sled, __xray_FunctionTailExit);
+}
+
+bool patchCustomEvent(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled)
+ XRAY_NEVER_INSTRUMENT { // FIXME: Implement in aarch64?
+ return false;
+}
+
+bool patchTypedEvent(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
+ // FIXME: Implement in aarch64?
+ return false;
+}
+
+// FIXME: Maybe implement this better?
+bool probeRequiredCPUFeatures() XRAY_NEVER_INSTRUMENT { return true; }
+
+} // namespace __xray
+
+extern "C" void __xray_ArgLoggerEntry() XRAY_NEVER_INSTRUMENT {
+ // FIXME: this will have to be implemented in the trampoline assembly file
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_AArch64.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_allocator.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_allocator.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_allocator.h (revision 351984)
@@ -0,0 +1,288 @@
+//===-- xray_allocator.h ---------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+// Defines the allocator interface for an arena allocator, used primarily for
+// the profiling runtime.
+//
+//===----------------------------------------------------------------------===//
+#ifndef XRAY_ALLOCATOR_H
+#define XRAY_ALLOCATOR_H
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "sanitizer_common/sanitizer_mutex.h"
+#if SANITIZER_FUCHSIA
+#include <zircon/process.h>
+#include <zircon/status.h>
+#include <zircon/syscalls.h>
+#else
+#include "sanitizer_common/sanitizer_posix.h"
+#endif
+#include "xray_defs.h"
+#include "xray_utils.h"
+#include <cstddef>
+#include <cstdint>
+#include <sys/mman.h>
+
+namespace __xray {
+
+// We implement our own memory allocation routine which will bypass the
+// internal allocator. This allows us to manage the memory directly, using
+// mmap'ed memory to back the allocators.
+template <class T> T *allocate() XRAY_NEVER_INSTRUMENT {
+ uptr RoundedSize = RoundUpTo(sizeof(T), GetPageSizeCached());
+#if SANITIZER_FUCHSIA
+ zx_handle_t Vmo;
+ zx_status_t Status = _zx_vmo_create(RoundedSize, 0, &Vmo);
+ if (Status != ZX_OK) {
+ if (Verbosity())
+ Report("XRay Profiling: Failed to create VMO of size %zu: %s\n",
+ sizeof(T), _zx_status_get_string(Status));
+ return nullptr;
+ }
+ uintptr_t B;
+ Status =
+ _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, 0,
+ Vmo, 0, sizeof(T), &B);
+ _zx_handle_close(Vmo);
+ if (Status != ZX_OK) {
+ if (Verbosity())
+ Report("XRay Profiling: Failed to map VMAR of size %zu: %s\n", sizeof(T),
+ _zx_status_get_string(Status));
+ return nullptr;
+ }
+ return reinterpret_cast<T *>(B);
+#else
+ uptr B = internal_mmap(NULL, RoundedSize, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ int ErrNo = 0;
+ if (UNLIKELY(internal_iserror(B, &ErrNo))) {
+ if (Verbosity())
+ Report(
+ "XRay Profiling: Failed to allocate memory of size %zu; Error = %d.\n",
+ RoundedSize, ErrNo);
+ return nullptr;
+ }
+#endif
+ return reinterpret_cast<T *>(B);
+}
+
+template <class T> void deallocate(T *B) XRAY_NEVER_INSTRUMENT {
+ if (B == nullptr)
+ return;
+ uptr RoundedSize = RoundUpTo(sizeof(T), GetPageSizeCached());
+#if SANITIZER_FUCHSIA
+ _zx_vmar_unmap(_zx_vmar_root_self(), reinterpret_cast<uintptr_t>(B),
+ RoundedSize);
+#else
+ internal_munmap(B, RoundedSize);
+#endif
+}
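+// A sketch of the intended pairing (ProfileNode is hypothetical):
+// auto *P = allocate<ProfileNode>(); // page-rounded, zero-initialized
+// ...
+// deallocate(P); // unmaps the same rounded size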
+
+template <class T = unsigned char>
+T *allocateBuffer(size_t S) XRAY_NEVER_INSTRUMENT {
+ uptr RoundedSize = RoundUpTo(S * sizeof(T), GetPageSizeCached());
+#if SANITIZER_FUCHSIA
+ zx_handle_t Vmo;
+ zx_status_t Status = _zx_vmo_create(RoundedSize, 0, &Vmo);
+ if (Status != ZX_OK) {
+ if (Verbosity())
+ Report("XRay Profiling: Failed to create VMO of size %zu: %s\n", S,
+ _zx_status_get_string(Status));
+ return nullptr;
+ }
+ uintptr_t B;
+ Status = _zx_vmar_map(_zx_vmar_root_self(),
+ ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, 0, Vmo, 0, S, &B);
+ _zx_handle_close(Vmo);
+ if (Status != ZX_OK) {
+ if (Verbosity())
+ Report("XRay Profiling: Failed to map VMAR of size %zu: %s\n", S,
+ _zx_status_get_string(Status));
+ return nullptr;
+ }
+#else
+ uptr B = internal_mmap(NULL, RoundedSize, PROT_READ | PROT_WRITE,
+ MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+ int ErrNo = 0;
+ if (UNLIKELY(internal_iserror(B, &ErrNo))) {
+ if (Verbosity())
+ Report(
+ "XRay Profiling: Failed to allocate memory of size %zu; Error = %d.\n",
+ RoundedSize, ErrNo);
+ return nullptr;
+ }
+#endif
+ return reinterpret_cast<T *>(B);
+}
+
+template <class T> void deallocateBuffer(T *B, size_t S) XRAY_NEVER_INSTRUMENT {
+ if (B == nullptr)
+ return;
+ uptr RoundedSize = RoundUpTo(S * sizeof(T), GetPageSizeCached());
+#if SANITIZER_FUCHSIA
+ _zx_vmar_unmap(_zx_vmar_root_self(), reinterpret_cast<uintptr_t>(B),
+ RoundedSize);
+#else
+ internal_munmap(B, RoundedSize);
+#endif
+}
+
+template <class T, class... U>
+T *initArray(size_t N, U &&... Us) XRAY_NEVER_INSTRUMENT {
+ auto A = allocateBuffer<T>(N);
+ if (A != nullptr)
+ while (N > 0)
+ new (A + (--N)) T(std::forward<U>(Us)...);
+ return A;
+}
+
+/// The Allocator type hands out fixed-sized chunks of memory that are
+/// cache-line aligned and sized. This is useful for placement of
+/// performance-sensitive data in memory that's frequently accessed. The
+/// allocator also self-limits the peak memory usage to a dynamically defined
+/// maximum.
+///
+/// N is the lower-bound size of the block of memory to return from the
+/// allocation function. N is used to compute the size of a block, which is a
+/// multiple of the cache-line size. We compute the size of a block by
+/// determining how many cache lines' worth of memory are required to subsume N.
+///
+/// The Allocator instance will manage its own memory acquired through mmap.
+/// This severely constrains the platforms on which this can be used to POSIX
+/// systems where mmap semantics are well-defined.
+///
+/// FIXME: Isolate the lower-level memory management to a different abstraction
+/// that can be platform-specific.
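+///
+/// For example (illustrative only, assuming a 64-byte cache line),
+/// Allocator<48> hands out 64-byte blocks:
+/// Allocator<48> A(1 << 20); // 1 MiB budget, enough for 16384 blocks
+/// auto B = A.Allocate(); // B.Data is cache-line aligned, or null once
+/// // the budget is exhausted.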
+template <size_t N> struct Allocator {
+ // The Allocator returns memory as Block instances.
+ struct Block {
+ /// Compute the minimum cache-line size multiple that is >= N.
+ static constexpr auto Size = nearest_boundary(N, kCacheLineSize);
+ void *Data;
+ };
+
+private:
+ size_t MaxMemory{0};
+ unsigned char *BackingStore = nullptr;
+ unsigned char *AlignedNextBlock = nullptr;
+ size_t AllocatedBlocks = 0;
+ bool Owned;
+ SpinMutex Mutex{};
+
+ void *Alloc() XRAY_NEVER_INSTRUMENT {
+ SpinMutexLock Lock(&Mutex);
+ if (UNLIKELY(BackingStore == nullptr)) {
+ BackingStore = allocateBuffer(MaxMemory);
+ if (BackingStore == nullptr) {
+ if (Verbosity())
+ Report("XRay Profiling: Failed to allocate memory for allocator.\n");
+ return nullptr;
+ }
+
+ AlignedNextBlock = BackingStore;
+
+ // Ensure that NextBlock is aligned appropriately.
+ auto BackingStoreNum = reinterpret_cast<uintptr_t>(BackingStore);
+ auto AlignedNextBlockNum = nearest_boundary(
+ reinterpret_cast<uintptr_t>(AlignedNextBlock), kCacheLineSize);
+ if (diff(AlignedNextBlockNum, BackingStoreNum) > ptrdiff_t(MaxMemory)) {
+ deallocateBuffer(BackingStore, MaxMemory);
+ AlignedNextBlock = BackingStore = nullptr;
+ if (Verbosity())
+ Report("XRay Profiling: Cannot obtain enough memory from "
+ "preallocated region.\n");
+ return nullptr;
+ }
+
+ AlignedNextBlock = reinterpret_cast<unsigned char *>(AlignedNextBlockNum);
+
+ // Assert that AlignedNextBlock is cache-line aligned.
+ DCHECK_EQ(reinterpret_cast<uintptr_t>(AlignedNextBlock) % kCacheLineSize,
+ 0);
+ }
+
+ if (((AllocatedBlocks + 1) * Block::Size) > MaxMemory)
+ return nullptr;
+
+ // Align the pointer we'd like to return to an appropriate alignment, then
+ // advance the pointer from where to start allocations.
+ void *Result = AlignedNextBlock;
+ AlignedNextBlock =
+ reinterpret_cast<unsigned char *>(AlignedNextBlock) + Block::Size;
+ ++AllocatedBlocks;
+ return Result;
+ }
+
+public:
+ explicit Allocator(size_t M) XRAY_NEVER_INSTRUMENT
+ : MaxMemory(RoundUpTo(M, kCacheLineSize)),
+ BackingStore(nullptr),
+ AlignedNextBlock(nullptr),
+ AllocatedBlocks(0),
+ Owned(true),
+ Mutex() {}
+
+ explicit Allocator(void *P, size_t M) XRAY_NEVER_INSTRUMENT
+ : MaxMemory(M),
+ BackingStore(reinterpret_cast<unsigned char *>(P)),
+ AlignedNextBlock(reinterpret_cast<unsigned char *>(P)),
+ AllocatedBlocks(0),
+ Owned(false),
+ Mutex() {}
+
+ Allocator(const Allocator &) = delete;
+ Allocator &operator=(const Allocator &) = delete;
+
+ Allocator(Allocator &&O) XRAY_NEVER_INSTRUMENT {
+ SpinMutexLock L0(&Mutex);
+ SpinMutexLock L1(&O.Mutex);
+ MaxMemory = O.MaxMemory;
+ O.MaxMemory = 0;
+ BackingStore = O.BackingStore;
+ O.BackingStore = nullptr;
+ AlignedNextBlock = O.AlignedNextBlock;
+ O.AlignedNextBlock = nullptr;
+ AllocatedBlocks = O.AllocatedBlocks;
+ O.AllocatedBlocks = 0;
+ Owned = O.Owned;
+ O.Owned = false;
+ }
+
+ Allocator &operator=(Allocator &&O) XRAY_NEVER_INSTRUMENT {
+ SpinMutexLock L0(&Mutex);
+ SpinMutexLock L1(&O.Mutex);
+ // Release our existing backing store, using its original size, before
+ // adopting O's state.
+ if (Owned && BackingStore != nullptr)
+ deallocateBuffer(BackingStore, MaxMemory);
+ MaxMemory = O.MaxMemory;
+ O.MaxMemory = 0;
+ BackingStore = O.BackingStore;
+ O.BackingStore = nullptr;
+ AlignedNextBlock = O.AlignedNextBlock;
+ O.AlignedNextBlock = nullptr;
+ AllocatedBlocks = O.AllocatedBlocks;
+ O.AllocatedBlocks = 0;
+ Owned = O.Owned;
+ O.Owned = false;
+ return *this;
+ }
+
+ Block Allocate() XRAY_NEVER_INSTRUMENT { return {Alloc()}; }
+
+ ~Allocator() NOEXCEPT XRAY_NEVER_INSTRUMENT {
+ if (Owned && BackingStore != nullptr) {
+ deallocateBuffer(BackingStore, MaxMemory);
+ }
+ }
+};
+
+} // namespace __xray
+
+#endif // XRAY_ALLOCATOR_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_allocator.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_arm.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_arm.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_arm.cc (revision 351984)
@@ -0,0 +1,164 @@
+//===-- xray_arm.cc ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+// Implementation of ARM-specific routines (32-bit).
+//
+//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_common.h"
+#include "xray_defs.h"
+#include "xray_interface_internal.h"
+#include <atomic>
+#include <cassert>
+
+extern "C" void __clear_cache(void *start, void *end);
+
+namespace __xray {
+
+// The machine codes for some instructions used in runtime patching.
+enum class PatchOpcodes : uint32_t {
+ PO_PushR0Lr = 0xE92D4001, // PUSH {r0, lr}
+ PO_BlxIp = 0xE12FFF3C, // BLX ip
+ PO_PopR0Lr = 0xE8BD4001, // POP {r0, lr}
+ PO_B20 = 0xEA000005 // B #20
+};
+
+// 0xUUUUWXYZ -> 0x000W0XYZ
+inline static uint32_t getMovwMask(const uint32_t Value) XRAY_NEVER_INSTRUMENT {
+ return (Value & 0xfff) | ((Value & 0xf000) << 4);
+}
+
+// 0xWXYZUUUU -> 0x000W0XYZ
+inline static uint32_t getMovtMask(const uint32_t Value) XRAY_NEVER_INSTRUMENT {
+ return getMovwMask(Value >> 16);
+}
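+// For example, getMovwMask(0x12345678) == 0x00050678 and
+// getMovtMask(0x12345678) == 0x00010234, matching the MOVW/MOVT immediate
+// encodings used below.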
+
+// Writes the following instructions:
+// MOVW R<regNo>, #<lower 16 bits of the |Value|>
+// MOVT R<regNo>, #<higher 16 bits of the |Value|>
+inline static uint32_t *
+write32bitLoadReg(uint8_t regNo, uint32_t *Address,
+ const uint32_t Value) XRAY_NEVER_INSTRUMENT {
+ // This is a fatal error: we cannot just report it and continue execution.
+ assert(regNo <= 15 && "Register number must be 0 to 15.");
+ // MOVW R, #0xWXYZ in machine code is 0xE30WRXYZ
+ *Address = (0xE3000000 | (uint32_t(regNo) << 12) | getMovwMask(Value));
+ Address++;
+ // MOVT R, #0xWXYZ in machine code is 0xE34WRXYZ
+ *Address = (0xE3400000 | (uint32_t(regNo) << 12) | getMovtMask(Value));
+ return Address + 1;
+}
+
+// Writes the following instructions:
+// MOVW r0, #<lower 16 bits of the |Value|>
+// MOVT r0, #<higher 16 bits of the |Value|>
+inline static uint32_t *
+write32bitLoadR0(uint32_t *Address,
+ const uint32_t Value) XRAY_NEVER_INSTRUMENT {
+ return write32bitLoadReg(0, Address, Value);
+}
+
+// Writes the following instructions:
+// MOVW ip, #<lower 16 bits of the |Value|>
+// MOVT ip, #<higher 16 bits of the |Value|>
+inline static uint32_t *
+write32bitLoadIP(uint32_t *Address,
+ const uint32_t Value) XRAY_NEVER_INSTRUMENT {
+ return write32bitLoadReg(12, Address, Value);
+}
+
+inline static bool patchSled(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled,
+ void (*TracingHook)()) XRAY_NEVER_INSTRUMENT {
+ // When |Enable| == true, we replace the following compile-time stub
+ // (sled):
+ //
+ // xray_sled_n:
+ // B #20
+ // 6 NOPs (24 bytes)
+ //
+ // With the following runtime patch:
+ //
+ // xray_sled_n:
+ // PUSH {r0, lr}
+ // MOVW r0, #<lower 16 bits of function ID>
+ // MOVT r0, #<higher 16 bits of function ID>
+ // MOVW ip, #<lower 16 bits of address of TracingHook>
+ // MOVT ip, #<higher 16 bits of address of TracingHook>
+ // BLX ip
+ // POP {r0, lr}
+ //
+ // Replacement of the first 4-byte instruction should be the last and atomic
+ // operation, so that the user code which reaches the sled concurrently
+ // either jumps over the whole sled, or executes the whole sled when the
+ // latter is ready.
+ //
+ // When |Enable| == false, we set the first instruction in the sled back to
+ // B #20
+
+ uint32_t *FirstAddress = reinterpret_cast<uint32_t *>(Sled.Address);
+ uint32_t *CurAddress = FirstAddress + 1;
+ if (Enable) {
+ CurAddress =
+ write32bitLoadR0(CurAddress, reinterpret_cast<uint32_t>(FuncId));
+ CurAddress =
+ write32bitLoadIP(CurAddress, reinterpret_cast<uint32_t>(TracingHook));
+ *CurAddress = uint32_t(PatchOpcodes::PO_BlxIp);
+ CurAddress++;
+ *CurAddress = uint32_t(PatchOpcodes::PO_PopR0Lr);
+ CurAddress++;
+ std::atomic_store_explicit(
+ reinterpret_cast<std::atomic<uint32_t> *>(FirstAddress),
+ uint32_t(PatchOpcodes::PO_PushR0Lr), std::memory_order_release);
+ } else {
+ std::atomic_store_explicit(
+ reinterpret_cast<std::atomic<uint32_t> *>(FirstAddress),
+ uint32_t(PatchOpcodes::PO_B20), std::memory_order_release);
+ }
+ __clear_cache(reinterpret_cast<char *>(FirstAddress),
+ reinterpret_cast<char *>(CurAddress));
+ return true;
+}
+
+bool patchFunctionEntry(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled,
+ void (*Trampoline)()) XRAY_NEVER_INSTRUMENT {
+ return patchSled(Enable, FuncId, Sled, Trampoline);
+}
+
+bool patchFunctionExit(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
+ return patchSled(Enable, FuncId, Sled, __xray_FunctionExit);
+}
+
+bool patchFunctionTailExit(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
+ return patchSled(Enable, FuncId, Sled, __xray_FunctionTailExit);
+}
+
+bool patchCustomEvent(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled)
+ XRAY_NEVER_INSTRUMENT { // FIXME: Implement in arm?
+ return false;
+}
+
+bool patchTypedEvent(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
+ // FIXME: Implement in arm?
+ return false;
+}
+
+// FIXME: Maybe implement this better?
+bool probeRequiredCPUFeatures() XRAY_NEVER_INSTRUMENT { return true; }
+
+} // namespace __xray
+
+extern "C" void __xray_ArgLoggerEntry() XRAY_NEVER_INSTRUMENT {
+ // FIXME: this will have to be implemented in the trampoline assembly file
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_arm.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_basic_flags.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_basic_flags.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_basic_flags.cc (revision 351984)
@@ -0,0 +1,49 @@
+//===-- xray_basic_flags.cc -------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+// XRay Basic flag parsing logic.
+//===----------------------------------------------------------------------===//
+
+#include "xray_basic_flags.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_flag_parser.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "xray_defs.h"
+
+using namespace __sanitizer;
+
+namespace __xray {
+
+/// Use via basicFlags().
+BasicFlags xray_basic_flags_dont_use_directly;
+
+void BasicFlags::setDefaults() XRAY_NEVER_INSTRUMENT {
+#define XRAY_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
+#include "xray_basic_flags.inc"
+#undef XRAY_FLAG
+}
+
+void registerXRayBasicFlags(FlagParser *P,
+ BasicFlags *F) XRAY_NEVER_INSTRUMENT {
+#define XRAY_FLAG(Type, Name, DefaultValue, Description) \
+ RegisterFlag(P, #Name, Description, &F->Name);
+#include "xray_basic_flags.inc"
+#undef XRAY_FLAG
+}
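+// For instance, the thread_buffer_size entry in xray_basic_flags.inc expands
+// above to RegisterFlag(P, "thread_buffer_size", <description string>,
+// &F->thread_buffer_size), and in setDefaults() to thread_buffer_size = 1024;.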
+
+const char *useCompilerDefinedBasicFlags() XRAY_NEVER_INSTRUMENT {
+#ifdef XRAY_BASIC_OPTIONS
+ return SANITIZER_STRINGIFY(XRAY_BASIC_OPTIONS);
+#else
+ return "";
+#endif
+}
+
+} // namespace __xray
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_basic_flags.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_basic_flags.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_basic_flags.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_basic_flags.h (revision 351984)
@@ -0,0 +1,37 @@
+//===-- xray_basic_flags.h -------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+// XRay Basic Mode runtime flags.
+//===----------------------------------------------------------------------===//
+
+#ifndef XRAY_BASIC_FLAGS_H
+#define XRAY_BASIC_FLAGS_H
+
+#include "sanitizer_common/sanitizer_flag_parser.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+
+namespace __xray {
+
+struct BasicFlags {
+#define XRAY_FLAG(Type, Name, DefaultValue, Description) Type Name;
+#include "xray_basic_flags.inc"
+#undef XRAY_FLAG
+
+ void setDefaults();
+};
+
+extern BasicFlags xray_basic_flags_dont_use_directly;
+extern void registerXRayBasicFlags(FlagParser *P, BasicFlags *F);
+const char *useCompilerDefinedBasicFlags();
+inline BasicFlags *basicFlags() { return &xray_basic_flags_dont_use_directly; }
+
+} // namespace __xray
+
+#endif // XRAY_BASIC_FLAGS_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_basic_flags.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_basic_flags.inc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_basic_flags.inc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_basic_flags.inc (revision 351984)
@@ -0,0 +1,23 @@
+//===-- xray_basic_flags.inc ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// XRay runtime flags.
+//
+//===----------------------------------------------------------------------===//
+#ifndef XRAY_FLAG
+#error "Define XRAY_FLAG prior to including this file!"
+#endif
+
+XRAY_FLAG(int, func_duration_threshold_us, 5,
+ "Basic logging will try to skip functions that execute for fewer "
+ "microseconds than this threshold.")
+XRAY_FLAG(int, max_stack_depth, 64,
+ "Basic logging will keep track of at most this deep a call stack, "
+ "any more and the recordings will be dropped.")
+XRAY_FLAG(int, thread_buffer_size, 1024,
+ "The number of entries to keep on a per-thread buffer.")
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_basic_flags.inc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_basic_logging.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_basic_logging.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_basic_logging.cc (revision 351984)
@@ -0,0 +1,515 @@
+//===-- xray_basic_logging.cc -----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+// Implementation of a simple in-memory log of XRay events. This defines a
+// logging function that's compatible with the XRay handler interface, and
+// routines for exporting data to files.
+//
+//===----------------------------------------------------------------------===//
+
+#include <errno.h>
+#include <fcntl.h>
+#include <pthread.h>
+#include <sys/stat.h>
+#if SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_OPENBSD || SANITIZER_MAC
+#include <sys/syscall.h>
+#endif
+#include <sys/types.h>
+#include <time.h>
+#include <unistd.h>
+
+#include "sanitizer_common/sanitizer_allocator_internal.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "xray/xray_records.h"
+#include "xray_recursion_guard.h"
+#include "xray_basic_flags.h"
+#include "xray_basic_logging.h"
+#include "xray_defs.h"
+#include "xray_flags.h"
+#include "xray_interface_internal.h"
+#include "xray_tsc.h"
+#include "xray_utils.h"
+
+namespace __xray {
+
+static SpinMutex LogMutex;
+
+namespace {
+// We use elements of this type to record the entry TSC of every function ID we
+// see as we're tracing a particular thread's execution.
+struct alignas(16) StackEntry {
+ int32_t FuncId;
+ uint16_t Type;
+ uint8_t CPU;
+ uint8_t Padding;
+ uint64_t TSC;
+};
+
+static_assert(sizeof(StackEntry) == 16, "Wrong size for StackEntry");
+
+struct XRAY_TLS_ALIGNAS(64) ThreadLocalData {
+ void *InMemoryBuffer = nullptr;
+ size_t BufferSize = 0;
+ size_t BufferOffset = 0;
+ void *ShadowStack = nullptr;
+ size_t StackSize = 0;
+ size_t StackEntries = 0;
+ __xray::LogWriter *LogWriter = nullptr;
+};
+
+struct BasicLoggingOptions {
+ int DurationFilterMicros = 0;
+ size_t MaxStackDepth = 0;
+ size_t ThreadBufferSize = 0;
+};
+} // namespace
+
+static pthread_key_t PThreadKey;
+
+static atomic_uint8_t BasicInitialized{0};
+
+struct BasicLoggingOptions GlobalOptions;
+
+thread_local atomic_uint8_t Guard{0};
+
+static atomic_uint8_t UseRealTSC{0};
+static atomic_uint64_t ThresholdTicks{0};
+static atomic_uint64_t TicksPerSec{0};
+static atomic_uint64_t CycleFrequency{NanosecondsPerSecond};
+
+static LogWriter *getLog() XRAY_NEVER_INSTRUMENT {
+ LogWriter* LW = LogWriter::Open();
+ if (LW == nullptr)
+ return LW;
+
+ static pthread_once_t DetectOnce = PTHREAD_ONCE_INIT;
+ pthread_once(&DetectOnce, +[] {
+ if (atomic_load(&UseRealTSC, memory_order_acquire))
+ atomic_store(&CycleFrequency, getTSCFrequency(), memory_order_release);
+ });
+
+ // Since we're here, we get to write the header. We set it up so that the
+ // header will only be written once, at the start, and let the logging
+ // threads do writes which just append.
+ XRayFileHeader Header;
+ // Version 2 includes tail exit records.
+ // Version 3 includes pid inside records.
+ Header.Version = 3;
+ Header.Type = FileTypes::NAIVE_LOG;
+ Header.CycleFrequency = atomic_load(&CycleFrequency, memory_order_acquire);
+
+ // FIXME: Actually check whether we have 'constant_tsc' and 'nonstop_tsc'
+ // before setting the values in the header.
+ Header.ConstantTSC = 1;
+ Header.NonstopTSC = 1;
+ LW->WriteAll(reinterpret_cast<char *>(&Header),
+ reinterpret_cast<char *>(&Header) + sizeof(Header));
+ return LW;
+}
+
+static LogWriter *getGlobalLog() XRAY_NEVER_INSTRUMENT {
+ static pthread_once_t OnceInit = PTHREAD_ONCE_INIT;
+ static LogWriter *LW = nullptr;
+ pthread_once(&OnceInit, +[] { LW = getLog(); });
+ return LW;
+}
+
+static ThreadLocalData &getThreadLocalData() XRAY_NEVER_INSTRUMENT {
+ thread_local ThreadLocalData TLD;
+ thread_local bool UNUSED TOnce = [] {
+ if (GlobalOptions.ThreadBufferSize == 0) {
+ if (Verbosity())
+ Report("Not initializing TLD since ThreadBufferSize == 0.\n");
+ return false;
+ }
+ pthread_setspecific(PThreadKey, &TLD);
+ TLD.LogWriter = getGlobalLog();
+ TLD.InMemoryBuffer = reinterpret_cast<XRayRecord *>(
+ InternalAlloc(sizeof(XRayRecord) * GlobalOptions.ThreadBufferSize,
+ nullptr, alignof(XRayRecord)));
+ TLD.BufferSize = GlobalOptions.ThreadBufferSize;
+ TLD.BufferOffset = 0;
+ if (GlobalOptions.MaxStackDepth == 0) {
+ if (Verbosity())
+ Report("Not initializing the ShadowStack since MaxStackDepth == 0.\n");
+ TLD.StackSize = 0;
+ TLD.StackEntries = 0;
+ TLD.ShadowStack = nullptr;
+ return false;
+ }
+ TLD.ShadowStack = reinterpret_cast<StackEntry *>(
+ InternalAlloc(sizeof(StackEntry) * GlobalOptions.MaxStackDepth, nullptr,
+ alignof(StackEntry)));
+ TLD.StackSize = GlobalOptions.MaxStackDepth;
+ TLD.StackEntries = 0;
+ return false;
+ }();
+ return TLD;
+}
+
+template <class RDTSC>
+void InMemoryRawLog(int32_t FuncId, XRayEntryType Type,
+ RDTSC ReadTSC) XRAY_NEVER_INSTRUMENT {
+ auto &TLD = getThreadLocalData();
+ LogWriter *LW = getGlobalLog();
+ if (LW == nullptr)
+ return;
+
+ // Use a simple recursion guard, to handle cases where we're already logging
+ // and for one reason or another, this function gets called again in the same
+ // thread.
+ RecursionGuard G(Guard);
+ if (!G)
+ return;
+
+ uint8_t CPU = 0;
+ uint64_t TSC = ReadTSC(CPU);
+
+ switch (Type) {
+ case XRayEntryType::ENTRY:
+ case XRayEntryType::LOG_ARGS_ENTRY: {
+ // Short circuit if we've reached the maximum depth of the stack.
+ if (TLD.StackEntries++ >= TLD.StackSize)
+ return;
+
+ // When we encounter an entry event, we keep track of the TSC and the CPU,
+ // and put it in the stack.
+ StackEntry E;
+ E.FuncId = FuncId;
+ E.CPU = CPU;
+ E.Type = Type;
+ E.TSC = TSC;
+ auto StackEntryPtr = static_cast<char *>(TLD.ShadowStack) +
+ (sizeof(StackEntry) * (TLD.StackEntries - 1));
+ internal_memcpy(StackEntryPtr, &E, sizeof(StackEntry));
+ break;
+ }
+ case XRayEntryType::EXIT:
+ case XRayEntryType::TAIL: {
+ if (TLD.StackEntries == 0)
+ break;
+
+ if (--TLD.StackEntries >= TLD.StackSize)
+ return;
+
+ // When we encounter an exit event, we check whether all the following are
+ // true:
+ //
+ // - The Function ID is the same as the most recent entry in the stack.
+ // - The CPU is the same as the most recent entry in the stack.
+ // - The Delta of the TSCs is less than the threshold amount of time we're
+ // looking to record.
+ //
+    // If all of these conditions are true, we pop the stack, skip writing a
+    // record, and move the record offset back.
+ StackEntry StackTop;
+ auto StackEntryPtr = static_cast<char *>(TLD.ShadowStack) +
+ (sizeof(StackEntry) * TLD.StackEntries);
+ internal_memcpy(&StackTop, StackEntryPtr, sizeof(StackEntry));
+ if (StackTop.FuncId == FuncId && StackTop.CPU == CPU &&
+ StackTop.TSC < TSC) {
+ auto Delta = TSC - StackTop.TSC;
+ if (Delta < atomic_load(&ThresholdTicks, memory_order_relaxed)) {
+ DCHECK(TLD.BufferOffset > 0);
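+        // Note: a LOG_ARGS_ENTRY wrote two records (the function entry and
+        // its argument payload), so we rewind the offset by two in that case.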
+ TLD.BufferOffset -= StackTop.Type == XRayEntryType::ENTRY ? 1 : 2;
+ return;
+ }
+ }
+ break;
+ }
+ default:
+ // Should be unreachable.
+ DCHECK(false && "Unsupported XRayEntryType encountered.");
+ break;
+ }
+
+  // At this point the event has survived any filtering above, so write out a
+  // normal record for it.
+ XRayRecord R;
+ R.RecordType = RecordTypes::NORMAL;
+ R.CPU = CPU;
+ R.TSC = TSC;
+ R.TId = GetTid();
+ R.PId = internal_getpid();
+ R.Type = Type;
+ R.FuncId = FuncId;
+ auto FirstEntry = reinterpret_cast<XRayRecord *>(TLD.InMemoryBuffer);
+ internal_memcpy(FirstEntry + TLD.BufferOffset, &R, sizeof(R));
+ if (++TLD.BufferOffset == TLD.BufferSize) {
+ SpinMutexLock Lock(&LogMutex);
+ LW->WriteAll(reinterpret_cast<char *>(FirstEntry),
+ reinterpret_cast<char *>(FirstEntry + TLD.BufferOffset));
+ TLD.BufferOffset = 0;
+ TLD.StackEntries = 0;
+ }
+}
+
+template <class RDTSC>
+void InMemoryRawLogWithArg(int32_t FuncId, XRayEntryType Type, uint64_t Arg1,
+ RDTSC ReadTSC) XRAY_NEVER_INSTRUMENT {
+ auto &TLD = getThreadLocalData();
+ auto FirstEntry =
+ reinterpret_cast<XRayArgPayload *>(TLD.InMemoryBuffer);
+ const auto &BuffLen = TLD.BufferSize;
+ LogWriter *LW = getGlobalLog();
+ if (LW == nullptr)
+ return;
+
+ // First we check whether there's enough space to write the data consecutively
+ // in the thread-local buffer. If not, we first flush the buffer before
+ // attempting to write the two records that must be consecutive.
+ if (TLD.BufferOffset + 2 > BuffLen) {
+ SpinMutexLock Lock(&LogMutex);
+ LW->WriteAll(reinterpret_cast<char *>(FirstEntry),
+ reinterpret_cast<char *>(FirstEntry + TLD.BufferOffset));
+ TLD.BufferOffset = 0;
+ TLD.StackEntries = 0;
+ }
+
+ // Then we write the "we have an argument" record.
+ InMemoryRawLog(FuncId, Type, ReadTSC);
+
+ RecursionGuard G(Guard);
+ if (!G)
+ return;
+
+ // And, from here on write the arg payload.
+ XRayArgPayload R;
+ R.RecordType = RecordTypes::ARG_PAYLOAD;
+ R.FuncId = FuncId;
+ R.TId = GetTid();
+ R.PId = internal_getpid();
+ R.Arg = Arg1;
+ internal_memcpy(FirstEntry + TLD.BufferOffset, &R, sizeof(R));
+ if (++TLD.BufferOffset == BuffLen) {
+ SpinMutexLock Lock(&LogMutex);
+ LW->WriteAll(reinterpret_cast<char *>(FirstEntry),
+ reinterpret_cast<char *>(FirstEntry + TLD.BufferOffset));
+ TLD.BufferOffset = 0;
+ TLD.StackEntries = 0;
+ }
+}
+
+void basicLoggingHandleArg0RealTSC(int32_t FuncId,
+ XRayEntryType Type) XRAY_NEVER_INSTRUMENT {
+ InMemoryRawLog(FuncId, Type, readTSC);
+}
+
+void basicLoggingHandleArg0EmulateTSC(int32_t FuncId, XRayEntryType Type)
+ XRAY_NEVER_INSTRUMENT {
+ InMemoryRawLog(FuncId, Type, [](uint8_t &CPU) XRAY_NEVER_INSTRUMENT {
+ timespec TS;
+ int result = clock_gettime(CLOCK_REALTIME, &TS);
+ if (result != 0) {
+ Report("clock_gettimg(2) return %d, errno=%d.", result, int(errno));
+ TS = {0, 0};
+ }
+ CPU = 0;
+ return TS.tv_sec * NanosecondsPerSecond + TS.tv_nsec;
+ });
+}
+
+void basicLoggingHandleArg1RealTSC(int32_t FuncId, XRayEntryType Type,
+ uint64_t Arg1) XRAY_NEVER_INSTRUMENT {
+ InMemoryRawLogWithArg(FuncId, Type, Arg1, readTSC);
+}
+
+void basicLoggingHandleArg1EmulateTSC(int32_t FuncId, XRayEntryType Type,
+ uint64_t Arg1) XRAY_NEVER_INSTRUMENT {
+ InMemoryRawLogWithArg(
+ FuncId, Type, Arg1, [](uint8_t &CPU) XRAY_NEVER_INSTRUMENT {
+ timespec TS;
+ int result = clock_gettime(CLOCK_REALTIME, &TS);
+ if (result != 0) {
+ Report("clock_gettimg(2) return %d, errno=%d.", result, int(errno));
+ TS = {0, 0};
+ }
+ CPU = 0;
+ return TS.tv_sec * NanosecondsPerSecond + TS.tv_nsec;
+ });
+}
+
+static void TLDDestructor(void *P) XRAY_NEVER_INSTRUMENT {
+ ThreadLocalData &TLD = *reinterpret_cast<ThreadLocalData *>(P);
+ auto ExitGuard = at_scope_exit([&TLD] {
+ // Clean up dynamic resources.
+ if (TLD.InMemoryBuffer)
+ InternalFree(TLD.InMemoryBuffer);
+ if (TLD.ShadowStack)
+ InternalFree(TLD.ShadowStack);
+ if (Verbosity())
+ Report("Cleaned up log for TID: %d\n", GetTid());
+ });
+
+ if (TLD.LogWriter == nullptr || TLD.BufferOffset == 0) {
+ if (Verbosity())
+ Report("Skipping buffer for TID: %d; Offset = %llu\n", GetTid(),
+ TLD.BufferOffset);
+ return;
+ }
+
+ {
+ SpinMutexLock L(&LogMutex);
+ TLD.LogWriter->WriteAll(reinterpret_cast<char *>(TLD.InMemoryBuffer),
+ reinterpret_cast<char *>(TLD.InMemoryBuffer) +
+ (sizeof(XRayRecord) * TLD.BufferOffset));
+ }
+
+  // Because this thread's exit could be the last one trying to write to the
+  // file, and because we're not able to close out the file properly, we
+  // flush instead and hope that the pending writes make it out before the
+  // thread exits.
+ TLD.LogWriter->Flush();
+}
+
+XRayLogInitStatus basicLoggingInit(UNUSED size_t BufferSize,
+ UNUSED size_t BufferMax, void *Options,
+ size_t OptionsSize) XRAY_NEVER_INSTRUMENT {
+ uint8_t Expected = 0;
+ if (!atomic_compare_exchange_strong(&BasicInitialized, &Expected, 1,
+ memory_order_acq_rel)) {
+ if (Verbosity())
+ Report("Basic logging already initialized.\n");
+ return XRayLogInitStatus::XRAY_LOG_INITIALIZED;
+ }
+
+ static pthread_once_t OnceInit = PTHREAD_ONCE_INIT;
+ pthread_once(&OnceInit, +[] {
+ pthread_key_create(&PThreadKey, TLDDestructor);
+ atomic_store(&UseRealTSC, probeRequiredCPUFeatures(), memory_order_release);
+ // Initialize the global TicksPerSec value.
+ atomic_store(&TicksPerSec,
+ probeRequiredCPUFeatures() ? getTSCFrequency()
+ : NanosecondsPerSecond,
+ memory_order_release);
+ if (!atomic_load(&UseRealTSC, memory_order_relaxed) && Verbosity())
+ Report("WARNING: Required CPU features missing for XRay instrumentation, "
+ "using emulation instead.\n");
+ });
+
+ FlagParser P;
+ BasicFlags F;
+ F.setDefaults();
+ registerXRayBasicFlags(&P, &F);
+ P.ParseString(useCompilerDefinedBasicFlags());
+ auto *EnvOpts = GetEnv("XRAY_BASIC_OPTIONS");
+ if (EnvOpts == nullptr)
+ EnvOpts = "";
+
+ P.ParseString(EnvOpts);
+
+ // If XRAY_BASIC_OPTIONS was not defined, then we use the deprecated options
+ // set through XRAY_OPTIONS instead.
+ if (internal_strlen(EnvOpts) == 0) {
+ F.func_duration_threshold_us =
+ flags()->xray_naive_log_func_duration_threshold_us;
+ F.max_stack_depth = flags()->xray_naive_log_max_stack_depth;
+ F.thread_buffer_size = flags()->xray_naive_log_thread_buffer_size;
+ }
+
+ P.ParseString(static_cast<const char *>(Options));
+ GlobalOptions.ThreadBufferSize = F.thread_buffer_size;
+ GlobalOptions.DurationFilterMicros = F.func_duration_threshold_us;
+ GlobalOptions.MaxStackDepth = F.max_stack_depth;
+ *basicFlags() = F;
+
+ atomic_store(&ThresholdTicks,
+ atomic_load(&TicksPerSec, memory_order_acquire) *
+ GlobalOptions.DurationFilterMicros / 1000000,
+ memory_order_release);
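+  // Worked example (illustrative): with TicksPerSec == 3,000,000,000 (a 3 GHz
+  // TSC) and DurationFilterMicros == 5, ThresholdTicks == 3e9 * 5 / 1e6 ==
+  // 15,000 ticks.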
+ __xray_set_handler_arg1(atomic_load(&UseRealTSC, memory_order_acquire)
+ ? basicLoggingHandleArg1RealTSC
+ : basicLoggingHandleArg1EmulateTSC);
+ __xray_set_handler(atomic_load(&UseRealTSC, memory_order_acquire)
+ ? basicLoggingHandleArg0RealTSC
+ : basicLoggingHandleArg0EmulateTSC);
+
+ // TODO: Implement custom event and typed event handling support in Basic
+ // Mode.
+ __xray_remove_customevent_handler();
+ __xray_remove_typedevent_handler();
+
+ return XRayLogInitStatus::XRAY_LOG_INITIALIZED;
+}
+
+XRayLogInitStatus basicLoggingFinalize() XRAY_NEVER_INSTRUMENT {
+ uint8_t Expected = 0;
+ if (!atomic_compare_exchange_strong(&BasicInitialized, &Expected, 0,
+ memory_order_acq_rel) &&
+ Verbosity())
+ Report("Basic logging already finalized.\n");
+
+ // Nothing really to do aside from marking state of the global to be
+ // uninitialized.
+
+ return XRayLogInitStatus::XRAY_LOG_FINALIZED;
+}
+
+XRayLogFlushStatus basicLoggingFlush() XRAY_NEVER_INSTRUMENT {
+  // This really does nothing, since flushing the logs happens at the end of
+  // a thread's lifetime or when the buffers are full.
+ return XRayLogFlushStatus::XRAY_LOG_FLUSHED;
+}
+
+// This is a handler that, effectively, does nothing.
+void basicLoggingHandleArg0Empty(int32_t, XRayEntryType) XRAY_NEVER_INSTRUMENT {
+}
+
+bool basicLogDynamicInitializer() XRAY_NEVER_INSTRUMENT {
+ XRayLogImpl Impl{
+ basicLoggingInit,
+ basicLoggingFinalize,
+ basicLoggingHandleArg0Empty,
+ basicLoggingFlush,
+ };
+ auto RegistrationResult = __xray_log_register_mode("xray-basic", Impl);
+ if (RegistrationResult != XRayLogRegisterStatus::XRAY_REGISTRATION_OK &&
+ Verbosity())
+ Report("Cannot register XRay Basic Mode to 'xray-basic'; error = %d\n",
+ RegistrationResult);
+ if (flags()->xray_naive_log ||
+ !internal_strcmp(flags()->xray_mode, "xray-basic")) {
+ auto SelectResult = __xray_log_select_mode("xray-basic");
+ if (SelectResult != XRayLogRegisterStatus::XRAY_REGISTRATION_OK) {
+ if (Verbosity())
+ Report("Failed selecting XRay Basic Mode; error = %d\n", SelectResult);
+ return false;
+ }
+
+    // At this point we initialize the implementation using the data we get
+    // from the XRAY_BASIC_OPTIONS environment variable.
+ auto *Env = GetEnv("XRAY_BASIC_OPTIONS");
+ auto InitResult =
+ __xray_log_init_mode("xray-basic", Env == nullptr ? "" : Env);
+ if (InitResult != XRayLogInitStatus::XRAY_LOG_INITIALIZED) {
+ if (Verbosity())
+ Report("Failed initializing XRay Basic Mode; error = %d\n", InitResult);
+ return false;
+ }
+
+    // At this point we know that we've successfully initialized Basic mode
+    // tracing, and the only chance the current thread gets to clean up may
+    // be at thread/program exit. To ensure that the cleanup happens even
+    // without calling the finalization routines, we register a program exit
+    // function that performs it.
+ static pthread_once_t DynamicOnce = PTHREAD_ONCE_INIT;
+ pthread_once(&DynamicOnce, +[] {
+ static void *FakeTLD = nullptr;
+ FakeTLD = &getThreadLocalData();
+ Atexit(+[] { TLDDestructor(FakeTLD); });
+ });
+ }
+ return true;
+}
+
+} // namespace __xray
+
+static auto UNUSED Unused = __xray::basicLogDynamicInitializer();
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_basic_logging.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_basic_logging.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_basic_logging.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_basic_logging.h (revision 351984)
@@ -0,0 +1,42 @@
+//===-- xray_basic_logging.h ----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a function call tracing system.
+//
+//===----------------------------------------------------------------------===//
+#ifndef XRAY_XRAY_INMEMORY_LOG_H
+#define XRAY_XRAY_INMEMORY_LOG_H
+
+#include "xray/xray_log_interface.h"
+
+/// Basic (Naive) Mode
+/// ==================
+///
+/// This implementation hooks in through the XRay logging implementation
+/// framework. The Basic Mode implementation appends records to a file
+/// whenever the thread-local buffers fill up. It keeps minimal in-memory
+/// state and does the minimum filtering required to keep log files small.
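+///
+/// Illustrative usage (a sketch, not part of this interface): Basic Mode is
+/// typically selected and tuned through environment variables at process
+/// start, e.g.
+///
+///   XRAY_OPTIONS="patch_premain=true xray_mode=xray-basic" \
+///   XRAY_BASIC_OPTIONS="func_duration_threshold_us=10" ./instrumented_binary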
+
+namespace __xray {
+
+XRayLogInitStatus basicLoggingInit(size_t BufferSize, size_t BufferMax,
+ void *Options, size_t OptionsSize);
+XRayLogInitStatus basicLoggingFinalize();
+
+void basicLoggingHandleArg0RealTSC(int32_t FuncId, XRayEntryType Entry);
+void basicLoggingHandleArg0EmulateTSC(int32_t FuncId, XRayEntryType Entry);
+void basicLoggingHandleArg1RealTSC(int32_t FuncId, XRayEntryType Entry,
+ uint64_t Arg1);
+void basicLoggingHandleArg1EmulateTSC(int32_t FuncId, XRayEntryType Entry,
+ uint64_t Arg1);
+XRayLogFlushStatus basicLoggingFlush();
+XRayLogInitStatus basicLoggingReset();
+
+} // namespace __xray
+
+#endif // XRAY_XRAY_INMEMORY_LOG_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_basic_logging.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_buffer_queue.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_buffer_queue.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_buffer_queue.cc (revision 351984)
@@ -0,0 +1,237 @@
+//===-- xray_buffer_queue.cc -----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+// Defines the interface for a buffer queue implementation.
+//
+//===----------------------------------------------------------------------===//
+#include "xray_buffer_queue.h"
+#include "sanitizer_common/sanitizer_atomic.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#if !SANITIZER_FUCHSIA
+#include "sanitizer_common/sanitizer_posix.h"
+#endif
+#include "xray_allocator.h"
+#include "xray_defs.h"
+#include <memory>
+#include <sys/mman.h>
+
+using namespace __xray;
+
+namespace {
+
+BufferQueue::ControlBlock *allocControlBlock(size_t Size, size_t Count) {
+ auto B =
+ allocateBuffer((sizeof(BufferQueue::ControlBlock) - 1) + (Size * Count));
+ return B == nullptr ? nullptr
+ : reinterpret_cast<BufferQueue::ControlBlock *>(B);
+}
+
+void deallocControlBlock(BufferQueue::ControlBlock *C, size_t Size,
+ size_t Count) {
+ deallocateBuffer(reinterpret_cast<unsigned char *>(C),
+ (sizeof(BufferQueue::ControlBlock) - 1) + (Size * Count));
+}
+
+void decRefCount(BufferQueue::ControlBlock *C, size_t Size, size_t Count) {
+ if (C == nullptr)
+ return;
+ if (atomic_fetch_sub(&C->RefCount, 1, memory_order_acq_rel) == 1)
+ deallocControlBlock(C, Size, Count);
+}
+
+void incRefCount(BufferQueue::ControlBlock *C) {
+ if (C == nullptr)
+ return;
+ atomic_fetch_add(&C->RefCount, 1, memory_order_acq_rel);
+}
+
+// We use a struct to ensure that we are allocating one atomic_uint64_t per
+// cache line. This avoids false sharing among atomic objects that are
+// (constantly) updated by different threads.
+struct ExtentsPadded {
+ union {
+ atomic_uint64_t Extents;
+ unsigned char Storage[kCacheLineSize];
+ };
+};
+
+constexpr size_t kExtentsSize = sizeof(ExtentsPadded);
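+
+// Sanity check (illustrative addition): assuming kCacheLineSize is a multiple
+// of alignof(atomic_uint64_t) -- true on all supported targets -- the padded
+// wrapper spans exactly one cache line, so adjacent extents never share one.
+static_assert(kExtentsSize == kCacheLineSize,
+              "ExtentsPadded must span exactly one cache line");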
+
+} // namespace
+
+BufferQueue::ErrorCode BufferQueue::init(size_t BS, size_t BC) {
+ SpinMutexLock Guard(&Mutex);
+
+ if (!finalizing())
+ return BufferQueue::ErrorCode::AlreadyInitialized;
+
+ cleanupBuffers();
+
+ bool Success = false;
+ BufferSize = BS;
+ BufferCount = BC;
+
+ BackingStore = allocControlBlock(BufferSize, BufferCount);
+ if (BackingStore == nullptr)
+ return BufferQueue::ErrorCode::NotEnoughMemory;
+
+ auto CleanupBackingStore = at_scope_exit([&, this] {
+ if (Success)
+ return;
+ deallocControlBlock(BackingStore, BufferSize, BufferCount);
+ BackingStore = nullptr;
+ });
+
+  // Initialize enough atomic_uint64_t instances, one per buffer, to hold
+  // each buffer's extents.
+ ExtentsBackingStore = allocControlBlock(kExtentsSize, BufferCount);
+ if (ExtentsBackingStore == nullptr)
+ return BufferQueue::ErrorCode::NotEnoughMemory;
+
+ auto CleanupExtentsBackingStore = at_scope_exit([&, this] {
+ if (Success)
+ return;
+ deallocControlBlock(ExtentsBackingStore, kExtentsSize, BufferCount);
+ ExtentsBackingStore = nullptr;
+ });
+
+ Buffers = initArray<BufferRep>(BufferCount);
+ if (Buffers == nullptr)
+ return BufferQueue::ErrorCode::NotEnoughMemory;
+
+ // At this point we increment the generation number to associate the buffers
+ // to the new generation.
+ atomic_fetch_add(&Generation, 1, memory_order_acq_rel);
+
+ // First, we initialize the refcount in the ControlBlock, which we treat as
+ // being at the start of the BackingStore pointer.
+ atomic_store(&BackingStore->RefCount, 1, memory_order_release);
+ atomic_store(&ExtentsBackingStore->RefCount, 1, memory_order_release);
+
+  // Then we initialize the individual buffers that sub-divide the whole
+  // backing store. Each buffer starts at an offset from the `Data` member of
+  // its ControlBlock.
+ for (size_t i = 0; i < BufferCount; ++i) {
+ auto &T = Buffers[i];
+ auto &Buf = T.Buff;
+ auto *E = reinterpret_cast<ExtentsPadded *>(&ExtentsBackingStore->Data +
+ (kExtentsSize * i));
+ Buf.Extents = &E->Extents;
+ atomic_store(Buf.Extents, 0, memory_order_release);
+ Buf.Generation = generation();
+ Buf.Data = &BackingStore->Data + (BufferSize * i);
+ Buf.Size = BufferSize;
+ Buf.BackingStore = BackingStore;
+ Buf.ExtentsBackingStore = ExtentsBackingStore;
+ Buf.Count = BufferCount;
+ T.Used = false;
+ }
+
+ Next = Buffers;
+ First = Buffers;
+ LiveBuffers = 0;
+ atomic_store(&Finalizing, 0, memory_order_release);
+ Success = true;
+ return BufferQueue::ErrorCode::Ok;
+}
+
+BufferQueue::BufferQueue(size_t B, size_t N,
+ bool &Success) XRAY_NEVER_INSTRUMENT
+ : BufferSize(B),
+ BufferCount(N),
+ Mutex(),
+ Finalizing{1},
+ BackingStore(nullptr),
+ ExtentsBackingStore(nullptr),
+ Buffers(nullptr),
+ Next(Buffers),
+ First(Buffers),
+ LiveBuffers(0),
+ Generation{0} {
+ Success = init(B, N) == BufferQueue::ErrorCode::Ok;
+}
+
+BufferQueue::ErrorCode BufferQueue::getBuffer(Buffer &Buf) {
+ if (atomic_load(&Finalizing, memory_order_acquire))
+ return ErrorCode::QueueFinalizing;
+
+ BufferRep *B = nullptr;
+ {
+ SpinMutexLock Guard(&Mutex);
+ if (LiveBuffers == BufferCount)
+ return ErrorCode::NotEnoughMemory;
+ B = Next++;
+ if (Next == (Buffers + BufferCount))
+ Next = Buffers;
+ ++LiveBuffers;
+ }
+
+ incRefCount(BackingStore);
+ incRefCount(ExtentsBackingStore);
+ Buf = B->Buff;
+ Buf.Generation = generation();
+ B->Used = true;
+ return ErrorCode::Ok;
+}
+
+BufferQueue::ErrorCode BufferQueue::releaseBuffer(Buffer &Buf) {
+ // Check whether the buffer being referred to is within the bounds of the
+ // backing store's range.
+ BufferRep *B = nullptr;
+ {
+ SpinMutexLock Guard(&Mutex);
+    if (Buf.Generation != generation() || LiveBuffers == 0) {
+      // Drop our references before resetting |Buf|; resetting first would
+      // null out the control block pointers and leak the references.
+      decRefCount(Buf.BackingStore, Buf.Size, Buf.Count);
+      decRefCount(Buf.ExtentsBackingStore, kExtentsSize, Buf.Count);
+      Buf = {};
+      return BufferQueue::ErrorCode::Ok;
+    }
+
+ if (Buf.Data < &BackingStore->Data ||
+ Buf.Data > &BackingStore->Data + (BufferCount * BufferSize))
+ return BufferQueue::ErrorCode::UnrecognizedBuffer;
+
+ --LiveBuffers;
+ B = First++;
+ if (First == (Buffers + BufferCount))
+ First = Buffers;
+ }
+
+  // Now that the buffer has been released back to the queue, mark it as
+  // "used", i.e. it has gone through a getBuffer/releaseBuffer cycle.
+ B->Buff = Buf;
+ B->Used = true;
+ decRefCount(Buf.BackingStore, Buf.Size, Buf.Count);
+ decRefCount(Buf.ExtentsBackingStore, kExtentsSize, Buf.Count);
+ atomic_store(B->Buff.Extents, atomic_load(Buf.Extents, memory_order_acquire),
+ memory_order_release);
+ Buf = {};
+ return ErrorCode::Ok;
+}
+
+BufferQueue::ErrorCode BufferQueue::finalize() {
+ if (atomic_exchange(&Finalizing, 1, memory_order_acq_rel))
+ return ErrorCode::QueueFinalizing;
+ return ErrorCode::Ok;
+}
+
+void BufferQueue::cleanupBuffers() {
+ for (auto B = Buffers, E = Buffers + BufferCount; B != E; ++B)
+ B->~BufferRep();
+ deallocateBuffer(Buffers, BufferCount);
+ decRefCount(BackingStore, BufferSize, BufferCount);
+ decRefCount(ExtentsBackingStore, kExtentsSize, BufferCount);
+ BackingStore = nullptr;
+ ExtentsBackingStore = nullptr;
+ Buffers = nullptr;
+ BufferCount = 0;
+ BufferSize = 0;
+}
+
+BufferQueue::~BufferQueue() { cleanupBuffers(); }
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_buffer_queue.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_buffer_queue.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_buffer_queue.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_buffer_queue.h (revision 351984)
@@ -0,0 +1,280 @@
+//===-- xray_buffer_queue.h ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+// Defines the interface for a buffer queue implementation.
+//
+//===----------------------------------------------------------------------===//
+#ifndef XRAY_BUFFER_QUEUE_H
+#define XRAY_BUFFER_QUEUE_H
+
+#include "sanitizer_common/sanitizer_atomic.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_mutex.h"
+#include "xray_defs.h"
+#include <cstddef>
+#include <cstdint>
+
+namespace __xray {
+
+/// BufferQueue implements a circular queue of fixed-sized buffers (much like
+/// a freelist), with a focus on making it quick to initialise and finalise
+/// the queue, and to get buffers from and return buffers to it. This is one
+/// key component of the "flight data recorder" (FDR) mode, supporting ongoing
+/// XRay function call trace collection.
+class BufferQueue {
+public:
+ /// ControlBlock represents the memory layout of how we interpret the backing
+ /// store for all buffers and extents managed by a BufferQueue instance. The
+ /// ControlBlock has the reference count as the first member, sized according
+ /// to platform-specific cache-line size. We never use the Buffer member of
+ /// the union, which is only there for compiler-supported alignment and
+ /// sizing.
+ ///
+ /// This ensures that the `Data` member will be placed at least kCacheLineSize
+ /// bytes from the beginning of the structure.
+ struct ControlBlock {
+ union {
+ atomic_uint64_t RefCount;
+ char Buffer[kCacheLineSize];
+ };
+
+    /// This must have size 1 to conform to the C++ rules for array data
+    /// members (zero-length arrays are non-standard). Size computations
+    /// subtract this 1 byte; see allocControlBlock in xray_buffer_queue.cc.
+ char Data[1];
+ };
+
+ struct Buffer {
+ atomic_uint64_t *Extents = nullptr;
+ uint64_t Generation{0};
+ void *Data = nullptr;
+ size_t Size = 0;
+
+ private:
+ friend class BufferQueue;
+ ControlBlock *BackingStore = nullptr;
+ ControlBlock *ExtentsBackingStore = nullptr;
+ size_t Count = 0;
+ };
+
+ struct BufferRep {
+ // The managed buffer.
+ Buffer Buff;
+
+ // This is true if the buffer has been returned to the available queue, and
+ // is considered "used" by another thread.
+ bool Used = false;
+ };
+
+private:
+  // This models a ForwardIterator. |T| must be either a `Buffer` or a `const
+  // Buffer`. Note that when incrementing we only advance to "used" buffers,
+  // so a dereference always lands on a valid element.
+ template <class T> class Iterator {
+ public:
+ BufferRep *Buffers = nullptr;
+ size_t Offset = 0;
+ size_t Max = 0;
+
+ Iterator &operator++() {
+ DCHECK_NE(Offset, Max);
+ do {
+ ++Offset;
+ } while (!Buffers[Offset].Used && Offset != Max);
+ return *this;
+ }
+
+ Iterator operator++(int) {
+ Iterator C = *this;
+ ++(*this);
+ return C;
+ }
+
+ T &operator*() const { return Buffers[Offset].Buff; }
+
+ T *operator->() const { return &(Buffers[Offset].Buff); }
+
+ Iterator(BufferRep *Root, size_t O, size_t M) XRAY_NEVER_INSTRUMENT
+ : Buffers(Root),
+ Offset(O),
+ Max(M) {
+ // We want to advance to the first Offset where the 'Used' property is
+ // true, or to the end of the list/queue.
+ while (!Buffers[Offset].Used && Offset != Max) {
+ ++Offset;
+ }
+ }
+
+ Iterator() = default;
+ Iterator(const Iterator &) = default;
+ Iterator(Iterator &&) = default;
+ Iterator &operator=(const Iterator &) = default;
+ Iterator &operator=(Iterator &&) = default;
+ ~Iterator() = default;
+
+ template <class V>
+ friend bool operator==(const Iterator &L, const Iterator<V> &R) {
+ DCHECK_EQ(L.Max, R.Max);
+ return L.Buffers == R.Buffers && L.Offset == R.Offset;
+ }
+
+ template <class V>
+ friend bool operator!=(const Iterator &L, const Iterator<V> &R) {
+ return !(L == R);
+ }
+ };
+
+ // Size of each individual Buffer.
+ size_t BufferSize;
+
+ // Amount of pre-allocated buffers.
+ size_t BufferCount;
+
+ SpinMutex Mutex;
+ atomic_uint8_t Finalizing;
+
+ // The collocated ControlBlock and buffer storage.
+ ControlBlock *BackingStore;
+
+ // The collocated ControlBlock and extents storage.
+ ControlBlock *ExtentsBackingStore;
+
+ // A dynamically allocated array of BufferRep instances.
+ BufferRep *Buffers;
+
+ // Pointer to the next buffer to be handed out.
+ BufferRep *Next;
+
+ // Pointer to the entry in the array where the next released buffer will be
+ // placed.
+ BufferRep *First;
+
+ // Count of buffers that have been handed out through 'getBuffer'.
+ size_t LiveBuffers;
+
+  // We use a generation number to identify which generation of the queue each
+  // buffer is associated with.
+ atomic_uint64_t Generation;
+
+ /// Releases references to the buffers backed by the current buffer queue.
+ void cleanupBuffers();
+
+public:
+ enum class ErrorCode : unsigned {
+ Ok,
+ NotEnoughMemory,
+ QueueFinalizing,
+ UnrecognizedBuffer,
+ AlreadyFinalized,
+ AlreadyInitialized,
+ };
+
+ static const char *getErrorString(ErrorCode E) {
+ switch (E) {
+ case ErrorCode::Ok:
+ return "(none)";
+ case ErrorCode::NotEnoughMemory:
+ return "no available buffers in the queue";
+ case ErrorCode::QueueFinalizing:
+ return "queue already finalizing";
+ case ErrorCode::UnrecognizedBuffer:
+ return "buffer being returned not owned by buffer queue";
+ case ErrorCode::AlreadyFinalized:
+ return "queue already finalized";
+ case ErrorCode::AlreadyInitialized:
+ return "queue already initialized";
+ }
+ return "unknown error";
+ }
+
+ /// Initialise a queue of size |N| with buffers of size |B|. We report success
+ /// through |Success|.
+ BufferQueue(size_t B, size_t N, bool &Success);
+
+  /// Updates |Buf| to contain the pointer to an appropriate buffer. Returns
+  /// an error when no buffers are available, i.e. when handing out another
+  /// buffer would exceed the configured upper bound.
+ ///
+ /// Requirements:
+ /// - BufferQueue is not finalising.
+ ///
+ /// Returns:
+ /// - ErrorCode::NotEnoughMemory on exceeding MaxSize.
+ /// - ErrorCode::Ok when we find a Buffer.
+ /// - ErrorCode::QueueFinalizing or ErrorCode::AlreadyFinalized on
+ /// a finalizing/finalized BufferQueue.
+ ErrorCode getBuffer(Buffer &Buf);
+
+ /// Updates |Buf| to point to nullptr, with size 0.
+ ///
+ /// Returns:
+ /// - ErrorCode::Ok when we successfully release the buffer.
+ /// - ErrorCode::UnrecognizedBuffer for when this BufferQueue does not own
+ /// the buffer being released.
+ ErrorCode releaseBuffer(Buffer &Buf);
+
+  /// Initializes the buffer queue, starting a new generation. We can re-set
+  /// the buffer size with |BS| and the buffer count with |BC|.
+ ///
+ /// Returns:
+ /// - ErrorCode::Ok when we successfully initialize the buffer. This
+ /// requires that the buffer queue is previously finalized.
+ /// - ErrorCode::AlreadyInitialized when the buffer queue is not finalized.
+ ErrorCode init(size_t BS, size_t BC);
+
+ bool finalizing() const {
+ return atomic_load(&Finalizing, memory_order_acquire);
+ }
+
+ uint64_t generation() const {
+ return atomic_load(&Generation, memory_order_acquire);
+ }
+
+ /// Returns the configured size of the buffers in the buffer queue.
+ size_t ConfiguredBufferSize() const { return BufferSize; }
+
+ /// Sets the state of the BufferQueue to finalizing, which ensures that:
+ ///
+ /// - All subsequent attempts to retrieve a Buffer will fail.
+  /// - All releaseBuffer operations will continue to succeed.
+ ///
+ /// After a call to finalize succeeds, all subsequent calls to finalize will
+ /// fail with ErrorCode::QueueFinalizing.
+ ErrorCode finalize();
+
+ /// Applies the provided function F to each Buffer in the queue, only if the
+ /// Buffer is marked 'used' (i.e. has been the result of getBuffer(...) and a
+ /// releaseBuffer(...) operation).
+ template <class F> void apply(F Fn) XRAY_NEVER_INSTRUMENT {
+ SpinMutexLock G(&Mutex);
+ for (auto I = begin(), E = end(); I != E; ++I)
+ Fn(*I);
+ }
+
+ using const_iterator = Iterator<const Buffer>;
+ using iterator = Iterator<Buffer>;
+
+ /// Provides iterator access to the raw Buffer instances.
+ iterator begin() const { return iterator(Buffers, 0, BufferCount); }
+ const_iterator cbegin() const {
+ return const_iterator(Buffers, 0, BufferCount);
+ }
+ iterator end() const { return iterator(Buffers, BufferCount, BufferCount); }
+ const_iterator cend() const {
+ return const_iterator(Buffers, BufferCount, BufferCount);
+ }
+
+ // Cleans up allocated buffers.
+ ~BufferQueue();
+};
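+
+// Illustrative usage (a sketch, not part of the original interface): a
+// typical getBuffer/releaseBuffer round-trip over a queue of four one-page
+// buffers.
+inline void bufferQueueUsageSketch() {
+  bool Success = false;
+  BufferQueue Q(4096, 4, Success);
+  if (!Success)
+    return;
+  BufferQueue::Buffer B;
+  if (Q.getBuffer(B) != BufferQueue::ErrorCode::Ok)
+    return;
+  // ... write into B.Data, publishing the written size through B.Extents ...
+  Q.releaseBuffer(B); // B is reset to an empty Buffer on success.
+  Q.finalize();       // all subsequent getBuffer calls now fail.
+}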
+
+} // namespace __xray
+
+#endif // XRAY_BUFFER_QUEUE_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_buffer_queue.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_defs.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_defs.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_defs.h (revision 351984)
@@ -0,0 +1,31 @@
+//===-- xray_defs.h ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Common definitions useful for XRay sources.
+//
+//===----------------------------------------------------------------------===//
+#ifndef XRAY_XRAY_DEFS_H
+#define XRAY_XRAY_DEFS_H
+
+#if XRAY_SUPPORTED
+#define XRAY_NEVER_INSTRUMENT __attribute__((xray_never_instrument))
+#else
+#define XRAY_NEVER_INSTRUMENT
+#endif
+
+#if SANITIZER_NETBSD
+// NetBSD: thread_local is not aligned properly, and the code relying
+// on it segfaults
+#define XRAY_TLS_ALIGNAS(x)
+#define XRAY_HAS_TLS_ALIGNAS 0
+#else
+#define XRAY_TLS_ALIGNAS(x) alignas(x)
+#define XRAY_HAS_TLS_ALIGNAS 1
+#endif
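+
+// Illustrative usage (a sketch): thread-local data that wants cache-line
+// alignment declares itself as
+//   struct XRAY_TLS_ALIGNAS(64) ThreadLocalData { ... };
+// so the alignment request compiles away on NetBSD, where it would fault.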
+
+#endif // XRAY_XRAY_DEFS_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_defs.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_fdr_controller.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_fdr_controller.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_fdr_controller.h (revision 351984)
@@ -0,0 +1,372 @@
+//===-- xray_fdr_controller.h ---------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a function call tracing system.
+//
+//===----------------------------------------------------------------------===//
+#ifndef COMPILER_RT_LIB_XRAY_XRAY_FDR_CONTROLLER_H_
+#define COMPILER_RT_LIB_XRAY_XRAY_FDR_CONTROLLER_H_
+
+#include <limits>
+#include <time.h>
+
+#include "xray/xray_interface.h"
+#include "xray/xray_records.h"
+#include "xray_buffer_queue.h"
+#include "xray_fdr_log_writer.h"
+
+namespace __xray {
+
+template <size_t Version = 5> class FDRController {
+ BufferQueue *BQ;
+ BufferQueue::Buffer &B;
+ FDRLogWriter &W;
+ int (*WallClockReader)(clockid_t, struct timespec *) = 0;
+ uint64_t CycleThreshold = 0;
+
+ uint64_t LastFunctionEntryTSC = 0;
+ uint64_t LatestTSC = 0;
+ uint16_t LatestCPU = 0;
+ tid_t TId = 0;
+ pid_t PId = 0;
+ bool First = true;
+
+ uint32_t UndoableFunctionEnters = 0;
+ uint32_t UndoableTailExits = 0;
+
+ bool finalized() const XRAY_NEVER_INSTRUMENT {
+ return BQ == nullptr || BQ->finalizing();
+ }
+
+ bool hasSpace(size_t S) XRAY_NEVER_INSTRUMENT {
+ return B.Data != nullptr && B.Generation == BQ->generation() &&
+ W.getNextRecord() + S <= reinterpret_cast<char *>(B.Data) + B.Size;
+ }
+
+ constexpr int32_t mask(int32_t FuncId) const XRAY_NEVER_INSTRUMENT {
+ return FuncId & ((1 << 29) - 1);
+ }
+
+ bool getNewBuffer() XRAY_NEVER_INSTRUMENT {
+ if (BQ->getBuffer(B) != BufferQueue::ErrorCode::Ok)
+ return false;
+
+ W.resetRecord();
+ DCHECK_EQ(W.getNextRecord(), B.Data);
+ LatestTSC = 0;
+ LatestCPU = 0;
+ First = true;
+ UndoableFunctionEnters = 0;
+ UndoableTailExits = 0;
+ atomic_store(B.Extents, 0, memory_order_release);
+ return true;
+ }
+
+ bool setupNewBuffer() XRAY_NEVER_INSTRUMENT {
+ if (finalized())
+ return false;
+
+ DCHECK(hasSpace(sizeof(MetadataRecord) * 3));
+ TId = GetTid();
+ PId = internal_getpid();
+    struct timespec TS{0, 0};
+ WallClockReader(CLOCK_MONOTONIC, &TS);
+
+ MetadataRecord Metadata[] = {
+ // Write out a MetadataRecord to signify that this is the start of a new
+ // buffer, associated with a particular thread, with a new CPU. For the
+ // data, we have 15 bytes to squeeze as much information as we can. At
+ // this point we only write down the following bytes:
+        //   - Thread ID (tid_t, cast to a 4-byte type because tid_t is 8
+        //     bytes on Darwin)
+ createMetadataRecord<MetadataRecord::RecordKinds::NewBuffer>(
+ static_cast<int32_t>(TId)),
+
+        // Also write the WalltimeMarker record. We only really need
+        // microsecond precision here, and enforce across platforms that the
+        // Metadata record encodes 64-bit seconds and 32-bit microseconds.
+ createMetadataRecord<MetadataRecord::RecordKinds::WalltimeMarker>(
+ static_cast<int64_t>(TS.tv_sec),
+ static_cast<int32_t>(TS.tv_nsec / 1000)),
+
+ // Also write the Pid record.
+ createMetadataRecord<MetadataRecord::RecordKinds::Pid>(
+ static_cast<int32_t>(PId)),
+ };
+
+ if (finalized())
+ return false;
+ return W.writeMetadataRecords(Metadata);
+ }
+
+ bool prepareBuffer(size_t S) XRAY_NEVER_INSTRUMENT {
+ if (finalized())
+ return returnBuffer();
+
+ if (UNLIKELY(!hasSpace(S))) {
+ if (!returnBuffer())
+ return false;
+ if (!getNewBuffer())
+ return false;
+ if (!setupNewBuffer())
+ return false;
+ }
+
+ if (First) {
+ First = false;
+ W.resetRecord();
+ atomic_store(B.Extents, 0, memory_order_release);
+ return setupNewBuffer();
+ }
+
+ return true;
+ }
+
+ bool returnBuffer() XRAY_NEVER_INSTRUMENT {
+ if (BQ == nullptr)
+ return false;
+
+ First = true;
+ if (finalized()) {
+ BQ->releaseBuffer(B); // ignore result.
+ return false;
+ }
+
+ return BQ->releaseBuffer(B) == BufferQueue::ErrorCode::Ok;
+ }
+
+ enum class PreambleResult { NoChange, WroteMetadata, InvalidBuffer };
+ PreambleResult recordPreamble(uint64_t TSC,
+ uint16_t CPU) XRAY_NEVER_INSTRUMENT {
+ if (UNLIKELY(LatestCPU != CPU || LatestTSC == 0)) {
+ // We update our internal tracking state for the Latest TSC and CPU we've
+ // seen, then write out the appropriate metadata and function records.
+ LatestTSC = TSC;
+ LatestCPU = CPU;
+
+ if (B.Generation != BQ->generation())
+ return PreambleResult::InvalidBuffer;
+
+ W.writeMetadata<MetadataRecord::RecordKinds::NewCPUId>(CPU, TSC);
+ return PreambleResult::WroteMetadata;
+ }
+
+ DCHECK_EQ(LatestCPU, CPU);
+
+ if (UNLIKELY(LatestTSC > TSC ||
+ TSC - LatestTSC >
+ uint64_t{std::numeric_limits<int32_t>::max()})) {
+ // Either the TSC has wrapped around from the last TSC we've seen or the
+ // delta is too large to fit in a 32-bit signed integer, so we write a
+ // wrap-around record.
+ LatestTSC = TSC;
+
+ if (B.Generation != BQ->generation())
+ return PreambleResult::InvalidBuffer;
+
+ W.writeMetadata<MetadataRecord::RecordKinds::TSCWrap>(TSC);
+ return PreambleResult::WroteMetadata;
+ }
+
+ return PreambleResult::NoChange;
+ }
+
+ bool rewindRecords(int32_t FuncId, uint64_t TSC,
+ uint16_t CPU) XRAY_NEVER_INSTRUMENT {
+    // Undo one enter record, because at this point we are in one of two
+    // states:
+ // - We are exiting a function that we recently entered.
+ // - We are exiting a function that was the result of a sequence of tail
+ // exits, and we can check whether the tail exits can be re-wound.
+ //
+ FunctionRecord F;
+ W.undoWrites(sizeof(FunctionRecord));
+ if (B.Generation != BQ->generation())
+ return false;
+ internal_memcpy(&F, W.getNextRecord(), sizeof(FunctionRecord));
+
+    DCHECK(F.RecordKind ==
+               uint8_t(FunctionRecord::RecordKinds::FunctionEnter) &&
+           "Expected to find a function entry record when rewinding.");
+ DCHECK_EQ(F.FuncId, FuncId & ~(0x0F << 28));
+
+ LatestTSC -= F.TSCDelta;
+ if (--UndoableFunctionEnters != 0) {
+ LastFunctionEntryTSC -= F.TSCDelta;
+ return true;
+ }
+
+ LastFunctionEntryTSC = 0;
+ auto RewindingTSC = LatestTSC;
+ auto RewindingRecordPtr = W.getNextRecord() - sizeof(FunctionRecord);
+ while (UndoableTailExits) {
+ if (B.Generation != BQ->generation())
+ return false;
+ internal_memcpy(&F, RewindingRecordPtr, sizeof(FunctionRecord));
+ DCHECK_EQ(F.RecordKind,
+ uint8_t(FunctionRecord::RecordKinds::FunctionTailExit));
+ RewindingTSC -= F.TSCDelta;
+ RewindingRecordPtr -= sizeof(FunctionRecord);
+ if (B.Generation != BQ->generation())
+ return false;
+ internal_memcpy(&F, RewindingRecordPtr, sizeof(FunctionRecord));
+
+ // This tail call exceeded the threshold duration. It will not be erased.
+ if ((TSC - RewindingTSC) >= CycleThreshold) {
+ UndoableTailExits = 0;
+ return true;
+ }
+
+ --UndoableTailExits;
+ W.undoWrites(sizeof(FunctionRecord) * 2);
+ LatestTSC = RewindingTSC;
+ }
+ return true;
+ }
+
+public:
+ template <class WallClockFunc>
+ FDRController(BufferQueue *BQ, BufferQueue::Buffer &B, FDRLogWriter &W,
+ WallClockFunc R, uint64_t C) XRAY_NEVER_INSTRUMENT
+ : BQ(BQ),
+ B(B),
+ W(W),
+ WallClockReader(R),
+ CycleThreshold(C) {}
+
+ bool functionEnter(int32_t FuncId, uint64_t TSC,
+ uint16_t CPU) XRAY_NEVER_INSTRUMENT {
+ if (finalized() ||
+ !prepareBuffer(sizeof(MetadataRecord) + sizeof(FunctionRecord)))
+ return returnBuffer();
+
+ auto PreambleStatus = recordPreamble(TSC, CPU);
+ if (PreambleStatus == PreambleResult::InvalidBuffer)
+ return returnBuffer();
+
+ if (PreambleStatus == PreambleResult::WroteMetadata) {
+ UndoableFunctionEnters = 1;
+ UndoableTailExits = 0;
+ } else {
+ ++UndoableFunctionEnters;
+ }
+
+ auto Delta = TSC - LatestTSC;
+ LastFunctionEntryTSC = TSC;
+ LatestTSC = TSC;
+ return W.writeFunction(FDRLogWriter::FunctionRecordKind::Enter,
+ mask(FuncId), Delta);
+ }
+
+ bool functionTailExit(int32_t FuncId, uint64_t TSC,
+ uint16_t CPU) XRAY_NEVER_INSTRUMENT {
+ if (finalized())
+ return returnBuffer();
+
+ if (!prepareBuffer(sizeof(MetadataRecord) + sizeof(FunctionRecord)))
+ return returnBuffer();
+
+ auto PreambleStatus = recordPreamble(TSC, CPU);
+ if (PreambleStatus == PreambleResult::InvalidBuffer)
+ return returnBuffer();
+
+ if (PreambleStatus == PreambleResult::NoChange &&
+ UndoableFunctionEnters != 0 &&
+ TSC - LastFunctionEntryTSC < CycleThreshold)
+ return rewindRecords(FuncId, TSC, CPU);
+
+ UndoableTailExits = UndoableFunctionEnters ? UndoableTailExits + 1 : 0;
+ UndoableFunctionEnters = 0;
+ auto Delta = TSC - LatestTSC;
+ LatestTSC = TSC;
+ return W.writeFunction(FDRLogWriter::FunctionRecordKind::TailExit,
+ mask(FuncId), Delta);
+ }
+
+ bool functionEnterArg(int32_t FuncId, uint64_t TSC, uint16_t CPU,
+ uint64_t Arg) XRAY_NEVER_INSTRUMENT {
+ if (finalized() ||
+ !prepareBuffer((2 * sizeof(MetadataRecord)) + sizeof(FunctionRecord)) ||
+ recordPreamble(TSC, CPU) == PreambleResult::InvalidBuffer)
+ return returnBuffer();
+
+ auto Delta = TSC - LatestTSC;
+ LatestTSC = TSC;
+ LastFunctionEntryTSC = 0;
+ UndoableFunctionEnters = 0;
+ UndoableTailExits = 0;
+
+ return W.writeFunctionWithArg(FDRLogWriter::FunctionRecordKind::EnterArg,
+ mask(FuncId), Delta, Arg);
+ }
+
+ bool functionExit(int32_t FuncId, uint64_t TSC,
+ uint16_t CPU) XRAY_NEVER_INSTRUMENT {
+ if (finalized() ||
+ !prepareBuffer(sizeof(MetadataRecord) + sizeof(FunctionRecord)))
+ return returnBuffer();
+
+ auto PreambleStatus = recordPreamble(TSC, CPU);
+ if (PreambleStatus == PreambleResult::InvalidBuffer)
+ return returnBuffer();
+
+ if (PreambleStatus == PreambleResult::NoChange &&
+ UndoableFunctionEnters != 0 &&
+ TSC - LastFunctionEntryTSC < CycleThreshold)
+ return rewindRecords(FuncId, TSC, CPU);
+
+ auto Delta = TSC - LatestTSC;
+ LatestTSC = TSC;
+ UndoableFunctionEnters = 0;
+ UndoableTailExits = 0;
+ return W.writeFunction(FDRLogWriter::FunctionRecordKind::Exit, mask(FuncId),
+ Delta);
+ }
+
+ bool customEvent(uint64_t TSC, uint16_t CPU, const void *Event,
+ int32_t EventSize) XRAY_NEVER_INSTRUMENT {
+ if (finalized() ||
+ !prepareBuffer((2 * sizeof(MetadataRecord)) + EventSize) ||
+ recordPreamble(TSC, CPU) == PreambleResult::InvalidBuffer)
+ return returnBuffer();
+
+ auto Delta = TSC - LatestTSC;
+ LatestTSC = TSC;
+ UndoableFunctionEnters = 0;
+ UndoableTailExits = 0;
+ return W.writeCustomEvent(Delta, Event, EventSize);
+ }
+
+ bool typedEvent(uint64_t TSC, uint16_t CPU, uint16_t EventType,
+ const void *Event, int32_t EventSize) XRAY_NEVER_INSTRUMENT {
+ if (finalized() ||
+ !prepareBuffer((2 * sizeof(MetadataRecord)) + EventSize) ||
+ recordPreamble(TSC, CPU) == PreambleResult::InvalidBuffer)
+ return returnBuffer();
+
+ auto Delta = TSC - LatestTSC;
+ LatestTSC = TSC;
+ UndoableFunctionEnters = 0;
+ UndoableTailExits = 0;
+ return W.writeTypedEvent(Delta, EventType, Event, EventSize);
+ }
+
+ bool flush() XRAY_NEVER_INSTRUMENT {
+ if (finalized()) {
+ returnBuffer(); // ignore result.
+ return true;
+ }
+ return returnBuffer();
+ }
+};
+
+} // namespace __xray
+
+#endif // COMPILER_RT_LIB_XRAY_XRAY_FDR_CONTROLLER_H_
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_fdr_flags.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_fdr_flags.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_fdr_flags.cc (revision 351984)
@@ -0,0 +1,47 @@
+//===-- xray_fdr_flags.cc ---------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+// XRay FDR flag parsing logic.
+//===----------------------------------------------------------------------===//
+
+#include "xray_fdr_flags.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_flag_parser.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "xray_defs.h"
+
+using namespace __sanitizer;
+
+namespace __xray {
+
+FDRFlags xray_fdr_flags_dont_use_directly; // use via fdrFlags().
+
+void FDRFlags::setDefaults() XRAY_NEVER_INSTRUMENT {
+#define XRAY_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
+#include "xray_fdr_flags.inc"
+#undef XRAY_FLAG
+}
+
+void registerXRayFDRFlags(FlagParser *P, FDRFlags *F) XRAY_NEVER_INSTRUMENT {
+#define XRAY_FLAG(Type, Name, DefaultValue, Description) \
+ RegisterFlag(P, #Name, Description, &F->Name);
+#include "xray_fdr_flags.inc"
+#undef XRAY_FLAG
+}
+
+const char *useCompilerDefinedFDRFlags() XRAY_NEVER_INSTRUMENT {
+#ifdef XRAY_FDR_OPTIONS
+ return SANITIZER_STRINGIFY(XRAY_FDR_OPTIONS);
+#else
+ return "";
+#endif
+}
+
+} // namespace __xray
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_fdr_flags.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_fdr_flags.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_fdr_flags.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_fdr_flags.h (revision 351984)
@@ -0,0 +1,37 @@
+//===-- xray_fdr_flags.h ---------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+// This file defines the flags for the flight-data-recorder mode implementation.
+//
+//===----------------------------------------------------------------------===//
+#ifndef XRAY_FDR_FLAGS_H
+#define XRAY_FDR_FLAGS_H
+
+#include "sanitizer_common/sanitizer_flag_parser.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+
+namespace __xray {
+
+struct FDRFlags {
+#define XRAY_FLAG(Type, Name, DefaultValue, Description) Type Name;
+#include "xray_fdr_flags.inc"
+#undef XRAY_FLAG
+
+ void setDefaults();
+};
+
+extern FDRFlags xray_fdr_flags_dont_use_directly;
+extern void registerXRayFDRFlags(FlagParser *P, FDRFlags *F);
+const char *useCompilerDefinedFDRFlags();
+inline FDRFlags *fdrFlags() { return &xray_fdr_flags_dont_use_directly; }
+
+} // namespace __xray
+
+#endif // XRAY_FDR_FLAGS_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_fdr_flags.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_fdr_flags.inc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_fdr_flags.inc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_fdr_flags.inc (revision 351984)
@@ -0,0 +1,28 @@
+//===-- xray_fdr_flags.inc --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// XRay FDR Mode runtime flags.
+//
+//===----------------------------------------------------------------------===//
+#ifndef XRAY_FLAG
+#error "Define XRAY_FLAG prior to including this file!"
+#endif
+
+// FDR (Flight Data Recorder) Mode logging options.
+XRAY_FLAG(int, func_duration_threshold_us, 5,
+ "FDR logging will try to skip functions that execute for fewer "
+ "microseconds than this threshold.")
+XRAY_FLAG(int, grace_period_ms, 100,
+ "FDR logging will wait this much time in milliseconds before "
+ "actually flushing the log; this gives a chance for threads to "
+ "notice that the log has been finalized and clean up.")
+XRAY_FLAG(int, buffer_size, 16384,
+ "Size of buffers in the circular buffer queue.")
+XRAY_FLAG(int, buffer_max, 100, "Maximum number of buffers in the queue.")
+XRAY_FLAG(bool, no_file_flush, false,
+ "Set to true to not write log files by default.")
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_fdr_flags.inc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_fdr_log_records.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_fdr_log_records.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_fdr_log_records.h (revision 351984)
@@ -0,0 +1,75 @@
+//===-- xray_fdr_log_records.h -------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a function call tracing system.
+//
+//===----------------------------------------------------------------------===//
+#ifndef XRAY_XRAY_FDR_LOG_RECORDS_H
+#define XRAY_XRAY_FDR_LOG_RECORDS_H
+#include <cstdint>
+
+namespace __xray {
+
+enum class RecordType : uint8_t { Function, Metadata };
+
+// A MetadataRecord encodes the kind of record in its first byte, and has 15
+// additional bytes at the end to hold free-form data.
+struct alignas(16) MetadataRecord {
+ // A MetadataRecord must always have a type of 1.
+ /* RecordType */ uint8_t Type : 1;
+
+ // Each kind of record is represented as a 7-bit value (even though we use an
+ // unsigned 8-bit enum class to do so).
+ enum class RecordKinds : uint8_t {
+ NewBuffer,
+ EndOfBuffer,
+ NewCPUId,
+ TSCWrap,
+ WalltimeMarker,
+ CustomEventMarker,
+ CallArgument,
+ BufferExtents,
+ TypedEventMarker,
+ Pid,
+ };
+
+ // Use 7 bits to identify this record type.
+ /* RecordKinds */ uint8_t RecordKind : 7;
+ char Data[15];
+} __attribute__((packed));
+
+static_assert(sizeof(MetadataRecord) == 16, "Wrong size for MetadataRecord.");
+
+struct alignas(8) FunctionRecord {
+ // A FunctionRecord must always have a type of 0.
+ /* RecordType */ uint8_t Type : 1;
+ enum class RecordKinds {
+ FunctionEnter = 0x00,
+ FunctionExit = 0x01,
+ FunctionTailExit = 0x02,
+ };
+ /* RecordKinds */ uint8_t RecordKind : 3;
+
+ // We only use 28 bits of the function ID, so that we can use as few bytes as
+ // possible. This means we only support 2^28 (268,435,456) unique function ids
+ // in a single binary.
+ int FuncId : 28;
+
+  // We use another 4 bytes to hold the delta from the previous entry's TSC.
+  // If the delta does not fit in the allowable 32 bits (for instance because
+  // we are now running on a different CPU, whose TSC may differ), a
+  // MetadataRecord carrying the full TSC for that CPU should precede this
+  // FunctionRecord, and this field should be kept at 0.
+ uint32_t TSCDelta;
+} __attribute__((packed));
+
+static_assert(sizeof(FunctionRecord) == 8, "Wrong size for FunctionRecord.");
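+
+// Illustrative sketch (an addition, assuming the usual little-endian bitfield
+// layout used by supported targets): the low bit of a record's first byte
+// distinguishes the two record types.
+inline RecordType recordTypeFromFirstByte(uint8_t FirstByte) {
+  return (FirstByte & 0x01) ? RecordType::Metadata : RecordType::Function;
+}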
+
+} // namespace __xray
+
+#endif // XRAY_XRAY_FDR_LOG_RECORDS_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_fdr_log_records.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_fdr_log_writer.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_fdr_log_writer.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_fdr_log_writer.h (revision 351984)
@@ -0,0 +1,231 @@
+//===-- xray_fdr_log_writer.h ---------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a function call tracing system.
+//
+//===----------------------------------------------------------------------===//
+#ifndef COMPILER_RT_LIB_XRAY_XRAY_FDR_LOG_WRITER_H_
+#define COMPILER_RT_LIB_XRAY_XRAY_FDR_LOG_WRITER_H_
+
+#include "xray_buffer_queue.h"
+#include "xray_fdr_log_records.h"
+#include <functional>
+#include <tuple>
+#include <type_traits>
+#include <utility>
+
+namespace __xray {
+
+template <size_t Index> struct SerializerImpl {
+ template <class Tuple,
+ typename std::enable_if<
+ Index<std::tuple_size<
+ typename std::remove_reference<Tuple>::type>::value,
+ int>::type = 0>
+ static void serializeTo(char *Buffer, Tuple &&T) {
+ auto P = reinterpret_cast<const char *>(&std::get<Index>(T));
+ constexpr auto Size = sizeof(std::get<Index>(T));
+ internal_memcpy(Buffer, P, Size);
+ SerializerImpl<Index + 1>::serializeTo(Buffer + Size,
+ std::forward<Tuple>(T));
+ }
+
+ template <class Tuple,
+ typename std::enable_if<
+ Index >= std::tuple_size<typename std::remove_reference<
+ Tuple>::type>::value,
+ int>::type = 0>
+ static void serializeTo(char *, Tuple &&) {}
+};
+
+using Serializer = SerializerImpl<0>;
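+
+// A minimal usage sketch: serializeTo(...) packs the tuple's elements into the
+// destination back-to-back with no padding, so the following writes 12 bytes
+// (4 + 8) into Buf:
+//
+//   char Buf[16];
+//   Serializer::serializeTo(Buf, std::make_tuple(int32_t{1}, uint64_t{2}));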
+
+template <class Tuple, size_t Index> struct AggregateSizesImpl {
+ static constexpr size_t value =
+ sizeof(typename std::tuple_element<Index, Tuple>::type) +
+ AggregateSizesImpl<Tuple, Index - 1>::value;
+};
+
+template <class Tuple> struct AggregateSizesImpl<Tuple, 0> {
+ static constexpr size_t value =
+ sizeof(typename std::tuple_element<0, Tuple>::type);
+};
+
+template <class Tuple> struct AggregateSizes {
+ static constexpr size_t value =
+ AggregateSizesImpl<Tuple, std::tuple_size<Tuple>::value - 1>::value;
+};
+
+template <MetadataRecord::RecordKinds Kind, class... DataTypes>
+MetadataRecord createMetadataRecord(DataTypes &&... Ds) {
+ static_assert(AggregateSizes<std::tuple<DataTypes...>>::value <=
+ sizeof(MetadataRecord) - 1,
+ "Metadata payload longer than metadata buffer!");
+ MetadataRecord R;
+ R.Type = 1;
+ R.RecordKind = static_cast<uint8_t>(Kind);
+ Serializer::serializeTo(R.Data,
+ std::make_tuple(std::forward<DataTypes>(Ds)...));
+ return R;
+}
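+
+// For instance, a NewCPUId record carrying, say, a 16-bit CPU number followed
+// by a full 64-bit TSC (10 bytes of payload, which fits the 15-byte Data area
+// checked by the static_assert above) could be built with:
+//
+//   MetadataRecord M =
+//       createMetadataRecord<MetadataRecord::RecordKinds::NewCPUId>(
+//           uint16_t{3}, uint64_t{1000000});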
+
+class FDRLogWriter {
+ BufferQueue::Buffer &Buffer;
+ char *NextRecord = nullptr;
+
+ template <class T> void writeRecord(const T &R) {
+ internal_memcpy(NextRecord, reinterpret_cast<const char *>(&R), sizeof(T));
+ NextRecord += sizeof(T);
+ // We need this atomic fence here to ensure that other threads attempting to
+ // read the bytes in the buffer will see the writes committed before the
+ // extents are updated.
+ atomic_thread_fence(memory_order_release);
+ atomic_fetch_add(Buffer.Extents, sizeof(T), memory_order_acq_rel);
+ }
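+
+ // The release fence above pairs with an acquire on the reader side: a
+ // consumer first loads the extents with memory_order_acquire, then reads
+ // that many bytes from the buffer, e.g.
+ //
+ //   auto N = atomic_load(Buffer.Extents, memory_order_acquire);
+ //   // ... the first N bytes of Buffer.Data are now safe to read ...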
+
+public:
+ explicit FDRLogWriter(BufferQueue::Buffer &B, char *P)
+ : Buffer(B), NextRecord(P) {
+ DCHECK_NE(Buffer.Data, nullptr);
+ DCHECK_NE(NextRecord, nullptr);
+ }
+
+ explicit FDRLogWriter(BufferQueue::Buffer &B)
+ : FDRLogWriter(B, static_cast<char *>(B.Data)) {}
+
+ template <MetadataRecord::RecordKinds Kind, class... Data>
+ bool writeMetadata(Data &&... Ds) {
+ // TODO: Check boundary conditions:
+ // 1) Buffer is full, and cannot handle one metadata record.
+ // 2) Buffer queue is finalising.
+ writeRecord(createMetadataRecord<Kind>(std::forward<Data>(Ds)...));
+ return true;
+ }
+
+ template <size_t N> size_t writeMetadataRecords(MetadataRecord (&Recs)[N]) {
+ constexpr auto Size = sizeof(MetadataRecord) * N;
+ internal_memcpy(NextRecord, reinterpret_cast<const char *>(Recs), Size);
+ NextRecord += Size;
+ // We need this atomic fence here to ensure that other threads attempting to
+ // read the bytes in the buffer will see the writes committed before the
+ // extents are updated.
+ atomic_thread_fence(memory_order_release);
+ atomic_fetch_add(Buffer.Extents, Size, memory_order_acq_rel);
+ return Size;
+ }
+
+ enum class FunctionRecordKind : uint8_t {
+ Enter = 0x00,
+ Exit = 0x01,
+ TailExit = 0x02,
+ EnterArg = 0x03,
+ };
+
+ bool writeFunction(FunctionRecordKind Kind, int32_t FuncId, int32_t Delta) {
+ FunctionRecord R;
+ R.Type = 0;
+ R.RecordKind = uint8_t(Kind);
+ R.FuncId = FuncId;
+ R.TSCDelta = Delta;
+ writeRecord(R);
+ return true;
+ }
+
+ bool writeFunctionWithArg(FunctionRecordKind Kind, int32_t FuncId,
+ int32_t Delta, uint64_t Arg) {
+ // We need to write the function with arg into the buffer, and then
+ // atomically update the buffer extents. This ensures that any reads
+ // synchronised on the buffer extents record will always see the writes
+ // that happen before the atomic update.
+ FunctionRecord R;
+ R.Type = 0;
+ R.RecordKind = uint8_t(Kind);
+ R.FuncId = FuncId;
+ R.TSCDelta = Delta;
+ MetadataRecord A =
+ createMetadataRecord<MetadataRecord::RecordKinds::CallArgument>(Arg);
+ NextRecord = reinterpret_cast<char *>(internal_memcpy(
+ NextRecord, reinterpret_cast<char *>(&R), sizeof(R))) +
+ sizeof(R);
+ NextRecord = reinterpret_cast<char *>(internal_memcpy(
+ NextRecord, reinterpret_cast<char *>(&A), sizeof(A))) +
+ sizeof(A);
+ // We need this atomic fence here to ensure that other threads attempting to
+ // read the bytes in the buffer will see the writes committed before the
+ // extents are updated.
+ atomic_thread_fence(memory_order_release);
+ atomic_fetch_add(Buffer.Extents, sizeof(R) + sizeof(A),
+ memory_order_acq_rel);
+ return true;
+ }
+
+ bool writeCustomEvent(int32_t Delta, const void *Event, int32_t EventSize) {
+ // We write the metadata record and the custom event data into the buffer
+ // first, before we atomically update the extents for the buffer. This
+ // allows us to ensure that any threads reading the extents of the buffer
+ // will only ever see the full metadata and custom event payload accounted
+ // (no partial writes accounted).
+ MetadataRecord R =
+ createMetadataRecord<MetadataRecord::RecordKinds::CustomEventMarker>(
+ EventSize, Delta);
+ NextRecord = reinterpret_cast<char *>(internal_memcpy(
+ NextRecord, reinterpret_cast<char *>(&R), sizeof(R))) +
+ sizeof(R);
+ NextRecord = reinterpret_cast<char *>(
+ internal_memcpy(NextRecord, Event, EventSize)) +
+ EventSize;
+
+ // We need this atomic fence here to ensure that other threads attempting to
+ // read the bytes in the buffer will see the writes committed before the
+ // extents are updated.
+ atomic_thread_fence(memory_order_release);
+ atomic_fetch_add(Buffer.Extents, sizeof(R) + EventSize,
+ memory_order_acq_rel);
+ return true;
+ }
+
+ bool writeTypedEvent(int32_t Delta, uint16_t EventType, const void *Event,
+ int32_t EventSize) {
+ // We do something similar when writing out typed events, see
+ // writeCustomEvent(...) above for details.
+ MetadataRecord R =
+ createMetadataRecord<MetadataRecord::RecordKinds::TypedEventMarker>(
+ EventSize, Delta, EventType);
+ NextRecord = reinterpret_cast<char *>(internal_memcpy(
+ NextRecord, reinterpret_cast<char *>(&R), sizeof(R))) +
+ sizeof(R);
+ NextRecord = reinterpret_cast<char *>(
+ internal_memcpy(NextRecord, Event, EventSize)) +
+ EventSize;
+
+ // We need this atomic fence here to ensure that other threads attempting to
+ // read the bytes in the buffer will see the writes committed before the
+ // extents are updated.
+ atomic_thread_fence(memory_order_release);
+ atomic_fetch_add(Buffer.Extents, sizeof(R) + EventSize,
+ memory_order_acq_rel);
+ return true;
+ }
+
+ char *getNextRecord() const { return NextRecord; }
+
+ void resetRecord() {
+ NextRecord = reinterpret_cast<char *>(Buffer.Data);
+ atomic_store(Buffer.Extents, 0, memory_order_release);
+ }
+
+ void undoWrites(size_t B) {
+ DCHECK_GE(NextRecord - B, reinterpret_cast<char *>(Buffer.Data));
+ NextRecord -= B;
+ atomic_fetch_sub(Buffer.Extents, B, memory_order_acq_rel);
+ }
+
+}; // class FDRLogWriter
+
+} // namespace __xray
+
+#endif // COMPILER_RT_LIB_XRAY_XRAY_FDR_LOG_WRITER_H_
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_fdr_logging.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_fdr_logging.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_fdr_logging.cc (revision 351984)
@@ -0,0 +1,757 @@
+//===-- xray_fdr_logging.cc ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+// Here we implement the Flight Data Recorder mode for XRay, where we use
+// compact structures to store records in memory as well as when writing out the
+// data to files.
+//
+//===----------------------------------------------------------------------===//
+#include "xray_fdr_logging.h"
+#include <cassert>
+#include <errno.h>
+#include <limits>
+#include <memory>
+#include <pthread.h>
+#include <sys/time.h>
+#include <time.h>
+#include <unistd.h>
+
+#include "sanitizer_common/sanitizer_allocator_internal.h"
+#include "sanitizer_common/sanitizer_atomic.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "xray/xray_interface.h"
+#include "xray/xray_records.h"
+#include "xray_allocator.h"
+#include "xray_buffer_queue.h"
+#include "xray_defs.h"
+#include "xray_fdr_controller.h"
+#include "xray_fdr_flags.h"
+#include "xray_fdr_log_writer.h"
+#include "xray_flags.h"
+#include "xray_recursion_guard.h"
+#include "xray_tsc.h"
+#include "xray_utils.h"
+
+namespace __xray {
+
+static atomic_sint32_t LoggingStatus = {
+ XRayLogInitStatus::XRAY_LOG_UNINITIALIZED};
+
+namespace {
+
+// Group together thread-local data in a struct, then hide it behind a function
+// call so that it can be initialized on first use instead of as a global. We
+// force the alignment to 64 bytes for x86 cache line alignment, as this
+// structure is used in the hot path of the implementation.
+struct XRAY_TLS_ALIGNAS(64) ThreadLocalData {
+ BufferQueue::Buffer Buffer{};
+ BufferQueue *BQ = nullptr;
+
+ using LogWriterStorage =
+ typename std::aligned_storage<sizeof(FDRLogWriter),
+ alignof(FDRLogWriter)>::type;
+
+ LogWriterStorage LWStorage;
+ FDRLogWriter *Writer = nullptr;
+
+ using ControllerStorage =
+ typename std::aligned_storage<sizeof(FDRController<>),
+ alignof(FDRController<>)>::type;
+ ControllerStorage CStorage;
+ FDRController<> *Controller = nullptr;
+};
+
+} // namespace
+
+static_assert(std::is_trivially_destructible<ThreadLocalData>::value,
+ "ThreadLocalData must be trivially destructible");
+
+// Use a global pthread key to identify thread-local data for logging.
+static pthread_key_t Key;
+
+// Global BufferQueue.
+static std::aligned_storage<sizeof(BufferQueue)>::type BufferQueueStorage;
+static BufferQueue *BQ = nullptr;
+
+// Global thresholds for function durations.
+static atomic_uint64_t ThresholdTicks{0};
+
+// Global for ticks per second.
+static atomic_uint64_t TicksPerSec{0};
+
+static atomic_sint32_t LogFlushStatus = {
+ XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING};
+
+// This function will initialize the thread-local data structure used by the FDR
+// logging implementation and return a reference to it. The implementation
+// details require a bit of care to maintain.
+//
+// First, some requirements on the implementation in general:
+//
+// - XRay handlers should not call any memory allocation routines that may
+// delegate to an instrumented implementation. This means functions like
+// malloc() and free() should not be called while instrumenting.
+//
+// - We would like to use some thread-local data initialized on first-use of
+// the XRay instrumentation. These allow us to implement unsynchronized
+// routines that access resources associated with the thread.
+//
+// The implementation here uses a few mechanisms that allow us to provide both
+// the requirements listed above. We do this by:
+//
+// 1. Using a thread-local aligned storage buffer for representing the
+// ThreadLocalData struct. This data will be uninitialized memory by
+// design.
+//
+// 2. Not requiring a thread exit handler/implementation, keeping the
+// thread-local as purely a collection of references/data that do not
+// require cleanup.
+//
+// We're doing this to avoid using a `thread_local` object that has a
+// non-trivial destructor, because the C++ runtime might call std::malloc(...)
+// to register calls to destructors. Deadlocks may arise when, for example, an
+// externally provided malloc implementation is XRay instrumented, and
+// initializing the thread-locals involves calling into malloc. A malloc
+// implementation that does global synchronization might be holding a lock for a
+// critical section, calling a function that might be XRay instrumented (and
+// thus in turn calling into malloc by virtue of registration of the
+// thread_local's destructor).
+#if XRAY_HAS_TLS_ALIGNAS
+static_assert(alignof(ThreadLocalData) >= 64,
+ "ThreadLocalData must be cache line aligned.");
+#endif
+static ThreadLocalData &getThreadLocalData() {
+ thread_local typename std::aligned_storage<
+ sizeof(ThreadLocalData), alignof(ThreadLocalData)>::type TLDStorage{};
+
+ if (pthread_getspecific(Key) == NULL) {
+ new (reinterpret_cast<ThreadLocalData *>(&TLDStorage)) ThreadLocalData{};
+ pthread_setspecific(Key, &TLDStorage);
+ }
+
+ return *reinterpret_cast<ThreadLocalData *>(&TLDStorage);
+}
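+
+// Note that the value stored via pthread_setspecific() above is consulted only
+// as a first-use flag here; its real purpose is to make the destructor
+// registered through pthread_key_create() (see fdrLoggingInit()) run at thread
+// exit, so the thread's buffer can be released back to the queue.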
+
+static XRayFileHeader &fdrCommonHeaderInfo() {
+ static std::aligned_storage<sizeof(XRayFileHeader)>::type HStorage;
+ static pthread_once_t OnceInit = PTHREAD_ONCE_INIT;
+ static bool TSCSupported = true;
+ static uint64_t CycleFrequency = NanosecondsPerSecond;
+ pthread_once(
+ &OnceInit, +[] {
+ XRayFileHeader &H = reinterpret_cast<XRayFileHeader &>(HStorage);
+ // Version 2 of the log writes the extents of the buffer, instead of
+ // relying on an end-of-buffer record.
+ // Version 3 includes PID metadata record.
+ // Version 4 includes CPU data in the custom event records.
+ // Version 5 uses relative deltas for custom and typed event records,
+ // and removes the CPU data in custom event records (similar to how
+ // function records use deltas instead of full TSCs and rely on other
+ // metadata records for TSC wraparound and CPU migration).
+ H.Version = 5;
+ H.Type = FileTypes::FDR_LOG;
+
+ // Test for required CPU features and cache the cycle frequency
+ TSCSupported = probeRequiredCPUFeatures();
+ if (TSCSupported)
+ CycleFrequency = getTSCFrequency();
+ H.CycleFrequency = CycleFrequency;
+
+ // FIXME: Actually check whether we have 'constant_tsc' and
+ // 'nonstop_tsc' before setting the values in the header.
+ H.ConstantTSC = 1;
+ H.NonstopTSC = 1;
+ });
+ return reinterpret_cast<XRayFileHeader &>(HStorage);
+}
+
+// This is the iterator implementation, which knows how to handle FDR-mode
+// specific buffers. This is used as an implementation of the iterator function
+// needed by __xray_set_buffer_iterator(...). It maintains a global state of the
+// buffer iteration for the currently installed FDR mode buffers. In particular:
+//
+// - If the argument represents the initial state of XRayBuffer ({nullptr, 0})
+// then the iterator returns the header information.
+// - If the argument represents the header information ({address of header
+// info, size of the header info}) then it returns the first FDR buffer's
+// address and extents.
+// - It will keep returning the next buffer and extents as there are more
+// buffers to process. When the input represents the last buffer, it will
+// return the initial state to signal completion ({nullptr, 0}).
+//
+// See xray/xray_log_interface.h for more details on the requirements for the
+// implementations of __xray_set_buffer_iterator(...) and
+// __xray_log_process_buffers(...).
+XRayBuffer fdrIterator(const XRayBuffer B) {
+ DCHECK(internal_strcmp(__xray_log_get_current_mode(), "xray-fdr") == 0);
+
+ if (BQ == nullptr || !BQ->finalizing()) {
+ if (Verbosity())
+ Report("XRay FDR: Global buffer queue is null or not finalizing!\n");
+ return {nullptr, 0};
+ }
+
+ // We use a global scratch-pad for the header information, which only gets
+ // initialized the first time this function is called. We'll update one part
+ // of this information with some relevant data (in particular the number of
+ // buffers to expect).
+ static std::aligned_storage<sizeof(XRayFileHeader)>::type HeaderStorage;
+ static pthread_once_t HeaderOnce = PTHREAD_ONCE_INIT;
+ pthread_once(
+ &HeaderOnce, +[] {
+ reinterpret_cast<XRayFileHeader &>(HeaderStorage) =
+ fdrCommonHeaderInfo();
+ });
+
+ // We use a convenience alias for code referring to Header from here on out.
+ auto &Header = reinterpret_cast<XRayFileHeader &>(HeaderStorage);
+ if (B.Data == nullptr && B.Size == 0) {
+ Header.FdrData = FdrAdditionalHeaderData{BQ->ConfiguredBufferSize()};
+ return XRayBuffer{static_cast<void *>(&Header), sizeof(Header)};
+ }
+
+ static BufferQueue::const_iterator It{};
+ static BufferQueue::const_iterator End{};
+ static uint8_t *CurrentBuffer{nullptr};
+ static size_t SerializedBufferSize = 0;
+ if (B.Data == static_cast<void *>(&Header) && B.Size == sizeof(Header)) {
+ // From this point on, we provide raw access to the buffers we're getting
+ // from the BufferQueue, relying on the iterators from the current buffer
+ // queue.
+ It = BQ->cbegin();
+ End = BQ->cend();
+ }
+
+ if (CurrentBuffer != nullptr) {
+ deallocateBuffer(CurrentBuffer, SerializedBufferSize);
+ CurrentBuffer = nullptr;
+ }
+
+ if (It == End)
+ return {nullptr, 0};
+
+ // Set up the current buffer to contain the extents like we would when writing
+ // out to disk. The difference here would be that we still write "empty"
+ // buffers, or at least go through the iterators faithfully to let the
+ // handlers see the empty buffers in the queue.
+ //
+ // We need this atomic fence here to ensure that writes happening to the
+ // buffer have been committed before we load the extents atomically. Because
+ // the buffer is not explicitly synchronised across threads, we rely on the
+ // fence ordering to ensure that writes we expect to have been completed
+ // before the fence are fully committed before we read the extents.
+ atomic_thread_fence(memory_order_acquire);
+ auto BufferSize = atomic_load(It->Extents, memory_order_acquire);
+ SerializedBufferSize = BufferSize + sizeof(MetadataRecord);
+ CurrentBuffer = allocateBuffer(SerializedBufferSize);
+ if (CurrentBuffer == nullptr)
+ return {nullptr, 0};
+
+ // Write out the extents as a Metadata Record into the CurrentBuffer.
+ MetadataRecord ExtentsRecord;
+ ExtentsRecord.Type = uint8_t(RecordType::Metadata);
+ ExtentsRecord.RecordKind =
+ uint8_t(MetadataRecord::RecordKinds::BufferExtents);
+ internal_memcpy(ExtentsRecord.Data, &BufferSize, sizeof(BufferSize));
+ auto AfterExtents =
+ static_cast<char *>(internal_memcpy(CurrentBuffer, &ExtentsRecord,
+ sizeof(MetadataRecord))) +
+ sizeof(MetadataRecord);
+ internal_memcpy(AfterExtents, It->Data, BufferSize);
+
+ XRayBuffer Result;
+ Result.Data = CurrentBuffer;
+ Result.Size = SerializedBufferSize;
+ ++It;
+ return Result;
+}
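+
+// A sketch of how a processing function is expected to drive this iterator
+// (see __xray_log_process_buffers(...) in xray/xray_log_interface.h):
+//
+//   XRayBuffer B{nullptr, 0};
+//   while ((B = fdrIterator(B)).Data != nullptr) {
+//     // The first iteration yields the file header; each subsequent
+//     // iteration yields an extents-prefixed copy of one buffer.
+//   }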
+
+// Must finalize before flushing.
+XRayLogFlushStatus fdrLoggingFlush() XRAY_NEVER_INSTRUMENT {
+ if (atomic_load(&LoggingStatus, memory_order_acquire) !=
+ XRayLogInitStatus::XRAY_LOG_FINALIZED) {
+ if (Verbosity())
+ Report("Not flushing log, implementation is not finalized.\n");
+ return XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING;
+ }
+
+ s32 Result = XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING;
+ if (!atomic_compare_exchange_strong(&LogFlushStatus, &Result,
+ XRayLogFlushStatus::XRAY_LOG_FLUSHING,
+ memory_order_release)) {
+ if (Verbosity())
+ Report("Not flushing log, implementation is still finalizing.\n");
+ return static_cast<XRayLogFlushStatus>(Result);
+ }
+
+ if (BQ == nullptr) {
+ if (Verbosity())
+ Report("Cannot flush when global buffer queue is null.\n");
+ return XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING;
+ }
+
+ // We wait a number of milliseconds to allow threads to see that we've
+ // finalised before attempting to flush the log.
+ SleepForMillis(fdrFlags()->grace_period_ms);
+
+ // At this point, we're going to uninstall the iterator implementation, before
+ // we decide to do anything further with the global buffer queue.
+ __xray_log_remove_buffer_iterator();
+
+ // Once flushed, we should set the global status of the logging implementation
+ // to "uninitialized" to allow for FDR-logging multiple runs.
+ auto ResetToUninitialized = at_scope_exit([] {
+ atomic_store(&LoggingStatus, XRayLogInitStatus::XRAY_LOG_UNINITIALIZED,
+ memory_order_release);
+ });
+
+ auto CleanupBuffers = at_scope_exit([] {
+ auto &TLD = getThreadLocalData();
+ if (TLD.Controller != nullptr)
+ TLD.Controller->flush();
+ });
+
+ if (fdrFlags()->no_file_flush) {
+ if (Verbosity())
+ Report("XRay FDR: Not flushing to file, 'no_file_flush=true'.\n");
+
+ atomic_store(&LogFlushStatus, XRayLogFlushStatus::XRAY_LOG_FLUSHED,
+ memory_order_release);
+ return XRayLogFlushStatus::XRAY_LOG_FLUSHED;
+ }
+
+ // We write out the file in the following format:
+ //
+ // 1) We write down the XRay file header, with the current format version and
+ // type FDR_LOG.
+ // 2) Then we use the 'apply' member of the BufferQueue that's live, to
+ // ensure that at this point in time we write down the buffers that have
+ // been released (and marked "used") -- we dump the full buffer for now
+ // (fixed-sized) and let the tools reading the buffers deal with the data
+ // afterwards.
+ //
+ LogWriter *LW = LogWriter::Open();
+ if (LW == nullptr) {
+ auto Result = XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING;
+ atomic_store(&LogFlushStatus, Result, memory_order_release);
+ return Result;
+ }
+
+ XRayFileHeader Header = fdrCommonHeaderInfo();
+ Header.FdrData = FdrAdditionalHeaderData{BQ->ConfiguredBufferSize()};
+ LW->WriteAll(reinterpret_cast<char *>(&Header),
+ reinterpret_cast<char *>(&Header) + sizeof(Header));
+
+ // Release the current thread's buffer before we attempt to write out all the
+ // buffers. This ensures that, in case we had only a single thread going, we
+ // are still able to capture the data.
+ auto &TLD = getThreadLocalData();
+ if (TLD.Controller != nullptr)
+ TLD.Controller->flush();
+
+ BQ->apply([&](const BufferQueue::Buffer &B) {
+ // Starting at version 2 of the FDR logging implementation, we only write
+ // the records identified by the extents of the buffer. We use the Extents
+ // from the Buffer and write that out as the first record in the buffer. We
+ // still use a Metadata record, but fill in the extents instead for the
+ // data.
+ MetadataRecord ExtentsRecord;
+ auto BufferExtents = atomic_load(B.Extents, memory_order_acquire);
+ DCHECK(BufferExtents <= B.Size);
+ ExtentsRecord.Type = uint8_t(RecordType::Metadata);
+ ExtentsRecord.RecordKind =
+ uint8_t(MetadataRecord::RecordKinds::BufferExtents);
+ internal_memcpy(ExtentsRecord.Data, &BufferExtents, sizeof(BufferExtents));
+ if (BufferExtents > 0) {
+ LW->WriteAll(reinterpret_cast<char *>(&ExtentsRecord),
+ reinterpret_cast<char *>(&ExtentsRecord) +
+ sizeof(MetadataRecord));
+ LW->WriteAll(reinterpret_cast<char *>(B.Data),
+ reinterpret_cast<char *>(B.Data) + BufferExtents);
+ }
+ });
+
+ atomic_store(&LogFlushStatus, XRayLogFlushStatus::XRAY_LOG_FLUSHED,
+ memory_order_release);
+ return XRayLogFlushStatus::XRAY_LOG_FLUSHED;
+}
+
+XRayLogInitStatus fdrLoggingFinalize() XRAY_NEVER_INSTRUMENT {
+ s32 CurrentStatus = XRayLogInitStatus::XRAY_LOG_INITIALIZED;
+ if (!atomic_compare_exchange_strong(&LoggingStatus, &CurrentStatus,
+ XRayLogInitStatus::XRAY_LOG_FINALIZING,
+ memory_order_release)) {
+ if (Verbosity())
+ Report("Cannot finalize log, implementation not initialized.\n");
+ return static_cast<XRayLogInitStatus>(CurrentStatus);
+ }
+
+ // Do special things to make the log finalize itself, and not allow any more
+ // operations to be performed until re-initialized.
+ if (BQ == nullptr) {
+ if (Verbosity())
+ Report("Attempting to finalize an uninitialized global buffer!\n");
+ } else {
+ BQ->finalize();
+ }
+
+ atomic_store(&LoggingStatus, XRayLogInitStatus::XRAY_LOG_FINALIZED,
+ memory_order_release);
+ return XRayLogInitStatus::XRAY_LOG_FINALIZED;
+}
+
+struct TSCAndCPU {
+ uint64_t TSC = 0;
+ unsigned char CPU = 0;
+};
+
+static TSCAndCPU getTimestamp() XRAY_NEVER_INSTRUMENT {
+ // We want to get the TSC as early as possible, so that we can check whether
+ // we've seen this CPU before. We also do it before we load anything else,
+ // to allow for forward progress with the scheduling.
+ TSCAndCPU Result;
+
+ // Test once for required CPU features
+ static pthread_once_t OnceProbe = PTHREAD_ONCE_INIT;
+ static bool TSCSupported = true;
+ pthread_once(
+ &OnceProbe, +[] { TSCSupported = probeRequiredCPUFeatures(); });
+
+ if (TSCSupported) {
+ Result.TSC = __xray::readTSC(Result.CPU);
+ } else {
+ // FIXME: This code needs refactoring as it appears in multiple locations
+ timespec TS;
+ int result = clock_gettime(CLOCK_REALTIME, &TS);
+ if (result != 0) {
+ Report("clock_gettime(2) return %d, errno=%d", result, int(errno));
+ TS = {0, 0};
+ }
+ Result.CPU = 0;
+ Result.TSC = TS.tv_sec * __xray::NanosecondsPerSecond + TS.tv_nsec;
+ }
+ return Result;
+}
+
+thread_local atomic_uint8_t Running{0};
+
+static bool setupTLD(ThreadLocalData &TLD) XRAY_NEVER_INSTRUMENT {
+ // Check if we're finalizing, before proceeding.
+ {
+ auto Status = atomic_load(&LoggingStatus, memory_order_acquire);
+ if (Status == XRayLogInitStatus::XRAY_LOG_FINALIZING ||
+ Status == XRayLogInitStatus::XRAY_LOG_FINALIZED) {
+ if (TLD.Controller != nullptr) {
+ TLD.Controller->flush();
+ TLD.Controller = nullptr;
+ }
+ return false;
+ }
+ }
+
+ if (UNLIKELY(TLD.Controller == nullptr)) {
+ // Set up the TLD buffer queue.
+ if (UNLIKELY(BQ == nullptr))
+ return false;
+ TLD.BQ = BQ;
+
+ // If the thread's buffer is from a previous generation, release it back to
+ // the queue first; bail out if that fails.
+ if (TLD.Buffer.Generation != BQ->generation() &&
+ TLD.BQ->releaseBuffer(TLD.Buffer) != BufferQueue::ErrorCode::Ok)
+ return false;
+
+ // Set up a buffer, before setting up the log writer. Bail out on failure.
+ if (TLD.BQ->getBuffer(TLD.Buffer) != BufferQueue::ErrorCode::Ok)
+ return false;
+
+ // Set up the Log Writer for this thread.
+ if (UNLIKELY(TLD.Writer == nullptr)) {
+ auto *LWStorage = reinterpret_cast<FDRLogWriter *>(&TLD.LWStorage);
+ new (LWStorage) FDRLogWriter(TLD.Buffer);
+ TLD.Writer = LWStorage;
+ } else {
+ TLD.Writer->resetRecord();
+ }
+
+ auto *CStorage = reinterpret_cast<FDRController<> *>(&TLD.CStorage);
+ new (CStorage)
+ FDRController<>(TLD.BQ, TLD.Buffer, *TLD.Writer, clock_gettime,
+ atomic_load_relaxed(&ThresholdTicks));
+ TLD.Controller = CStorage;
+ }
+
+ DCHECK_NE(TLD.Controller, nullptr);
+ return true;
+}
+
+void fdrLoggingHandleArg0(int32_t FuncId,
+ XRayEntryType Entry) XRAY_NEVER_INSTRUMENT {
+ auto TC = getTimestamp();
+ auto &TSC = TC.TSC;
+ auto &CPU = TC.CPU;
+ RecursionGuard Guard{Running};
+ if (!Guard)
+ return;
+
+ auto &TLD = getThreadLocalData();
+ if (!setupTLD(TLD))
+ return;
+
+ switch (Entry) {
+ case XRayEntryType::ENTRY:
+ case XRayEntryType::LOG_ARGS_ENTRY:
+ TLD.Controller->functionEnter(FuncId, TSC, CPU);
+ return;
+ case XRayEntryType::EXIT:
+ TLD.Controller->functionExit(FuncId, TSC, CPU);
+ return;
+ case XRayEntryType::TAIL:
+ TLD.Controller->functionTailExit(FuncId, TSC, CPU);
+ return;
+ case XRayEntryType::CUSTOM_EVENT:
+ case XRayEntryType::TYPED_EVENT:
+ break;
+ }
+}
+
+void fdrLoggingHandleArg1(int32_t FuncId, XRayEntryType Entry,
+ uint64_t Arg) XRAY_NEVER_INSTRUMENT {
+ auto TC = getTimestamp();
+ auto &TSC = TC.TSC;
+ auto &CPU = TC.CPU;
+ RecursionGuard Guard{Running};
+ if (!Guard)
+ return;
+
+ auto &TLD = getThreadLocalData();
+ if (!setupTLD(TLD))
+ return;
+
+ switch (Entry) {
+ case XRayEntryType::ENTRY:
+ case XRayEntryType::LOG_ARGS_ENTRY:
+ TLD.Controller->functionEnterArg(FuncId, TSC, CPU, Arg);
+ return;
+ case XRayEntryType::EXIT:
+ TLD.Controller->functionExit(FuncId, TSC, CPU);
+ return;
+ case XRayEntryType::TAIL:
+ TLD.Controller->functionTailExit(FuncId, TSC, CPU);
+ return;
+ case XRayEntryType::CUSTOM_EVENT:
+ case XRayEntryType::TYPED_EVENT:
+ break;
+ }
+}
+
+void fdrLoggingHandleCustomEvent(void *Event,
+ std::size_t EventSize) XRAY_NEVER_INSTRUMENT {
+ auto TC = getTimestamp();
+ auto &TSC = TC.TSC;
+ auto &CPU = TC.CPU;
+ RecursionGuard Guard{Running};
+ if (!Guard)
+ return;
+
+ // Complain (once) if we ever get a custom event that's larger than what we
+ // can possibly support.
+ if (EventSize >
+ static_cast<std::size_t>(std::numeric_limits<int32_t>::max())) {
+ static pthread_once_t Once = PTHREAD_ONCE_INIT;
+ pthread_once(
+ &Once, +[] {
+ Report("Custom event size too large; truncating to %d.\n",
+ std::numeric_limits<int32_t>::max());
+ });
+ }
+
+ auto &TLD = getThreadLocalData();
+ if (!setupTLD(TLD))
+ return;
+
+ int32_t ReducedEventSize = static_cast<int32_t>(EventSize);
+ TLD.Controller->customEvent(TSC, CPU, Event, ReducedEventSize);
+}
+
+void fdrLoggingHandleTypedEvent(
+ uint16_t EventType, const void *Event,
+ std::size_t EventSize) noexcept XRAY_NEVER_INSTRUMENT {
+ auto TC = getTimestamp();
+ auto &TSC = TC.TSC;
+ auto &CPU = TC.CPU;
+ RecursionGuard Guard{Running};
+ if (!Guard)
+ return;
+
+ // Complain (once) if we ever get a typed event that's larger than what we
+ // can possibly support.
+ if (EventSize >
+ static_cast<std::size_t>(std::numeric_limits<int32_t>::max())) {
+ static pthread_once_t Once = PTHREAD_ONCE_INIT;
+ pthread_once(
+ &Once, +[] {
+ Report("Typed event size too large; truncating to %d.\n",
+ std::numeric_limits<int32_t>::max());
+ });
+ }
+
+ auto &TLD = getThreadLocalData();
+ if (!setupTLD(TLD))
+ return;
+
+ int32_t ReducedEventSize = static_cast<int32_t>(EventSize);
+ TLD.Controller->typedEvent(TSC, CPU, EventType, Event, ReducedEventSize);
+}
+
+XRayLogInitStatus fdrLoggingInit(size_t, size_t, void *Options,
+ size_t OptionsSize) XRAY_NEVER_INSTRUMENT {
+ if (Options == nullptr)
+ return XRayLogInitStatus::XRAY_LOG_UNINITIALIZED;
+
+ s32 CurrentStatus = XRayLogInitStatus::XRAY_LOG_UNINITIALIZED;
+ if (!atomic_compare_exchange_strong(&LoggingStatus, &CurrentStatus,
+ XRayLogInitStatus::XRAY_LOG_INITIALIZING,
+ memory_order_release)) {
+ if (Verbosity())
+ Report("Cannot initialize already initialized implementation.\n");
+ return static_cast<XRayLogInitStatus>(CurrentStatus);
+ }
+
+ if (Verbosity())
+ Report("Initializing FDR mode with options: %s\n",
+ static_cast<const char *>(Options));
+
+ // TODO: Factor out the flags specific to the FDR mode implementation. For
+ // now, use the global/single definition of the flags, since the FDR mode
+ // flags are already defined there.
+ FlagParser FDRParser;
+ FDRFlags FDRFlags;
+ registerXRayFDRFlags(&FDRParser, &FDRFlags);
+ FDRFlags.setDefaults();
+
+ // Override first from the general XRAY_DEFAULT_OPTIONS compiler-provided
+ // options until we migrate everyone to use the XRAY_FDR_OPTIONS
+ // compiler-provided options.
+ FDRParser.ParseString(useCompilerDefinedFlags());
+ FDRParser.ParseString(useCompilerDefinedFDRFlags());
+ auto *EnvOpts = GetEnv("XRAY_FDR_OPTIONS");
+ if (EnvOpts == nullptr)
+ EnvOpts = "";
+ FDRParser.ParseString(EnvOpts);
+
+ // FIXME: Remove this when we fully remove the deprecated flags.
+ if (internal_strlen(EnvOpts) == 0) {
+ FDRFlags.func_duration_threshold_us =
+ flags()->xray_fdr_log_func_duration_threshold_us;
+ FDRFlags.grace_period_ms = flags()->xray_fdr_log_grace_period_ms;
+ }
+
+ // The provided options should always override the compiler-provided and
+ // environment-variable defined options.
+ FDRParser.ParseString(static_cast<const char *>(Options));
+ *fdrFlags() = FDRFlags;
+ auto BufferSize = FDRFlags.buffer_size;
+ auto BufferMax = FDRFlags.buffer_max;
+
+ if (BQ == nullptr) {
+ bool Success = false;
+ BQ = reinterpret_cast<BufferQueue *>(&BufferQueueStorage);
+ new (BQ) BufferQueue(BufferSize, BufferMax, Success);
+ if (!Success) {
+ Report("BufferQueue init failed.\n");
+ return XRayLogInitStatus::XRAY_LOG_UNINITIALIZED;
+ }
+ } else {
+ if (BQ->init(BufferSize, BufferMax) != BufferQueue::ErrorCode::Ok) {
+ if (Verbosity())
+ Report("Failed to re-initialize global buffer queue. Init failed.\n");
+ return XRayLogInitStatus::XRAY_LOG_UNINITIALIZED;
+ }
+ }
+
+ static pthread_once_t OnceInit = PTHREAD_ONCE_INIT;
+ pthread_once(
+ &OnceInit, +[] {
+ atomic_store(&TicksPerSec,
+ probeRequiredCPUFeatures() ? getTSCFrequency()
+ : __xray::NanosecondsPerSecond,
+ memory_order_release);
+ pthread_key_create(
+ &Key, +[](void *TLDPtr) {
+ if (TLDPtr == nullptr)
+ return;
+ auto &TLD = *reinterpret_cast<ThreadLocalData *>(TLDPtr);
+ if (TLD.BQ == nullptr)
+ return;
+ if (TLD.Buffer.Data == nullptr)
+ return;
+ auto EC = TLD.BQ->releaseBuffer(TLD.Buffer);
+ if (EC != BufferQueue::ErrorCode::Ok)
+ Report("At thread exit, failed to release buffer at %p; "
+ "error=%s\n",
+ TLD.Buffer.Data, BufferQueue::getErrorString(EC));
+ });
+ });
+
+ atomic_store(&ThresholdTicks,
+ atomic_load_relaxed(&TicksPerSec) *
+ fdrFlags()->func_duration_threshold_us / 1000000,
+ memory_order_release);
+ // Arg1 handler should go in first to avoid concurrent code accidentally
+ // falling back to arg0 when it should have run arg1.
+ __xray_set_handler_arg1(fdrLoggingHandleArg1);
+ // Install the actual handleArg0 handler after initialising the buffers.
+ __xray_set_handler(fdrLoggingHandleArg0);
+ __xray_set_customevent_handler(fdrLoggingHandleCustomEvent);
+ __xray_set_typedevent_handler(fdrLoggingHandleTypedEvent);
+
+ // Install the buffer iterator implementation.
+ __xray_log_set_buffer_iterator(fdrIterator);
+
+ atomic_store(&LoggingStatus, XRayLogInitStatus::XRAY_LOG_INITIALIZED,
+ memory_order_release);
+
+ if (Verbosity())
+ Report("XRay FDR init successful.\n");
+ return XRayLogInitStatus::XRAY_LOG_INITIALIZED;
+}
+
+bool fdrLogDynamicInitializer() XRAY_NEVER_INSTRUMENT {
+ XRayLogImpl Impl{
+ fdrLoggingInit,
+ fdrLoggingFinalize,
+ fdrLoggingHandleArg0,
+ fdrLoggingFlush,
+ };
+ auto RegistrationResult = __xray_log_register_mode("xray-fdr", Impl);
+ if (RegistrationResult != XRayLogRegisterStatus::XRAY_REGISTRATION_OK &&
+ Verbosity()) {
+ Report("Cannot register XRay FDR mode to 'xray-fdr'; error = %d\n",
+ RegistrationResult);
+ return false;
+ }
+
+ if (flags()->xray_fdr_log ||
+ !internal_strcmp(flags()->xray_mode, "xray-fdr")) {
+ auto SelectResult = __xray_log_select_mode("xray-fdr");
+ if (SelectResult != XRayLogRegisterStatus::XRAY_REGISTRATION_OK &&
+ Verbosity()) {
+ Report("Cannot select XRay FDR mode as 'xray-fdr'; error = %d\n",
+ SelectResult);
+ return false;
+ }
+ }
+ return true;
+}
+
+} // namespace __xray
+
+static auto UNUSED Unused = __xray::fdrLogDynamicInitializer();
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_fdr_logging.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_fdr_logging.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_fdr_logging.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_fdr_logging.h (revision 351984)
@@ -0,0 +1,38 @@
+//===-- xray_fdr_logging.h ------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a function call tracing system.
+//
+//===----------------------------------------------------------------------===//
+#ifndef XRAY_XRAY_FDR_LOGGING_H
+#define XRAY_XRAY_FDR_LOGGING_H
+
+#include "xray/xray_log_interface.h"
+#include "xray_fdr_log_records.h"
+
+// FDR (Flight Data Recorder) Mode
+// ===============================
+//
+// The XRay whitepaper describes a mode of operation for function call trace
+// logging that involves writing small records into an in-memory circular
+// buffer, that then gets logged to disk on demand. To do this efficiently and
+// capture as much data as we can, we use smaller records compared to the
+// default mode of always writing fixed-size records.
+
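+// As a usage sketch (assuming an XRay-instrumented binary), FDR mode can be
+// selected and tuned from the environment, with the FDR-specific options
+// parsed by fdrLoggingInit(...):
+//
+//   XRAY_OPTIONS="patch_premain=true xray_mode=xray-fdr" \
+//   XRAY_FDR_OPTIONS="buffer_size=16384 buffer_max=10" ./instrumented-binary
+//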
+namespace __xray {
+XRayLogInitStatus fdrLoggingInit(size_t BufferSize, size_t BufferMax,
+ void *Options, size_t OptionsSize);
+XRayLogInitStatus fdrLoggingFinalize();
+void fdrLoggingHandleArg0(int32_t FuncId, XRayEntryType Entry);
+void fdrLoggingHandleArg1(int32_t FuncId, XRayEntryType Entry, uint64_t Arg1);
+XRayLogFlushStatus fdrLoggingFlush();
+XRayLogInitStatus fdrLoggingReset();
+
+} // namespace __xray
+
+#endif // XRAY_XRAY_FDR_LOGGING_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_fdr_logging.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_flags.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_flags.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_flags.cc (revision 351984)
@@ -0,0 +1,84 @@
+//===-- xray_flags.cc -------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+// XRay flag parsing logic.
+//===----------------------------------------------------------------------===//
+
+#include "xray_flags.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_flag_parser.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "xray_defs.h"
+
+using namespace __sanitizer;
+
+namespace __xray {
+
+Flags xray_flags_dont_use_directly; // use via flags().
+
+void Flags::setDefaults() XRAY_NEVER_INSTRUMENT {
+#define XRAY_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
+#include "xray_flags.inc"
+#undef XRAY_FLAG
+}
+
+void registerXRayFlags(FlagParser *P, Flags *F) XRAY_NEVER_INSTRUMENT {
+#define XRAY_FLAG(Type, Name, DefaultValue, Description) \
+ RegisterFlag(P, #Name, Description, &F->Name);
+#include "xray_flags.inc"
+#undef XRAY_FLAG
+}
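+
+// To illustrate the X-macro pattern above: an entry in xray_flags.inc such as
+//
+//   XRAY_FLAG(bool, patch_premain, false,
+//             "Whether to patch instrumentation points before main.")
+//
+// expands to `patch_premain = false;` inside setDefaults(), and to
+// `RegisterFlag(P, "patch_premain", "Whether to patch instrumentation points "
+// "before main.", &F->patch_premain);` inside registerXRayFlags().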
+
+// This function returns a statically defined list of options that control
+// XRay, provided through the XRAY_DEFAULT_OPTIONS macro introduced at build
+// time of the XRay runtime. This means users/deployments can tweak the
+// defaults at compile-time, overriding the hard-coded defaults in
+// xray_flags.inc.
+const char *useCompilerDefinedFlags() XRAY_NEVER_INSTRUMENT {
+#ifdef XRAY_DEFAULT_OPTIONS
+ // Do the double-layered string conversion to prevent badly crafted strings
+ // provided through XRAY_DEFAULT_OPTIONS from causing compilation issues (or
+ // changing the semantics of the implementation through the macro). This
+ // ensures that whatever XRAY_DEFAULT_OPTIONS is defined as gets converted
+ // into a string literal.
+ return SANITIZER_STRINGIFY(XRAY_DEFAULT_OPTIONS);
+#else
+ return "";
+#endif
+}
+
+void initializeFlags() XRAY_NEVER_INSTRUMENT {
+ SetCommonFlagsDefaults();
+ auto *F = flags();
+ F->setDefaults();
+
+ FlagParser XRayParser;
+ registerXRayFlags(&XRayParser, F);
+ RegisterCommonFlags(&XRayParser);
+
+ // Use options defaulted at compile-time for the runtime.
+ const char *XRayCompileFlags = useCompilerDefinedFlags();
+ XRayParser.ParseString(XRayCompileFlags);
+
+ // Override from environment variables.
+ XRayParser.ParseStringFromEnv("XRAY_OPTIONS");
+
+ // Override from command line.
+ InitializeCommonFlags();
+
+ if (Verbosity())
+ ReportUnrecognizedFlags();
+
+ if (common_flags()->help) {
+ XRayParser.PrintFlagDescriptions();
+ }
+}
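+
+// For example (a sketch, assuming an instrumented binary), running with
+//
+//   XRAY_OPTIONS="patch_premain=true verbosity=1" ./a.out
+//
+// overrides both the hard-coded defaults and any XRAY_DEFAULT_OPTIONS-provided
+// values, since ParseStringFromEnv() runs after the compile-time string is
+// parsed.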
+
+} // namespace __xray
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_flags.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_flags.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_flags.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_flags.h (revision 351984)
@@ -0,0 +1,39 @@
+//===-- xray_flags.h -------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+// XRay runtime flags.
+//===----------------------------------------------------------------------===//
+
+#ifndef XRAY_FLAGS_H
+#define XRAY_FLAGS_H
+
+#include "sanitizer_common/sanitizer_flag_parser.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+
+namespace __xray {
+
+struct Flags {
+#define XRAY_FLAG(Type, Name, DefaultValue, Description) Type Name;
+#include "xray_flags.inc"
+#undef XRAY_FLAG
+
+ void setDefaults();
+};
+
+extern Flags xray_flags_dont_use_directly;
+extern void registerXRayFlags(FlagParser *P, Flags *F);
+const char *useCompilerDefinedFlags();
+inline Flags *flags() { return &xray_flags_dont_use_directly; }
+
+void initializeFlags();
+
+} // namespace __xray
+
+#endif // XRAY_FLAGS_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_flags.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_flags.inc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_flags.inc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_flags.inc (revision 351984)
@@ -0,0 +1,49 @@
+//===-- xray_flags.inc ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// XRay runtime flags.
+//
+//===----------------------------------------------------------------------===//
+#ifndef XRAY_FLAG
+#error "Define XRAY_FLAG prior to including this file!"
+#endif
+
+XRAY_FLAG(bool, patch_premain, false,
+ "Whether to patch instrumentation points before main.")
+XRAY_FLAG(const char *, xray_logfile_base, "xray-log.",
+ "Filename base for the xray logfile.")
+XRAY_FLAG(const char *, xray_mode, "", "Mode to install by default.")
+XRAY_FLAG(uptr, xray_page_size_override, 0,
+ "Override the default page size for the system, in bytes. The size "
+ "should be a power-of-two.")
+
+// Basic (Naive) Mode logging options.
+XRAY_FLAG(bool, xray_naive_log, false,
+ "DEPRECATED: Use xray_mode=xray-basic instead.")
+XRAY_FLAG(int, xray_naive_log_func_duration_threshold_us, 5,
+ "DEPRECATED: use the environment variable XRAY_BASIC_OPTIONS and set "
+ "func_duration_threshold_us instead.")
+XRAY_FLAG(int, xray_naive_log_max_stack_depth, 64,
+ "DEPRECATED: use the environment variable XRAY_BASIC_OPTIONS and set "
+ "max_stack_depth instead.")
+XRAY_FLAG(int, xray_naive_log_thread_buffer_size, 1024,
+ "DEPRECATED: use the environment variable XRAY_BASIC_OPTIONS and set "
+ "thread_buffer_size instead.")
+
+// FDR (Flight Data Recorder) Mode logging options.
+XRAY_FLAG(bool, xray_fdr_log, false,
+ "DEPRECATED: Use xray_mode=xray-fdr instead.")
+XRAY_FLAG(int, xray_fdr_log_func_duration_threshold_us, 5,
+ "DEPRECATED: use the environment variable XRAY_FDR_OPTIONS and set "
+ "func_duration_threshold_us instead.")
+XRAY_FLAG(int, xray_fdr_log_grace_period_us, 0,
+ "DEPRECATED: use the environment variable XRAY_FDR_OPTIONS and set "
+ "grace_period_ms instead.")
+XRAY_FLAG(int, xray_fdr_log_grace_period_ms, 100,
+ "DEPRECATED: use the environment variable XRAY_FDR_OPTIONS and set "
+ "grace_period_ms instead.")
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_flags.inc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_function_call_trie.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_function_call_trie.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_function_call_trie.h (revision 351984)
@@ -0,0 +1,603 @@
+//===-- xray_function_call_trie.h ------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+// This file defines the interface for a function call trie.
+//
+//===----------------------------------------------------------------------===//
+#ifndef XRAY_FUNCTION_CALL_TRIE_H
+#define XRAY_FUNCTION_CALL_TRIE_H
+
+#include "xray_buffer_queue.h"
+#include "xray_defs.h"
+#include "xray_profiling_flags.h"
+#include "xray_segmented_array.h"
+#include <limits>
+#include <memory> // For placement new.
+#include <utility>
+
+namespace __xray {
+
+/// A FunctionCallTrie represents the stack traces of XRay instrumented
+/// functions that we've encountered, where a node corresponds to a function and
+/// the path from the root to the node represents its stack trace. Each node in
+/// the trie will contain some useful values, including:
+///
+/// * The cumulative amount of time spent in this particular node/stack.
+/// * The number of times this stack has appeared.
+/// * A histogram of latencies for that particular node.
+///
+/// Each node in the trie will also contain a list of callees, represented using
+/// a Array<NodeIdPair> -- each NodeIdPair instance will contain the function
+/// ID of the callee, and a pointer to the node.
+///
+/// If we visualise this data structure, we'll find the following potential
+/// representation:
+///
+/// [function id node] -> [callees] [cumulative time]
+/// [call counter] [latency histogram]
+///
+/// As an example, when we have a function in this pseudocode:
+///
+/// func f(N) {
+/// g()
+/// h()
+/// for i := 1..N { j() }
+/// }
+///
+/// We may end up with a trie of the following form:
+///
+/// f -> [ g, h, j ] [...] [1] [...]
+/// g -> [ ... ] [...] [1] [...]
+/// h -> [ ... ] [...] [1] [...]
+/// j -> [ ... ] [...] [N] [...]
+///
+/// If for instance the function g() called j() like so:
+///
+/// func g() {
+/// for i := 1..10 { j() }
+/// }
+///
+/// We'll find the following updated trie:
+///
+/// f -> [ g, h, j ] [...] [1] [...]
+/// g -> [ j' ] [...] [1] [...]
+/// h -> [ ... ] [...] [1] [...]
+/// j -> [ ... ] [...] [N] [...]
+/// j' -> [ ... ] [...] [10] [...]
+///
+/// Note that we'll have a new node representing the path `f -> g -> j'` with
+/// isolated data. This isolation gives us a means of representing the stack
+/// traces as a path, as opposed to a key in a table. The alternative
+/// implementation here would be to use a separate table for the path, and use
+/// hashes of the path as an identifier to accumulate the information. We've
+/// moved away from this approach as it takes a lot of time to compute the hash
+/// every time we need to update a function's call information as we're handling
+/// the entry and exit events.
+///
+/// This approach allows us to maintain a shadow stack, which represents the
+/// currently executing path, and on function exits quickly compute the amount
+/// of time elapsed from the entry, then update the counters for the node
+/// already represented in the trie. This necessitates an efficient
+/// representation of the various data structures (the list of callees must be
+/// cache-aware and efficient to look up, and the histogram must be compact and
+/// quick to update) to enable us to keep the overheads of this implementation
+/// to the minimum.
+class FunctionCallTrie {
+public:
+ struct Node;
+
+ // We use a NodeIdPair type instead of a std::pair<...> to not rely on the
+ // standard library types in this header.
+ struct NodeIdPair {
+ Node *NodePtr;
+ int32_t FId;
+ };
+
+ using NodeIdPairArray = Array<NodeIdPair>;
+ using NodeIdPairAllocatorType = NodeIdPairArray::AllocatorType;
+
+ // A Node in the FunctionCallTrie gives us a list of callees, the cumulative
+ // number of times this node actually appeared, the cumulative amount of time
+ // for this particular node including its children call times, and just the
+ // local time spent on this node. Each Node will have the ID of the XRay
+ // instrumented function that it is associated with.
+ struct Node {
+ Node *Parent;
+ NodeIdPairArray Callees;
+ uint64_t CallCount;
+ uint64_t CumulativeLocalTime; // Typically in TSC deltas, not wall-time.
+ int32_t FId;
+
+ // TODO: Include the compact histogram.
+ };
+
+private:
+ struct ShadowStackEntry {
+ uint64_t EntryTSC;
+ Node *NodePtr;
+ uint16_t EntryCPU;
+ };
+
+ using NodeArray = Array<Node>;
+ using RootArray = Array<Node *>;
+ using ShadowStackArray = Array<ShadowStackEntry>;
+
+public:
+ // We collate the allocators we need into a single struct, as a convenience to
+ // allow us to initialize these as a group.
+ struct Allocators {
+ using NodeAllocatorType = NodeArray::AllocatorType;
+ using RootAllocatorType = RootArray::AllocatorType;
+ using ShadowStackAllocatorType = ShadowStackArray::AllocatorType;
+
+ // Use hosted aligned storage members to allow for trivial move and init.
+ // This also allows us to sidestep the potentially-failing allocation issue.
+ typename std::aligned_storage<sizeof(NodeAllocatorType),
+ alignof(NodeAllocatorType)>::type
+ NodeAllocatorStorage;
+ typename std::aligned_storage<sizeof(RootAllocatorType),
+ alignof(RootAllocatorType)>::type
+ RootAllocatorStorage;
+ typename std::aligned_storage<sizeof(ShadowStackAllocatorType),
+ alignof(ShadowStackAllocatorType)>::type
+ ShadowStackAllocatorStorage;
+ typename std::aligned_storage<sizeof(NodeIdPairAllocatorType),
+ alignof(NodeIdPairAllocatorType)>::type
+ NodeIdPairAllocatorStorage;
+
+ NodeAllocatorType *NodeAllocator = nullptr;
+ RootAllocatorType *RootAllocator = nullptr;
+ ShadowStackAllocatorType *ShadowStackAllocator = nullptr;
+ NodeIdPairAllocatorType *NodeIdPairAllocator = nullptr;
+
+ Allocators() = default;
+ Allocators(const Allocators &) = delete;
+ Allocators &operator=(const Allocators &) = delete;
+
+ struct Buffers {
+ BufferQueue::Buffer NodeBuffer;
+ BufferQueue::Buffer RootsBuffer;
+ BufferQueue::Buffer ShadowStackBuffer;
+ BufferQueue::Buffer NodeIdPairBuffer;
+ };
+
+ explicit Allocators(Buffers &B) XRAY_NEVER_INSTRUMENT {
+ new (&NodeAllocatorStorage)
+ NodeAllocatorType(B.NodeBuffer.Data, B.NodeBuffer.Size);
+ NodeAllocator =
+ reinterpret_cast<NodeAllocatorType *>(&NodeAllocatorStorage);
+
+ new (&RootAllocatorStorage)
+ RootAllocatorType(B.RootsBuffer.Data, B.RootsBuffer.Size);
+ RootAllocator =
+ reinterpret_cast<RootAllocatorType *>(&RootAllocatorStorage);
+
+ new (&ShadowStackAllocatorStorage) ShadowStackAllocatorType(
+ B.ShadowStackBuffer.Data, B.ShadowStackBuffer.Size);
+ ShadowStackAllocator = reinterpret_cast<ShadowStackAllocatorType *>(
+ &ShadowStackAllocatorStorage);
+
+ new (&NodeIdPairAllocatorStorage) NodeIdPairAllocatorType(
+ B.NodeIdPairBuffer.Data, B.NodeIdPairBuffer.Size);
+ NodeIdPairAllocator = reinterpret_cast<NodeIdPairAllocatorType *>(
+ &NodeIdPairAllocatorStorage);
+ }
+
+ explicit Allocators(uptr Max) XRAY_NEVER_INSTRUMENT {
+ new (&NodeAllocatorStorage) NodeAllocatorType(Max);
+ NodeAllocator =
+ reinterpret_cast<NodeAllocatorType *>(&NodeAllocatorStorage);
+
+ new (&RootAllocatorStorage) RootAllocatorType(Max);
+ RootAllocator =
+ reinterpret_cast<RootAllocatorType *>(&RootAllocatorStorage);
+
+ new (&ShadowStackAllocatorStorage) ShadowStackAllocatorType(Max);
+ ShadowStackAllocator = reinterpret_cast<ShadowStackAllocatorType *>(
+ &ShadowStackAllocatorStorage);
+
+ new (&NodeIdPairAllocatorStorage) NodeIdPairAllocatorType(Max);
+ NodeIdPairAllocator = reinterpret_cast<NodeIdPairAllocatorType *>(
+ &NodeIdPairAllocatorStorage);
+ }
+
+ Allocators(Allocators &&O) XRAY_NEVER_INSTRUMENT {
+ // Here we rely on the safety of memcpy'ing contents of the storage
+ // members, and then pointing the source pointers to nullptr.
+ internal_memcpy(&NodeAllocatorStorage, &O.NodeAllocatorStorage,
+ sizeof(NodeAllocatorType));
+ internal_memcpy(&RootAllocatorStorage, &O.RootAllocatorStorage,
+ sizeof(RootAllocatorType));
+ internal_memcpy(&ShadowStackAllocatorStorage,
+ &O.ShadowStackAllocatorStorage,
+ sizeof(ShadowStackAllocatorType));
+ internal_memcpy(&NodeIdPairAllocatorStorage,
+ &O.NodeIdPairAllocatorStorage,
+ sizeof(NodeIdPairAllocatorType));
+
+ NodeAllocator =
+ reinterpret_cast<NodeAllocatorType *>(&NodeAllocatorStorage);
+ RootAllocator =
+ reinterpret_cast<RootAllocatorType *>(&RootAllocatorStorage);
+ ShadowStackAllocator = reinterpret_cast<ShadowStackAllocatorType *>(
+ &ShadowStackAllocatorStorage);
+ NodeIdPairAllocator = reinterpret_cast<NodeIdPairAllocatorType *>(
+ &NodeIdPairAllocatorStorage);
+
+ O.NodeAllocator = nullptr;
+ O.RootAllocator = nullptr;
+ O.ShadowStackAllocator = nullptr;
+ O.NodeIdPairAllocator = nullptr;
+ }
+
+ Allocators &operator=(Allocators &&O) XRAY_NEVER_INSTRUMENT {
+ // When moving into an existing instance, we ensure that we clean up the
+ // current allocators.
+ if (NodeAllocator)
+ NodeAllocator->~NodeAllocatorType();
+ if (O.NodeAllocator) {
+ new (&NodeAllocatorStorage)
+ NodeAllocatorType(std::move(*O.NodeAllocator));
+ NodeAllocator =
+ reinterpret_cast<NodeAllocatorType *>(&NodeAllocatorStorage);
+ O.NodeAllocator = nullptr;
+ } else {
+ NodeAllocator = nullptr;
+ }
+
+ if (RootAllocator)
+ RootAllocator->~RootAllocatorType();
+ if (O.RootAllocator) {
+ new (&RootAllocatorStorage)
+ RootAllocatorType(std::move(*O.RootAllocator));
+ RootAllocator =
+ reinterpret_cast<RootAllocatorType *>(&RootAllocatorStorage);
+ O.RootAllocator = nullptr;
+ } else {
+ RootAllocator = nullptr;
+ }
+
+ if (ShadowStackAllocator)
+ ShadowStackAllocator->~ShadowStackAllocatorType();
+ if (O.ShadowStackAllocator) {
+ new (&ShadowStackAllocatorStorage)
+ ShadowStackAllocatorType(std::move(*O.ShadowStackAllocator));
+ ShadowStackAllocator = reinterpret_cast<ShadowStackAllocatorType *>(
+ &ShadowStackAllocatorStorage);
+ O.ShadowStackAllocator = nullptr;
+ } else {
+ ShadowStackAllocator = nullptr;
+ }
+
+ if (NodeIdPairAllocator)
+ NodeIdPairAllocator->~NodeIdPairAllocatorType();
+ if (O.NodeIdPairAllocator) {
+ new (&NodeIdPairAllocatorStorage)
+ NodeIdPairAllocatorType(std::move(*O.NodeIdPairAllocator));
+ NodeIdPairAllocator = reinterpret_cast<NodeIdPairAllocatorType *>(
+ &NodeIdPairAllocatorStorage);
+ O.NodeIdPairAllocator = nullptr;
+ } else {
+ NodeIdPairAllocator = nullptr;
+ }
+
+ return *this;
+ }
+
+ ~Allocators() XRAY_NEVER_INSTRUMENT {
+ if (NodeAllocator != nullptr)
+ NodeAllocator->~NodeAllocatorType();
+ if (RootAllocator != nullptr)
+ RootAllocator->~RootAllocatorType();
+ if (ShadowStackAllocator != nullptr)
+ ShadowStackAllocator->~ShadowStackAllocatorType();
+ if (NodeIdPairAllocator != nullptr)
+ NodeIdPairAllocator->~NodeIdPairAllocatorType();
+ }
+ };
+
+ static Allocators InitAllocators() XRAY_NEVER_INSTRUMENT {
+ return InitAllocatorsCustom(profilingFlags()->per_thread_allocator_max);
+ }
+
+ static Allocators InitAllocatorsCustom(uptr Max) XRAY_NEVER_INSTRUMENT {
+ Allocators A(Max);
+ return A;
+ }
+
+ static Allocators
+ InitAllocatorsFromBuffers(Allocators::Buffers &Bufs) XRAY_NEVER_INSTRUMENT {
+ Allocators A(Bufs);
+ return A;
+ }
+
+private:
+ NodeArray Nodes;
+ RootArray Roots;
+ ShadowStackArray ShadowStack;
+ NodeIdPairAllocatorType *NodeIdPairAllocator;
+ uint32_t OverflowedFunctions;
+
+public:
+ explicit FunctionCallTrie(const Allocators &A) XRAY_NEVER_INSTRUMENT
+ : Nodes(*A.NodeAllocator),
+ Roots(*A.RootAllocator),
+ ShadowStack(*A.ShadowStackAllocator),
+ NodeIdPairAllocator(A.NodeIdPairAllocator),
+ OverflowedFunctions(0) {}
+
+ FunctionCallTrie() = delete;
+ FunctionCallTrie(const FunctionCallTrie &) = delete;
+ FunctionCallTrie &operator=(const FunctionCallTrie &) = delete;
+
+ FunctionCallTrie(FunctionCallTrie &&O) XRAY_NEVER_INSTRUMENT
+ : Nodes(std::move(O.Nodes)),
+ Roots(std::move(O.Roots)),
+ ShadowStack(std::move(O.ShadowStack)),
+ NodeIdPairAllocator(O.NodeIdPairAllocator),
+ OverflowedFunctions(O.OverflowedFunctions) {}
+
+ FunctionCallTrie &operator=(FunctionCallTrie &&O) XRAY_NEVER_INSTRUMENT {
+ Nodes = std::move(O.Nodes);
+ Roots = std::move(O.Roots);
+ ShadowStack = std::move(O.ShadowStack);
+ NodeIdPairAllocator = O.NodeIdPairAllocator;
+ OverflowedFunctions = O.OverflowedFunctions;
+ return *this;
+ }
+
+ ~FunctionCallTrie() XRAY_NEVER_INSTRUMENT {}
+
+ void enterFunction(const int32_t FId, uint64_t TSC,
+ uint16_t CPU) XRAY_NEVER_INSTRUMENT {
+ DCHECK_NE(FId, 0);
+
+ // If we've already overflowed the function call stack, do not bother
+ // attempting to record any more function entries.
+ if (UNLIKELY(OverflowedFunctions)) {
+ ++OverflowedFunctions;
+ return;
+ }
+
+ // If this is the first function we've encountered, we want to set up the
+ // node(s) and treat it as a root.
+ if (UNLIKELY(ShadowStack.empty())) {
+ auto *NewRoot = Nodes.AppendEmplace(
+ nullptr, NodeIdPairArray(*NodeIdPairAllocator), 0u, 0u, FId);
+ if (UNLIKELY(NewRoot == nullptr))
+ return;
+ if (Roots.AppendEmplace(NewRoot) == nullptr) {
+ Nodes.trim(1);
+ return;
+ }
+ if (ShadowStack.AppendEmplace(TSC, NewRoot, CPU) == nullptr) {
+ Nodes.trim(1);
+ Roots.trim(1);
+ ++OverflowedFunctions;
+ return;
+ }
+ return;
+ }
+
+ // From this point on, we require that the stack is not empty.
+ DCHECK(!ShadowStack.empty());
+ auto TopNode = ShadowStack.back().NodePtr;
+ DCHECK_NE(TopNode, nullptr);
+
+ // If we've seen this callee before, we access that node and place it on
+ // top of the stack.
+ auto *Callee = TopNode->Callees.find_element(
+ [FId](const NodeIdPair &NR) { return NR.FId == FId; });
+ if (Callee != nullptr) {
+ CHECK_NE(Callee->NodePtr, nullptr);
+ if (ShadowStack.AppendEmplace(TSC, Callee->NodePtr, CPU) == nullptr)
+ ++OverflowedFunctions;
+ return;
+ }
+
+ // We've never seen this callee from this node before; create a new node.
+ auto *NewNode = Nodes.AppendEmplace(
+ TopNode, NodeIdPairArray(*NodeIdPairAllocator), 0u, 0u, FId);
+ if (UNLIKELY(NewNode == nullptr))
+ return;
+ DCHECK_NE(NewNode, nullptr);
+ TopNode->Callees.AppendEmplace(NewNode, FId);
+ if (ShadowStack.AppendEmplace(TSC, NewNode, CPU) == nullptr)
+ ++OverflowedFunctions;
+ return;
+ }
+
+ void exitFunction(int32_t FId, uint64_t TSC,
+ uint16_t CPU) XRAY_NEVER_INSTRUMENT {
+ // If we're exiting functions that have "overflowed", i.e. didn't fit on the
+ // stack due to allocator constraints, we decrement that count first.
+ if (OverflowedFunctions) {
+ --OverflowedFunctions;
+ return;
+ }
+
+ // When we exit a function, we look up the ShadowStack to see whether we've
+ // entered this function before. We do as little processing here as we can,
+ // since most of the hard work would have already been done at function
+ // entry.
+ uint64_t CumulativeTreeTime = 0;
+
+ while (!ShadowStack.empty()) {
+ const auto &Top = ShadowStack.back();
+ auto TopNode = Top.NodePtr;
+ DCHECK_NE(TopNode, nullptr);
+
+ // We may encounter overflow on the TSC we're provided, which may end up
+ // being less than the TSC when we first entered the function.
+ //
+ // To get an accurate measurement of cycles, we check whether we've
+ // overflowed (TSC < Top.EntryTSC), and if so account for the difference
+ // between the entry TSC and the max of the TSC counter (max of uint64_t),
+ // then add the current TSC value. The maximum delta we can get is at most
+ // the full 64-bit unsigned range: wrapping from a Top.EntryTSC of 1 down
+ // to a TSC of 0 yields (numeric_limits<uint64_t>::max() - 1) + 1 elapsed
+ // ticks.
+ //
+ // NOTE: This assumes that TSCs are synchronised across CPUs.
+ // TODO: Count the number of times we've seen CPU migrations.
+ uint64_t LocalTime =
+ Top.EntryTSC > TSC
+ ? (std::numeric_limits<uint64_t>::max() - Top.EntryTSC) + TSC
+ : TSC - Top.EntryTSC;
+ TopNode->CallCount++;
+ TopNode->CumulativeLocalTime += LocalTime - CumulativeTreeTime;
+ CumulativeTreeTime += LocalTime;
+ ShadowStack.trim(1);
+
+ // TODO: Update the histogram for the node.
+ if (TopNode->FId == FId)
+ break;
+ }
+ }
+
+ const RootArray &getRoots() const XRAY_NEVER_INSTRUMENT { return Roots; }
+
+ // The deepCopyInto operation will update the provided FunctionCallTrie by
+ // re-creating the contents of this particular FunctionCallTrie in the other
+ // FunctionCallTrie. It will do this using a Depth First Traversal from the
+ // roots, and while doing so recreating the traversal in the provided
+ // FunctionCallTrie.
+ //
+ // This operation will *not* clear any existing state in |O|; as a result,
+ // calling it with a non-empty |O| would produce duplicate entries.
+ //
+ // This function is *not* thread-safe, and may require external
+ // synchronisation of both |this| and |O|.
+ //
+ // This function must therefore only be called with an empty |O|.
+ void deepCopyInto(FunctionCallTrie &O) const XRAY_NEVER_INSTRUMENT {
+ DCHECK(O.getRoots().empty());
+
+ // For each root, we push a pair into a stack, to use as the parent marker
+ // for new nodes we create as we traverse depth-first down the call tree.
+ struct NodeAndParent {
+ FunctionCallTrie::Node *Node;
+ FunctionCallTrie::Node *NewNode;
+ };
+ using Stack = Array<NodeAndParent>;
+
+ typename Stack::AllocatorType StackAllocator(
+ profilingFlags()->stack_allocator_max);
+ Stack DFSStack(StackAllocator);
+
+ for (const auto Root : getRoots()) {
+ // Add a node in O for this root.
+ auto NewRoot = O.Nodes.AppendEmplace(
+ nullptr, NodeIdPairArray(*O.NodeIdPairAllocator), Root->CallCount,
+ Root->CumulativeLocalTime, Root->FId);
+
+ // If we cannot allocate any more memory, we bail out right away.
+ if (UNLIKELY(NewRoot == nullptr))
+ return;
+
+ if (UNLIKELY(O.Roots.Append(NewRoot) == nullptr))
+ return;
+
+ // TODO: Figure out what to do if we fail to allocate any more stack
+ // space. Maybe warn or report once?
+ if (DFSStack.AppendEmplace(Root, NewRoot) == nullptr)
+ return;
+ while (!DFSStack.empty()) {
+ NodeAndParent NP = DFSStack.back();
+ DCHECK_NE(NP.Node, nullptr);
+ DCHECK_NE(NP.NewNode, nullptr);
+ DFSStack.trim(1);
+ for (const auto Callee : NP.Node->Callees) {
+ auto NewNode = O.Nodes.AppendEmplace(
+ NP.NewNode, NodeIdPairArray(*O.NodeIdPairAllocator),
+ Callee.NodePtr->CallCount, Callee.NodePtr->CumulativeLocalTime,
+ Callee.FId);
+ if (UNLIKELY(NewNode == nullptr))
+ return;
+ if (UNLIKELY(NP.NewNode->Callees.AppendEmplace(NewNode, Callee.FId) ==
+ nullptr))
+ return;
+ if (UNLIKELY(DFSStack.AppendEmplace(Callee.NodePtr, NewNode) ==
+ nullptr))
+ return;
+ }
+ }
+ }
+ }
+
+ // The mergeInto operation will update the provided FunctionCallTrie by
+ // traversing the current trie's roots and merging the data in our nodes
+ // into the data of the target's corresponding nodes. If a node doesn't
+ // exist in the target trie, we add a new one in the right position, and
+ // inherit the data from the original (current) trie, along with its callees.
+ //
+ // This function is *not* thread-safe, and may require external
+ // synchronisation of both |this| and |O|.
+ void mergeInto(FunctionCallTrie &O) const XRAY_NEVER_INSTRUMENT {
+ struct NodeAndTarget {
+ FunctionCallTrie::Node *OrigNode;
+ FunctionCallTrie::Node *TargetNode;
+ };
+ using Stack = Array<NodeAndTarget>;
+ typename Stack::AllocatorType StackAllocator(
+ profilingFlags()->stack_allocator_max);
+ Stack DFSStack(StackAllocator);
+
+ for (const auto Root : getRoots()) {
+ Node *TargetRoot = nullptr;
+ auto R = O.Roots.find_element(
+ [&](const Node *Node) { return Node->FId == Root->FId; });
+ if (R == nullptr) {
+ TargetRoot = O.Nodes.AppendEmplace(
+ nullptr, NodeIdPairArray(*O.NodeIdPairAllocator), 0u, 0u,
+ Root->FId);
+ if (UNLIKELY(TargetRoot == nullptr))
+ return;
+
+ O.Roots.Append(TargetRoot);
+ } else {
+ TargetRoot = *R;
+ }
+
+ DFSStack.AppendEmplace(Root, TargetRoot);
+ while (!DFSStack.empty()) {
+ NodeAndTarget NT = DFSStack.back();
+ DCHECK_NE(NT.OrigNode, nullptr);
+ DCHECK_NE(NT.TargetNode, nullptr);
+ DFSStack.trim(1);
+ // TODO: Update the histogram as well when we have it ready.
+ NT.TargetNode->CallCount += NT.OrigNode->CallCount;
+ NT.TargetNode->CumulativeLocalTime += NT.OrigNode->CumulativeLocalTime;
+ for (const auto Callee : NT.OrigNode->Callees) {
+ auto TargetCallee = NT.TargetNode->Callees.find_element(
+ [&](const FunctionCallTrie::NodeIdPair &C) {
+ return C.FId == Callee.FId;
+ });
+ if (TargetCallee == nullptr) {
+ auto NewTargetNode = O.Nodes.AppendEmplace(
+ NT.TargetNode, NodeIdPairArray(*O.NodeIdPairAllocator), 0u, 0u,
+ Callee.FId);
+
+ if (UNLIKELY(NewTargetNode == nullptr))
+ return;
+
+ TargetCallee =
+ NT.TargetNode->Callees.AppendEmplace(NewTargetNode, Callee.FId);
+ // Guard against a failed append, since TargetCallee is dereferenced
+ // in the DFSStack push below.
+ if (UNLIKELY(TargetCallee == nullptr))
+ return;
+ }
+ DFSStack.AppendEmplace(Callee.NodePtr, TargetCallee->NodePtr);
+ }
+ }
+ }
+ }
+};
+
+} // namespace __xray
+
+#endif // XRAY_FUNCTION_CALL_TRIE_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_function_call_trie.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
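
The wraparound arithmetic in exitFunction() above is easy to get wrong, so here is a minimal standalone sketch of the same computation (illustrative only; cycleDelta is a hypothetical name, not part of the vendor sources):

#include <cstdint>
#include <limits>

// If the 64-bit TSC wrapped between entry and exit (ExitTSC < EntryTSC), we
// account for the distance from EntryTSC up to the counter maximum, then add
// the post-wrap reading; otherwise a plain subtraction suffices.
static uint64_t cycleDelta(uint64_t EntryTSC, uint64_t ExitTSC) {
  return EntryTSC > ExitTSC
             ? (std::numeric_limits<uint64_t>::max() - EntryTSC) + ExitTSC
             : ExitTSC - EntryTSC;
}

As in exitFunction(), this assumes at most one wraparound between the two readings and that TSCs are synchronised across CPUs.
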
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_init.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_init.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_init.cc (revision 351984)
@@ -0,0 +1,115 @@
+//===-- xray_init.cc --------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+// XRay initialisation logic.
+//===----------------------------------------------------------------------===//
+
+#include <fcntl.h>
+#include <strings.h>
+#include <unistd.h>
+
+#include "sanitizer_common/sanitizer_common.h"
+#include "xray_defs.h"
+#include "xray_flags.h"
+#include "xray_interface_internal.h"
+
+extern "C" {
+void __xray_init();
+extern const XRaySledEntry __start_xray_instr_map[] __attribute__((weak));
+extern const XRaySledEntry __stop_xray_instr_map[] __attribute__((weak));
+extern const XRayFunctionSledIndex __start_xray_fn_idx[] __attribute__((weak));
+extern const XRayFunctionSledIndex __stop_xray_fn_idx[] __attribute__((weak));
+
+#if SANITIZER_MAC
+// HACK: This is a temporary workaround to make XRay build on
+// Darwin, but it will probably not work at runtime.
+const XRaySledEntry __start_xray_instr_map[] = {};
+extern const XRaySledEntry __stop_xray_instr_map[] = {};
+extern const XRayFunctionSledIndex __start_xray_fn_idx[] = {};
+extern const XRayFunctionSledIndex __stop_xray_fn_idx[] = {};
+#endif
+}
+
+using namespace __xray;
+
+// When set to 'true', this means the XRay runtime has been initialised. We
+// use the weak symbols defined above (__start_xray_instr_map and
+// __stop_xray_instr_map) to initialise the instrumentation map that XRay uses
+// for runtime patching/unpatching of instrumentation points.
+//
+// FIXME: Support DSO instrumentation maps too. The current solution only works
+// for statically linked executables.
+atomic_uint8_t XRayInitialized{0};
+
+// This should always be updated before XRayInitialized is updated.
+SpinMutex XRayInstrMapMutex;
+XRaySledMap XRayInstrMap;
+
+// Global flag to determine whether the flags have been initialized.
+atomic_uint8_t XRayFlagsInitialized{0};
+
+// A mutex to allow only one thread to initialize the XRay data structures.
+SpinMutex XRayInitMutex;
+
+// __xray_init() will do the actual loading of the current process' memory map
+// and then proceed to look for the .xray_instr_map section/segment.
+void __xray_init() XRAY_NEVER_INSTRUMENT {
+ SpinMutexLock Guard(&XRayInitMutex);
+ // Short-circuit if we've already initialized XRay before.
+ if (atomic_load(&XRayInitialized, memory_order_acquire))
+ return;
+
+ // XRay is not compatible with PaX MPROTECT.
+ CheckMPROTECT();
+
+ if (!atomic_load(&XRayFlagsInitialized, memory_order_acquire)) {
+ initializeFlags();
+ atomic_store(&XRayFlagsInitialized, true, memory_order_release);
+ }
+
+ if (__start_xray_instr_map == nullptr) {
+ if (Verbosity())
+ Report("XRay instrumentation map missing. Not initializing XRay.\n");
+ return;
+ }
+
+ {
+ SpinMutexLock Guard(&XRayInstrMapMutex);
+ XRayInstrMap.Sleds = __start_xray_instr_map;
+ XRayInstrMap.Entries = __stop_xray_instr_map - __start_xray_instr_map;
+ XRayInstrMap.SledsIndex = __start_xray_fn_idx;
+ XRayInstrMap.Functions = __stop_xray_fn_idx - __start_xray_fn_idx;
+ }
+ atomic_store(&XRayInitialized, true, memory_order_release);
+
+#ifndef XRAY_NO_PREINIT
+ if (flags()->patch_premain)
+ __xray_patch();
+#endif
+}
+
+// FIXME: Make check-xray tests work on FreeBSD without
+// SANITIZER_CAN_USE_PREINIT_ARRAY.
+// See sanitizer_internal_defs.h where the macro is defined.
+// Calling unresolved PLT functions in .preinit_array can lead to deadlock on
+// FreeBSD but here it seems benign.
+#if !defined(XRAY_NO_PREINIT) && \
+ (SANITIZER_CAN_USE_PREINIT_ARRAY || SANITIZER_FREEBSD)
+// Only add the preinit array initialization if the sanitizers can.
+__attribute__((section(".preinit_array"),
+ used)) void (*__local_xray_preinit)(void) = __xray_init;
+#else
+// If we cannot use the .preinit_array section, we should instead use dynamic
+// initialisation.
+__attribute__((constructor(0)))
+static void __local_xray_dyninit() {
+ __xray_init();
+}
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_init.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
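
The initialisation above leans on the linker synthesising __start_/__stop_ bounds for the .xray_instr_map section. A hedged sketch of the general pattern, with hypothetical names (my_map is not an XRay section):

#include <cstddef>

struct Entry { int Data; };

// GNU-style linkers synthesise __start_<name> and __stop_<name> for any
// section whose name is a valid C identifier. Declaring them weak makes a
// missing section yield null pointers instead of a link error.
extern const Entry __start_my_map[] __attribute__((weak));
extern const Entry __stop_my_map[] __attribute__((weak));

static size_t mapEntries() {
  if (__start_my_map == nullptr)
    return 0; // Section absent, mirroring the nullptr check in __xray_init().
  return __stop_my_map - __start_my_map; // Pointer difference = entry count.
}
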
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_interface.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_interface.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_interface.cc (revision 351984)
@@ -0,0 +1,480 @@
+//===-- xray_interface.cc ---------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+// Implementation of the API functions.
+//
+//===----------------------------------------------------------------------===//
+
+#include "xray_interface_internal.h"
+
+#include <cstdint>
+#include <cstdio>
+#include <errno.h>
+#include <limits>
+#include <string.h>
+#include <sys/mman.h>
+
+#if SANITIZER_FUCHSIA
+#include <zircon/process.h>
+#include <zircon/sanitizer.h>
+#include <zircon/status.h>
+#include <zircon/syscalls.h>
+#endif
+
+#include "sanitizer_common/sanitizer_addrhashmap.h"
+#include "sanitizer_common/sanitizer_common.h"
+
+#include "xray_defs.h"
+#include "xray_flags.h"
+
+extern __sanitizer::SpinMutex XRayInstrMapMutex;
+extern __sanitizer::atomic_uint8_t XRayInitialized;
+extern __xray::XRaySledMap XRayInstrMap;
+
+namespace __xray {
+
+#if defined(__x86_64__)
+static const int16_t cSledLength = 12;
+#elif defined(__aarch64__)
+static const int16_t cSledLength = 32;
+#elif defined(__arm__)
+static const int16_t cSledLength = 28;
+#elif SANITIZER_MIPS32
+static const int16_t cSledLength = 48;
+#elif SANITIZER_MIPS64
+static const int16_t cSledLength = 64;
+#elif defined(__powerpc64__)
+static const int16_t cSledLength = 8;
+#else
+#error "Unsupported CPU Architecture"
+#endif /* CPU architecture */
+
+// This is the function to call when we encounter the entry or exit sleds.
+atomic_uintptr_t XRayPatchedFunction{0};
+
+// This is the function to call from the arg1-enabled sleds/trampolines.
+atomic_uintptr_t XRayArgLogger{0};
+
+// This is the function to call when we encounter a custom event log call.
+atomic_uintptr_t XRayPatchedCustomEvent{0};
+
+// This is the function to call when we encounter a typed event log call.
+atomic_uintptr_t XRayPatchedTypedEvent{0};
+
+// This is the global status to determine whether we are currently
+// patching/unpatching.
+atomic_uint8_t XRayPatching{0};
+
+struct TypeDescription {
+ uint32_t type_id;
+ std::size_t description_string_length;
+};
+
+using TypeDescriptorMapType = AddrHashMap<TypeDescription, 11>;
+// An address map from immutable descriptors to type ids.
+TypeDescriptorMapType TypeDescriptorAddressMap{};
+
+atomic_uint32_t TypeEventDescriptorCounter{0};
+
+// MProtectHelper is an RAII wrapper for calls to mprotect(...) that will
+// undo any successful mprotect(...) changes. This is used to make a page
+// writeable and executable; upon destruction, if that change succeeded, it
+// restores the page to being read-only and executable.
+//
+// This is only used specifically for runtime-patching of the XRay
+// instrumentation points. This assumes that the executable pages are
+// originally read-and-execute only.
+class MProtectHelper {
+ void *PageAlignedAddr;
+ std::size_t MProtectLen;
+ bool MustCleanup;
+
+public:
+ explicit MProtectHelper(void *PageAlignedAddr,
+ std::size_t MProtectLen,
+ std::size_t PageSize) XRAY_NEVER_INSTRUMENT
+ : PageAlignedAddr(PageAlignedAddr),
+ MProtectLen(MProtectLen),
+ MustCleanup(false) {
+#if SANITIZER_FUCHSIA
+ MProtectLen = RoundUpTo(MProtectLen, PageSize);
+#endif
+ }
+
+ int MakeWriteable() XRAY_NEVER_INSTRUMENT {
+#if SANITIZER_FUCHSIA
+ auto R = __sanitizer_change_code_protection(
+ reinterpret_cast<uintptr_t>(PageAlignedAddr), MProtectLen, true);
+ if (R != ZX_OK) {
+ Report("XRay: cannot change code protection: %s\n",
+ _zx_status_get_string(R));
+ return -1;
+ }
+ MustCleanup = true;
+ return 0;
+#else
+ auto R = mprotect(PageAlignedAddr, MProtectLen,
+ PROT_READ | PROT_WRITE | PROT_EXEC);
+ if (R != -1)
+ MustCleanup = true;
+ return R;
+#endif
+ }
+
+ ~MProtectHelper() XRAY_NEVER_INSTRUMENT {
+ if (MustCleanup) {
+#if SANITIZER_FUCHSIA
+ auto R = __sanitizer_change_code_protection(
+ reinterpret_cast<uintptr_t>(PageAlignedAddr), MProtectLen, false);
+ if (R != ZX_OK) {
+ Report("XRay: cannot change code protection: %s\n",
+ _zx_status_get_string(R));
+ }
+#else
+ mprotect(PageAlignedAddr, MProtectLen, PROT_READ | PROT_EXEC);
+#endif
+ }
+ }
+};
+
+namespace {
+
+bool patchSled(const XRaySledEntry &Sled, bool Enable,
+ int32_t FuncId) XRAY_NEVER_INSTRUMENT {
+ bool Success = false;
+ switch (Sled.Kind) {
+ case XRayEntryType::ENTRY:
+ Success = patchFunctionEntry(Enable, FuncId, Sled, __xray_FunctionEntry);
+ break;
+ case XRayEntryType::EXIT:
+ Success = patchFunctionExit(Enable, FuncId, Sled);
+ break;
+ case XRayEntryType::TAIL:
+ Success = patchFunctionTailExit(Enable, FuncId, Sled);
+ break;
+ case XRayEntryType::LOG_ARGS_ENTRY:
+ Success = patchFunctionEntry(Enable, FuncId, Sled, __xray_ArgLoggerEntry);
+ break;
+ case XRayEntryType::CUSTOM_EVENT:
+ Success = patchCustomEvent(Enable, FuncId, Sled);
+ break;
+ case XRayEntryType::TYPED_EVENT:
+ Success = patchTypedEvent(Enable, FuncId, Sled);
+ break;
+ default:
+ Report("Unsupported sled kind '%d' @%04zx\n", int(Sled.Kind),
+ uptr(Sled.Address));
+ return false;
+ }
+ return Success;
+}
+
+XRayPatchingStatus patchFunction(int32_t FuncId,
+ bool Enable) XRAY_NEVER_INSTRUMENT {
+ if (!atomic_load(&XRayInitialized,
+ memory_order_acquire))
+ return XRayPatchingStatus::NOT_INITIALIZED; // Not initialized.
+
+ uint8_t NotPatching = false;
+ if (!atomic_compare_exchange_strong(
+ &XRayPatching, &NotPatching, true, memory_order_acq_rel))
+ return XRayPatchingStatus::ONGOING; // Already patching.
+
+ // Next, we look for the function index.
+ XRaySledMap InstrMap;
+ {
+ SpinMutexLock Guard(&XRayInstrMapMutex);
+ InstrMap = XRayInstrMap;
+ }
+
+ // If we don't have an index, we can't patch individual functions.
+ if (InstrMap.Functions == 0)
+ return XRayPatchingStatus::NOT_INITIALIZED;
+
+ // FuncId must be a positive number, less than the number of functions
+ // instrumented.
+ if (FuncId <= 0 || static_cast<size_t>(FuncId) > InstrMap.Functions) {
+ Report("Invalid function id provided: %d\n", FuncId);
+ return XRayPatchingStatus::FAILED;
+ }
+
+ // Now we patch the sleds for this specific function.
+ auto SledRange = InstrMap.SledsIndex[FuncId - 1];
+ auto *f = SledRange.Begin;
+ auto *e = SledRange.End;
+
+ bool SucceedOnce = false;
+ while (f != e)
+ SucceedOnce |= patchSled(*f++, Enable, FuncId);
+
+ atomic_store(&XRayPatching, false,
+ memory_order_release);
+
+ if (!SucceedOnce) {
+ Report("Failed patching any sled for function '%d'.\n", FuncId);
+ return XRayPatchingStatus::FAILED;
+ }
+
+ return XRayPatchingStatus::SUCCESS;
+}
+
+// controlPatching implements the common internals of the patching/unpatching
+// implementation. |Enable| defines whether we're enabling or disabling the
+// runtime XRay instrumentation.
+XRayPatchingStatus controlPatching(bool Enable) XRAY_NEVER_INSTRUMENT {
+ if (!atomic_load(&XRayInitialized,
+ memory_order_acquire))
+ return XRayPatchingStatus::NOT_INITIALIZED; // Not initialized.
+
+ uint8_t NotPatching = false;
+ if (!atomic_compare_exchange_strong(
+ &XRayPatching, &NotPatching, true, memory_order_acq_rel))
+ return XRayPatchingStatus::ONGOING; // Already patching.
+
+ uint8_t PatchingSuccess = false;
+ auto XRayPatchingStatusResetter =
+ at_scope_exit([&PatchingSuccess] {
+ if (!PatchingSuccess)
+ atomic_store(&XRayPatching, false,
+ memory_order_release);
+ });
+
+ XRaySledMap InstrMap;
+ {
+ SpinMutexLock Guard(&XRayInstrMapMutex);
+ InstrMap = XRayInstrMap;
+ }
+ if (InstrMap.Entries == 0)
+ return XRayPatchingStatus::NOT_INITIALIZED;
+
+ uint32_t FuncId = 1;
+ uint64_t CurFun = 0;
+
+ // First we want to find the bounds for which we have instrumentation points,
+ // and try to get as few calls to mprotect(...) as possible. We're assuming
+ // that all the sleds for the instrumentation map are contiguous as a single
+ // set of pages. When we do support dynamic shared object instrumentation,
+ // we'll need to do this for each set of page load offsets per DSO loaded. For
+ // now we're assuming we can mprotect the whole section of text between the
+ // minimum sled address and the maximum sled address (+ the largest sled
+ // size).
+ auto MinSled = InstrMap.Sleds[0];
+ auto MaxSled = InstrMap.Sleds[InstrMap.Entries - 1];
+ for (std::size_t I = 0; I < InstrMap.Entries; I++) {
+ const auto &Sled = InstrMap.Sleds[I];
+ if (Sled.Address < MinSled.Address)
+ MinSled = Sled;
+ if (Sled.Address > MaxSled.Address)
+ MaxSled = Sled;
+ }
+
+ const size_t PageSize = flags()->xray_page_size_override > 0
+ ? flags()->xray_page_size_override
+ : GetPageSizeCached();
+ if ((PageSize == 0) || ((PageSize & (PageSize - 1)) != 0)) {
+ Report("System page size is not a power of two: %zu\n", PageSize);
+ return XRayPatchingStatus::FAILED;
+ }
+
+ void *PageAlignedAddr =
+ reinterpret_cast<void *>(MinSled.Address & ~(PageSize - 1));
+ size_t MProtectLen =
+ (MaxSled.Address - reinterpret_cast<uptr>(PageAlignedAddr)) + cSledLength;
+ MProtectHelper Protector(PageAlignedAddr, MProtectLen, PageSize);
+ if (Protector.MakeWriteable() == -1) {
+ Report("Failed mprotect: %d\n", errno);
+ return XRayPatchingStatus::FAILED;
+ }
+
+ for (std::size_t I = 0; I < InstrMap.Entries; ++I) {
+ auto &Sled = InstrMap.Sleds[I];
+ auto F = Sled.Function;
+ if (CurFun == 0)
+ CurFun = F;
+ if (F != CurFun) {
+ ++FuncId;
+ CurFun = F;
+ }
+ patchSled(Sled, Enable, FuncId);
+ }
+ atomic_store(&XRayPatching, false,
+ memory_order_release);
+ PatchingSuccess = true;
+ return XRayPatchingStatus::SUCCESS;
+}
+
+XRayPatchingStatus mprotectAndPatchFunction(int32_t FuncId,
+ bool Enable) XRAY_NEVER_INSTRUMENT {
+ XRaySledMap InstrMap;
+ {
+ SpinMutexLock Guard(&XRayInstrMapMutex);
+ InstrMap = XRayInstrMap;
+ }
+
+ // FuncId must be a positive number, less than the number of functions
+ // instrumented.
+ if (FuncId <= 0 || static_cast<size_t>(FuncId) > InstrMap.Functions) {
+ Report("Invalid function id provided: %d\n", FuncId);
+ return XRayPatchingStatus::FAILED;
+ }
+
+ const size_t PageSize = flags()->xray_page_size_override > 0
+ ? flags()->xray_page_size_override
+ : GetPageSizeCached();
+ if ((PageSize == 0) || ((PageSize & (PageSize - 1)) != 0)) {
+ Report("Provided page size is not a power of two: %zu\n", PageSize);
+ return XRayPatchingStatus::FAILED;
+ }
+
+ // Here we compute the minimum sled and maximum sled associated with a
+ // particular function ID.
+ auto SledRange = InstrMap.SledsIndex[FuncId - 1];
+ auto *f = SledRange.Begin;
+ auto *e = SledRange.End;
+ auto MinSled = *f;
+ auto MaxSled = *(SledRange.End - 1);
+ while (f != e) {
+ if (f->Address < MinSled.Address)
+ MinSled = *f;
+ if (f->Address > MaxSled.Address)
+ MaxSled = *f;
+ ++f;
+ }
+
+ void *PageAlignedAddr =
+ reinterpret_cast<void *>(MinSled.Address & ~(PageSize - 1));
+ size_t MProtectLen =
+ (MaxSled.Address - reinterpret_cast<uptr>(PageAlignedAddr)) + cSledLength;
+ MProtectHelper Protector(PageAlignedAddr, MProtectLen, PageSize);
+ if (Protector.MakeWriteable() == -1) {
+ Report("Failed mprotect: %d\n", errno);
+ return XRayPatchingStatus::FAILED;
+ }
+ return patchFunction(FuncId, Enable);
+}
+
+} // namespace
+
+} // namespace __xray
+
+using namespace __xray;
+
+// The following functions are declared `extern "C" {...}` in the header, hence
+// they're defined in the global namespace.
+
+int __xray_set_handler(void (*entry)(int32_t,
+ XRayEntryType)) XRAY_NEVER_INSTRUMENT {
+ if (atomic_load(&XRayInitialized,
+ memory_order_acquire)) {
+
+ atomic_store(&__xray::XRayPatchedFunction,
+ reinterpret_cast<uintptr_t>(entry),
+ memory_order_release);
+ return 1;
+ }
+ return 0;
+}
+
+int __xray_set_customevent_handler(void (*entry)(void *, size_t))
+ XRAY_NEVER_INSTRUMENT {
+ if (atomic_load(&XRayInitialized,
+ memory_order_acquire)) {
+ atomic_store(&__xray::XRayPatchedCustomEvent,
+ reinterpret_cast<uintptr_t>(entry),
+ memory_order_release);
+ return 1;
+ }
+ return 0;
+}
+
+int __xray_set_typedevent_handler(void (*entry)(
+ uint16_t, const void *, size_t)) XRAY_NEVER_INSTRUMENT {
+ if (atomic_load(&XRayInitialized,
+ memory_order_acquire)) {
+ atomic_store(&__xray::XRayPatchedTypedEvent,
+ reinterpret_cast<uintptr_t>(entry),
+ memory_order_release);
+ return 1;
+ }
+ return 0;
+}
+
+int __xray_remove_handler() XRAY_NEVER_INSTRUMENT {
+ return __xray_set_handler(nullptr);
+}
+
+int __xray_remove_customevent_handler() XRAY_NEVER_INSTRUMENT {
+ return __xray_set_customevent_handler(nullptr);
+}
+
+int __xray_remove_typedevent_handler() XRAY_NEVER_INSTRUMENT {
+ return __xray_set_typedevent_handler(nullptr);
+}
+
+uint16_t __xray_register_event_type(
+ const char *const event_type) XRAY_NEVER_INSTRUMENT {
+ TypeDescriptorMapType::Handle h(&TypeDescriptorAddressMap, (uptr)event_type);
+ if (h.created()) {
+ h->type_id = atomic_fetch_add(
+ &TypeEventDescriptorCounter, 1, memory_order_acq_rel);
+ h->description_string_length = strnlen(event_type, 1024);
+ }
+ return h->type_id;
+}
+
+XRayPatchingStatus __xray_patch() XRAY_NEVER_INSTRUMENT {
+ return controlPatching(true);
+}
+
+XRayPatchingStatus __xray_unpatch() XRAY_NEVER_INSTRUMENT {
+ return controlPatching(false);
+}
+
+XRayPatchingStatus __xray_patch_function(int32_t FuncId) XRAY_NEVER_INSTRUMENT {
+ return mprotectAndPatchFunction(FuncId, true);
+}
+
+XRayPatchingStatus
+__xray_unpatch_function(int32_t FuncId) XRAY_NEVER_INSTRUMENT {
+ return mprotectAndPatchFunction(FuncId, false);
+}
+
+int __xray_set_handler_arg1(void (*entry)(int32_t, XRayEntryType, uint64_t)) {
+ if (!atomic_load(&XRayInitialized,
+ memory_order_acquire))
+ return 0;
+
+ // A relaxed write might not be visible even if the current thread gets
+ // scheduled on a different CPU/NUMA node. We need to wait for everyone to
+ // have this handler installed for consistency of collected data across CPUs.
+ atomic_store(&XRayArgLogger, reinterpret_cast<uint64_t>(entry),
+ memory_order_release);
+ return 1;
+}
+
+int __xray_remove_handler_arg1() { return __xray_set_handler_arg1(nullptr); }
+
+uintptr_t __xray_function_address(int32_t FuncId) XRAY_NEVER_INSTRUMENT {
+ SpinMutexLock Guard(&XRayInstrMapMutex);
+ if (FuncId <= 0 || static_cast<size_t>(FuncId) > XRayInstrMap.Functions)
+ return 0;
+ return XRayInstrMap.SledsIndex[FuncId - 1].Begin->Function
+// On PPC, function entries are always aligned to 16 bytes. The beginning of a
+// sled might be a local entry, which is always +8 based on the global entry.
+// Always return the global entry.
+#ifdef __PPC__
+ & ~0xf
+#endif
+ ;
+}
+
+size_t __xray_max_function_id() XRAY_NEVER_INSTRUMENT {
+ SpinMutexLock Guard(&XRayInstrMapMutex);
+ return XRayInstrMap.Functions;
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_interface.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
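
The exported functions above combine into a small caller-side workflow: install a handler, patch the sleds, and undo both afterwards. A sketch of such a caller (handleEvent is a hypothetical user function; the interface calls are the ones declared in xray/xray_interface.h):

#include "xray/xray_interface.h"

// Hypothetical user handler invoked from the patched sleds.
void handleEvent(int32_t FuncId, XRayEntryType Type) {
  // Record (FuncId, Type) somewhere cheap; this runs on every entry/exit.
}

void traceWorkload() {
  if (__xray_set_handler(handleEvent) &&
      __xray_patch() == XRayPatchingStatus::SUCCESS) {
    // ... run the workload of interest ...
    __xray_unpatch();
    __xray_remove_handler();
  }
}
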
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_interface_internal.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_interface_internal.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_interface_internal.h (revision 351984)
@@ -0,0 +1,80 @@
+//===-- xray_interface_internal.h -------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+// Implementation of the API functions. See also include/xray/xray_interface.h.
+//
+//===----------------------------------------------------------------------===//
+#ifndef XRAY_INTERFACE_INTERNAL_H
+#define XRAY_INTERFACE_INTERNAL_H
+
+#include "sanitizer_common/sanitizer_platform.h"
+#include "xray/xray_interface.h"
+#include <cstddef>
+#include <cstdint>
+
+extern "C" {
+
+struct XRaySledEntry {
+#if SANITIZER_WORDSIZE == 64
+ uint64_t Address;
+ uint64_t Function;
+ unsigned char Kind;
+ unsigned char AlwaysInstrument;
+ unsigned char Version;
+ unsigned char Padding[13]; // Need 32 bytes
+#elif SANITIZER_WORDSIZE == 32
+ uint32_t Address;
+ uint32_t Function;
+ unsigned char Kind;
+ unsigned char AlwaysInstrument;
+ unsigned char Version;
+ unsigned char Padding[5]; // Need 16 bytes
+#else
+#error "Unsupported word size."
+#endif
+};
+
+struct XRayFunctionSledIndex {
+ const XRaySledEntry *Begin;
+ const XRaySledEntry *End;
+};
+}
+
+namespace __xray {
+
+struct XRaySledMap {
+ const XRaySledEntry *Sleds;
+ size_t Entries;
+ const XRayFunctionSledIndex *SledsIndex;
+ size_t Functions;
+};
+
+bool patchFunctionEntry(bool Enable, uint32_t FuncId, const XRaySledEntry &Sled,
+ void (*Trampoline)());
+bool patchFunctionExit(bool Enable, uint32_t FuncId, const XRaySledEntry &Sled);
+bool patchFunctionTailExit(bool Enable, uint32_t FuncId,
+ const XRaySledEntry &Sled);
+bool patchCustomEvent(bool Enable, uint32_t FuncId, const XRaySledEntry &Sled);
+bool patchTypedEvent(bool Enable, uint32_t FuncId, const XRaySledEntry &Sled);
+
+} // namespace __xray
+
+extern "C" {
+// The following functions have to be defined in assembler, on a per-platform
+// basis. See xray_trampoline_*.S files for implementations.
+extern void __xray_FunctionEntry();
+extern void __xray_FunctionExit();
+extern void __xray_FunctionTailExit();
+extern void __xray_ArgLoggerEntry();
+extern void __xray_CustomEvent();
+extern void __xray_TypedEvent();
+}
+
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_interface_internal.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
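
The Padding members above are what keep XRaySledEntry at a fixed size, so the section bounded by __start_xray_instr_map/__stop_xray_instr_map can be walked as a plain array. A static_assert along these lines (not present in the vendor sources) would document that invariant:

static_assert(sizeof(XRaySledEntry) == (SANITIZER_WORDSIZE == 64 ? 32 : 16),
              "XRaySledEntry must keep the fixed size the compiler emits "
              "into .xray_instr_map");
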
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_log_interface.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_log_interface.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_log_interface.cc (revision 351984)
@@ -0,0 +1,209 @@
+//===-- xray_log_interface.cc ---------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a function call tracing system.
+//
+//===----------------------------------------------------------------------===//
+#include "xray/xray_log_interface.h"
+
+#include "sanitizer_common/sanitizer_allocator_internal.h"
+#include "sanitizer_common/sanitizer_atomic.h"
+#include "sanitizer_common/sanitizer_mutex.h"
+#include "xray/xray_interface.h"
+#include "xray_defs.h"
+
+namespace __xray {
+static SpinMutex XRayImplMutex;
+static XRayLogImpl CurrentXRayImpl{nullptr, nullptr, nullptr, nullptr};
+static XRayLogImpl *GlobalXRayImpl = nullptr;
+
+// This is the default implementation of a buffer iterator, which always yields
+// a null buffer.
+XRayBuffer NullBufferIterator(XRayBuffer) XRAY_NEVER_INSTRUMENT {
+ return {nullptr, 0};
+}
+
+// This is the global function responsible for iterating through given buffers.
+atomic_uintptr_t XRayBufferIterator{
+ reinterpret_cast<uintptr_t>(&NullBufferIterator)};
+
+// We use a linked list of Mode-to-XRayLogImpl mappings. A map would be the
+// more natural structure, but we use a linked list to avoid depending on C++
+// standard library data structures at this level of the implementation.
+struct ModeImpl {
+ ModeImpl *Next;
+ const char *Mode;
+ XRayLogImpl Impl;
+};
+
+static ModeImpl SentinelModeImpl{
+ nullptr, nullptr, {nullptr, nullptr, nullptr, nullptr}};
+static ModeImpl *ModeImpls = &SentinelModeImpl;
+static const ModeImpl *CurrentMode = nullptr;
+
+} // namespace __xray
+
+using namespace __xray;
+
+void __xray_log_set_buffer_iterator(XRayBuffer (*Iterator)(XRayBuffer))
+ XRAY_NEVER_INSTRUMENT {
+ atomic_store(&__xray::XRayBufferIterator,
+ reinterpret_cast<uintptr_t>(Iterator), memory_order_release);
+}
+
+void __xray_log_remove_buffer_iterator() XRAY_NEVER_INSTRUMENT {
+ __xray_log_set_buffer_iterator(&NullBufferIterator);
+}
+
+XRayLogRegisterStatus
+__xray_log_register_mode(const char *Mode,
+ XRayLogImpl Impl) XRAY_NEVER_INSTRUMENT {
+ if (Impl.flush_log == nullptr || Impl.handle_arg0 == nullptr ||
+ Impl.log_finalize == nullptr || Impl.log_init == nullptr)
+ return XRayLogRegisterStatus::XRAY_INCOMPLETE_IMPL;
+
+ SpinMutexLock Guard(&XRayImplMutex);
+ // First, look for whether the mode already has a registered implementation.
+ for (ModeImpl *it = ModeImpls; it != &SentinelModeImpl; it = it->Next) {
+ if (!internal_strcmp(Mode, it->Mode))
+ return XRayLogRegisterStatus::XRAY_DUPLICATE_MODE;
+ }
+ auto *NewModeImpl = static_cast<ModeImpl *>(InternalAlloc(sizeof(ModeImpl)));
+ NewModeImpl->Next = ModeImpls;
+ NewModeImpl->Mode = internal_strdup(Mode);
+ NewModeImpl->Impl = Impl;
+ ModeImpls = NewModeImpl;
+ return XRayLogRegisterStatus::XRAY_REGISTRATION_OK;
+}
+
+XRayLogRegisterStatus
+__xray_log_select_mode(const char *Mode) XRAY_NEVER_INSTRUMENT {
+ SpinMutexLock Guard(&XRayImplMutex);
+ for (ModeImpl *it = ModeImpls; it != &SentinelModeImpl; it = it->Next) {
+ if (!internal_strcmp(Mode, it->Mode)) {
+ CurrentMode = it;
+ CurrentXRayImpl = it->Impl;
+ GlobalXRayImpl = &CurrentXRayImpl;
+ __xray_set_handler(it->Impl.handle_arg0);
+ return XRayLogRegisterStatus::XRAY_REGISTRATION_OK;
+ }
+ }
+ return XRayLogRegisterStatus::XRAY_MODE_NOT_FOUND;
+}
+
+const char *__xray_log_get_current_mode() XRAY_NEVER_INSTRUMENT {
+ SpinMutexLock Guard(&XRayImplMutex);
+ if (CurrentMode != nullptr)
+ return CurrentMode->Mode;
+ return nullptr;
+}
+
+void __xray_set_log_impl(XRayLogImpl Impl) XRAY_NEVER_INSTRUMENT {
+ if (Impl.log_init == nullptr || Impl.log_finalize == nullptr ||
+ Impl.handle_arg0 == nullptr || Impl.flush_log == nullptr) {
+ SpinMutexLock Guard(&XRayImplMutex);
+ GlobalXRayImpl = nullptr;
+ CurrentMode = nullptr;
+ __xray_remove_handler();
+ __xray_remove_handler_arg1();
+ return;
+ }
+
+ SpinMutexLock Guard(&XRayImplMutex);
+ CurrentXRayImpl = Impl;
+ GlobalXRayImpl = &CurrentXRayImpl;
+ __xray_set_handler(Impl.handle_arg0);
+}
+
+void __xray_remove_log_impl() XRAY_NEVER_INSTRUMENT {
+ SpinMutexLock Guard(&XRayImplMutex);
+ GlobalXRayImpl = nullptr;
+ __xray_remove_handler();
+ __xray_remove_handler_arg1();
+}
+
+XRayLogInitStatus __xray_log_init(size_t BufferSize, size_t MaxBuffers,
+ void *Args,
+ size_t ArgsSize) XRAY_NEVER_INSTRUMENT {
+ SpinMutexLock Guard(&XRayImplMutex);
+ if (!GlobalXRayImpl)
+ return XRayLogInitStatus::XRAY_LOG_UNINITIALIZED;
+ return GlobalXRayImpl->log_init(BufferSize, MaxBuffers, Args, ArgsSize);
+}
+
+XRayLogInitStatus __xray_log_init_mode(const char *Mode, const char *Config)
+ XRAY_NEVER_INSTRUMENT {
+ SpinMutexLock Guard(&XRayImplMutex);
+ if (!GlobalXRayImpl)
+ return XRayLogInitStatus::XRAY_LOG_UNINITIALIZED;
+
+ if (Config == nullptr)
+ return XRayLogInitStatus::XRAY_LOG_UNINITIALIZED;
+
+ // Check first whether the current mode is the same as what we expect.
+ if (CurrentMode == nullptr || internal_strcmp(CurrentMode->Mode, Mode) != 0)
+ return XRayLogInitStatus::XRAY_LOG_UNINITIALIZED;
+
+ // Here we do some work to coerce the pointer we're provided, so that
+ // the implementations that still take void* pointers can handle the
+ // data provided in the Config argument.
+ return GlobalXRayImpl->log_init(
+ 0, 0, const_cast<void *>(static_cast<const void *>(Config)), 0);
+}
+
+XRayLogInitStatus
+__xray_log_init_mode_bin(const char *Mode, const char *Config,
+ size_t ConfigSize) XRAY_NEVER_INSTRUMENT {
+ SpinMutexLock Guard(&XRayImplMutex);
+ if (!GlobalXRayImpl)
+ return XRayLogInitStatus::XRAY_LOG_UNINITIALIZED;
+
+ if (Config == nullptr)
+ return XRayLogInitStatus::XRAY_LOG_UNINITIALIZED;
+
+ // Check first whether the current mode is the same as what we expect.
+ if (CurrentMode == nullptr || internal_strcmp(CurrentMode->Mode, Mode) != 0)
+ return XRayLogInitStatus::XRAY_LOG_UNINITIALIZED;
+
+ // Here we do some work to coerce the pointer we're provided, so that
+ // the implementations that still take void* pointers can handle the
+ // data provided in the Config argument.
+ return GlobalXRayImpl->log_init(
+ 0, 0, const_cast<void *>(static_cast<const void *>(Config)), ConfigSize);
+}
+
+XRayLogInitStatus __xray_log_finalize() XRAY_NEVER_INSTRUMENT {
+ SpinMutexLock Guard(&XRayImplMutex);
+ if (!GlobalXRayImpl)
+ return XRayLogInitStatus::XRAY_LOG_UNINITIALIZED;
+ return GlobalXRayImpl->log_finalize();
+}
+
+XRayLogFlushStatus __xray_log_flushLog() XRAY_NEVER_INSTRUMENT {
+ SpinMutexLock Guard(&XRayImplMutex);
+ if (!GlobalXRayImpl)
+ return XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING;
+ return GlobalXRayImpl->flush_log();
+}
+
+XRayLogFlushStatus __xray_log_process_buffers(
+ void (*Processor)(const char *, XRayBuffer)) XRAY_NEVER_INSTRUMENT {
+ // We want to make sure we observe a consistent view of the global state for
+ // the log; the buffer iterator is read with an acquire load below.
+ if (!GlobalXRayImpl)
+ return XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING;
+ auto Iterator = reinterpret_cast<XRayBuffer (*)(XRayBuffer)>(
+ atomic_load(&XRayBufferIterator, memory_order_acquire));
+ auto Buffer = (*Iterator)(XRayBuffer{nullptr, 0});
+ auto Mode = CurrentMode ? CurrentMode->Mode : nullptr;
+ while (Buffer.Data != nullptr) {
+ (*Processor)(Mode, Buffer);
+ Buffer = (*Iterator)(Buffer);
+ }
+ return XRayLogFlushStatus::XRAY_LOG_FLUSHED;
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_log_interface.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
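
Registering and selecting a mode through the API above takes four non-null callbacks. A hedged sketch with no-op stubs (all names except the __xray_log_* entry points are hypothetical):

#include "xray/xray_log_interface.h"

XRayLogInitStatus noopInit(size_t, size_t, void *, size_t) {
  return XRayLogInitStatus::XRAY_LOG_INITIALIZED;
}
XRayLogInitStatus noopFinalize() {
  return XRayLogInitStatus::XRAY_LOG_FINALIZED;
}
void noopHandler(int32_t, XRayEntryType) {}
XRayLogFlushStatus noopFlush() {
  return XRayLogFlushStatus::XRAY_LOG_FLUSHED;
}

void installNoopMode() {
  // Field order: log_init, log_finalize, handle_arg0, flush_log.
  XRayLogImpl Impl{noopInit, noopFinalize, noopHandler, noopFlush};
  if (__xray_log_register_mode("noop", Impl) ==
      XRayLogRegisterStatus::XRAY_REGISTRATION_OK)
    __xray_log_select_mode("noop"); // Also installs handle_arg0 as handler.
}
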
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_mips.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_mips.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_mips.cc (revision 351984)
@@ -0,0 +1,170 @@
+//===-- xray_mips.cc --------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+// Implementation of MIPS-specific routines (32-bit).
+//
+//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_common.h"
+#include "xray_defs.h"
+#include "xray_interface_internal.h"
+#include <atomic>
+
+namespace __xray {
+
+// The machine codes for some instructions used in runtime patching.
+enum PatchOpcodes : uint32_t {
+ PO_ADDIU = 0x24000000, // addiu rt, rs, imm
+ PO_SW = 0xAC000000, // sw rt, offset(sp)
+ PO_LUI = 0x3C000000, // lui rs, %hi(address)
+ PO_ORI = 0x34000000, // ori rt, rs, %lo(address)
+ PO_JALR = 0x0000F809, // jalr rs
+ PO_LW = 0x8C000000, // lw rt, offset(base)
+ PO_B44 = 0x1000000b, // b #44
+ PO_NOP = 0x0, // nop
+};
+
+enum RegNum : uint32_t {
+ RN_T0 = 0x8,
+ RN_T9 = 0x19,
+ RN_RA = 0x1F,
+ RN_SP = 0x1D,
+};
+
+inline static uint32_t encodeInstruction(uint32_t Opcode, uint32_t Rs,
+ uint32_t Rt,
+ uint32_t Imm) XRAY_NEVER_INSTRUMENT {
+ return (Opcode | Rs << 21 | Rt << 16 | Imm);
+}
+
+inline static uint32_t
+encodeSpecialInstruction(uint32_t Opcode, uint32_t Rs, uint32_t Rt, uint32_t Rd,
+ uint32_t Imm) XRAY_NEVER_INSTRUMENT {
+ return (Rs << 21 | Rt << 16 | Rd << 11 | Imm << 6 | Opcode);
+}
+
+inline static bool patchSled(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled,
+ void (*TracingHook)()) XRAY_NEVER_INSTRUMENT {
+ // When |Enable| == true,
+ // We replace the following compile-time stub (sled):
+ //
+ // xray_sled_n:
+ // B .tmpN
+ // 11 NOPs (44 bytes)
+ // .tmpN
+ // ADDIU T9, T9, 44
+ //
+ // With the following runtime patch:
+ //
+ // xray_sled_n (32-bit):
+ // addiu sp, sp, -8 ;create stack frame
+ // nop
+ // sw ra, 4(sp) ;save return address
+ // sw t9, 0(sp) ;save register t9
+ // lui t9, %hi(__xray_FunctionEntry/Exit)
+ // ori t9, t9, %lo(__xray_FunctionEntry/Exit)
+ // lui t0, %hi(function_id)
+ // jalr t9 ;call Tracing hook
+ // ori t0, t0, %lo(function_id) ;pass function id (delay slot)
+ // lw t9, 0(sp) ;restore register t9
+ // lw ra, 4(sp) ;restore return address
+ // addiu sp, sp, 8 ;delete stack frame
+ //
+ // We add 44 bytes to t9 because we want to adjust the function pointer to
+ // the actual start of the function, i.e. the address just after the nop
+ // sled. We do this because the gp displacement relocation is emitted at
+ // the start of the function, i.e. after the nop sled, and to correctly
+ // calculate the global offset table address, t9 must hold the address of
+ // the instruction containing the gp displacement relocation.
+ // FIXME: Is this correct for the static relocation model?
+ //
+ // Replacement of the first 4-byte instruction should be the last and atomic
+ // operation, so that the user code which reaches the sled concurrently
+ // either jumps over the whole sled, or executes the whole sled when the
+ // latter is ready.
+ //
+ // When |Enable| == false, we restore the first instruction in the sled to
+ // B #44
+
+ if (Enable) {
+ uint32_t LoTracingHookAddr =
+ reinterpret_cast<int32_t>(TracingHook) & 0xffff;
+ uint32_t HiTracingHookAddr =
+ (reinterpret_cast<int32_t>(TracingHook) >> 16) & 0xffff;
+ uint32_t LoFunctionID = FuncId & 0xffff;
+ uint32_t HiFunctionID = (FuncId >> 16) & 0xffff;
+ *reinterpret_cast<uint32_t *>(Sled.Address + 8) = encodeInstruction(
+ PatchOpcodes::PO_SW, RegNum::RN_SP, RegNum::RN_RA, 0x4);
+ *reinterpret_cast<uint32_t *>(Sled.Address + 12) = encodeInstruction(
+ PatchOpcodes::PO_SW, RegNum::RN_SP, RegNum::RN_T9, 0x0);
+ *reinterpret_cast<uint32_t *>(Sled.Address + 16) = encodeInstruction(
+ PatchOpcodes::PO_LUI, 0x0, RegNum::RN_T9, HiTracingHookAddr);
+ *reinterpret_cast<uint32_t *>(Sled.Address + 20) = encodeInstruction(
+ PatchOpcodes::PO_ORI, RegNum::RN_T9, RegNum::RN_T9, LoTracingHookAddr);
+ *reinterpret_cast<uint32_t *>(Sled.Address + 24) = encodeInstruction(
+ PatchOpcodes::PO_LUI, 0x0, RegNum::RN_T0, HiFunctionID);
+ *reinterpret_cast<uint32_t *>(Sled.Address + 28) = encodeSpecialInstruction(
+ PatchOpcodes::PO_JALR, RegNum::RN_T9, 0x0, RegNum::RN_RA, 0X0);
+ *reinterpret_cast<uint32_t *>(Sled.Address + 32) = encodeInstruction(
+ PatchOpcodes::PO_ORI, RegNum::RN_T0, RegNum::RN_T0, LoFunctionID);
+ *reinterpret_cast<uint32_t *>(Sled.Address + 36) = encodeInstruction(
+ PatchOpcodes::PO_LW, RegNum::RN_SP, RegNum::RN_T9, 0x0);
+ *reinterpret_cast<uint32_t *>(Sled.Address + 40) = encodeInstruction(
+ PatchOpcodes::PO_LW, RegNum::RN_SP, RegNum::RN_RA, 0x4);
+ *reinterpret_cast<uint32_t *>(Sled.Address + 44) = encodeInstruction(
+ PatchOpcodes::PO_ADDIU, RegNum::RN_SP, RegNum::RN_SP, 0x8);
+ uint32_t CreateStackSpaceInstr = encodeInstruction(
+ PatchOpcodes::PO_ADDIU, RegNum::RN_SP, RegNum::RN_SP, 0xFFF8);
+ std::atomic_store_explicit(
+ reinterpret_cast<std::atomic<uint32_t> *>(Sled.Address),
+ uint32_t(CreateStackSpaceInstr), std::memory_order_release);
+ } else {
+ std::atomic_store_explicit(
+ reinterpret_cast<std::atomic<uint32_t> *>(Sled.Address),
+ uint32_t(PatchOpcodes::PO_B44), std::memory_order_release);
+ }
+ return true;
+}
+
+bool patchFunctionEntry(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled,
+ void (*Trampoline)()) XRAY_NEVER_INSTRUMENT {
+ return patchSled(Enable, FuncId, Sled, Trampoline);
+}
+
+bool patchFunctionExit(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
+ return patchSled(Enable, FuncId, Sled, __xray_FunctionExit);
+}
+
+bool patchFunctionTailExit(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
+ // FIXME: In the future we'd need to distinguish between non-tail exits and
+ // tail exits for better information preservation.
+ return patchSled(Enable, FuncId, Sled, __xray_FunctionExit);
+}
+
+bool patchCustomEvent(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
+ // FIXME: Implement in mips?
+ return false;
+}
+
+bool patchTypedEvent(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
+ // FIXME: Implement in mips?
+ return false;
+}
+
+} // namespace __xray
+
+extern "C" void __xray_ArgLoggerEntry() XRAY_NEVER_INSTRUMENT {
+ // FIXME: this will have to be implemented in the trampoline assembly file
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_mips.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
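
The I-type encoder above packs opcode and register fields into a single word. A worked example (illustrative only) reproducing the "sw ra, 4(sp)" store that the patcher writes at Sled.Address + 8:

#include <cassert>
#include <cstdint>

static uint32_t encode(uint32_t Opcode, uint32_t Rs, uint32_t Rt,
                       uint32_t Imm) {
  return Opcode | Rs << 21 | Rt << 16 | Imm; // Same layout as above.
}

int main() {
  // PO_SW | base=sp(29) << 21 | rt=ra(31) << 16 | imm=4
  assert(encode(0xAC000000u, 29, 31, 4) == 0xAFBF0004u); // sw ra, 4(sp)
  return 0;
}
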
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_mips64.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_mips64.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_mips64.cc (revision 351984)
@@ -0,0 +1,178 @@
+//===-- xray_mips64.cc ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+// Implementation of MIPS64-specific routines.
+//
+//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_common.h"
+#include "xray_defs.h"
+#include "xray_interface_internal.h"
+#include <atomic>
+
+namespace __xray {
+
+// The machine codes for some instructions used in runtime patching.
+enum PatchOpcodes : uint32_t {
+ PO_DADDIU = 0x64000000, // daddiu rt, rs, imm
+ PO_SD = 0xFC000000, // sd rt, offset(base)
+ PO_LUI = 0x3C000000, // lui rt, imm
+ PO_ORI = 0x34000000, // ori rt, rs, imm
+ PO_DSLL = 0x00000038, // dsll rd, rt, sa
+ PO_JALR = 0x00000009, // jalr rs
+ PO_LD = 0xDC000000, // ld rt, offset(base)
+ PO_B60 = 0x1000000f, // b #60
+ PO_NOP = 0x0, // nop
+};
+
+enum RegNum : uint32_t {
+ RN_T0 = 0xC,
+ RN_T9 = 0x19,
+ RN_RA = 0x1F,
+ RN_SP = 0x1D,
+};
+
+inline static uint32_t encodeInstruction(uint32_t Opcode, uint32_t Rs,
+ uint32_t Rt,
+ uint32_t Imm) XRAY_NEVER_INSTRUMENT {
+ return (Opcode | Rs << 21 | Rt << 16 | Imm);
+}
+
+inline static uint32_t
+encodeSpecialInstruction(uint32_t Opcode, uint32_t Rs, uint32_t Rt, uint32_t Rd,
+ uint32_t Imm) XRAY_NEVER_INSTRUMENT {
+ return (Rs << 21 | Rt << 16 | Rd << 11 | Imm << 6 | Opcode);
+}
+
+inline static bool patchSled(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled,
+ void (*TracingHook)()) XRAY_NEVER_INSTRUMENT {
+ // When |Enable| == true,
+ // We replace the following compile-time stub (sled):
+ //
+ // xray_sled_n:
+ // B .tmpN
+ // 15 NOPs (60 bytes)
+ // .tmpN
+ //
+ // With the following runtime patch:
+ //
+ // xray_sled_n (64-bit):
+ // daddiu sp, sp, -16 ;create stack frame
+ // nop
+ // sd ra, 8(sp) ;save return address
+ // sd t9, 0(sp) ;save register t9
+ // lui t9, %highest(__xray_FunctionEntry/Exit)
+ // ori t9, t9, %higher(__xray_FunctionEntry/Exit)
+ // dsll t9, t9, 16
+ // ori t9, t9, %hi(__xray_FunctionEntry/Exit)
+ // dsll t9, t9, 16
+ // ori t9, t9, %lo(__xray_FunctionEntry/Exit)
+ // lui t0, %hi(function_id)
+ // jalr t9 ;call Tracing hook
+ // ori t0, t0, %lo(function_id) ;pass function id (delay slot)
+ // ld t9, 0(sp) ;restore register t9
+ // ld ra, 8(sp) ;restore return address
+ // daddiu sp, sp, 16 ;delete stack frame
+ //
+ // Replacement of the first 4-byte instruction should be the last and atomic
+ // operation, so that the user code which reaches the sled concurrently
+ // either jumps over the whole sled, or executes the whole sled when the
+ // latter is ready.
+ //
+ // When |Enable| == false, we restore the first instruction in the sled to
+ // B #60
+
+ if (Enable) {
+ uint32_t LoTracingHookAddr =
+ reinterpret_cast<int64_t>(TracingHook) & 0xffff;
+ uint32_t HiTracingHookAddr =
+ (reinterpret_cast<int64_t>(TracingHook) >> 16) & 0xffff;
+ uint32_t HigherTracingHookAddr =
+ (reinterpret_cast<int64_t>(TracingHook) >> 32) & 0xffff;
+ uint32_t HighestTracingHookAddr =
+ (reinterpret_cast<int64_t>(TracingHook) >> 48) & 0xffff;
+ uint32_t LoFunctionID = FuncId & 0xffff;
+ uint32_t HiFunctionID = (FuncId >> 16) & 0xffff;
+ *reinterpret_cast<uint32_t *>(Sled.Address + 8) = encodeInstruction(
+ PatchOpcodes::PO_SD, RegNum::RN_SP, RegNum::RN_RA, 0x8);
+ *reinterpret_cast<uint32_t *>(Sled.Address + 12) = encodeInstruction(
+ PatchOpcodes::PO_SD, RegNum::RN_SP, RegNum::RN_T9, 0x0);
+ *reinterpret_cast<uint32_t *>(Sled.Address + 16) = encodeInstruction(
+ PatchOpcodes::PO_LUI, 0x0, RegNum::RN_T9, HighestTracingHookAddr);
+ *reinterpret_cast<uint32_t *>(Sled.Address + 20) =
+ encodeInstruction(PatchOpcodes::PO_ORI, RegNum::RN_T9, RegNum::RN_T9,
+ HigherTracingHookAddr);
+ *reinterpret_cast<uint32_t *>(Sled.Address + 24) = encodeSpecialInstruction(
+ PatchOpcodes::PO_DSLL, 0x0, RegNum::RN_T9, RegNum::RN_T9, 0x10);
+ *reinterpret_cast<uint32_t *>(Sled.Address + 28) = encodeInstruction(
+ PatchOpcodes::PO_ORI, RegNum::RN_T9, RegNum::RN_T9, HiTracingHookAddr);
+ *reinterpret_cast<uint32_t *>(Sled.Address + 32) = encodeSpecialInstruction(
+ PatchOpcodes::PO_DSLL, 0x0, RegNum::RN_T9, RegNum::RN_T9, 0x10);
+ *reinterpret_cast<uint32_t *>(Sled.Address + 36) = encodeInstruction(
+ PatchOpcodes::PO_ORI, RegNum::RN_T9, RegNum::RN_T9, LoTracingHookAddr);
+ *reinterpret_cast<uint32_t *>(Sled.Address + 40) = encodeInstruction(
+ PatchOpcodes::PO_LUI, 0x0, RegNum::RN_T0, HiFunctionID);
+ *reinterpret_cast<uint32_t *>(Sled.Address + 44) = encodeSpecialInstruction(
+ PatchOpcodes::PO_JALR, RegNum::RN_T9, 0x0, RegNum::RN_RA, 0X0);
+ *reinterpret_cast<uint32_t *>(Sled.Address + 48) = encodeInstruction(
+ PatchOpcodes::PO_ORI, RegNum::RN_T0, RegNum::RN_T0, LoFunctionID);
+ *reinterpret_cast<uint32_t *>(Sled.Address + 52) = encodeInstruction(
+ PatchOpcodes::PO_LD, RegNum::RN_SP, RegNum::RN_T9, 0x0);
+ *reinterpret_cast<uint32_t *>(Sled.Address + 56) = encodeInstruction(
+ PatchOpcodes::PO_LD, RegNum::RN_SP, RegNum::RN_RA, 0x8);
+ *reinterpret_cast<uint32_t *>(Sled.Address + 60) = encodeInstruction(
+ PatchOpcodes::PO_DADDIU, RegNum::RN_SP, RegNum::RN_SP, 0x10);
+ uint32_t CreateStackSpace = encodeInstruction(
+ PatchOpcodes::PO_DADDIU, RegNum::RN_SP, RegNum::RN_SP, 0xfff0);
+ std::atomic_store_explicit(
+ reinterpret_cast<std::atomic<uint32_t> *>(Sled.Address),
+ CreateStackSpace, std::memory_order_release);
+ } else {
+ std::atomic_store_explicit(
+ reinterpret_cast<std::atomic<uint32_t> *>(Sled.Address),
+ uint32_t(PatchOpcodes::PO_B60), std::memory_order_release);
+ }
+ return true;
+}
+
+bool patchFunctionEntry(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled,
+ void (*Trampoline)()) XRAY_NEVER_INSTRUMENT {
+ return patchSled(Enable, FuncId, Sled, Trampoline);
+}
+
+bool patchFunctionExit(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
+ return patchSled(Enable, FuncId, Sled, __xray_FunctionExit);
+}
+
+bool patchFunctionTailExit(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
+ // FIXME: In the future we'd need to distinguish between non-tail exits and
+ // tail exits for better information preservation.
+ return patchSled(Enable, FuncId, Sled, __xray_FunctionExit);
+}
+
+bool patchCustomEvent(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
+ // FIXME: Implement in mips64?
+ return false;
+}
+
+bool patchTypedEvent(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
+ // FIXME: Implement in mips64?
+ return false;
+}
+} // namespace __xray
+
+extern "C" void __xray_ArgLoggerEntry() XRAY_NEVER_INSTRUMENT {
+ // FIXME: this will have to be implemented in the trampoline assembly file
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_mips64.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_powerpc64.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_powerpc64.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_powerpc64.cc (revision 351984)
@@ -0,0 +1,111 @@
+//===-- xray_powerpc64.cc ---------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+// Implementation of powerpc64 and powerpc64le routines.
+//
+//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_common.h"
+#include "xray_defs.h"
+#include "xray_interface_internal.h"
+#include "xray_utils.h"
+#include <atomic>
+#include <cassert>
+#include <cstring>
+
+#ifndef __LITTLE_ENDIAN__
+#error powerpc64 big endian is not supported for now.
+#endif
+
+namespace {
+
+constexpr unsigned long long JumpOverInstNum = 7;
+
+void clearCache(void *Addr, size_t Len) {
+ const size_t LineSize = 32;
+
+ const intptr_t Mask = ~(LineSize - 1);
+ const intptr_t StartLine = ((intptr_t)Addr) & Mask;
+ const intptr_t EndLine = ((intptr_t)Addr + Len + LineSize - 1) & Mask;
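+  // For example, with Addr = 0x1005, Len = 8 and LineSize = 32: Mask = ~0x1f,
+  // StartLine = 0x1000 and EndLine = 0x1020, so exactly the one cache line
+  // spanning the patched bytes is flushed and invalidated below.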
+
+ for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
+ asm volatile("dcbf 0, %0" : : "r"(Line));
+ asm volatile("sync");
+
+ for (intptr_t Line = StartLine; Line < EndLine; Line += LineSize)
+ asm volatile("icbi 0, %0" : : "r"(Line));
+ asm volatile("isync");
+}
+
+} // namespace
+
+extern "C" void __clear_cache(void *start, void *end);
+
+namespace __xray {
+
+bool patchFunctionEntry(const bool Enable, uint32_t FuncId,
+ const XRaySledEntry &Sled,
+ void (*Trampoline)()) XRAY_NEVER_INSTRUMENT {
+ if (Enable) {
+    // lis 0, FuncId[16..31]
+    // ori 0, 0, FuncId[0..15]
+ *reinterpret_cast<uint64_t *>(Sled.Address) =
+ (0x3c000000ull + (FuncId >> 16)) +
+ ((0x60000000ull + (FuncId & 0xffff)) << 32);
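+    // On little-endian powerpc64 (the only layout supported here, see the
+    // #error above), the low half of this 64-bit store lands first in
+    // memory, so the lis is the instruction executed at Sled.Address.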
+ } else {
+ // b +JumpOverInstNum instructions.
+ *reinterpret_cast<uint32_t *>(Sled.Address) =
+ 0x48000000ull + (JumpOverInstNum << 2);
+ }
+ clearCache(reinterpret_cast<void *>(Sled.Address), 8);
+ return true;
+}
+
+bool patchFunctionExit(const bool Enable, uint32_t FuncId,
+ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
+ if (Enable) {
+    // lis 0, FuncId[16..31]
+    // ori 0, 0, FuncId[0..15]
+ *reinterpret_cast<uint64_t *>(Sled.Address) =
+ (0x3c000000ull + (FuncId >> 16)) +
+ ((0x60000000ull + (FuncId & 0xffff)) << 32);
+ } else {
+ // Copy the blr/b instruction after JumpOverInstNum instructions.
+ *reinterpret_cast<uint32_t *>(Sled.Address) =
+ *(reinterpret_cast<uint32_t *>(Sled.Address) + JumpOverInstNum);
+ }
+ clearCache(reinterpret_cast<void *>(Sled.Address), 8);
+ return true;
+}
+
+bool patchFunctionTailExit(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
+ return patchFunctionExit(Enable, FuncId, Sled);
+}
+
+// FIXME: Maybe implement this better?
+bool probeRequiredCPUFeatures() XRAY_NEVER_INSTRUMENT { return true; }
+
+bool patchCustomEvent(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
+ // FIXME: Implement in powerpc64?
+ return false;
+}
+
+bool patchTypedEvent(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
+ // FIXME: Implement in powerpc64?
+ return false;
+}
+
+} // namespace __xray
+
+extern "C" void __xray_ArgLoggerEntry() XRAY_NEVER_INSTRUMENT {
+ // FIXME: this will have to be implemented in the trampoline assembly file
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_powerpc64.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_powerpc64.inc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_powerpc64.inc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_powerpc64.inc (revision 351984)
@@ -0,0 +1,36 @@
+//===-- xray_powerpc64.inc --------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+//===----------------------------------------------------------------------===//
+
+#include <cstdint>
+#include <mutex>
+#include <sys/platform/ppc.h>
+
+#include "xray_defs.h"
+
+namespace __xray {
+
+ALWAYS_INLINE uint64_t readTSC(uint8_t &CPU) XRAY_NEVER_INSTRUMENT {
+ CPU = 0;
+ return __ppc_get_timebase();
+}
+
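+// The mutex below serializes callers; presumably the underlying glibc helper
+// may lazily read system information (e.g. /proc/cpuinfo) on first use. This
+// rationale is an assumption, not documented behaviour.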
+inline uint64_t getTSCFrequency() XRAY_NEVER_INSTRUMENT {
+ static std::mutex M;
+ std::lock_guard<std::mutex> Guard(M);
+ return __ppc_get_timebase_freq();
+}
+
+inline bool probeRequiredCPUFeatures() XRAY_NEVER_INSTRUMENT {
+ return true;
+}
+
+} // namespace __xray
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_powerpc64.inc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_profile_collector.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_profile_collector.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_profile_collector.cc (revision 351984)
@@ -0,0 +1,414 @@
+//===-- xray_profile_collector.cc ------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+// This implements the interface for the profileCollectorService.
+//
+//===----------------------------------------------------------------------===//
+#include "xray_profile_collector.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "xray_allocator.h"
+#include "xray_defs.h"
+#include "xray_profiling_flags.h"
+#include "xray_segmented_array.h"
+#include <memory>
+#include <pthread.h>
+#include <utility>
+
+namespace __xray {
+namespace profileCollectorService {
+
+namespace {
+
+SpinMutex GlobalMutex;
+struct ThreadTrie {
+ tid_t TId;
+ typename std::aligned_storage<sizeof(FunctionCallTrie)>::type TrieStorage;
+};
+
+struct ProfileBuffer {
+ void *Data;
+ size_t Size;
+};
+
+// Current version of the profile format.
+constexpr u64 XRayProfilingVersion = 0x20180424;
+
+// Identifier for XRay profiling files 'xrayprof' in hex.
+constexpr u64 XRayMagicBytes = 0x7872617970726f66;
+
+struct XRayProfilingFileHeader {
+ const u64 MagicBytes = XRayMagicBytes;
+ const u64 Version = XRayProfilingVersion;
+ u64 Timestamp = 0; // System time in nanoseconds.
+ u64 PID = 0; // Process ID.
+};
+
+struct BlockHeader {
+ u32 BlockSize;
+ u32 BlockNum;
+ u64 ThreadId;
+};
+
+struct ThreadData {
+ BufferQueue *BQ;
+ FunctionCallTrie::Allocators::Buffers Buffers;
+ FunctionCallTrie::Allocators Allocators;
+ FunctionCallTrie FCT;
+ tid_t TId;
+};
+
+using ThreadDataArray = Array<ThreadData>;
+using ThreadDataAllocator = ThreadDataArray::AllocatorType;
+
+// We use a separate buffer queue for the backing store for the allocator used
+// by the ThreadData array. This lets us host the buffers, allocators, and tries
+// associated with a thread by moving the data into the array instead of
+// attempting to copy the data to a separately backed set of tries.
+static typename std::aligned_storage<
+ sizeof(BufferQueue), alignof(BufferQueue)>::type BufferQueueStorage;
+static BufferQueue *BQ = nullptr;
+static BufferQueue::Buffer Buffer;
+static typename std::aligned_storage<sizeof(ThreadDataAllocator),
+ alignof(ThreadDataAllocator)>::type
+ ThreadDataAllocatorStorage;
+static typename std::aligned_storage<sizeof(ThreadDataArray),
+ alignof(ThreadDataArray)>::type
+ ThreadDataArrayStorage;
+
+static ThreadDataAllocator *TDAllocator = nullptr;
+static ThreadDataArray *TDArray = nullptr;
+
+using ProfileBufferArray = Array<ProfileBuffer>;
+using ProfileBufferArrayAllocator = typename ProfileBufferArray::AllocatorType;
+
+// These need to be global aligned storage to avoid dynamic initialization. We
+// need these to be aligned to allow us to placement-new objects into the
+// storage, and to have pointers to those objects be appropriately aligned.
+static typename std::aligned_storage<sizeof(ProfileBufferArray)>::type
+ ProfileBuffersStorage;
+static typename std::aligned_storage<sizeof(ProfileBufferArrayAllocator)>::type
+ ProfileBufferArrayAllocatorStorage;
+
+static ProfileBufferArrayAllocator *ProfileBuffersAllocator = nullptr;
+static ProfileBufferArray *ProfileBuffers = nullptr;
+
+// Use a global flag to determine whether the collector implementation has been
+// initialized.
+static atomic_uint8_t CollectorInitialized{0};
+
+} // namespace
+
+void post(BufferQueue *Q, FunctionCallTrie &&T,
+ FunctionCallTrie::Allocators &&A,
+ FunctionCallTrie::Allocators::Buffers &&B,
+ tid_t TId) XRAY_NEVER_INSTRUMENT {
+ DCHECK_NE(Q, nullptr);
+
+ // Bail out early if the collector has not been initialized.
+ if (!atomic_load(&CollectorInitialized, memory_order_acquire)) {
+ T.~FunctionCallTrie();
+ A.~Allocators();
+ Q->releaseBuffer(B.NodeBuffer);
+ Q->releaseBuffer(B.RootsBuffer);
+ Q->releaseBuffer(B.ShadowStackBuffer);
+ Q->releaseBuffer(B.NodeIdPairBuffer);
+ B.~Buffers();
+ return;
+ }
+
+ {
+ SpinMutexLock Lock(&GlobalMutex);
+ DCHECK_NE(TDAllocator, nullptr);
+ DCHECK_NE(TDArray, nullptr);
+
+ if (TDArray->AppendEmplace(Q, std::move(B), std::move(A), std::move(T),
+ TId) == nullptr) {
+ // If we fail to add the data to the array, we should destroy the objects
+ // handed us.
+ T.~FunctionCallTrie();
+ A.~Allocators();
+ Q->releaseBuffer(B.NodeBuffer);
+ Q->releaseBuffer(B.RootsBuffer);
+ Q->releaseBuffer(B.ShadowStackBuffer);
+ Q->releaseBuffer(B.NodeIdPairBuffer);
+ B.~Buffers();
+ }
+ }
+}
+
+// A PathArray represents the function IDs that make up a stack trace. In this
+// context a path is almost always represented from the leaf function in a call
+// stack to a root of the call trie.
+using PathArray = Array<int32_t>;
+
+struct ProfileRecord {
+ using PathAllocator = typename PathArray::AllocatorType;
+
+  // The Path in this record is the function IDs from the leaf to the root of
+  // the function call stack, as represented in a FunctionCallTrie.
+ PathArray Path;
+ const FunctionCallTrie::Node *Node;
+};
+
+namespace {
+
+using ProfileRecordArray = Array<ProfileRecord>;
+
+// Perform a depth-first traversal of each root of the FunctionCallTrie to
+// generate the path(s) and the data associated with each path.
+static void
+populateRecords(ProfileRecordArray &PRs, ProfileRecord::PathAllocator &PA,
+ const FunctionCallTrie &Trie) XRAY_NEVER_INSTRUMENT {
+ using StackArray = Array<const FunctionCallTrie::Node *>;
+ using StackAllocator = typename StackArray::AllocatorType;
+ StackAllocator StackAlloc(profilingFlags()->stack_allocator_max);
+ StackArray DFSStack(StackAlloc);
+ for (const auto *R : Trie.getRoots()) {
+ DFSStack.Append(R);
+ while (!DFSStack.empty()) {
+ auto *Node = DFSStack.back();
+ DFSStack.trim(1);
+ if (Node == nullptr)
+ continue;
+ auto Record = PRs.AppendEmplace(PathArray{PA}, Node);
+ if (Record == nullptr)
+ return;
+ DCHECK_NE(Record, nullptr);
+
+ // Traverse the Node's parents and as we're doing so, get the FIds in
+ // the order they appear.
+ for (auto N = Node; N != nullptr; N = N->Parent)
+ Record->Path.Append(N->FId);
+ DCHECK(!Record->Path.empty());
+
+ for (const auto C : Node->Callees)
+ DFSStack.Append(C.NodePtr);
+ }
+ }
+}
+
+static void serializeRecords(ProfileBuffer *Buffer, const BlockHeader &Header,
+ const ProfileRecordArray &ProfileRecords)
+ XRAY_NEVER_INSTRUMENT {
+ auto NextPtr = static_cast<uint8_t *>(
+ internal_memcpy(Buffer->Data, &Header, sizeof(Header))) +
+ sizeof(Header);
+ for (const auto &Record : ProfileRecords) {
+    // The list of function IDs follows:
+ for (const auto FId : Record.Path)
+ NextPtr =
+ static_cast<uint8_t *>(internal_memcpy(NextPtr, &FId, sizeof(FId))) +
+ sizeof(FId);
+
+ // Add the sentinel here.
+ constexpr int32_t SentinelFId = 0;
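+    // internal_memset fills byte-by-byte, which only produces the intended
+    // 32-bit sentinel value here because SentinelFId is zero.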
+ NextPtr = static_cast<uint8_t *>(
+ internal_memset(NextPtr, SentinelFId, sizeof(SentinelFId))) +
+ sizeof(SentinelFId);
+
+ // Add the node data here.
+ NextPtr =
+ static_cast<uint8_t *>(internal_memcpy(
+ NextPtr, &Record.Node->CallCount, sizeof(Record.Node->CallCount))) +
+ sizeof(Record.Node->CallCount);
+ NextPtr = static_cast<uint8_t *>(
+ internal_memcpy(NextPtr, &Record.Node->CumulativeLocalTime,
+ sizeof(Record.Node->CumulativeLocalTime))) +
+ sizeof(Record.Node->CumulativeLocalTime);
+ }
+
+ DCHECK_EQ(NextPtr - static_cast<uint8_t *>(Buffer->Data), Buffer->Size);
+}
+
+} // namespace
+
+void serialize() XRAY_NEVER_INSTRUMENT {
+ if (!atomic_load(&CollectorInitialized, memory_order_acquire))
+ return;
+
+ SpinMutexLock Lock(&GlobalMutex);
+
+ // Clear out the global ProfileBuffers, if it's not empty.
+ for (auto &B : *ProfileBuffers)
+ deallocateBuffer(reinterpret_cast<unsigned char *>(B.Data), B.Size);
+ ProfileBuffers->trim(ProfileBuffers->size());
+
+ DCHECK_NE(TDArray, nullptr);
+ if (TDArray->empty())
+ return;
+
+ // Then repopulate the global ProfileBuffers.
+ u32 I = 0;
+ auto MaxSize = profilingFlags()->global_allocator_max;
+ auto ProfileArena = allocateBuffer(MaxSize);
+ if (ProfileArena == nullptr)
+ return;
+
+ auto ProfileArenaCleanup = at_scope_exit(
+ [&]() XRAY_NEVER_INSTRUMENT { deallocateBuffer(ProfileArena, MaxSize); });
+
+ auto PathArena = allocateBuffer(profilingFlags()->global_allocator_max);
+ if (PathArena == nullptr)
+ return;
+
+ auto PathArenaCleanup = at_scope_exit(
+ [&]() XRAY_NEVER_INSTRUMENT { deallocateBuffer(PathArena, MaxSize); });
+
+ for (const auto &ThreadTrie : *TDArray) {
+ using ProfileRecordAllocator = typename ProfileRecordArray::AllocatorType;
+ ProfileRecordAllocator PRAlloc(ProfileArena,
+ profilingFlags()->global_allocator_max);
+ ProfileRecord::PathAllocator PathAlloc(
+ PathArena, profilingFlags()->global_allocator_max);
+ ProfileRecordArray ProfileRecords(PRAlloc);
+
+ // First, we want to compute the amount of space we're going to need. We'll
+ // use a local allocator and an __xray::Array<...> to store the intermediary
+ // data, then compute the size as we're going along. Then we'll allocate the
+ // contiguous space to contain the thread buffer data.
+ if (ThreadTrie.FCT.getRoots().empty())
+ continue;
+
+ populateRecords(ProfileRecords, PathAlloc, ThreadTrie.FCT);
+ DCHECK(!ThreadTrie.FCT.getRoots().empty());
+ DCHECK(!ProfileRecords.empty());
+
+ // Go through each record, to compute the sizes.
+ //
+ // header size = block size (4 bytes)
+ // + block number (4 bytes)
+ // + thread id (8 bytes)
+    // record size = path ids (4 bytes * number of ids + sentinel 4 bytes)
+    //   + call count (8 bytes)
+    //   + local time (8 bytes)
+    //
+    // i.e. 20 fixed bytes plus 4 bytes per path id; the 32-bit sentinel is
+    // what terminates each record (see serializeRecords above).
+ u32 CumulativeSizes = 0;
+ for (const auto &Record : ProfileRecords)
+ CumulativeSizes += 20 + (4 * Record.Path.size());
+
+ BlockHeader Header{16 + CumulativeSizes, I++, ThreadTrie.TId};
+ auto B = ProfileBuffers->Append({});
+ B->Size = sizeof(Header) + CumulativeSizes;
+ B->Data = allocateBuffer(B->Size);
+ DCHECK_NE(B->Data, nullptr);
+ serializeRecords(B, Header, ProfileRecords);
+ }
+}
+
+void reset() XRAY_NEVER_INSTRUMENT {
+ atomic_store(&CollectorInitialized, 0, memory_order_release);
+ SpinMutexLock Lock(&GlobalMutex);
+
+ if (ProfileBuffers != nullptr) {
+ // Clear out the profile buffers that have been serialized.
+ for (auto &B : *ProfileBuffers)
+ deallocateBuffer(reinterpret_cast<uint8_t *>(B.Data), B.Size);
+ ProfileBuffers->trim(ProfileBuffers->size());
+ ProfileBuffers = nullptr;
+ }
+
+ if (TDArray != nullptr) {
+ // Release the resources as required.
+ for (auto &TD : *TDArray) {
+ TD.BQ->releaseBuffer(TD.Buffers.NodeBuffer);
+ TD.BQ->releaseBuffer(TD.Buffers.RootsBuffer);
+ TD.BQ->releaseBuffer(TD.Buffers.ShadowStackBuffer);
+ TD.BQ->releaseBuffer(TD.Buffers.NodeIdPairBuffer);
+ }
+    // We don't bother destroying the array here because we've already
+    // potentially freed the backing store for the array. Instead we reset the
+    // pointer to nullptr and re-use the storage later
+    // (placement-new'ing into the storage as-is).
+ TDArray = nullptr;
+ }
+
+ if (TDAllocator != nullptr) {
+ TDAllocator->~Allocator();
+ TDAllocator = nullptr;
+ }
+
+ if (Buffer.Data != nullptr) {
+ BQ->releaseBuffer(Buffer);
+ }
+
+ if (BQ == nullptr) {
+ bool Success = false;
+ new (&BufferQueueStorage)
+ BufferQueue(profilingFlags()->global_allocator_max, 1, Success);
+ if (!Success)
+ return;
+ BQ = reinterpret_cast<BufferQueue *>(&BufferQueueStorage);
+ } else {
+ BQ->finalize();
+
+ if (BQ->init(profilingFlags()->global_allocator_max, 1) !=
+ BufferQueue::ErrorCode::Ok)
+ return;
+ }
+
+ if (BQ->getBuffer(Buffer) != BufferQueue::ErrorCode::Ok)
+ return;
+
+ new (&ProfileBufferArrayAllocatorStorage)
+ ProfileBufferArrayAllocator(profilingFlags()->global_allocator_max);
+ ProfileBuffersAllocator = reinterpret_cast<ProfileBufferArrayAllocator *>(
+ &ProfileBufferArrayAllocatorStorage);
+
+ new (&ProfileBuffersStorage) ProfileBufferArray(*ProfileBuffersAllocator);
+ ProfileBuffers =
+ reinterpret_cast<ProfileBufferArray *>(&ProfileBuffersStorage);
+
+ new (&ThreadDataAllocatorStorage)
+ ThreadDataAllocator(Buffer.Data, Buffer.Size);
+ TDAllocator =
+ reinterpret_cast<ThreadDataAllocator *>(&ThreadDataAllocatorStorage);
+ new (&ThreadDataArrayStorage) ThreadDataArray(*TDAllocator);
+ TDArray = reinterpret_cast<ThreadDataArray *>(&ThreadDataArrayStorage);
+
+ atomic_store(&CollectorInitialized, 1, memory_order_release);
+}
+
+XRayBuffer nextBuffer(XRayBuffer B) XRAY_NEVER_INSTRUMENT {
+ SpinMutexLock Lock(&GlobalMutex);
+
+ if (ProfileBuffers == nullptr || ProfileBuffers->size() == 0)
+ return {nullptr, 0};
+
+ static pthread_once_t Once = PTHREAD_ONCE_INIT;
+ static typename std::aligned_storage<sizeof(XRayProfilingFileHeader)>::type
+ FileHeaderStorage;
+ pthread_once(
+ &Once, +[]() XRAY_NEVER_INSTRUMENT {
+ new (&FileHeaderStorage) XRayProfilingFileHeader{};
+ });
+
+ if (UNLIKELY(B.Data == nullptr)) {
+ // The first buffer should always contain the file header information.
+ auto &FileHeader =
+ *reinterpret_cast<XRayProfilingFileHeader *>(&FileHeaderStorage);
+ FileHeader.Timestamp = NanoTime();
+ FileHeader.PID = internal_getpid();
+ return {&FileHeaderStorage, sizeof(XRayProfilingFileHeader)};
+ }
+
+ if (UNLIKELY(B.Data == &FileHeaderStorage))
+ return {(*ProfileBuffers)[0].Data, (*ProfileBuffers)[0].Size};
+
+ BlockHeader Header;
+ internal_memcpy(&Header, B.Data, sizeof(BlockHeader));
+ auto NextBlock = Header.BlockNum + 1;
+ if (NextBlock < ProfileBuffers->size())
+ return {(*ProfileBuffers)[NextBlock].Data,
+ (*ProfileBuffers)[NextBlock].Size};
+ return {nullptr, 0};
+}
+
+} // namespace profileCollectorService
+} // namespace __xray
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_profile_collector.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_profile_collector.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_profile_collector.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_profile_collector.h (revision 351984)
@@ -0,0 +1,73 @@
+//===-- xray_profile_collector.h -------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+// This file defines the interface for a data collection service, for XRay
+// profiling. What we implement here is an in-process service where
+// FunctionCallTrie instances can be handed off by threads, to be
+// consolidated/collected.
+//
+//===----------------------------------------------------------------------===//
+#ifndef XRAY_XRAY_PROFILE_COLLECTOR_H
+#define XRAY_XRAY_PROFILE_COLLECTOR_H
+
+#include "xray_function_call_trie.h"
+
+#include "xray/xray_log_interface.h"
+
+namespace __xray {
+
+/// The ProfileCollectorService implements a centralised mechanism for
+/// collecting FunctionCallTrie instances, indexed by thread ID. On demand, the
+/// ProfileCollectorService can be queried for the most recent state of the
+/// data, in a form that allows traversal.
+namespace profileCollectorService {
+
+/// Posts the FunctionCallTrie associated with a specific Thread ID.
+///
+/// This moves the collection of FunctionCallTrie, Allocators, and Buffers
+/// associated with a thread's data to the queue. It takes ownership of the
+/// memory associated with the thread, and manages it exclusively.
+///
+void post(BufferQueue *Q, FunctionCallTrie &&T,
+ FunctionCallTrie::Allocators &&A,
+ FunctionCallTrie::Allocators::Buffers &&B, tid_t TId);
+
+/// The serialize function processes all FunctionCallTrie instances in memory,
+/// turning them into specifically formatted blocks, each describing the
+/// function call trie's contents in a compact form. In memory, this looks
+/// like the following layout:
+///
+/// - block size (32 bits)
+/// - block number (32 bits)
+/// - thread id (64 bits)
+/// - list of records:
+///   - function ids in leaf to root order, terminated by a 32-bit
+///     0 sentinel (32 bits per function id)
+///   - call count (64 bit)
+///   - cumulative local time (64 bit)
+///
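+/// As a hypothetical sketch (not part of this interface), a reader handed
+/// one such block at `Data` could decode it as:
+///
+///   BlockHeader H;
+///   memcpy(&H, Data, sizeof(H));
+///   const char *P = static_cast<const char *>(Data) + sizeof(H);
+///   // Per record: read 32-bit function ids until the 0 terminator, then
+///   // the 64-bit call count and cumulative local time, repeating until
+///   // H.BlockSize bytes (header included) are consumed.
+///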
+void serialize();
+
+/// The reset function will clear out any internal memory held by the
+/// service. The intent is to have the resetting be done in calls to the
+/// initialization routine, or explicitly through the flush log API.
+void reset();
+
+/// The nextBuffer function implements the iterator functionality provided in
+/// the XRay API.
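+///
+/// A typical iteration, mirroring how profilingFlush() drives it: pass
+/// {nullptr, 0} to receive the file header first, then feed each returned
+/// buffer back in until an empty buffer comes back:
+///
+///   auto B = nextBuffer({nullptr, 0});
+///   while (B.Data != nullptr && B.Size != 0)
+///     B = nextBuffer(B);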
+XRayBuffer nextBuffer(XRayBuffer B);
+
+} // namespace profileCollectorService
+
+} // namespace __xray
+
+#endif // XRAY_XRAY_PROFILE_COLLECTOR_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_profile_collector.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_profiling.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_profiling.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_profiling.cc (revision 351984)
@@ -0,0 +1,519 @@
+//===-- xray_profiling.cc ---------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+// This is the implementation of a profiling handler.
+//
+//===----------------------------------------------------------------------===//
+#include <memory>
+#include <time.h>
+
+#include "sanitizer_common/sanitizer_atomic.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "xray/xray_interface.h"
+#include "xray/xray_log_interface.h"
+#include "xray_buffer_queue.h"
+#include "xray_flags.h"
+#include "xray_profile_collector.h"
+#include "xray_profiling_flags.h"
+#include "xray_recursion_guard.h"
+#include "xray_tsc.h"
+#include "xray_utils.h"
+#include <pthread.h>
+
+namespace __xray {
+
+namespace {
+
+static atomic_sint32_t ProfilerLogFlushStatus = {
+ XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING};
+
+static atomic_sint32_t ProfilerLogStatus = {
+ XRayLogInitStatus::XRAY_LOG_UNINITIALIZED};
+
+static SpinMutex ProfilerOptionsMutex;
+
+struct ProfilingData {
+ atomic_uintptr_t Allocators;
+ atomic_uintptr_t FCT;
+};
+
+static pthread_key_t ProfilingKey;
+
+// We use a global buffer queue, which gets initialized once at initialization
+// time, and gets reset when profiling is "done".
+static std::aligned_storage<sizeof(BufferQueue), alignof(BufferQueue)>::type
+ BufferQueueStorage;
+static BufferQueue *BQ = nullptr;
+
+thread_local FunctionCallTrie::Allocators::Buffers ThreadBuffers;
+thread_local std::aligned_storage<sizeof(FunctionCallTrie::Allocators),
+ alignof(FunctionCallTrie::Allocators)>::type
+ AllocatorsStorage;
+thread_local std::aligned_storage<sizeof(FunctionCallTrie),
+ alignof(FunctionCallTrie)>::type
+ FunctionCallTrieStorage;
+thread_local ProfilingData TLD{{0}, {0}};
+thread_local atomic_uint8_t ReentranceGuard{0};
+
+// We use a separate guard to ensure that, if this thread is already cleaning
+// up, any signal handlers don't attempt to clean up or initialize the
+// thread-local state.
+thread_local atomic_uint8_t TLDInitGuard{0};
+
+// We also use a separate latch to signal that the thread is exiting, and
+// non-essential work should be ignored (things like recording events, etc.).
+thread_local atomic_uint8_t ThreadExitingLatch{0};
+
+static ProfilingData *getThreadLocalData() XRAY_NEVER_INSTRUMENT {
+ thread_local auto ThreadOnce = []() XRAY_NEVER_INSTRUMENT {
+ pthread_setspecific(ProfilingKey, &TLD);
+ return false;
+ }();
+ (void)ThreadOnce;
+
+ RecursionGuard TLDInit(TLDInitGuard);
+ if (!TLDInit)
+ return nullptr;
+
+ if (atomic_load_relaxed(&ThreadExitingLatch))
+ return nullptr;
+
+ uptr Allocators = 0;
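+  // A value of 1 in TLD.Allocators acts as a busy marker: the frame that wins
+  // the compare-exchange below performs the initialization, while a reentrant
+  // frame (e.g. from a signal handler) that observes 1 bails out through the
+  // `Allocators == 1` check further down.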
+ if (atomic_compare_exchange_strong(&TLD.Allocators, &Allocators, 1,
+ memory_order_acq_rel)) {
+ bool Success = false;
+ auto AllocatorsUndo = at_scope_exit([&]() XRAY_NEVER_INSTRUMENT {
+ if (!Success)
+ atomic_store(&TLD.Allocators, 0, memory_order_release);
+ });
+
+ // Acquire a set of buffers for this thread.
+ if (BQ == nullptr)
+ return nullptr;
+
+ if (BQ->getBuffer(ThreadBuffers.NodeBuffer) != BufferQueue::ErrorCode::Ok)
+ return nullptr;
+ auto NodeBufferUndo = at_scope_exit([&]() XRAY_NEVER_INSTRUMENT {
+ if (!Success)
+ BQ->releaseBuffer(ThreadBuffers.NodeBuffer);
+ });
+
+ if (BQ->getBuffer(ThreadBuffers.RootsBuffer) != BufferQueue::ErrorCode::Ok)
+ return nullptr;
+ auto RootsBufferUndo = at_scope_exit([&]() XRAY_NEVER_INSTRUMENT {
+ if (!Success)
+ BQ->releaseBuffer(ThreadBuffers.RootsBuffer);
+ });
+
+ if (BQ->getBuffer(ThreadBuffers.ShadowStackBuffer) !=
+ BufferQueue::ErrorCode::Ok)
+ return nullptr;
+ auto ShadowStackBufferUndo = at_scope_exit([&]() XRAY_NEVER_INSTRUMENT {
+ if (!Success)
+ BQ->releaseBuffer(ThreadBuffers.ShadowStackBuffer);
+ });
+
+ if (BQ->getBuffer(ThreadBuffers.NodeIdPairBuffer) !=
+ BufferQueue::ErrorCode::Ok)
+ return nullptr;
+
+ Success = true;
+ new (&AllocatorsStorage) FunctionCallTrie::Allocators(
+ FunctionCallTrie::InitAllocatorsFromBuffers(ThreadBuffers));
+ Allocators = reinterpret_cast<uptr>(
+ reinterpret_cast<FunctionCallTrie::Allocators *>(&AllocatorsStorage));
+ atomic_store(&TLD.Allocators, Allocators, memory_order_release);
+ }
+
+ if (Allocators == 1)
+ return nullptr;
+
+ uptr FCT = 0;
+ if (atomic_compare_exchange_strong(&TLD.FCT, &FCT, 1, memory_order_acq_rel)) {
+ new (&FunctionCallTrieStorage)
+ FunctionCallTrie(*reinterpret_cast<FunctionCallTrie::Allocators *>(
+ atomic_load_relaxed(&TLD.Allocators)));
+ FCT = reinterpret_cast<uptr>(
+ reinterpret_cast<FunctionCallTrie *>(&FunctionCallTrieStorage));
+ atomic_store(&TLD.FCT, FCT, memory_order_release);
+ }
+
+ if (FCT == 1)
+ return nullptr;
+
+ return &TLD;
+}
+
+static void cleanupTLD() XRAY_NEVER_INSTRUMENT {
+ auto FCT = atomic_exchange(&TLD.FCT, 0, memory_order_acq_rel);
+ if (FCT == reinterpret_cast<uptr>(reinterpret_cast<FunctionCallTrie *>(
+ &FunctionCallTrieStorage)))
+ reinterpret_cast<FunctionCallTrie *>(FCT)->~FunctionCallTrie();
+
+ auto Allocators = atomic_exchange(&TLD.Allocators, 0, memory_order_acq_rel);
+ if (Allocators ==
+ reinterpret_cast<uptr>(
+ reinterpret_cast<FunctionCallTrie::Allocators *>(&AllocatorsStorage)))
+ reinterpret_cast<FunctionCallTrie::Allocators *>(Allocators)->~Allocators();
+}
+
+static void postCurrentThreadFCT(ProfilingData &T) XRAY_NEVER_INSTRUMENT {
+ RecursionGuard TLDInit(TLDInitGuard);
+ if (!TLDInit)
+ return;
+
+ uptr P = atomic_exchange(&T.FCT, 0, memory_order_acq_rel);
+ if (P != reinterpret_cast<uptr>(
+ reinterpret_cast<FunctionCallTrie *>(&FunctionCallTrieStorage)))
+ return;
+
+ auto FCT = reinterpret_cast<FunctionCallTrie *>(P);
+ DCHECK_NE(FCT, nullptr);
+
+ uptr A = atomic_exchange(&T.Allocators, 0, memory_order_acq_rel);
+ if (A !=
+ reinterpret_cast<uptr>(
+ reinterpret_cast<FunctionCallTrie::Allocators *>(&AllocatorsStorage)))
+ return;
+
+ auto Allocators = reinterpret_cast<FunctionCallTrie::Allocators *>(A);
+ DCHECK_NE(Allocators, nullptr);
+
+ // Always move the data into the profile collector.
+ profileCollectorService::post(BQ, std::move(*FCT), std::move(*Allocators),
+ std::move(ThreadBuffers), GetTid());
+
+ // Re-initialize the ThreadBuffers object to a known "default" state.
+ ThreadBuffers = FunctionCallTrie::Allocators::Buffers{};
+}
+
+} // namespace
+
+const char *profilingCompilerDefinedFlags() XRAY_NEVER_INSTRUMENT {
+#ifdef XRAY_PROFILER_DEFAULT_OPTIONS
+ return SANITIZER_STRINGIFY(XRAY_PROFILER_DEFAULT_OPTIONS);
+#else
+ return "";
+#endif
+}
+
+XRayLogFlushStatus profilingFlush() XRAY_NEVER_INSTRUMENT {
+ if (atomic_load(&ProfilerLogStatus, memory_order_acquire) !=
+ XRayLogInitStatus::XRAY_LOG_FINALIZED) {
+ if (Verbosity())
+ Report("Not flushing profiles, profiling not been finalized.\n");
+ return XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING;
+ }
+
+ RecursionGuard SignalGuard(ReentranceGuard);
+ if (!SignalGuard) {
+ if (Verbosity())
+ Report("Cannot finalize properly inside a signal handler!\n");
+ atomic_store(&ProfilerLogFlushStatus,
+ XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING,
+ memory_order_release);
+ return XRayLogFlushStatus::XRAY_LOG_NOT_FLUSHING;
+ }
+
+ s32 Previous = atomic_exchange(&ProfilerLogFlushStatus,
+ XRayLogFlushStatus::XRAY_LOG_FLUSHING,
+ memory_order_acq_rel);
+ if (Previous == XRayLogFlushStatus::XRAY_LOG_FLUSHING) {
+ if (Verbosity())
+ Report("Not flushing profiles, implementation still flushing.\n");
+ return XRayLogFlushStatus::XRAY_LOG_FLUSHING;
+ }
+
+ // At this point, we'll create the file that will contain the profile, but
+ // only if the options say so.
+ if (!profilingFlags()->no_flush) {
+ // First check whether we have data in the profile collector service
+ // before we try and write anything down.
+ XRayBuffer B = profileCollectorService::nextBuffer({nullptr, 0});
+ if (B.Data == nullptr) {
+ if (Verbosity())
+ Report("profiling: No data to flush.\n");
+ } else {
+ LogWriter *LW = LogWriter::Open();
+ if (LW == nullptr) {
+ if (Verbosity())
+ Report("profiling: Failed to flush to file, dropping data.\n");
+ } else {
+ // Now for each of the buffers, write out the profile data as we would
+ // see it in memory, verbatim.
+ while (B.Data != nullptr && B.Size != 0) {
+ LW->WriteAll(reinterpret_cast<const char *>(B.Data),
+ reinterpret_cast<const char *>(B.Data) + B.Size);
+ B = profileCollectorService::nextBuffer(B);
+ }
+ }
+ LogWriter::Close(LW);
+ }
+ }
+
+ profileCollectorService::reset();
+
+ atomic_store(&ProfilerLogFlushStatus, XRayLogFlushStatus::XRAY_LOG_FLUSHED,
+ memory_order_release);
+ atomic_store(&ProfilerLogStatus, XRayLogInitStatus::XRAY_LOG_UNINITIALIZED,
+ memory_order_release);
+
+ return XRayLogFlushStatus::XRAY_LOG_FLUSHED;
+}
+
+void profilingHandleArg0(int32_t FuncId,
+ XRayEntryType Entry) XRAY_NEVER_INSTRUMENT {
+ unsigned char CPU;
+ auto TSC = readTSC(CPU);
+ RecursionGuard G(ReentranceGuard);
+ if (!G)
+ return;
+
+ auto Status = atomic_load(&ProfilerLogStatus, memory_order_acquire);
+ if (UNLIKELY(Status == XRayLogInitStatus::XRAY_LOG_UNINITIALIZED ||
+ Status == XRayLogInitStatus::XRAY_LOG_INITIALIZING))
+ return;
+
+ if (UNLIKELY(Status == XRayLogInitStatus::XRAY_LOG_FINALIZED ||
+ Status == XRayLogInitStatus::XRAY_LOG_FINALIZING)) {
+ postCurrentThreadFCT(TLD);
+ return;
+ }
+
+ auto T = getThreadLocalData();
+ if (T == nullptr)
+ return;
+
+ auto FCT = reinterpret_cast<FunctionCallTrie *>(atomic_load_relaxed(&T->FCT));
+ switch (Entry) {
+ case XRayEntryType::ENTRY:
+ case XRayEntryType::LOG_ARGS_ENTRY:
+ FCT->enterFunction(FuncId, TSC, CPU);
+ break;
+ case XRayEntryType::EXIT:
+ case XRayEntryType::TAIL:
+ FCT->exitFunction(FuncId, TSC, CPU);
+ break;
+ default:
+ // FIXME: Handle bugs.
+ break;
+ }
+}
+
+void profilingHandleArg1(int32_t FuncId, XRayEntryType Entry,
+ uint64_t) XRAY_NEVER_INSTRUMENT {
+ return profilingHandleArg0(FuncId, Entry);
+}
+
+XRayLogInitStatus profilingFinalize() XRAY_NEVER_INSTRUMENT {
+ s32 CurrentStatus = XRayLogInitStatus::XRAY_LOG_INITIALIZED;
+ if (!atomic_compare_exchange_strong(&ProfilerLogStatus, &CurrentStatus,
+ XRayLogInitStatus::XRAY_LOG_FINALIZING,
+ memory_order_release)) {
+ if (Verbosity())
+ Report("Cannot finalize profile, the profiling is not initialized.\n");
+ return static_cast<XRayLogInitStatus>(CurrentStatus);
+ }
+
+ // Mark then finalize the current generation of buffers. This allows us to let
+ // the threads currently holding onto new buffers still use them, but let the
+ // last reference do the memory cleanup.
+ DCHECK_NE(BQ, nullptr);
+ BQ->finalize();
+
+ // Wait a grace period to allow threads to see that we're finalizing.
+ SleepForMillis(profilingFlags()->grace_period_ms);
+
+ // If we for some reason are entering this function from an instrumented
+ // handler, we bail out.
+ RecursionGuard G(ReentranceGuard);
+ if (!G)
+ return static_cast<XRayLogInitStatus>(CurrentStatus);
+
+ // Post the current thread's data if we have any.
+ postCurrentThreadFCT(TLD);
+
+ // Then we force serialize the log data.
+ profileCollectorService::serialize();
+
+ atomic_store(&ProfilerLogStatus, XRayLogInitStatus::XRAY_LOG_FINALIZED,
+ memory_order_release);
+ return XRayLogInitStatus::XRAY_LOG_FINALIZED;
+}
+
+XRayLogInitStatus
+profilingLoggingInit(size_t, size_t, void *Options,
+ size_t OptionsSize) XRAY_NEVER_INSTRUMENT {
+ RecursionGuard G(ReentranceGuard);
+ if (!G)
+ return XRayLogInitStatus::XRAY_LOG_UNINITIALIZED;
+
+ s32 CurrentStatus = XRayLogInitStatus::XRAY_LOG_UNINITIALIZED;
+ if (!atomic_compare_exchange_strong(&ProfilerLogStatus, &CurrentStatus,
+ XRayLogInitStatus::XRAY_LOG_INITIALIZING,
+ memory_order_acq_rel)) {
+ if (Verbosity())
+ Report("Cannot initialize already initialised profiling "
+ "implementation.\n");
+ return static_cast<XRayLogInitStatus>(CurrentStatus);
+ }
+
+ {
+ SpinMutexLock Lock(&ProfilerOptionsMutex);
+ FlagParser ConfigParser;
+ ProfilerFlags Flags;
+ Flags.setDefaults();
+ registerProfilerFlags(&ConfigParser, &Flags);
+ ConfigParser.ParseString(profilingCompilerDefinedFlags());
+ const char *Env = GetEnv("XRAY_PROFILING_OPTIONS");
+ if (Env == nullptr)
+ Env = "";
+ ConfigParser.ParseString(Env);
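+    // Precedence so far: static defaults, then compiler-defined options,
+    // then XRAY_PROFILING_OPTIONS from the environment; the caller-provided
+    // string parsed next overrides any earlier value for the same flag.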
+
+ // Then parse the configuration string provided.
+ ConfigParser.ParseString(static_cast<const char *>(Options));
+ if (Verbosity())
+ ReportUnrecognizedFlags();
+ *profilingFlags() = Flags;
+ }
+
+ // We need to reset the profile data collection implementation now.
+ profileCollectorService::reset();
+
+ // Then also reset the buffer queue implementation.
+ if (BQ == nullptr) {
+ bool Success = false;
+ new (&BufferQueueStorage)
+ BufferQueue(profilingFlags()->per_thread_allocator_max,
+ profilingFlags()->buffers_max, Success);
+ if (!Success) {
+ if (Verbosity())
+ Report("Failed to initialize preallocated memory buffers!");
+ atomic_store(&ProfilerLogStatus,
+ XRayLogInitStatus::XRAY_LOG_UNINITIALIZED,
+ memory_order_release);
+ return XRayLogInitStatus::XRAY_LOG_UNINITIALIZED;
+ }
+
+    // If we've succeeded, set the global pointer to the initialized storage.
+ BQ = reinterpret_cast<BufferQueue *>(&BufferQueueStorage);
+ } else {
+ BQ->finalize();
+ auto InitStatus = BQ->init(profilingFlags()->per_thread_allocator_max,
+ profilingFlags()->buffers_max);
+
+ if (InitStatus != BufferQueue::ErrorCode::Ok) {
+ if (Verbosity())
+ Report("Failed to initialize preallocated memory buffers; error: %s",
+ BufferQueue::getErrorString(InitStatus));
+ atomic_store(&ProfilerLogStatus,
+ XRayLogInitStatus::XRAY_LOG_UNINITIALIZED,
+ memory_order_release);
+ return XRayLogInitStatus::XRAY_LOG_UNINITIALIZED;
+ }
+
+ DCHECK(!BQ->finalizing());
+ }
+
+ // We need to set up the exit handlers.
+ static pthread_once_t Once = PTHREAD_ONCE_INIT;
+ pthread_once(
+ &Once, +[] {
+ pthread_key_create(
+ &ProfilingKey, +[](void *P) XRAY_NEVER_INSTRUMENT {
+ if (atomic_exchange(&ThreadExitingLatch, 1, memory_order_acq_rel))
+ return;
+
+ if (P == nullptr)
+ return;
+
+ auto T = reinterpret_cast<ProfilingData *>(P);
+ if (atomic_load_relaxed(&T->Allocators) == 0)
+ return;
+
+ {
+ // If we're somehow executing this while inside a
+ // non-reentrant-friendly context, we skip attempting to post
+ // the current thread's data.
+ RecursionGuard G(ReentranceGuard);
+ if (!G)
+ return;
+
+ postCurrentThreadFCT(*T);
+ }
+ });
+
+ // We also need to set up an exit handler, so that we can get the
+ // profile information at exit time. We use the C API to do this, to not
+ // rely on C++ ABI functions for registering exit handlers.
+ Atexit(+[]() XRAY_NEVER_INSTRUMENT {
+ if (atomic_exchange(&ThreadExitingLatch, 1, memory_order_acq_rel))
+ return;
+
+ auto Cleanup =
+ at_scope_exit([]() XRAY_NEVER_INSTRUMENT { cleanupTLD(); });
+
+ // Finalize and flush.
+ if (profilingFinalize() != XRAY_LOG_FINALIZED ||
+ profilingFlush() != XRAY_LOG_FLUSHED)
+ return;
+
+ if (Verbosity())
+ Report("XRay Profile flushed at exit.");
+ });
+ });
+
+ __xray_log_set_buffer_iterator(profileCollectorService::nextBuffer);
+ __xray_set_handler(profilingHandleArg0);
+ __xray_set_handler_arg1(profilingHandleArg1);
+
+ atomic_store(&ProfilerLogStatus, XRayLogInitStatus::XRAY_LOG_INITIALIZED,
+ memory_order_release);
+ if (Verbosity())
+ Report("XRay Profiling init successful.\n");
+
+ return XRayLogInitStatus::XRAY_LOG_INITIALIZED;
+}
+
+bool profilingDynamicInitializer() XRAY_NEVER_INSTRUMENT {
+ // Set up the flag defaults from the static defaults and the
+ // compiler-provided defaults.
+ {
+ SpinMutexLock Lock(&ProfilerOptionsMutex);
+ auto *F = profilingFlags();
+ F->setDefaults();
+ FlagParser ProfilingParser;
+ registerProfilerFlags(&ProfilingParser, F);
+ ProfilingParser.ParseString(profilingCompilerDefinedFlags());
+ }
+
+ XRayLogImpl Impl{
+ profilingLoggingInit,
+ profilingFinalize,
+ profilingHandleArg0,
+ profilingFlush,
+ };
+ auto RegistrationResult = __xray_log_register_mode("xray-profiling", Impl);
+ if (RegistrationResult != XRayLogRegisterStatus::XRAY_REGISTRATION_OK) {
+ if (Verbosity())
+ Report("Cannot register XRay Profiling mode to 'xray-profiling'; error = "
+ "%d\n",
+ RegistrationResult);
+ return false;
+ }
+
+ if (!internal_strcmp(flags()->xray_mode, "xray-profiling"))
+    __xray_log_select_mode("xray-profiling");
+ return true;
+}
+
+} // namespace __xray
+
+static auto UNUSED Unused = __xray::profilingDynamicInitializer();
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_profiling.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_profiling_flags.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_profiling_flags.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_profiling_flags.cc (revision 351984)
@@ -0,0 +1,39 @@
+//===-- xray_profiling_flags.cc ---------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+// XRay runtime flags.
+//===----------------------------------------------------------------------===//
+
+#include "xray_profiling_flags.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_flag_parser.h"
+#include "sanitizer_common/sanitizer_libc.h"
+#include "xray_defs.h"
+
+namespace __xray {
+
+// Storage for the profiling flags.
+ProfilerFlags xray_profiling_flags_dont_use_directly;
+
+void ProfilerFlags::setDefaults() XRAY_NEVER_INSTRUMENT {
+#define XRAY_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
+#include "xray_profiling_flags.inc"
+#undef XRAY_FLAG
+}
+
+void registerProfilerFlags(FlagParser *P,
+ ProfilerFlags *F) XRAY_NEVER_INSTRUMENT {
+#define XRAY_FLAG(Type, Name, DefaultValue, Description) \
+ RegisterFlag(P, #Name, Description, &F->Name);
+#include "xray_profiling_flags.inc"
+#undef XRAY_FLAG
+}
+
+} // namespace __xray
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_profiling_flags.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_profiling_flags.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_profiling_flags.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_profiling_flags.h (revision 351984)
@@ -0,0 +1,38 @@
+//===-- xray_profiling_flags.h ----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+// XRay profiling runtime flags.
+//===----------------------------------------------------------------------===//
+
+#ifndef XRAY_PROFILER_FLAGS_H
+#define XRAY_PROFILER_FLAGS_H
+
+#include "sanitizer_common/sanitizer_flag_parser.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+
+namespace __xray {
+
+struct ProfilerFlags {
+#define XRAY_FLAG(Type, Name, DefaultValue, Description) Type Name;
+#include "xray_profiling_flags.inc"
+#undef XRAY_FLAG
+
+ void setDefaults();
+};
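+// For example, the XRAY_FLAG(bool, no_flush, false, ...) entry in
+// xray_profiling_flags.inc expands to a `bool no_flush;` member above, to
+// `no_flush = false;` in ProfilerFlags::setDefaults(), and to a
+// RegisterFlag() call in registerProfilerFlags().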
+
+extern ProfilerFlags xray_profiling_flags_dont_use_directly;
+inline ProfilerFlags *profilingFlags() {
+ return &xray_profiling_flags_dont_use_directly;
+}
+void registerProfilerFlags(FlagParser *P, ProfilerFlags *F);
+
+} // namespace __xray
+
+#endif // XRAY_PROFILER_FLAGS_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_profiling_flags.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_profiling_flags.inc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_profiling_flags.inc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_profiling_flags.inc (revision 351984)
@@ -0,0 +1,31 @@
+//===-- xray_profiling_flags.inc --------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// XRay profiling runtime flags.
+//
+//===----------------------------------------------------------------------===//
+#ifndef XRAY_FLAG
+#error "Define XRAY_FLAG prior to including this file!"
+#endif
+
+XRAY_FLAG(uptr, per_thread_allocator_max, 16384,
+ "Maximum size of any single per-thread allocator.")
+XRAY_FLAG(uptr, global_allocator_max, 2 << 24,
+ "Maximum size of the global allocator for profile storage.")
+XRAY_FLAG(uptr, stack_allocator_max, 2 << 20,
+ "Maximum size of the traversal stack allocator.")
+XRAY_FLAG(int, grace_period_ms, 1,
+ "Profile collection will wait this much time in milliseconds before "
+ "resetting the global state. This gives a chance to threads to "
+ "notice that the profiler has been finalized and clean up.")
+XRAY_FLAG(bool, no_flush, false,
+ "Set to true if we want the profiling implementation to not write "
+ "out files.")
+XRAY_FLAG(int, buffers_max, 128,
+ "The number of buffers to pre-allocate used by the profiling "
+ "implementation.")
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_profiling_flags.inc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_recursion_guard.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_recursion_guard.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_recursion_guard.h (revision 351984)
@@ -0,0 +1,56 @@
+//===-- xray_recursion_guard.h ---------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+//===----------------------------------------------------------------------===//
+#ifndef XRAY_XRAY_RECURSION_GUARD_H
+#define XRAY_XRAY_RECURSION_GUARD_H
+
+#include "sanitizer_common/sanitizer_atomic.h"
+
+namespace __xray {
+
+/// The RecursionGuard is useful for guarding against signal handlers which are
+/// also potentially calling XRay-instrumented functions. To use the
+/// RecursionGuard, you'll typically need a thread_local atomic_uint8_t:
+///
+/// thread_local atomic_uint8_t Guard{0};
+///
+/// // In a handler function:
+/// void handleArg0(int32_t F, XRayEntryType T) {
+/// RecursionGuard G(Guard);
+/// if (!G)
+/// return; // Failed to acquire the guard.
+/// ...
+/// }
+///
+class RecursionGuard {
+ atomic_uint8_t &Running;
+ const bool Valid;
+
+public:
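+  // atomic_exchange returns the previous value: 0 means this frame acquired
+  // the guard (Valid becomes true); 1 means an enclosing frame on this
+  // thread already holds it.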
+ explicit inline RecursionGuard(atomic_uint8_t &R)
+ : Running(R), Valid(!atomic_exchange(&R, 1, memory_order_acq_rel)) {}
+
+ inline RecursionGuard(const RecursionGuard &) = delete;
+ inline RecursionGuard(RecursionGuard &&) = delete;
+ inline RecursionGuard &operator=(const RecursionGuard &) = delete;
+ inline RecursionGuard &operator=(RecursionGuard &&) = delete;
+
+ explicit inline operator bool() const { return Valid; }
+
+ inline ~RecursionGuard() noexcept {
+ if (Valid)
+ atomic_store(&Running, 0, memory_order_release);
+ }
+};
+
+} // namespace __xray
+
+#endif // XRAY_XRAY_RECURSION_GUARD_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_recursion_guard.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_segmented_array.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_segmented_array.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_segmented_array.h (revision 351984)
@@ -0,0 +1,650 @@
+//===-- xray_segmented_array.h ---------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+// Defines the implementation of a segmented array, with fixed-size segments
+// backing the segments.
+//
+//===----------------------------------------------------------------------===//
+#ifndef XRAY_SEGMENTED_ARRAY_H
+#define XRAY_SEGMENTED_ARRAY_H
+
+#include "sanitizer_common/sanitizer_allocator.h"
+#include "xray_allocator.h"
+#include "xray_utils.h"
+#include <cassert>
+#include <type_traits>
+#include <utility>
+
+namespace __xray {
+
+/// The Array type provides an interface similar to std::vector<...> but does
+/// not shrink in size. Once constructed, elements can be appended but cannot be
+/// removed. The implementation is heavily dependent on the contract provided by
+/// the Allocator type, in that all memory will be released when the Allocator
+/// is destroyed. When an Array is destroyed, it will destroy elements in the
+/// backing store but will not free the memory.
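+///
+/// A typical pairing with its AllocatorType, mirroring the usage in
+/// populateRecords() in xray_profile_collector.cc (a sketch; the byte budget
+/// is arbitrary):
+///
+///   Array<int>::AllocatorType Alloc(1 << 20);
+///   Array<int> Xs(Alloc);
+///   Xs.Append(42);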
+template <class T> class Array {
+ struct Segment {
+ Segment *Prev;
+ Segment *Next;
+ char Data[1];
+ };
+
+public:
+ // Each segment of the array will be laid out with the following assumptions:
+ //
+ // - Each segment will be on a cache-line address boundary (kCacheLineSize
+ // aligned).
+ //
+ // - The elements will be accessed through an aligned pointer, dependent on
+ // the alignment of T.
+ //
+ // - Each element is at least two-pointers worth from the beginning of the
+ // Segment, aligned properly, and the rest of the elements are accessed
+ // through appropriate alignment.
+ //
+ // We then compute the size of the segment to follow this logic:
+ //
+ // - Compute the number of elements that can fit within
+ // kCacheLineSize-multiple segments, minus the size of two pointers.
+ //
+  //   - Request cacheline-multiple sized segments from the allocator.
+ static constexpr uint64_t AlignedElementStorageSize =
+ sizeof(typename std::aligned_storage<sizeof(T), alignof(T)>::type);
+
+ static constexpr uint64_t SegmentControlBlockSize = sizeof(Segment *) * 2;
+
+ static constexpr uint64_t SegmentSize = nearest_boundary(
+ SegmentControlBlockSize + next_pow2(sizeof(T)), kCacheLineSize);
+
+ using AllocatorType = Allocator<SegmentSize>;
+
+ static constexpr uint64_t ElementsPerSegment =
+ (SegmentSize - SegmentControlBlockSize) / next_pow2(sizeof(T));
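+  // Worked example with hypothetical sizes: for sizeof(T) == 24 on a 64-bit
+  // target (so SegmentControlBlockSize == 16), next_pow2(24) == 32 and, with
+  // a 64-byte kCacheLineSize, SegmentSize == nearest_boundary(48, 64) == 64,
+  // giving ElementsPerSegment == (64 - 16) / 32 == 1.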
+
+ static_assert(ElementsPerSegment > 0,
+ "Must have at least 1 element per segment.");
+
+ static Segment SentinelSegment;
+
+ using size_type = uint64_t;
+
+private:
+ // This Iterator models a BidirectionalIterator.
+ template <class U> class Iterator {
+ Segment *S = &SentinelSegment;
+ uint64_t Offset = 0;
+ uint64_t Size = 0;
+
+ public:
+ Iterator(Segment *IS, uint64_t Off, uint64_t S) XRAY_NEVER_INSTRUMENT
+ : S(IS),
+ Offset(Off),
+ Size(S) {}
+ Iterator(const Iterator &) NOEXCEPT XRAY_NEVER_INSTRUMENT = default;
+ Iterator() NOEXCEPT XRAY_NEVER_INSTRUMENT = default;
+ Iterator(Iterator &&) NOEXCEPT XRAY_NEVER_INSTRUMENT = default;
+ Iterator &operator=(const Iterator &) XRAY_NEVER_INSTRUMENT = default;
+ Iterator &operator=(Iterator &&) XRAY_NEVER_INSTRUMENT = default;
+ ~Iterator() XRAY_NEVER_INSTRUMENT = default;
+
+ Iterator &operator++() XRAY_NEVER_INSTRUMENT {
+ if (++Offset % ElementsPerSegment || Offset == Size)
+ return *this;
+
+ // At this point, we know that Offset % N == 0, so we must advance the
+ // segment pointer.
+ DCHECK_EQ(Offset % ElementsPerSegment, 0);
+ DCHECK_NE(Offset, Size);
+ DCHECK_NE(S, &SentinelSegment);
+ DCHECK_NE(S->Next, &SentinelSegment);
+ S = S->Next;
+ DCHECK_NE(S, &SentinelSegment);
+ return *this;
+ }
+
+ Iterator &operator--() XRAY_NEVER_INSTRUMENT {
+ DCHECK_NE(S, &SentinelSegment);
+ DCHECK_GT(Offset, 0);
+
+ auto PreviousOffset = Offset--;
+ if (PreviousOffset != Size && PreviousOffset % ElementsPerSegment == 0) {
+ DCHECK_NE(S->Prev, &SentinelSegment);
+ S = S->Prev;
+ }
+
+ return *this;
+ }
+
+ Iterator operator++(int) XRAY_NEVER_INSTRUMENT {
+ Iterator Copy(*this);
+ ++(*this);
+ return Copy;
+ }
+
+ Iterator operator--(int) XRAY_NEVER_INSTRUMENT {
+ Iterator Copy(*this);
+ --(*this);
+ return Copy;
+ }
+
+ template <class V, class W>
+ friend bool operator==(const Iterator<V> &L,
+ const Iterator<W> &R) XRAY_NEVER_INSTRUMENT {
+ return L.S == R.S && L.Offset == R.Offset;
+ }
+
+ template <class V, class W>
+ friend bool operator!=(const Iterator<V> &L,
+ const Iterator<W> &R) XRAY_NEVER_INSTRUMENT {
+ return !(L == R);
+ }
+
+ U &operator*() const XRAY_NEVER_INSTRUMENT {
+ DCHECK_NE(S, &SentinelSegment);
+ auto RelOff = Offset % ElementsPerSegment;
+
+ // We need to compute the character-aligned pointer, offset from the
+ // segment's Data location to get the element in the position of Offset.
+ auto Base = &S->Data;
+ auto AlignedOffset = Base + (RelOff * AlignedElementStorageSize);
+ return *reinterpret_cast<U *>(AlignedOffset);
+ }
+
+ U *operator->() const XRAY_NEVER_INSTRUMENT { return &(**this); }
+ };
+
+ AllocatorType *Alloc;
+ Segment *Head;
+ Segment *Tail;
+
+ // Here we keep track of segments in the freelist, to allow us to re-use
+ // segments when elements are trimmed off the end.
+ Segment *Freelist;
+ uint64_t Size;
+
+ // ===============================
+ // In the following implementation, we work through the algorithms and the
+ // list operations using the following notation:
+ //
+ // - pred(s) is the predecessor (previous node accessor) and succ(s) is
+ // the successor (next node accessor).
+ //
+ // - S is a sentinel segment, which has the following property:
+ //
+ // pred(S) == succ(S) == S
+ //
+  //   - @ is a loop operator, which can imply pred(s) == s if it appears on
+  //     the left of s, or succ(s) == s if it appears on the right of s.
+  //
+  //   - sL <-> sR : means a bidirectional relation between sL and sR, which
+  //     means:
+  //
+  //       succ(sL) == sR && pred(sR) == sL
+  //
+  //   - sL -> sR : implies a unidirectional relation between sL and sR,
+  //     with the following property:
+  //
+  //       succ(sL) == sR
+  //
+  //   - sL <- sR : implies a unidirectional relation between sR and sL,
+  //     with the following property:
+  //
+  //       pred(sR) == sL
+ //
+ // ===============================
+
+ Segment *NewSegment() XRAY_NEVER_INSTRUMENT {
+ // We need to handle the case in which enough elements have been trimmed to
+ // allow us to re-use segments we've allocated before. For this we look into
+ // the Freelist, to see whether we need to actually allocate new blocks or
+ // just re-use blocks we've already seen before.
+ if (Freelist != &SentinelSegment) {
+      // The current state of the lists resembles something like this:
+ //
+ // Freelist: @S@<-f0->...<->fN->@S@
+ // ^ Freelist
+ //
+ // We want to perform a splice of `f0` from Freelist to a temporary list,
+ // which looks like:
+ //
+ // Templist: @S@<-f0->@S@
+ // ^ FreeSegment
+ //
+ // Our algorithm preconditions are:
+ DCHECK_EQ(Freelist->Prev, &SentinelSegment);
+
+ // Then the algorithm we implement is:
+ //
+ // SFS = Freelist
+ // Freelist = succ(Freelist)
+ // if (Freelist != S)
+ // pred(Freelist) = S
+ // succ(SFS) = S
+ // pred(SFS) = S
+ //
+ auto *FreeSegment = Freelist;
+ Freelist = Freelist->Next;
+
+ // Note that we need to handle the case where Freelist is now pointing to
+ // S, which we don't want to be overwriting.
+ // TODO: Determine whether the cost of the branch is higher than the cost
+ // of the blind assignment.
+ if (Freelist != &SentinelSegment)
+ Freelist->Prev = &SentinelSegment;
+
+ FreeSegment->Next = &SentinelSegment;
+ FreeSegment->Prev = &SentinelSegment;
+
+ // Our postconditions are:
+ DCHECK_EQ(Freelist->Prev, &SentinelSegment);
+ DCHECK_NE(FreeSegment, &SentinelSegment);
+ return FreeSegment;
+ }
+
+ auto SegmentBlock = Alloc->Allocate();
+ if (SegmentBlock.Data == nullptr)
+ return nullptr;
+
+ // Placement-new the Segment element at the beginning of the SegmentBlock.
+ new (SegmentBlock.Data) Segment{&SentinelSegment, &SentinelSegment, {0}};
+ auto SB = reinterpret_cast<Segment *>(SegmentBlock.Data);
+ return SB;
+ }
+
+ Segment *InitHeadAndTail() XRAY_NEVER_INSTRUMENT {
+ DCHECK_EQ(Head, &SentinelSegment);
+ DCHECK_EQ(Tail, &SentinelSegment);
+ auto S = NewSegment();
+ if (S == nullptr)
+ return nullptr;
+ DCHECK_EQ(S->Next, &SentinelSegment);
+ DCHECK_EQ(S->Prev, &SentinelSegment);
+ DCHECK_NE(S, &SentinelSegment);
+ Head = S;
+ Tail = S;
+ DCHECK_EQ(Head, Tail);
+ DCHECK_EQ(Tail->Next, &SentinelSegment);
+ DCHECK_EQ(Tail->Prev, &SentinelSegment);
+ return S;
+ }
+
+ Segment *AppendNewSegment() XRAY_NEVER_INSTRUMENT {
+ auto S = NewSegment();
+ if (S == nullptr)
+ return nullptr;
+ DCHECK_NE(Tail, &SentinelSegment);
+ DCHECK_EQ(Tail->Next, &SentinelSegment);
+ DCHECK_EQ(S->Prev, &SentinelSegment);
+ DCHECK_EQ(S->Next, &SentinelSegment);
+ S->Prev = Tail;
+ Tail->Next = S;
+ Tail = S;
+ DCHECK_EQ(S, S->Prev->Next);
+ DCHECK_EQ(Tail->Next, &SentinelSegment);
+ return S;
+ }
+
+public:
+ explicit Array(AllocatorType &A) XRAY_NEVER_INSTRUMENT
+ : Alloc(&A),
+ Head(&SentinelSegment),
+ Tail(&SentinelSegment),
+ Freelist(&SentinelSegment),
+ Size(0) {}
+
+ Array() XRAY_NEVER_INSTRUMENT : Alloc(nullptr),
+ Head(&SentinelSegment),
+ Tail(&SentinelSegment),
+ Freelist(&SentinelSegment),
+ Size(0) {}
+
+ Array(const Array &) = delete;
+ Array &operator=(const Array &) = delete;
+
+ Array(Array &&O) XRAY_NEVER_INSTRUMENT : Alloc(O.Alloc),
+ Head(O.Head),
+ Tail(O.Tail),
+ Freelist(O.Freelist),
+ Size(O.Size) {
+ O.Alloc = nullptr;
+ O.Head = &SentinelSegment;
+ O.Tail = &SentinelSegment;
+ O.Size = 0;
+ O.Freelist = &SentinelSegment;
+ }
+
+ Array &operator=(Array &&O) XRAY_NEVER_INSTRUMENT {
+ Alloc = O.Alloc;
+ O.Alloc = nullptr;
+ Head = O.Head;
+ O.Head = &SentinelSegment;
+ Tail = O.Tail;
+ O.Tail = &SentinelSegment;
+ Freelist = O.Freelist;
+ O.Freelist = &SentinelSegment;
+ Size = O.Size;
+ O.Size = 0;
+ return *this;
+ }
+
+ ~Array() XRAY_NEVER_INSTRUMENT {
+ for (auto &E : *this)
+ (&E)->~T();
+ }
+
+ bool empty() const XRAY_NEVER_INSTRUMENT { return Size == 0; }
+
+ AllocatorType &allocator() const XRAY_NEVER_INSTRUMENT {
+ DCHECK_NE(Alloc, nullptr);
+ return *Alloc;
+ }
+
+ uint64_t size() const XRAY_NEVER_INSTRUMENT { return Size; }
+
+ template <class... Args>
+ T *AppendEmplace(Args &&... args) XRAY_NEVER_INSTRUMENT {
+ DCHECK((Size == 0 && Head == &SentinelSegment && Head == Tail) ||
+ (Size != 0 && Head != &SentinelSegment && Tail != &SentinelSegment));
+ if (UNLIKELY(Head == &SentinelSegment)) {
+ auto R = InitHeadAndTail();
+ if (R == nullptr)
+ return nullptr;
+ }
+
+ DCHECK_NE(Head, &SentinelSegment);
+ DCHECK_NE(Tail, &SentinelSegment);
+
+ auto Offset = Size % ElementsPerSegment;
+ if (UNLIKELY(Size != 0 && Offset == 0))
+ if (AppendNewSegment() == nullptr)
+ return nullptr;
+
+ DCHECK_NE(Tail, &SentinelSegment);
+ auto Base = &Tail->Data;
+ auto AlignedOffset = Base + (Offset * AlignedElementStorageSize);
+    DCHECK_LE(AlignedOffset + sizeof(T),
+              reinterpret_cast<unsigned char *>(Tail) + SegmentSize);
+
+    // In-place construct at the computed aligned offset.
+ new (AlignedOffset) T{std::forward<Args>(args)...};
+ ++Size;
+ return reinterpret_cast<T *>(AlignedOffset);
+ }
+
+ T *Append(const T &E) XRAY_NEVER_INSTRUMENT {
+    // FIXME: This is a duplication of AppendEmplace with the copy semantics
+ // explicitly used, as a work-around to GCC 4.8 not invoking the copy
+ // constructor with the placement new with braced-init syntax.
+ DCHECK((Size == 0 && Head == &SentinelSegment && Head == Tail) ||
+ (Size != 0 && Head != &SentinelSegment && Tail != &SentinelSegment));
+ if (UNLIKELY(Head == &SentinelSegment)) {
+ auto R = InitHeadAndTail();
+ if (R == nullptr)
+ return nullptr;
+ }
+
+ DCHECK_NE(Head, &SentinelSegment);
+ DCHECK_NE(Tail, &SentinelSegment);
+
+ auto Offset = Size % ElementsPerSegment;
+ if (UNLIKELY(Size != 0 && Offset == 0))
+ if (AppendNewSegment() == nullptr)
+ return nullptr;
+
+ DCHECK_NE(Tail, &SentinelSegment);
+ auto Base = &Tail->Data;
+ auto AlignedOffset = Base + (Offset * AlignedElementStorageSize);
+ DCHECK_LE(AlignedOffset + sizeof(T),
+ reinterpret_cast<unsigned char *>(Tail) + SegmentSize);
+
+    // In-place construct at the computed aligned offset.
+ new (AlignedOffset) T(E);
+ ++Size;
+ return reinterpret_cast<T *>(AlignedOffset);
+ }
+
+ T &operator[](uint64_t Offset) const XRAY_NEVER_INSTRUMENT {
+    DCHECK_LT(Offset, Size);
+    // We need to traverse the segment list far enough to find the element at
+    // Offset.
+ auto S = Head;
+ while (Offset >= ElementsPerSegment) {
+ S = S->Next;
+ Offset -= ElementsPerSegment;
+ DCHECK_NE(S, &SentinelSegment);
+ }
+ auto Base = &S->Data;
+ auto AlignedOffset = Base + (Offset * AlignedElementStorageSize);
+    return *reinterpret_cast<T *>(AlignedOffset);
+ }
+
+ T &front() const XRAY_NEVER_INSTRUMENT {
+ DCHECK_NE(Head, &SentinelSegment);
+ DCHECK_NE(Size, 0u);
+ return *begin();
+ }
+
+ T &back() const XRAY_NEVER_INSTRUMENT {
+ DCHECK_NE(Tail, &SentinelSegment);
+ DCHECK_NE(Size, 0u);
+ auto It = end();
+ --It;
+ return *It;
+ }
+
+ template <class Predicate>
+ T *find_element(Predicate P) const XRAY_NEVER_INSTRUMENT {
+ if (empty())
+ return nullptr;
+
+ auto E = end();
+ for (auto I = begin(); I != E; ++I)
+ if (P(*I))
+ return &(*I);
+
+ return nullptr;
+ }
+
+  /// Remove N elements from the end. This leaves the segments behind on the
+  /// freelist, so appending new elements after trimming does not require
+  /// allocating new segments.
+ void trim(uint64_t Elements) XRAY_NEVER_INSTRUMENT {
+ auto OldSize = Size;
+ Elements = Elements > Size ? Size : Elements;
+ Size -= Elements;
+
+ // We compute the number of segments we're going to return from the tail by
+ // counting how many elements have been trimmed. Given the following:
+ //
+ // - Each segment has N valid positions, where N > 0
+ // - The previous size > current size
+ //
+ // To compute the number of segments to return, we need to perform the
+ // following calculations for the number of segments required given 'x'
+ // elements:
+ //
+ // f(x) = {
+ // x == 0 : 0
+ // , 0 < x <= N : 1
+ // , N < x <= max : x / N + (x % N ? 1 : 0)
+ // }
+ //
+ // We can simplify this down to:
+ //
+ // f(x) = {
+  //       x == 0 : 0
+ // , 0 < x <= max : x / N + (x < N || x % N ? 1 : 0)
+ // }
+ //
+ // And further down to:
+ //
+ // f(x) = x ? x / N + (x < N || x % N ? 1 : 0) : 0
+ //
+ // We can then perform the following calculation `s` which counts the number
+ // of segments we need to remove from the end of the data structure:
+ //
+ // s(p, c) = f(p) - f(c)
+ //
+ // If we treat p = previous size, and c = current size, and given the
+ // properties above, the possible range for s(...) is [0..max(typeof(p))/N]
+ // given that typeof(p) == typeof(c).
+ auto F = [](uint64_t X) {
+ return X ? (X / ElementsPerSegment) +
+ (X < ElementsPerSegment || X % ElementsPerSegment ? 1 : 0)
+ : 0;
+ };
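+    // As a worked example (illustrative only): with ElementsPerSegment == 4,
+    // trimming from an old size of 10 down to 3 gives F(10) == 10/4 + 1 == 3
+    // and F(3) == 0 + 1 == 1, so we return 3 - 1 == 2 segments to the
+    // freelist.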
+ auto PS = F(OldSize);
+ auto CS = F(Size);
+ DCHECK_GE(PS, CS);
+ auto SegmentsToTrim = PS - CS;
+ for (auto I = 0uL; I < SegmentsToTrim; ++I) {
+      // Here we place the current tail segment on the freelist. To do this
+ // appropriately, we need to perform a splice operation on two
+ // bidirectional linked-lists. In particular, we have the current state of
+ // the doubly-linked list of segments:
+ //
+ // @S@ <- s0 <-> s1 <-> ... <-> sT -> @S@
+ //
+ DCHECK_NE(Head, &SentinelSegment);
+ DCHECK_NE(Tail, &SentinelSegment);
+ DCHECK_EQ(Tail->Next, &SentinelSegment);
+
+ if (Freelist == &SentinelSegment) {
+ // Our two lists at this point are in this configuration:
+ //
+ // Freelist: (potentially) @S@
+ // Mainlist: @S@<-s0<->s1<->...<->sPT<->sT->@S@
+ // ^ Head ^ Tail
+ //
+ // The end state for us will be this configuration:
+ //
+ // Freelist: @S@<-sT->@S@
+ // Mainlist: @S@<-s0<->s1<->...<->sPT->@S@
+ // ^ Head ^ Tail
+ //
+ // The first step for us is to hold a reference to the tail of Mainlist,
+ // which in our notation is represented by sT. We call this our "free
+ // segment" which is the segment we are placing on the Freelist.
+ //
+ // sF = sT
+ //
+ // Then, we also hold a reference to the "pre-tail" element, which we
+ // call sPT:
+ //
+ // sPT = pred(sT)
+ //
+ // We want to splice sT into the beginning of the Freelist, which in
+ // an empty Freelist means placing a segment whose predecessor and
+ // successor is the sentinel segment.
+ //
+ // The splice operation then can be performed in the following
+ // algorithm:
+ //
+ // succ(sPT) = S
+ // pred(sT) = S
+ // succ(sT) = Freelist
+ // Freelist = sT
+ // Tail = sPT
+ //
+ auto SPT = Tail->Prev;
+ SPT->Next = &SentinelSegment;
+ Tail->Prev = &SentinelSegment;
+ Tail->Next = Freelist;
+ Freelist = Tail;
+ Tail = SPT;
+
+ // Our post-conditions here are:
+ DCHECK_EQ(Tail->Next, &SentinelSegment);
+ DCHECK_EQ(Freelist->Prev, &SentinelSegment);
+ } else {
+ // In the other case, where the Freelist is not empty, we perform the
+ // following transformation instead:
+ //
+ // This transforms the current state:
+ //
+ // Freelist: @S@<-f0->@S@
+ // ^ Freelist
+ // Mainlist: @S@<-s0<->s1<->...<->sPT<->sT->@S@
+ // ^ Head ^ Tail
+ //
+ // Into the following:
+ //
+ // Freelist: @S@<-sT<->f0->@S@
+ // ^ Freelist
+ // Mainlist: @S@<-s0<->s1<->...<->sPT->@S@
+ // ^ Head ^ Tail
+ //
+ // The algorithm is:
+ //
+ // sFH = Freelist
+ // sPT = pred(sT)
+        //   pred(sFH) = sT
+ // succ(sT) = Freelist
+ // pred(sT) = S
+ // succ(sPT) = S
+ // Tail = sPT
+ // Freelist = sT
+ //
+ auto SFH = Freelist;
+ auto SPT = Tail->Prev;
+ auto ST = Tail;
+ SFH->Prev = ST;
+ ST->Next = Freelist;
+ ST->Prev = &SentinelSegment;
+ SPT->Next = &SentinelSegment;
+ Tail = SPT;
+ Freelist = ST;
+
+ // Our post-conditions here are:
+ DCHECK_EQ(Tail->Next, &SentinelSegment);
+ DCHECK_EQ(Freelist->Prev, &SentinelSegment);
+ DCHECK_EQ(Freelist->Next->Prev, Freelist);
+ }
+ }
+
+    // Now, in case we've spliced out all the segments, we ensure that the
+    // main list is "empty", i.e. both the head and the tail point to the
+    // sentinel segment.
+ if (Tail == &SentinelSegment)
+ Head = Tail;
+
+ DCHECK(
+ (Size == 0 && Head == &SentinelSegment && Tail == &SentinelSegment) ||
+ (Size != 0 && Head != &SentinelSegment && Tail != &SentinelSegment));
+ DCHECK(
+ (Freelist != &SentinelSegment && Freelist->Prev == &SentinelSegment) ||
+ (Freelist == &SentinelSegment && Tail->Next == &SentinelSegment));
+ }
+
+ // Provide iterators.
+ Iterator<T> begin() const XRAY_NEVER_INSTRUMENT {
+ return Iterator<T>(Head, 0, Size);
+ }
+ Iterator<T> end() const XRAY_NEVER_INSTRUMENT {
+ return Iterator<T>(Tail, Size, Size);
+ }
+ Iterator<const T> cbegin() const XRAY_NEVER_INSTRUMENT {
+ return Iterator<const T>(Head, 0, Size);
+ }
+ Iterator<const T> cend() const XRAY_NEVER_INSTRUMENT {
+ return Iterator<const T>(Tail, Size, Size);
+ }
+};
+
+// We need to have this storage definition out-of-line so that the compiler can
+// ensure that storage for the SentinelSegment is defined and has a single
+// address.
+template <class T>
+typename Array<T>::Segment Array<T>::SentinelSegment{
+ &Array<T>::SentinelSegment, &Array<T>::SentinelSegment, {'\0'}};
+
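+// A minimal usage sketch of the segmented array (illustrative only; the
+// allocator construction and 'consume' are hypothetical and depend on the
+// AllocatorType in use):
+//
+//   Array<int>::AllocatorType A(1 << 20);
+//   Array<int> Ints(A);
+//   Ints.Append(42);
+//   Ints.AppendEmplace(13);
+//   for (const auto &I : Ints)
+//     consume(I);
+//   Ints.trim(1);  // the freed segment goes to the freelist
+//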
+} // namespace __xray
+
+#endif // XRAY_SEGMENTED_ARRAY_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_segmented_array.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_trampoline_mips.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_trampoline_mips.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_trampoline_mips.S (revision 351984)
@@ -0,0 +1,109 @@
+//===-- xray_trampoline_mips.s ----------------------------------*- ASM -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+// This implements the MIPS-specific assembler for the trampolines.
+//
+//===----------------------------------------------------------------------===//
+
+ .text
+ .file "xray_trampoline_mips.S"
+ .globl __xray_FunctionEntry
+ .p2align 2
+ .type __xray_FunctionEntry,@function
+__xray_FunctionEntry:
+ .cfi_startproc
+ .set noreorder
+ .cpload $t9
+ .set reorder
+ // Save argument registers before doing any actual work
+ .cfi_def_cfa_offset 36
+ addiu $sp, $sp, -36
+ sw $ra, 32($sp)
+ .cfi_offset 31, -4
+ sw $a3, 28($sp)
+ sw $a2, 24($sp)
+ sw $a1, 20($sp)
+ sw $a0, 16($sp)
+ sdc1 $f14, 8($sp)
+ sdc1 $f12, 0($sp)
+
+ la $t9, _ZN6__xray19XRayPatchedFunctionE
+ lw $t9, 0($t9)
+
+ beqz $t9, FunctionEntry_restore
+
+ // a1=0 means that we are tracing an entry event
+ move $a1, $zero
+ // Function ID is in t0 (the first parameter).
+ move $a0, $t0
+ jalr $t9
+
+FunctionEntry_restore:
+ // Restore argument registers
+ ldc1 $f12, 0($sp)
+ ldc1 $f14, 8($sp)
+ lw $a0, 16($sp)
+ lw $a1, 20($sp)
+ lw $a2, 24($sp)
+ lw $a3, 28($sp)
+ lw $ra, 32($sp)
+ addiu $sp, $sp, 36
+ jr $ra
+FunctionEntry_end:
+ .size __xray_FunctionEntry, FunctionEntry_end-__xray_FunctionEntry
+ .cfi_endproc
+
+ .text
+ .globl __xray_FunctionExit
+ .p2align 2
+ .type __xray_FunctionExit,@function
+__xray_FunctionExit:
+ .cfi_startproc
+ .set noreorder
+ .cpload $t9
+ .set reorder
+ // Save return registers before doing any actual work.
+ .cfi_def_cfa_offset 36
+ addiu $sp, $sp, -36
+ sw $ra, 32($sp)
+ .cfi_offset 31, -4
+ sw $a1, 28($sp)
+ sw $a0, 24($sp)
+ sw $v1, 20($sp)
+ sw $v0, 16($sp)
+ sdc1 $f2, 8($sp)
+ sdc1 $f0, 0($sp)
+
+ la $t9, _ZN6__xray19XRayPatchedFunctionE
+ lw $t9, 0($t9)
+
+ beqz $t9, FunctionExit_restore
+
+ // a1=1 means that we are tracing an exit event
+ li $a1, 1
+ // Function ID is in t0 (the first parameter).
+ move $a0, $t0
+ jalr $t9
+
+FunctionExit_restore:
+ // Restore return registers
+ ldc1 $f0, 0($sp)
+ ldc1 $f2, 8($sp)
+ lw $v0, 16($sp)
+ lw $v1, 20($sp)
+ lw $a0, 24($sp)
+ lw $a1, 28($sp)
+ lw $ra, 32($sp)
+ addiu $sp, $sp, 36
+ jr $ra
+
+FunctionExit_end:
+ .size __xray_FunctionExit, FunctionExit_end-__xray_FunctionExit
+ .cfi_endproc
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_trampoline_mips.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_trampoline_mips64.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_trampoline_mips64.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_trampoline_mips64.S (revision 351984)
@@ -0,0 +1,135 @@
+//===-- xray_trampoline_mips64.s --------------------------------*- ASM -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+// This implements the MIPS64-specific assembler for the trampolines.
+//
+//===----------------------------------------------------------------------===//
+
+ .text
+ .file "xray_trampoline_mips64.S"
+ .globl __xray_FunctionEntry
+ .p2align 2
+ .type __xray_FunctionEntry,@function
+__xray_FunctionEntry:
+ .cfi_startproc
+ // Save argument registers before doing any actual work.
+ .cfi_def_cfa_offset 144
+ daddiu $sp, $sp, -144
+ sd $ra, 136($sp)
+ .cfi_offset 31, -8
+ sd $gp, 128($sp)
+ sd $a7, 120($sp)
+ sd $a6, 112($sp)
+ sd $a5, 104($sp)
+ sd $a4, 96($sp)
+ sd $a3, 88($sp)
+ sd $a2, 80($sp)
+ sd $a1, 72($sp)
+ sd $a0, 64($sp)
+ sdc1 $f19, 56($sp)
+ sdc1 $f18, 48($sp)
+ sdc1 $f17, 40($sp)
+ sdc1 $f16, 32($sp)
+ sdc1 $f15, 24($sp)
+ sdc1 $f14, 16($sp)
+ sdc1 $f13, 8($sp)
+ sdc1 $f12, 0($sp)
+
+ lui $gp, %hi(%neg(%gp_rel(__xray_FunctionEntry)))
+ daddu $gp, $gp, $t9
+ daddiu $gp ,$gp, %lo(%neg(%gp_rel(__xray_FunctionEntry)))
+
+ dla $t9, _ZN6__xray19XRayPatchedFunctionE
+ ld $t9, 0($t9)
+
+ beqz $t9, FunctionEntry_restore
+
+ // a1=0 means that we are tracing an entry event
+ move $a1, $zero
+ // Function ID is in t0 (the first parameter).
+ move $a0, $t0
+ jalr $t9
+
+FunctionEntry_restore:
+ // Restore argument registers
+ ldc1 $f12, 0($sp)
+ ldc1 $f13, 8($sp)
+ ldc1 $f14, 16($sp)
+ ldc1 $f15, 24($sp)
+ ldc1 $f16, 32($sp)
+ ldc1 $f17, 40($sp)
+ ldc1 $f18, 48($sp)
+ ldc1 $f19, 56($sp)
+ ld $a0, 64($sp)
+ ld $a1, 72($sp)
+ ld $a2, 80($sp)
+ ld $a3, 88($sp)
+ ld $a4, 96($sp)
+ ld $a5, 104($sp)
+ ld $a6, 112($sp)
+ ld $a7, 120($sp)
+ ld $gp, 128($sp)
+ ld $ra, 136($sp)
+ daddiu $sp, $sp, 144
+ jr $ra
+FunctionEntry_end:
+ .size __xray_FunctionEntry, FunctionEntry_end-__xray_FunctionEntry
+ .cfi_endproc
+
+ .text
+ .globl __xray_FunctionExit
+ .p2align 2
+ .type __xray_FunctionExit,@function
+__xray_FunctionExit:
+ .cfi_startproc
+ // Save return registers before doing any actual work.
+ .cfi_def_cfa_offset 64
+ daddiu $sp, $sp, -64
+ sd $ra, 56($sp)
+ .cfi_offset 31, -8
+ sd $gp, 48($sp)
+ sd $a0, 40($sp)
+ sd $v1, 32($sp)
+ sd $v0, 24($sp)
+ sdc1 $f2, 16($sp)
+ sdc1 $f1, 8($sp)
+ sdc1 $f0, 0($sp)
+
+ lui $gp, %hi(%neg(%gp_rel(__xray_FunctionExit)))
+ daddu $gp, $gp, $t9
+ daddiu $gp ,$gp, %lo(%neg(%gp_rel(__xray_FunctionExit)))
+
+ dla $t9, _ZN6__xray19XRayPatchedFunctionE
+ ld $t9, 0($t9)
+
+ beqz $t9, FunctionExit_restore
+
+ // a1=1 means that we are tracing an exit event
+ li $a1, 1
+ // Function ID is in t0 (the first parameter).
+ move $a0, $t0
+ jalr $t9
+
+FunctionExit_restore:
+ // Restore return registers
+ ldc1 $f0, 0($sp)
+ ldc1 $f1, 8($sp)
+ ldc1 $f2, 16($sp)
+ ld $v0, 24($sp)
+ ld $v1, 32($sp)
+ ld $a0, 40($sp)
+ ld $gp, 48($sp)
+ ld $ra, 56($sp)
+ daddiu $sp, $sp, 64
+ jr $ra
+
+FunctionExit_end:
+ .size __xray_FunctionExit, FunctionExit_end-__xray_FunctionExit
+ .cfi_endproc
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_trampoline_mips64.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_trampoline_x86_64.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_trampoline_x86_64.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_trampoline_x86_64.S (revision 351984)
@@ -0,0 +1,283 @@
+//===-- xray_trampoline_x86.s -----------------------------------*- ASM -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+// This implements the X86-specific assembler for the trampolines.
+//
+//===----------------------------------------------------------------------===//
+
+#include "../builtins/assembly.h"
+#include "../sanitizer_common/sanitizer_asm.h"
+
+
+
+.macro SAVE_REGISTERS
+ pushfq
+ subq $240, %rsp
+ CFI_DEF_CFA_OFFSET(248)
+ movq %rbp, 232(%rsp)
+ movupd %xmm0, 216(%rsp)
+ movupd %xmm1, 200(%rsp)
+ movupd %xmm2, 184(%rsp)
+ movupd %xmm3, 168(%rsp)
+ movupd %xmm4, 152(%rsp)
+ movupd %xmm5, 136(%rsp)
+ movupd %xmm6, 120(%rsp)
+ movupd %xmm7, 104(%rsp)
+ movq %rdi, 96(%rsp)
+ movq %rax, 88(%rsp)
+ movq %rdx, 80(%rsp)
+ movq %rsi, 72(%rsp)
+ movq %rcx, 64(%rsp)
+ movq %r8, 56(%rsp)
+ movq %r9, 48(%rsp)
+ movq %r10, 40(%rsp)
+ movq %r11, 32(%rsp)
+ movq %r12, 24(%rsp)
+ movq %r13, 16(%rsp)
+ movq %r14, 8(%rsp)
+ movq %r15, 0(%rsp)
+.endm
+
+.macro RESTORE_REGISTERS
+ movq 232(%rsp), %rbp
+ movupd 216(%rsp), %xmm0
+ movupd 200(%rsp), %xmm1
+ movupd 184(%rsp), %xmm2
+ movupd 168(%rsp), %xmm3
+ movupd 152(%rsp), %xmm4
+ movupd 136(%rsp), %xmm5
+ movupd 120(%rsp) , %xmm6
+ movupd 104(%rsp) , %xmm7
+ movq 96(%rsp), %rdi
+ movq 88(%rsp), %rax
+ movq 80(%rsp), %rdx
+ movq 72(%rsp), %rsi
+ movq 64(%rsp), %rcx
+ movq 56(%rsp), %r8
+ movq 48(%rsp), %r9
+ movq 40(%rsp), %r10
+ movq 32(%rsp), %r11
+ movq 24(%rsp), %r12
+ movq 16(%rsp), %r13
+ movq 8(%rsp), %r14
+ movq 0(%rsp), %r15
+ addq $240, %rsp
+ popfq
+ CFI_DEF_CFA_OFFSET(8)
+.endm
+
+.macro ALIGNED_CALL_RAX
+  // Call the logging handler, after aligning the stack to a 16-byte boundary.
+  // The approach we're taking here uses additional stack space to stash the
+  // stack pointer twice before aligning the pointer to 16 bytes. If the stack
+  // was only 8-byte aligned, it becomes 16-byte aligned -- and when restoring
+  // the pointer, one of the two stashed copies is always found 8 bytes above
+  // the adjusted stack pointer.
+ pushq %rsp
+ pushq (%rsp)
+ andq $-0x10, %rsp
+ callq *%rax
+ movq 8(%rsp), %rsp
+.endm
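+
+// An illustrative trace of ALIGNED_CALL_RAX (not part of the original
+// source): if %rsp is 0x...18 on entry, the two pushes leave it at 0x...08,
+// each stashing the original %rsp value, and the andq rounds it down to
+// 0x...00. Whether or not the andq moved the pointer, a stashed copy of the
+// original %rsp sits at 8(%rsp), which is what the final movq restores.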
+
+ .text
+#if !defined(__APPLE__)
+ .section .text
+ .file "xray_trampoline_x86.S"
+#else
+ .section __TEXT,__text
+#endif
+
+//===----------------------------------------------------------------------===//
+
+ .globl ASM_SYMBOL(__xray_FunctionEntry)
+ .align 16, 0x90
+ ASM_TYPE_FUNCTION(__xray_FunctionEntry)
+# LLVM-MCA-BEGIN __xray_FunctionEntry
+ASM_SYMBOL(__xray_FunctionEntry):
+ CFI_STARTPROC
+ SAVE_REGISTERS
+
+ // This load has to be atomic, it's concurrent with __xray_patch().
+ // On x86/amd64, a simple (type-aligned) MOV instruction is enough.
+ movq ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)(%rip), %rax
+ testq %rax, %rax
+ je .Ltmp0
+
+ // The patched function prologue puts its xray_instr_map index into %r10d.
+ movl %r10d, %edi
+ xor %esi,%esi
+ ALIGNED_CALL_RAX
+
+.Ltmp0:
+ RESTORE_REGISTERS
+ retq
+# LLVM-MCA-END
+ ASM_SIZE(__xray_FunctionEntry)
+ CFI_ENDPROC
+
+//===----------------------------------------------------------------------===//
+
+ .globl ASM_SYMBOL(__xray_FunctionExit)
+ .align 16, 0x90
+ ASM_TYPE_FUNCTION(__xray_FunctionExit)
+# LLVM-MCA-BEGIN __xray_FunctionExit
+ASM_SYMBOL(__xray_FunctionExit):
+ CFI_STARTPROC
+ // Save the important registers first. Since we're assuming that this
+ // function is only jumped into, we only preserve the registers for
+ // returning.
+ subq $56, %rsp
+ CFI_DEF_CFA_OFFSET(64)
+ movq %rbp, 48(%rsp)
+ movupd %xmm0, 32(%rsp)
+ movupd %xmm1, 16(%rsp)
+ movq %rax, 8(%rsp)
+ movq %rdx, 0(%rsp)
+ movq ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)(%rip), %rax
+ testq %rax,%rax
+ je .Ltmp2
+
+ movl %r10d, %edi
+ movl $1, %esi
+ ALIGNED_CALL_RAX
+
+.Ltmp2:
+ // Restore the important registers.
+ movq 48(%rsp), %rbp
+ movupd 32(%rsp), %xmm0
+ movupd 16(%rsp), %xmm1
+ movq 8(%rsp), %rax
+ movq 0(%rsp), %rdx
+ addq $56, %rsp
+ CFI_DEF_CFA_OFFSET(8)
+ retq
+# LLVM-MCA-END
+ ASM_SIZE(__xray_FunctionExit)
+ CFI_ENDPROC
+
+//===----------------------------------------------------------------------===//
+
+ .globl ASM_SYMBOL(__xray_FunctionTailExit)
+ .align 16, 0x90
+ ASM_TYPE_FUNCTION(__xray_FunctionTailExit)
+# LLVM-MCA-BEGIN __xray_FunctionTailExit
+ASM_SYMBOL(__xray_FunctionTailExit):
+ CFI_STARTPROC
+ SAVE_REGISTERS
+
+ movq ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)(%rip), %rax
+ testq %rax,%rax
+ je .Ltmp4
+
+ movl %r10d, %edi
+ movl $2, %esi
+
+ ALIGNED_CALL_RAX
+
+.Ltmp4:
+ RESTORE_REGISTERS
+ retq
+# LLVM-MCA-END
+ ASM_SIZE(__xray_FunctionTailExit)
+ CFI_ENDPROC
+
+//===----------------------------------------------------------------------===//
+
+ .globl ASM_SYMBOL(__xray_ArgLoggerEntry)
+ .align 16, 0x90
+ ASM_TYPE_FUNCTION(__xray_ArgLoggerEntry)
+# LLVM-MCA-BEGIN __xray_ArgLoggerEntry
+ASM_SYMBOL(__xray_ArgLoggerEntry):
+ CFI_STARTPROC
+ SAVE_REGISTERS
+
+ // Again, these function pointer loads must be atomic; MOV is fine.
+ movq ASM_SYMBOL(_ZN6__xray13XRayArgLoggerE)(%rip), %rax
+ testq %rax, %rax
+ jne .Larg1entryLog
+
+  // If the arg1 logging handler is not set, defer to the no-arg handler.
+ movq ASM_SYMBOL(_ZN6__xray19XRayPatchedFunctionE)(%rip), %rax
+ testq %rax, %rax
+ je .Larg1entryFail
+
+.Larg1entryLog:
+
+ // First argument will become the third
+ movq %rdi, %rdx
+
+ // XRayEntryType::LOG_ARGS_ENTRY into the second
+ mov $0x3, %esi
+
+ // 32-bit function ID becomes the first
+ movl %r10d, %edi
+ ALIGNED_CALL_RAX
+
+.Larg1entryFail:
+ RESTORE_REGISTERS
+ retq
+# LLVM-MCA-END
+ ASM_SIZE(__xray_ArgLoggerEntry)
+ CFI_ENDPROC
+
+//===----------------------------------------------------------------------===//
+
+ .global ASM_SYMBOL(__xray_CustomEvent)
+ .align 16, 0x90
+ ASM_TYPE_FUNCTION(__xray_CustomEvent)
+# LLVM-MCA-BEGIN __xray_CustomEvent
+ASM_SYMBOL(__xray_CustomEvent):
+ CFI_STARTPROC
+ SAVE_REGISTERS
+
+ // We take two arguments to this trampoline, which should be in rdi and rsi
+ // already.
+ movq ASM_SYMBOL(_ZN6__xray22XRayPatchedCustomEventE)(%rip), %rax
+ testq %rax,%rax
+ je .LcustomEventCleanup
+
+ ALIGNED_CALL_RAX
+
+.LcustomEventCleanup:
+ RESTORE_REGISTERS
+ retq
+# LLVM-MCA-END
+ ASM_SIZE(__xray_CustomEvent)
+ CFI_ENDPROC
+
+//===----------------------------------------------------------------------===//
+
+ .global ASM_SYMBOL(__xray_TypedEvent)
+ .align 16, 0x90
+ ASM_TYPE_FUNCTION(__xray_TypedEvent)
+# LLVM-MCA-BEGIN __xray_TypedEvent
+ASM_SYMBOL(__xray_TypedEvent):
+ CFI_STARTPROC
+ SAVE_REGISTERS
+
+ // We pass three arguments to this trampoline, which should be in rdi, rsi
+ // and rdx without our intervention.
+ movq ASM_SYMBOL(_ZN6__xray21XRayPatchedTypedEventE)(%rip), %rax
+ testq %rax,%rax
+ je .LtypedEventCleanup
+
+ ALIGNED_CALL_RAX
+
+.LtypedEventCleanup:
+ RESTORE_REGISTERS
+ retq
+# LLVM-MCA-END
+ ASM_SIZE(__xray_TypedEvent)
+ CFI_ENDPROC
+
+//===----------------------------------------------------------------------===//
+
+NO_EXEC_STACK_DIRECTIVE
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_trampoline_x86_64.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_tsc.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_tsc.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_tsc.h (revision 351984)
@@ -0,0 +1,90 @@
+//===-- xray_tsc.h ----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+//===----------------------------------------------------------------------===//
+#ifndef XRAY_EMULATE_TSC_H
+#define XRAY_EMULATE_TSC_H
+
+#include "sanitizer_common/sanitizer_common.h"
+
+namespace __xray {
+static constexpr uint64_t NanosecondsPerSecond = 1000ULL * 1000 * 1000;
+}
+
+#if SANITIZER_FUCHSIA
+#include <zircon/syscalls.h>
+
+namespace __xray {
+
+inline bool probeRequiredCPUFeatures() XRAY_NEVER_INSTRUMENT { return true; }
+
+ALWAYS_INLINE uint64_t readTSC(uint8_t &CPU) XRAY_NEVER_INSTRUMENT {
+ CPU = 0;
+ return _zx_ticks_get();
+}
+
+inline uint64_t getTSCFrequency() XRAY_NEVER_INSTRUMENT {
+ return _zx_ticks_per_second();
+}
+
+} // namespace __xray
+
+#else // SANITIZER_FUCHSIA
+
+#if defined(__x86_64__)
+#include "xray_x86_64.inc"
+#elif defined(__powerpc64__)
+#include "xray_powerpc64.inc"
+#elif defined(__arm__) || defined(__aarch64__) || defined(__mips__)
+// Emulated TSC.
+// There is no instruction like RDTSCP in user mode on ARM. ARM's CP15 does
+// not have a constant frequency like the TSC on x86(_64); it may go faster
+// or slower depending on CPU turbo or power-saving mode. Furthermore,
+// reading from CP15 on ARM requires a kernel modification or a driver,
+// which we cannot require from users of compiler-rt.
+// So on ARM we use clock_gettime(), which gives the result in nanoseconds.
+// To get the measurements per second, we scale this by the number of
+// nanoseconds per second, pretending that the TSC frequency is 1GHz and
+// one TSC tick is 1 nanosecond.
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "xray_defs.h"
+#include <cerrno>
+#include <cstdint>
+#include <time.h>
+
+namespace __xray {
+
+inline bool probeRequiredCPUFeatures() XRAY_NEVER_INSTRUMENT { return true; }
+
+ALWAYS_INLINE uint64_t readTSC(uint8_t &CPU) XRAY_NEVER_INSTRUMENT {
+ timespec TS;
+ int result = clock_gettime(CLOCK_REALTIME, &TS);
+ if (result != 0) {
+    Report("clock_gettime(2) returned %d, errno=%d.\n", result, int(errno));
+ TS.tv_sec = 0;
+ TS.tv_nsec = 0;
+ }
+ CPU = 0;
+ return TS.tv_sec * NanosecondsPerSecond + TS.tv_nsec;
+}
+
+inline uint64_t getTSCFrequency() XRAY_NEVER_INSTRUMENT {
+ return NanosecondsPerSecond;
+}
+
+} // namespace __xray
+
+#else
+#error Target architecture is not supported.
+#endif // CPU architecture
+#endif // SANITIZER_FUCHSIA
+
+#endif // XRAY_EMULATE_TSC_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_tsc.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_utils.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_utils.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_utils.cc (revision 351984)
@@ -0,0 +1,195 @@
+//===-- xray_utils.cc -------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+//===----------------------------------------------------------------------===//
+#include "xray_utils.h"
+
+#include "sanitizer_common/sanitizer_allocator_internal.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "xray_allocator.h"
+#include "xray_defs.h"
+#include "xray_flags.h"
+#include <cstdio>
+#include <errno.h>
+#include <fcntl.h>
+#include <iterator>
+#include <stdlib.h>
+#include <sys/types.h>
+#include <tuple>
+#include <unistd.h>
+#include <utility>
+
+#if SANITIZER_FUCHSIA
+#include "sanitizer_common/sanitizer_symbolizer_fuchsia.h"
+
+#include <inttypes.h>
+#include <zircon/process.h>
+#include <zircon/sanitizer.h>
+#include <zircon/status.h>
+#include <zircon/syscalls.h>
+#endif
+
+namespace __xray {
+
+#if SANITIZER_FUCHSIA
+constexpr const char* ProfileSinkName = "llvm-xray";
+
+LogWriter::~LogWriter() {
+ _zx_handle_close(Vmo);
+}
+
+void LogWriter::WriteAll(const char *Begin, const char *End) XRAY_NEVER_INSTRUMENT {
+ if (Begin == End)
+ return;
+ auto TotalBytes = std::distance(Begin, End);
+
+ const size_t PageSize = flags()->xray_page_size_override > 0
+ ? flags()->xray_page_size_override
+ : GetPageSizeCached();
+ if (RoundUpTo(Offset, PageSize) != RoundUpTo(Offset + TotalBytes, PageSize)) {
+ // Resize the VMO to ensure there's sufficient space for the data.
+ zx_status_t Status = _zx_vmo_set_size(Vmo, Offset + TotalBytes);
+ if (Status != ZX_OK) {
+ Report("Failed to resize VMO: %s\n", _zx_status_get_string(Status));
+ return;
+ }
+ }
+
+ // Write the data into VMO.
+ zx_status_t Status = _zx_vmo_write(Vmo, Begin, Offset, TotalBytes);
+ if (Status != ZX_OK) {
+ Report("Failed to write: %s\n", _zx_status_get_string(Status));
+ return;
+ }
+ Offset += TotalBytes;
+}
+
+void LogWriter::Flush() XRAY_NEVER_INSTRUMENT {
+ // Nothing to do here since WriteAll writes directly into the VMO.
+}
+
+LogWriter *LogWriter::Open() XRAY_NEVER_INSTRUMENT {
+ // Create VMO to hold the profile data.
+ zx_handle_t Vmo;
+ zx_status_t Status = _zx_vmo_create(0, ZX_VMO_RESIZABLE, &Vmo);
+ if (Status != ZX_OK) {
+ Report("XRay: cannot create VMO: %s\n", _zx_status_get_string(Status));
+ return nullptr;
+ }
+
+ // Get the KOID of the current process to use in the VMO name.
+ zx_info_handle_basic_t Info;
+ Status = _zx_object_get_info(_zx_process_self(), ZX_INFO_HANDLE_BASIC, &Info,
+ sizeof(Info), NULL, NULL);
+ if (Status != ZX_OK) {
+ Report("XRay: cannot get basic info about current process handle: %s\n",
+ _zx_status_get_string(Status));
+ return nullptr;
+ }
+
+ // Give the VMO a name including our process KOID so it's easy to spot.
+ char VmoName[ZX_MAX_NAME_LEN];
+ internal_snprintf(VmoName, sizeof(VmoName), "%s.%zu", ProfileSinkName,
+ Info.koid);
+ _zx_object_set_property(Vmo, ZX_PROP_NAME, VmoName, strlen(VmoName));
+
+ // Duplicate the handle since __sanitizer_publish_data consumes it and
+ // LogWriter needs to hold onto it.
+ zx_handle_t Handle;
+ Status =_zx_handle_duplicate(Vmo, ZX_RIGHT_SAME_RIGHTS, &Handle);
+ if (Status != ZX_OK) {
+ Report("XRay: cannot duplicate VMO handle: %s\n",
+ _zx_status_get_string(Status));
+ return nullptr;
+ }
+
+ // Publish the VMO that receives the logging. Note the VMO's contents can
+ // grow and change after publication. The contents won't be read out until
+ // after the process exits.
+ __sanitizer_publish_data(ProfileSinkName, Handle);
+
+ // Use the dumpfile symbolizer markup element to write the name of the VMO.
+ Report("XRay: " FORMAT_DUMPFILE "\n", ProfileSinkName, VmoName);
+
+ LogWriter *LW = reinterpret_cast<LogWriter *>(InternalAlloc(sizeof(LogWriter)));
+ new (LW) LogWriter(Vmo);
+ return LW;
+}
+
+void LogWriter::Close(LogWriter *LW) {
+ LW->~LogWriter();
+ InternalFree(LW);
+}
+#else // SANITIZER_FUCHSIA
+LogWriter::~LogWriter() {
+ internal_close(Fd);
+}
+
+void LogWriter::WriteAll(const char *Begin, const char *End) XRAY_NEVER_INSTRUMENT {
+ if (Begin == End)
+ return;
+ auto TotalBytes = std::distance(Begin, End);
+ while (auto Written = write(Fd, Begin, TotalBytes)) {
+ if (Written < 0) {
+ if (errno == EINTR)
+ continue; // Try again.
+ Report("Failed to write; errno = %d\n", errno);
+ return;
+ }
+ TotalBytes -= Written;
+ if (TotalBytes == 0)
+ break;
+ Begin += Written;
+ }
+}
+
+void LogWriter::Flush() XRAY_NEVER_INSTRUMENT {
+ fsync(Fd);
+}
+
+LogWriter *LogWriter::Open() XRAY_NEVER_INSTRUMENT {
+ // Open a temporary file once for the log.
+ char TmpFilename[256] = {};
+ char TmpWildcardPattern[] = "XXXXXX";
+ auto **Argv = GetArgv();
+ const char *Progname = !Argv ? "(unknown)" : Argv[0];
+ const char *LastSlash = internal_strrchr(Progname, '/');
+
+ if (LastSlash != nullptr)
+ Progname = LastSlash + 1;
+
+ int NeededLength = internal_snprintf(
+ TmpFilename, sizeof(TmpFilename), "%s%s.%s",
+ flags()->xray_logfile_base, Progname, TmpWildcardPattern);
+ if (NeededLength > int(sizeof(TmpFilename))) {
+ Report("XRay log file name too long (%d): %s\n", NeededLength, TmpFilename);
+ return nullptr;
+ }
+ int Fd = mkstemp(TmpFilename);
+ if (Fd == -1) {
+ Report("XRay: Failed opening temporary file '%s'; not logging events.\n",
+ TmpFilename);
+ return nullptr;
+ }
+ if (Verbosity())
+ Report("XRay: Log file in '%s'\n", TmpFilename);
+
+ LogWriter *LW = allocate<LogWriter>();
+ new (LW) LogWriter(Fd);
+ return LW;
+}
+
+void LogWriter::Close(LogWriter *LW) {
+ LW->~LogWriter();
+ deallocate(LW);
+}
+#endif // SANITIZER_FUCHSIA
+
+} // namespace __xray
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_utils.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_utils.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_utils.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_utils.h (revision 351984)
@@ -0,0 +1,85 @@
+//===-- xray_utils.h --------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+// Some shared utilities for the XRay runtime implementation.
+//
+//===----------------------------------------------------------------------===//
+#ifndef XRAY_UTILS_H
+#define XRAY_UTILS_H
+
+#include <cstddef>
+#include <cstdint>
+#include <sys/types.h>
+#include <utility>
+
+#include "sanitizer_common/sanitizer_common.h"
+#if SANITIZER_FUCHSIA
+#include <zircon/types.h>
+#endif
+
+namespace __xray {
+
+class LogWriter {
+public:
+#if SANITIZER_FUCHSIA
+ LogWriter(zx_handle_t Vmo) : Vmo(Vmo) {}
+#else
+ explicit LogWriter(int Fd) : Fd(Fd) {}
+#endif
+ ~LogWriter();
+
+ // Write a character range into a log.
+ void WriteAll(const char *Begin, const char *End);
+
+ void Flush();
+
+ // Returns a new log instance initialized using the flag-provided values.
+ static LogWriter *Open();
+ // Closes and deallocates the log instance.
+ static void Close(LogWriter *LogWriter);
+
+private:
+#if SANITIZER_FUCHSIA
+ zx_handle_t Vmo = ZX_HANDLE_INVALID;
+ uint64_t Offset = 0;
+#else
+ int Fd = -1;
+#endif
+};
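+
+// A minimal usage sketch (illustrative only; Buf and Size are hypothetical):
+//
+//   if (LogWriter *LW = LogWriter::Open()) {
+//     LW->WriteAll(Buf, Buf + Size);
+//     LW->Flush();
+//     LogWriter::Close(LW);
+//   }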
+
+constexpr size_t gcd(size_t a, size_t b) {
+ return (b == 0) ? a : gcd(b, a % b);
+}
+
+constexpr size_t lcm(size_t a, size_t b) { return a * b / gcd(a, b); }
+
+constexpr size_t nearest_boundary(size_t number, size_t multiple) {
+ return multiple * ((number / multiple) + (number % multiple ? 1 : 0));
+}
+
+constexpr size_t next_pow2_helper(size_t num, size_t acc) {
+ return (1u << acc) >= num ? (1u << acc) : next_pow2_helper(num, acc + 1);
+}
+
+constexpr size_t next_pow2(size_t number) {
+ return next_pow2_helper(number, 1);
+}
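+
+// A few illustrative compile-time checks for the helpers above (a sketch,
+// not part of the original header):
+//
+//   static_assert(gcd(12, 18) == 6, "");
+//   static_assert(lcm(4, 6) == 12, "");
+//   static_assert(nearest_boundary(24, 16) == 32, "");
+//   static_assert(next_pow2(24) == 32, "");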
+
+template <class T> constexpr T &max(T &A, T &B) { return A > B ? A : B; }
+
+template <class T> constexpr T &min(T &A, T &B) { return A <= B ? A : B; }
+
+constexpr ptrdiff_t diff(uintptr_t A, uintptr_t B) {
+ return max(A, B) - min(A, B);
+}
+
+} // namespace __xray
+
+#endif // XRAY_UTILS_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_utils.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_x86_64.inc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_x86_64.inc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_x86_64.inc (revision 351984)
@@ -0,0 +1,33 @@
+//===-- xray_x86_64.inc -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+//===----------------------------------------------------------------------===//
+
+#include <cstdint>
+#include <x86intrin.h>
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+#include "xray_defs.h"
+
+namespace __xray {
+
+ALWAYS_INLINE uint64_t readTSC(uint8_t &CPU) XRAY_NEVER_INSTRUMENT {
+ unsigned LongCPU;
+ unsigned long Rax, Rdx;
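+  // RDTSCP loads the low 32 bits of the TSC into EAX, the high 32 bits into
+  // EDX, and the contents of IA32_TSC_AUX (the processor ID) into ECX.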
+ __asm__ __volatile__("rdtscp\n" : "=a"(Rax), "=d"(Rdx), "=c"(LongCPU) ::);
+ CPU = LongCPU;
+ return (Rdx << 32) + Rax;
+}
+
+uint64_t getTSCFrequency();
+
+bool probeRequiredCPUFeatures();
+
+} // namespace __xray
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_x86_64.inc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_x86_64.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_x86_64.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_x86_64.cc (revision 351984)
@@ -0,0 +1,353 @@
+#include "cpuid.h"
+#include "sanitizer_common/sanitizer_common.h"
+#if !SANITIZER_FUCHSIA
+#include "sanitizer_common/sanitizer_posix.h"
+#endif
+#include "xray_defs.h"
+#include "xray_interface_internal.h"
+
+#if SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_OPENBSD || SANITIZER_MAC
+#include <sys/types.h>
+#if SANITIZER_OPENBSD
+#include <sys/time.h>
+#include <machine/cpu.h>
+#endif
+#include <sys/sysctl.h>
+#elif SANITIZER_FUCHSIA
+#include <zircon/syscalls.h>
+#endif
+
+#include <atomic>
+#include <cstdint>
+#include <errno.h>
+#include <fcntl.h>
+#include <iterator>
+#include <limits>
+#include <tuple>
+#include <unistd.h>
+
+namespace __xray {
+
+#if SANITIZER_LINUX
+static std::pair<ssize_t, bool>
+retryingReadSome(int Fd, char *Begin, char *End) XRAY_NEVER_INSTRUMENT {
+ auto BytesToRead = std::distance(Begin, End);
+ ssize_t BytesRead;
+ ssize_t TotalBytesRead = 0;
+ while (BytesToRead && (BytesRead = read(Fd, Begin, BytesToRead))) {
+ if (BytesRead == -1) {
+ if (errno == EINTR)
+ continue;
+ Report("Read error; errno = %d\n", errno);
+ return std::make_pair(TotalBytesRead, false);
+ }
+
+ TotalBytesRead += BytesRead;
+ BytesToRead -= BytesRead;
+ Begin += BytesRead;
+ }
+ return std::make_pair(TotalBytesRead, true);
+}
+
+static bool readValueFromFile(const char *Filename,
+ long long *Value) XRAY_NEVER_INSTRUMENT {
+ int Fd = open(Filename, O_RDONLY | O_CLOEXEC);
+ if (Fd == -1)
+ return false;
+ static constexpr size_t BufSize = 256;
+ char Line[BufSize] = {};
+ ssize_t BytesRead;
+ bool Success;
+ std::tie(BytesRead, Success) = retryingReadSome(Fd, Line, Line + BufSize);
+ close(Fd);
+ if (!Success)
+ return false;
+ const char *End = nullptr;
+ long long Tmp = internal_simple_strtoll(Line, &End, 10);
+ bool Result = false;
+ if (Line[0] != '\0' && (*End == '\n' || *End == '\0')) {
+ *Value = Tmp;
+ Result = true;
+ }
+ return Result;
+}
+
+uint64_t getTSCFrequency() XRAY_NEVER_INSTRUMENT {
+ long long TSCFrequency = -1;
+ if (readValueFromFile("/sys/devices/system/cpu/cpu0/tsc_freq_khz",
+ &TSCFrequency)) {
+ TSCFrequency *= 1000;
+ } else if (readValueFromFile(
+ "/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq",
+ &TSCFrequency)) {
+ TSCFrequency *= 1000;
+ } else {
+ Report("Unable to determine CPU frequency for TSC accounting.\n");
+ }
+ return TSCFrequency == -1 ? 0 : static_cast<uint64_t>(TSCFrequency);
+}
+#elif SANITIZER_FREEBSD || SANITIZER_NETBSD || SANITIZER_OPENBSD || SANITIZER_MAC
+uint64_t getTSCFrequency() XRAY_NEVER_INSTRUMENT {
+ long long TSCFrequency = -1;
+ size_t tscfreqsz = sizeof(TSCFrequency);
+#if SANITIZER_OPENBSD
+ int Mib[2] = { CTL_MACHDEP, CPU_TSCFREQ };
+ if (internal_sysctl(Mib, 2, &TSCFrequency, &tscfreqsz, NULL, 0) != -1) {
+#elif SANITIZER_MAC
+ if (internal_sysctlbyname("machdep.tsc.frequency", &TSCFrequency,
+ &tscfreqsz, NULL, 0) != -1) {
+
+#else
+ if (internal_sysctlbyname("machdep.tsc_freq", &TSCFrequency, &tscfreqsz,
+ NULL, 0) != -1) {
+#endif
+ return static_cast<uint64_t>(TSCFrequency);
+ } else {
+ Report("Unable to determine CPU frequency for TSC accounting.\n");
+ }
+
+ return 0;
+}
+#elif !SANITIZER_FUCHSIA
+uint64_t getTSCFrequency() XRAY_NEVER_INSTRUMENT {
+ /* Not supported */
+ return 0;
+}
+#endif
+
+static constexpr uint8_t CallOpCode = 0xe8;
+static constexpr uint16_t MovR10Seq = 0xba41;
+static constexpr uint16_t Jmp9Seq = 0x09eb;
+static constexpr uint16_t Jmp20Seq = 0x14eb;
+static constexpr uint16_t Jmp15Seq = 0x0feb;
+static constexpr uint8_t JmpOpCode = 0xe9;
+static constexpr uint8_t RetOpCode = 0xc3;
+static constexpr uint16_t NopwSeq = 0x9066;
+
+static constexpr int64_t MinOffset{std::numeric_limits<int32_t>::min()};
+static constexpr int64_t MaxOffset{std::numeric_limits<int32_t>::max()};
+
+bool patchFunctionEntry(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled,
+ void (*Trampoline)()) XRAY_NEVER_INSTRUMENT {
+ // Here we do the dance of replacing the following sled:
+ //
+ // xray_sled_n:
+ // jmp +9
+ // <9 byte nop>
+ //
+ // With the following:
+ //
+ // mov r10d, <function id>
+ // call <relative 32bit offset to entry trampoline>
+ //
+ // We need to do this in the following order:
+ //
+ // 1. Put the function id first, 2 bytes from the start of the sled (just
+ // after the 2-byte jmp instruction).
+ // 2. Put the call opcode 6 bytes from the start of the sled.
+ // 3. Put the relative offset 7 bytes from the start of the sled.
+  //   4. Do an atomic write over the jmp instruction for the "mov r10d"
+  //      opcode and first operand, so that a thread racing through the sled
+  //      observes either the original 2-byte jmp (skipping the sled) or the
+  //      fully patched sequence, never a partially written one.
+  //
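+  // For reference, the fully patched 11-byte sequence is laid out as follows
+  // (a sketch of the encoding; MovR10Seq and CallOpCode are the constants
+  // defined above):
+  //
+  //   offset 0: 41 ba           // mov r10d, ... (written last, atomically)
+  //   offset 2: <function id>   // 32-bit immediate
+  //   offset 6: e8              // call
+  //   offset 7: <rel32>         // 32-bit relative offset to the trampoline
+  //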
+ // Prerequisite is to compute the relative offset to the trampoline's address.
+ int64_t TrampolineOffset = reinterpret_cast<int64_t>(Trampoline) -
+ (static_cast<int64_t>(Sled.Address) + 11);
+ if (TrampolineOffset < MinOffset || TrampolineOffset > MaxOffset) {
+ Report("XRay Entry trampoline (%p) too far from sled (%p)\n",
+ Trampoline, reinterpret_cast<void *>(Sled.Address));
+ return false;
+ }
+ if (Enable) {
+ *reinterpret_cast<uint32_t *>(Sled.Address + 2) = FuncId;
+ *reinterpret_cast<uint8_t *>(Sled.Address + 6) = CallOpCode;
+ *reinterpret_cast<uint32_t *>(Sled.Address + 7) = TrampolineOffset;
+ std::atomic_store_explicit(
+ reinterpret_cast<std::atomic<uint16_t> *>(Sled.Address), MovR10Seq,
+ std::memory_order_release);
+ } else {
+ std::atomic_store_explicit(
+ reinterpret_cast<std::atomic<uint16_t> *>(Sled.Address), Jmp9Seq,
+ std::memory_order_release);
+ // FIXME: Write out the nops still?
+ }
+ return true;
+}
+
+bool patchFunctionExit(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
+ // Here we do the dance of replacing the following sled:
+ //
+ // xray_sled_n:
+ // ret
+ // <10 byte nop>
+ //
+ // With the following:
+ //
+ // mov r10d, <function id>
+ // jmp <relative 32bit offset to exit trampoline>
+ //
+ // 1. Put the function id first, 2 bytes from the start of the sled (just
+ // after the 1-byte ret instruction).
+ // 2. Put the jmp opcode 6 bytes from the start of the sled.
+ // 3. Put the relative offset 7 bytes from the start of the sled.
+ // 4. Do an atomic write over the jmp instruction for the "mov r10d"
+ // opcode and first operand.
+ //
+  // Prerequisite is to compute the relative offset to the
+  // __xray_FunctionExit function's address.
+ int64_t TrampolineOffset = reinterpret_cast<int64_t>(__xray_FunctionExit) -
+ (static_cast<int64_t>(Sled.Address) + 11);
+ if (TrampolineOffset < MinOffset || TrampolineOffset > MaxOffset) {
+ Report("XRay Exit trampoline (%p) too far from sled (%p)\n",
+ __xray_FunctionExit, reinterpret_cast<void *>(Sled.Address));
+ return false;
+ }
+ if (Enable) {
+ *reinterpret_cast<uint32_t *>(Sled.Address + 2) = FuncId;
+ *reinterpret_cast<uint8_t *>(Sled.Address + 6) = JmpOpCode;
+ *reinterpret_cast<uint32_t *>(Sled.Address + 7) = TrampolineOffset;
+ std::atomic_store_explicit(
+ reinterpret_cast<std::atomic<uint16_t> *>(Sled.Address), MovR10Seq,
+ std::memory_order_release);
+ } else {
+ std::atomic_store_explicit(
+ reinterpret_cast<std::atomic<uint8_t> *>(Sled.Address), RetOpCode,
+ std::memory_order_release);
+ // FIXME: Write out the nops still?
+ }
+ return true;
+}
+
+bool patchFunctionTailExit(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
+ // Here we do the dance of replacing the tail call sled with a similar
+ // sequence as the entry sled, but calls the tail exit sled instead.
+ int64_t TrampolineOffset =
+ reinterpret_cast<int64_t>(__xray_FunctionTailExit) -
+ (static_cast<int64_t>(Sled.Address) + 11);
+ if (TrampolineOffset < MinOffset || TrampolineOffset > MaxOffset) {
+ Report("XRay Tail Exit trampoline (%p) too far from sled (%p)\n",
+ __xray_FunctionTailExit, reinterpret_cast<void *>(Sled.Address));
+ return false;
+ }
+ if (Enable) {
+ *reinterpret_cast<uint32_t *>(Sled.Address + 2) = FuncId;
+ *reinterpret_cast<uint8_t *>(Sled.Address + 6) = CallOpCode;
+ *reinterpret_cast<uint32_t *>(Sled.Address + 7) = TrampolineOffset;
+ std::atomic_store_explicit(
+ reinterpret_cast<std::atomic<uint16_t> *>(Sled.Address), MovR10Seq,
+ std::memory_order_release);
+ } else {
+ std::atomic_store_explicit(
+ reinterpret_cast<std::atomic<uint16_t> *>(Sled.Address), Jmp9Seq,
+ std::memory_order_release);
+ // FIXME: Write out the nops still?
+ }
+ return true;
+}
+
+bool patchCustomEvent(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
+ // Here we do the dance of replacing the following sled:
+ //
+ // In Version 0:
+ //
+ // xray_sled_n:
+ // jmp +20 // 2 bytes
+ // ...
+ //
+ // With the following:
+ //
+ // nopw // 2 bytes*
+ // ...
+ //
+ //
+ // The "unpatch" should just turn the 'nopw' back to a 'jmp +20'.
+ //
+ // ---
+ //
+ // In Version 1:
+ //
+ // The jump offset is now 15 bytes (0x0f), so when restoring the nopw back
+ // to a jmp, use 15 bytes instead.
+ //
+ if (Enable) {
+ std::atomic_store_explicit(
+ reinterpret_cast<std::atomic<uint16_t> *>(Sled.Address), NopwSeq,
+ std::memory_order_release);
+ } else {
+ switch (Sled.Version) {
+ case 1:
+ std::atomic_store_explicit(
+ reinterpret_cast<std::atomic<uint16_t> *>(Sled.Address), Jmp15Seq,
+ std::memory_order_release);
+ break;
+ case 0:
+ default:
+ std::atomic_store_explicit(
+ reinterpret_cast<std::atomic<uint16_t> *>(Sled.Address), Jmp20Seq,
+ std::memory_order_release);
+ break;
+ }
+ }
+ return false;
+}
+
+bool patchTypedEvent(const bool Enable, const uint32_t FuncId,
+ const XRaySledEntry &Sled) XRAY_NEVER_INSTRUMENT {
+ // Here we do the dance of replacing the following sled:
+ //
+ // xray_sled_n:
+ // jmp +20 // 2 byte instruction
+ // ...
+ //
+ // With the following:
+ //
+ // nopw // 2 bytes
+ // ...
+ //
+ //
+ // The "unpatch" should just turn the 'nopw' back to a 'jmp +20'.
+ // The 20 byte sled stashes three argument registers, calls the trampoline,
+ // unstashes the registers and returns. If the arguments are already in
+ // the correct registers, the stashing and unstashing become equivalently
+ // sized nops.
+ if (Enable) {
+ std::atomic_store_explicit(
+ reinterpret_cast<std::atomic<uint16_t> *>(Sled.Address), NopwSeq,
+ std::memory_order_release);
+ } else {
+ std::atomic_store_explicit(
+ reinterpret_cast<std::atomic<uint16_t> *>(Sled.Address), Jmp20Seq,
+ std::memory_order_release);
+ }
+ return false;
+}
+
+#if !SANITIZER_FUCHSIA
+// We determine whether the CPU we're running on has the correct features we
+// need. In x86_64 this will be rdtscp support.
+bool probeRequiredCPUFeatures() XRAY_NEVER_INSTRUMENT {
+ unsigned int EAX, EBX, ECX, EDX;
+
+  // We check whether rdtscp support is enabled. According to the x86_64
+  // manual, the CPUID leaf is 0x80000001, and we should look at bit 27 of
+  // EDX. That's 0x8000000 (or 1u << 27).
+ __asm__ __volatile__("cpuid" : "=a"(EAX), "=b"(EBX), "=c"(ECX), "=d"(EDX)
+ : "0"(0x80000001));
+ if (!(EDX & (1u << 27))) {
+ Report("Missing rdtscp support.\n");
+ return false;
+ }
+ // Also check whether we can determine the CPU frequency, since if we cannot,
+ // we should use the emulated TSC instead.
+ if (!getTSCFrequency()) {
+ Report("Unable to determine CPU frequency.\n");
+ return false;
+ }
+ return true;
+}
+#endif
+
+} // namespace __xray
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_x86_64.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/weak_symbols.txt
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/weak_symbols.txt (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/weak_symbols.txt (revision 351984)
@@ -0,0 +1,4 @@
+___start_xray_fn_idx
+___start_xray_instr_map
+___stop_xray_fn_idx
+___stop_xray_instr_map
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/weak_symbols.txt
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_always_instrument.txt
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_always_instrument.txt (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_always_instrument.txt (revision 351984)
@@ -0,0 +1,6 @@
+# List of function matchers common to C/C++ applications that make sense to
+# always instrument. You can use this as an argument to
+# -fxray-always-instrument=<path> along with your project-specific lists.
+
+# Always instrument the main function.
+fun:main
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_always_instrument.txt
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_never_instrument.txt
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_never_instrument.txt (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_never_instrument.txt (revision 351984)
@@ -0,0 +1,6 @@
+# List of function matchers common to C/C++ applications that make sense to
+# never instrument. You can use this as an argument to
+# -fxray-never-instrument=<path> along with your project-specific lists.
+
+# Never instrument any function whose symbol starts with __xray.
+fun:__xray*
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_never_instrument.txt
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_trampoline_AArch64.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_trampoline_AArch64.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_trampoline_AArch64.S (revision 351984)
@@ -0,0 +1,144 @@
+#include "../builtins/assembly.h"
+
+ .text
+ /* The variable containing the handler function pointer */
+ .global _ZN6__xray19XRayPatchedFunctionE
+ /* Word-aligned function entry point */
+ .p2align 2
+ /* Let C/C++ see the symbol */
+ .global __xray_FunctionEntry
+ .type __xray_FunctionEntry, %function
+ /* In C++ it is extern "C" void __xray_FunctionEntry(uint32_t FuncId) with
+ FuncId passed in W0 register. */
+__xray_FunctionEntry:
+ /* Move the return address beyond the end of sled data. The 12 bytes of
+ data are inserted in the code of the runtime patch, between the call
+ instruction and the instruction returned into. The data contains 32
+ bits of instrumented function ID and 64 bits of the address of
+ the current trampoline. */
+ ADD X30, X30, #12
+ /* Push the registers which may be modified by the handler function */
+ STP X1, X2, [SP, #-16]!
+ STP X3, X4, [SP, #-16]!
+ STP X5, X6, [SP, #-16]!
+ STP X7, X30, [SP, #-16]!
+ STP Q0, Q1, [SP, #-32]!
+ STP Q2, Q3, [SP, #-32]!
+ STP Q4, Q5, [SP, #-32]!
+ STP Q6, Q7, [SP, #-32]!
+ /* Load the address of _ZN6__xray19XRayPatchedFunctionE into X1 */
+ LDR X1, =_ZN6__xray19XRayPatchedFunctionE
+ /* Load the handler function pointer into X2 */
+ LDR X2, [X1]
+ /* Handler address is nullptr if handler is not set */
+ CMP X2, #0
+ BEQ FunctionEntry_restore
+ /* Function ID is already in W0 (the first parameter).
+ X1=0 means that we are tracing an entry event */
+ MOV X1, #0
+ /* Call the handler with 2 parameters in W0 and X1 */
+ BLR X2
+FunctionEntry_restore:
+ /* Pop the saved registers */
+ LDP Q6, Q7, [SP], #32
+ LDP Q4, Q5, [SP], #32
+ LDP Q2, Q3, [SP], #32
+ LDP Q0, Q1, [SP], #32
+ LDP X7, X30, [SP], #16
+ LDP X5, X6, [SP], #16
+ LDP X3, X4, [SP], #16
+ LDP X1, X2, [SP], #16
+ RET
+
+ /* Word-aligned function entry point */
+ .p2align 2
+ /* Let C/C++ see the symbol */
+ .global __xray_FunctionExit
+ .type __xray_FunctionExit, %function
+ /* In C++ it is extern "C" void __xray_FunctionExit(uint32_t FuncId) with
+ FuncId passed in W0 register. */
+__xray_FunctionExit:
+ /* Move the return address beyond the end of sled data. The 12 bytes of
+ data are inserted in the code of the runtime patch, between the call
+ instruction and the instruction returned into. The data contains 32
+ bits of instrumented function ID and 64 bits of the address of
+ the current trampoline. */
+ ADD X30, X30, #12
+ /* Push the registers which may be modified by the handler function */
+ STP X1, X2, [SP, #-16]!
+ STP X3, X4, [SP, #-16]!
+ STP X5, X6, [SP, #-16]!
+ STP X7, X30, [SP, #-16]!
+ STR Q0, [SP, #-16]!
+ /* Load the address of _ZN6__xray19XRayPatchedFunctionE into X1 */
+ LDR X1, =_ZN6__xray19XRayPatchedFunctionE
+ /* Load the handler function pointer into X2 */
+ LDR X2, [X1]
+ /* Handler address is nullptr if handler is not set */
+ CMP X2, #0
+ BEQ FunctionExit_restore
+ /* Function ID is already in W0 (the first parameter).
+ X1=1 means that we are tracing an exit event */
+ MOV X1, #1
+ /* Call the handler with 2 parameters in W0 and X1 */
+ BLR X2
+FunctionExit_restore:
+ LDR Q0, [SP], #16
+ LDP X7, X30, [SP], #16
+ LDP X5, X6, [SP], #16
+ LDP X3, X4, [SP], #16
+ LDP X1, X2, [SP], #16
+ RET
+
+ /* Word-aligned function entry point */
+ .p2align 2
+ /* Let C/C++ see the symbol */
+ .global __xray_FunctionTailExit
+ .type __xray_FunctionTailExit, %function
+ /* In C++ it is extern "C" void __xray_FunctionTailExit(uint32_t FuncId)
+ with FuncId passed in W0 register. */
+__xray_FunctionTailExit:
+ /* Move the return address beyond the end of sled data. The 12 bytes of
+ data are inserted in the code of the runtime patch, between the call
+ instruction and the instruction returned into. The data contains 32
+ bits of instrumented function ID and 64 bits of the address of
+ the current trampoline. */
+ ADD X30, X30, #12
+ /* Push the registers which may be modified by the handler function */
+ STP X1, X2, [SP, #-16]!
+ STP X3, X4, [SP, #-16]!
+ STP X5, X6, [SP, #-16]!
+ STP X7, X30, [SP, #-16]!
+ /* Push the parameters of the tail called function */
+ STP Q0, Q1, [SP, #-32]!
+ STP Q2, Q3, [SP, #-32]!
+ STP Q4, Q5, [SP, #-32]!
+ STP Q6, Q7, [SP, #-32]!
+ /* Load the address of _ZN6__xray19XRayPatchedFunctionE into X1 */
+ LDR X1, =_ZN6__xray19XRayPatchedFunctionE
+ /* Load the handler function pointer into X2 */
+ LDR X2, [X1]
+ /* Handler address is nullptr if handler is not set */
+ CMP X2, #0
+ BEQ FunctionTailExit_restore
+ /* Function ID is already in W0 (the first parameter).
+ X1=2 means that we are tracing a tail exit event, but before the
+ logging part of XRay is ready, we pretend that here a normal function
+ exit happens, so we give the handler code 1 */
+ MOV X1, #1
+ /* Call the handler with 2 parameters in W0 and X1 */
+ BLR X2
+FunctionTailExit_restore:
+ /* Pop the parameters of the tail called function */
+ LDP Q6, Q7, [SP], #32
+ LDP Q4, Q5, [SP], #32
+ LDP Q2, Q3, [SP], #32
+ LDP Q0, Q1, [SP], #32
+ /* Pop the registers which may be modified by the handler function */
+ LDP X7, X30, [SP], #16
+ LDP X5, X6, [SP], #16
+ LDP X3, X4, [SP], #16
+ LDP X1, X2, [SP], #16
+ RET
+
+NO_EXEC_STACK_DIRECTIVE
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_trampoline_AArch64.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_trampoline_arm.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_trampoline_arm.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_trampoline_arm.S (revision 351984)
@@ -0,0 +1,102 @@
+#include "../builtins/assembly.h"
+
+ .syntax unified
+ .arch armv6t2
+ .fpu vfpv2
+ .code 32
+ .global _ZN6__xray19XRayPatchedFunctionE
+
+ @ Word-aligned function entry point
+ .p2align 2
+ @ Let C/C++ see the symbol
+ .global __xray_FunctionEntry
+ @ It preserves all registers except r0, r12(ip), r14(lr) and r15(pc)
+ @ Assume that "q" part of the floating-point registers is not used
+ @ for passing parameters to C/C++ functions.
+ .type __xray_FunctionEntry, %function
+ @ In C++ it is extern "C" void __xray_FunctionEntry(uint32_t FuncId) with
+ @ FuncId passed in r0 register.
+__xray_FunctionEntry:
+ PUSH {r1-r3,lr}
+ @ Save floating-point parameters of the instrumented function
+ VPUSH {d0-d7}
+ MOVW r1,#:lower16:_ZN6__xray19XRayPatchedFunctionE
+ MOVT r1,#:upper16:_ZN6__xray19XRayPatchedFunctionE
+ LDR r2, [r1]
+ @ Handler address is nullptr if handler is not set
+ CMP r2, #0
+ BEQ FunctionEntry_restore
+ @ Function ID is already in r0 (the first parameter).
+ @ r1=0 means that we are tracing an entry event
+ MOV r1, #0
+ @ Call the handler with 2 parameters in r0 and r1
+ BLX r2
+FunctionEntry_restore:
+ @ Restore floating-point parameters of the instrumented function
+ VPOP {d0-d7}
+ POP {r1-r3,pc}
+
+ @ Word-aligned function entry point
+ .p2align 2
+ @ Let C/C++ see the symbol
+ .global __xray_FunctionExit
+ @ Assume that d1-d7 are not used for the return value.
+ @ Assume that "q" part of the floating-point registers is not used for the
+ @ return value in C/C++.
+ .type __xray_FunctionExit, %function
+ @ In C++ it is extern "C" void __xray_FunctionExit(uint32_t FuncId) with
+ @ FuncId passed in r0 register.
+__xray_FunctionExit:
+ PUSH {r1-r3,lr}
+ @ Save the floating-point return value of the instrumented function
+ VPUSH {d0}
+ @ Load the handler address
+ MOVW r1,#:lower16:_ZN6__xray19XRayPatchedFunctionE
+ MOVT r1,#:upper16:_ZN6__xray19XRayPatchedFunctionE
+ LDR r2, [r1]
+ @ Handler address is nullptr if handler is not set
+ CMP r2, #0
+ BEQ FunctionExit_restore
+ @ Function ID is already in r0 (the first parameter).
+ @ 1 means that we are tracing an exit event
+ MOV r1, #1
+ @ Call the handler with 2 parameters in r0 and r1
+ BLX r2
+FunctionExit_restore:
+ @ Restore the floating-point return value of the instrumented function
+ VPOP {d0}
+ POP {r1-r3,pc}
+
+ @ Word-aligned function entry point
+ .p2align 2
+ @ Let C/C++ see the symbol
+ .global __xray_FunctionTailExit
+ @ It preserves all registers except r0, r12(ip), r14(lr) and r15(pc)
+ @ Assume that "q" part of the floating-point registers is not used
+ @ for passing parameters to C/C++ functions.
+ .type __xray_FunctionTailExit, %function
+ @ In C++ it is extern "C" void __xray_FunctionTailExit(uint32_t FuncId)
+ @ with FuncId passed in r0 register.
+__xray_FunctionTailExit:
+ PUSH {r1-r3,lr}
+ @ Save floating-point parameters of the instrumented function
+ VPUSH {d0-d7}
+ MOVW r1,#:lower16:_ZN6__xray19XRayPatchedFunctionE
+ MOVT r1,#:upper16:_ZN6__xray19XRayPatchedFunctionE
+ LDR r2, [r1]
+ @ Handler address is nullptr if handler is not set
+ CMP r2, #0
+ BEQ FunctionTailExit_restore
+ @ Function ID is already in r0 (the first parameter).
+ @ r1=2 means that we are tracing a tail exit event
+ @ But before the logging part of XRay is ready, we pretend that here a
+ @ normal function exit happens, so we give the handler code 1
+ MOV r1, #1
+ @ Call the handler with 2 parameters in r0 and r1
+ BLX r2
+FunctionTailExit_restore:
+ @ Restore floating-point parameters of the instrumented function
+ VPOP {d0-d7}
+ POP {r1-r3,pc}
+
+NO_EXEC_STACK_DIRECTIVE
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_trampoline_arm.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_trampoline_powerpc64.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_trampoline_powerpc64.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_trampoline_powerpc64.cc (revision 351984)
@@ -0,0 +1,15 @@
+#include <atomic>
+#include <xray/xray_interface.h>
+
+namespace __xray {
+
+extern std::atomic<void (*)(int32_t, XRayEntryType)> XRayPatchedFunction;
+
+// Implement this in C++ instead of assembly, to avoid dealing with the TOC by hand.
+void CallXRayPatchedFunction(int32_t FuncId, XRayEntryType Type) {
+ auto fptr = __xray::XRayPatchedFunction.load();
+ if (fptr != nullptr)
+ (*fptr)(FuncId, Type);
+}
+
+} // namespace __xray
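+
+// Editor's note: a minimal sketch of how a handler ends up in
+// XRayPatchedFunction and is reached via the trampolines in this directory
+// (assumption: the public API from <xray/xray_interface.h>, called from an
+// instrumented program, not from this runtime file):
+//
+//   #include <xray/xray_interface.h>
+//   void MyHandler(int32_t FuncId, XRayEntryType Type) { /* record event */ }
+//   int main() {
+//     __xray_set_handler(MyHandler); // stored into XRayPatchedFunction
+//     __xray_patch();                // rewrite sleds to call the trampolines
+//   }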
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_trampoline_powerpc64.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_trampoline_powerpc64_asm.S
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_trampoline_powerpc64_asm.S (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_trampoline_powerpc64_asm.S (revision 351984)
@@ -0,0 +1,235 @@
+ .text
+ .abiversion 2
+ .globl __xray_FunctionEntry
+ .p2align 4
+__xray_FunctionEntry:
+ std 0, 16(1)
+ stdu 1, -408(1)
+# Spill r3-r10, f1-f13, and vsr34-vsr45, which are parameter registers.
+# If this appears to be slow, the caller needs to pass in number of generic,
+# floating point, and vector parameters, so that we only spill those live ones.
+ std 3, 32(1)
+ ld 3, 400(1) # FuncId
+ std 4, 40(1)
+ std 5, 48(1)
+ std 6, 56(1)
+ std 7, 64(1)
+ std 8, 72(1)
+ std 9, 80(1)
+ std 10, 88(1)
+ addi 4, 1, 96
+ stxsdx 1, 0, 4
+ addi 4, 1, 104
+ stxsdx 2, 0, 4
+ addi 4, 1, 112
+ stxsdx 3, 0, 4
+ addi 4, 1, 120
+ stxsdx 4, 0, 4
+ addi 4, 1, 128
+ stxsdx 5, 0, 4
+ addi 4, 1, 136
+ stxsdx 6, 0, 4
+ addi 4, 1, 144
+ stxsdx 7, 0, 4
+ addi 4, 1, 152
+ stxsdx 8, 0, 4
+ addi 4, 1, 160
+ stxsdx 9, 0, 4
+ addi 4, 1, 168
+ stxsdx 10, 0, 4
+ addi 4, 1, 176
+ stxsdx 11, 0, 4
+ addi 4, 1, 184
+ stxsdx 12, 0, 4
+ addi 4, 1, 192
+ stxsdx 13, 0, 4
+ addi 4, 1, 200
+ stxvd2x 34, 0, 4
+ addi 4, 1, 216
+ stxvd2x 35, 0, 4
+ addi 4, 1, 232
+ stxvd2x 36, 0, 4
+ addi 4, 1, 248
+ stxvd2x 37, 0, 4
+ addi 4, 1, 264
+ stxvd2x 38, 0, 4
+ addi 4, 1, 280
+ stxvd2x 39, 0, 4
+ addi 4, 1, 296
+ stxvd2x 40, 0, 4
+ addi 4, 1, 312
+ stxvd2x 41, 0, 4
+ addi 4, 1, 328
+ stxvd2x 42, 0, 4
+ addi 4, 1, 344
+ stxvd2x 43, 0, 4
+ addi 4, 1, 360
+ stxvd2x 44, 0, 4
+ addi 4, 1, 376
+ stxvd2x 45, 0, 4
+ std 2, 392(1)
+ mflr 0
+ std 0, 400(1)
+
+ li 4, 0
+ bl _ZN6__xray23CallXRayPatchedFunctionEi13XRayEntryType
+ nop
+
+ addi 4, 1, 96
+ lxsdx 1, 0, 4
+ addi 4, 1, 104
+ lxsdx 2, 0, 4
+ addi 4, 1, 112
+ lxsdx 3, 0, 4
+ addi 4, 1, 120
+ lxsdx 4, 0, 4
+ addi 4, 1, 128
+ lxsdx 5, 0, 4
+ addi 4, 1, 136
+ lxsdx 6, 0, 4
+ addi 4, 1, 144
+ lxsdx 7, 0, 4
+ addi 4, 1, 152
+ lxsdx 8, 0, 4
+ addi 4, 1, 160
+ lxsdx 9, 0, 4
+ addi 4, 1, 168
+ lxsdx 10, 0, 4
+ addi 4, 1, 176
+ lxsdx 11, 0, 4
+ addi 4, 1, 184
+ lxsdx 12, 0, 4
+ addi 4, 1, 192
+ lxsdx 13, 0, 4
+ addi 4, 1, 200
+ lxvd2x 34, 0, 4
+ addi 4, 1, 216
+ lxvd2x 35, 0, 4
+ addi 4, 1, 232
+ lxvd2x 36, 0, 4
+ addi 4, 1, 248
+ lxvd2x 37, 0, 4
+ addi 4, 1, 264
+ lxvd2x 38, 0, 4
+ addi 4, 1, 280
+ lxvd2x 39, 0, 4
+ addi 4, 1, 296
+ lxvd2x 40, 0, 4
+ addi 4, 1, 312
+ lxvd2x 41, 0, 4
+ addi 4, 1, 328
+ lxvd2x 42, 0, 4
+ addi 4, 1, 344
+ lxvd2x 43, 0, 4
+ addi 4, 1, 360
+ lxvd2x 44, 0, 4
+ addi 4, 1, 376
+ lxvd2x 45, 0, 4
+ ld 0, 400(1)
+ mtlr 0
+ ld 2, 392(1)
+ ld 3, 32(1)
+ ld 4, 40(1)
+ ld 5, 48(1)
+ ld 6, 56(1)
+ ld 7, 64(1)
+ ld 8, 72(1)
+ ld 9, 80(1)
+ ld 10, 88(1)
+
+ addi 1, 1, 408
+ ld 0, 16(1)
+ blr
+
+ .globl __xray_FunctionExit
+ .p2align 4
+__xray_FunctionExit:
+ std 0, 16(1)
+ stdu 1, -256(1)
+# Spill r3-r4, f1-f8, and vsr34-vsr41, which are return registers.
+# If this appears to be slow, the caller needs to pass in number of generic,
+# floating point, and vector parameters, so that we only spill those live ones.
+ std 3, 32(1)
+ ld 3, 248(1) # FuncId
+ std 4, 40(1)
+ addi 4, 1, 48
+ stxsdx 1, 0, 4
+ addi 4, 1, 56
+ stxsdx 2, 0, 4
+ addi 4, 1, 64
+ stxsdx 3, 0, 4
+ addi 4, 1, 72
+ stxsdx 4, 0, 4
+ addi 4, 1, 80
+ stxsdx 5, 0, 4
+ addi 4, 1, 88
+ stxsdx 6, 0, 4
+ addi 4, 1, 96
+ stxsdx 7, 0, 4
+ addi 4, 1, 104
+ stxsdx 8, 0, 4
+ addi 4, 1, 112
+ stxvd2x 34, 0, 4
+ addi 4, 1, 128
+ stxvd2x 35, 0, 4
+ addi 4, 1, 144
+ stxvd2x 36, 0, 4
+ addi 4, 1, 160
+ stxvd2x 37, 0, 4
+ addi 4, 1, 176
+ stxvd2x 38, 0, 4
+ addi 4, 1, 192
+ stxvd2x 39, 0, 4
+ addi 4, 1, 208
+ stxvd2x 40, 0, 4
+ addi 4, 1, 224
+ stxvd2x 41, 0, 4
+ std 2, 240(1)
+ mflr 0
+ std 0, 248(1)
+
+ li 4, 1
+ bl _ZN6__xray23CallXRayPatchedFunctionEi13XRayEntryType
+ nop
+
+ addi 4, 1, 48
+ lxsdx 1, 0, 4
+ addi 4, 1, 56
+ lxsdx 2, 0, 4
+ addi 4, 1, 64
+ lxsdx 3, 0, 4
+ addi 4, 1, 72
+ lxsdx 4, 0, 4
+ addi 4, 1, 80
+ lxsdx 5, 0, 4
+ addi 4, 1, 88
+ lxsdx 6, 0, 4
+ addi 4, 1, 96
+ lxsdx 7, 0, 4
+ addi 4, 1, 104
+ lxsdx 8, 0, 4
+ addi 4, 1, 112
+ lxvd2x 34, 0, 4
+ addi 4, 1, 128
+ lxvd2x 35, 0, 4
+ addi 4, 1, 144
+ lxvd2x 36, 0, 4
+ addi 4, 1, 160
+ lxvd2x 37, 0, 4
+ addi 4, 1, 176
+ lxvd2x 38, 0, 4
+ addi 4, 1, 192
+ lxvd2x 39, 0, 4
+ addi 4, 1, 208
+ lxvd2x 40, 0, 4
+ addi 4, 1, 224
+ lxvd2x 41, 0, 4
+ ld 0, 248(1)
+ mtlr 0
+ ld 2, 240(1)
+ ld 3, 32(1)
+ ld 4, 40(1)
+
+ addi 1, 1, 256
+ ld 0, 16(1)
+ blr
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/xray/xray_trampoline_powerpc64_asm.S
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan_minimal/ubsan_minimal_handlers.cc
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan_minimal/ubsan_minimal_handlers.cc (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan_minimal/ubsan_minimal_handlers.cc (revision 351984)
@@ -0,0 +1,119 @@
+#include "sanitizer_common/sanitizer_atomic.h"
+
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+
+#ifdef KERNEL_USE
+extern "C" void ubsan_message(const char *msg);
+static void message(const char *msg) { ubsan_message(msg); }
+#else
+static void message(const char *msg) {
+ write(2, msg, strlen(msg));
+}
+#endif
+
+static const int kMaxCallerPcs = 20;
+static __sanitizer::atomic_uintptr_t caller_pcs[kMaxCallerPcs];
+// Number of elements in caller_pcs. A special value of kMaxCallerPcs + 1 means
+// that "too many errors" has already been reported.
+static __sanitizer::atomic_uint32_t caller_pcs_sz;
+
+__attribute__((noinline)) static bool report_this_error(void *caller_p) {
+ uintptr_t caller = reinterpret_cast<uintptr_t>(caller_p);
+ if (caller == 0) return false;
+ while (true) {
+ unsigned sz = __sanitizer::atomic_load_relaxed(&caller_pcs_sz);
+ if (sz > kMaxCallerPcs) return false; // early exit
+ // When sz == kMaxCallerPcs, print "too many errors", but only when the
+ // cmpxchg succeeds, so that it is not printed multiple times.
+ if (sz > 0 && sz < kMaxCallerPcs) {
+ uintptr_t p;
+ for (unsigned i = 0; i < sz; ++i) {
+ p = __sanitizer::atomic_load_relaxed(&caller_pcs[i]);
+ if (p == 0) break; // Concurrent update.
+ if (p == caller) return false;
+ }
+ if (p == 0) continue; // FIXME: yield?
+ }
+
+ if (!__sanitizer::atomic_compare_exchange_strong(
+ &caller_pcs_sz, &sz, sz + 1, __sanitizer::memory_order_seq_cst))
+ continue; // Concurrent update! Try again from the start.
+
+ if (sz == kMaxCallerPcs) {
+ message("ubsan: too many errors\n");
+ return false;
+ }
+ __sanitizer::atomic_store_relaxed(&caller_pcs[sz], caller);
+ return true;
+ }
+}
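+
+// Editor's note: a short worked trace of the loop above. The first report
+// from a new caller PC wins the cmpxchg, appends its PC, and returns true
+// (print the error); a repeat from the same PC finds itself in caller_pcs
+// and returns false (deduplicated); the report that bumps sz from
+// kMaxCallerPcs to kMaxCallerPcs + 1 prints "too many errors" exactly once,
+// and every later report takes the early exit.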
+
+#if defined(__ANDROID__)
+extern "C" __attribute__((weak)) void android_set_abort_message(const char *);
+static void abort_with_message(const char *msg) {
+ if (&android_set_abort_message) android_set_abort_message(msg);
+ abort();
+}
+#else
+static void abort_with_message(const char *) { abort(); }
+#endif
+
+#if SANITIZER_DEBUG
+namespace __sanitizer {
+// The DCHECK macro needs this symbol to be defined.
+void NORETURN CheckFailed(const char *file, int, const char *cond, u64, u64) {
+ message("Sanitizer CHECK failed: ");
+ message(file);
+ message(":?? : "); // FIXME: Show line number.
+ message(cond);
+ abort();
+}
+} // namespace __sanitizer
+#endif
+
+#define INTERFACE extern "C" __attribute__((visibility("default")))
+
+// FIXME: add caller pc to the error message (possibly as "ubsan: error-type
+// @1234ABCD").
+#define HANDLER_RECOVER(name, msg) \
+ INTERFACE void __ubsan_handle_##name##_minimal() { \
+ if (!report_this_error(__builtin_return_address(0))) return; \
+ message("ubsan: " msg "\n"); \
+ }
+
+#define HANDLER_NORECOVER(name, msg) \
+ INTERFACE void __ubsan_handle_##name##_minimal_abort() { \
+ message("ubsan: " msg "\n"); \
+ abort_with_message("ubsan: " msg); \
+ }
+
+#define HANDLER(name, msg) \
+ HANDLER_RECOVER(name, msg) \
+ HANDLER_NORECOVER(name, msg)
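+
+// Editor's note: for reference, HANDLER(add_overflow, "add-overflow") below
+// expands to (reformatted):
+//
+//   INTERFACE void __ubsan_handle_add_overflow_minimal() {
+//     if (!report_this_error(__builtin_return_address(0))) return;
+//     message("ubsan: add-overflow\n");
+//   }
+//   INTERFACE void __ubsan_handle_add_overflow_minimal_abort() {
+//     message("ubsan: add-overflow\n");
+//     abort_with_message("ubsan: add-overflow");
+//   }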
+
+HANDLER(type_mismatch, "type-mismatch")
+HANDLER(alignment_assumption, "alignment-assumption")
+HANDLER(add_overflow, "add-overflow")
+HANDLER(sub_overflow, "sub-overflow")
+HANDLER(mul_overflow, "mul-overflow")
+HANDLER(negate_overflow, "negate-overflow")
+HANDLER(divrem_overflow, "divrem-overflow")
+HANDLER(shift_out_of_bounds, "shift-out-of-bounds")
+HANDLER(out_of_bounds, "out-of-bounds")
+HANDLER_RECOVER(builtin_unreachable, "builtin-unreachable")
+HANDLER_RECOVER(missing_return, "missing-return")
+HANDLER(vla_bound_not_positive, "vla-bound-not-positive")
+HANDLER(float_cast_overflow, "float-cast-overflow")
+HANDLER(load_invalid_value, "load-invalid-value")
+HANDLER(invalid_builtin, "invalid-builtin")
+HANDLER(function_type_mismatch, "function-type-mismatch")
+HANDLER(implicit_conversion, "implicit-conversion")
+HANDLER(nonnull_arg, "nonnull-arg")
+HANDLER(nonnull_return, "nonnull-return")
+HANDLER(nullability_arg, "nullability-arg")
+HANDLER(nullability_return, "nullability-return")
+HANDLER(pointer_overflow, "pointer-overflow")
+HANDLER(cfi_check_fail, "cfi-check-fail")
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan_minimal/ubsan_minimal_handlers.cc
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan_minimal/ubsan.syms.extra
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan_minimal/ubsan.syms.extra (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/ubsan_minimal/ubsan.syms.extra (revision 351984)
@@ -0,0 +1 @@
+__ubsan_*
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/BlocksRuntime/Block.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/BlocksRuntime/Block.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/BlocksRuntime/Block.h (revision 351984)
@@ -0,0 +1,59 @@
+/*
+ * Block.h
+ *
+ * Copyright 2008-2010 Apple, Inc. Permission is hereby granted, free of charge,
+ * to any person obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to permit
+ * persons to whom the Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _BLOCK_H_
+#define _BLOCK_H_
+
+#if !defined(BLOCK_EXPORT)
+# if defined(__cplusplus)
+# define BLOCK_EXPORT extern "C"
+# else
+# define BLOCK_EXPORT extern
+# endif
+#endif
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+/* Create a heap based copy of a Block or simply add a reference to an existing one.
+ * This must be paired with Block_release to recover memory, even when running
+ * under Objective-C Garbage Collection.
+ */
+BLOCK_EXPORT void *_Block_copy(const void *aBlock);
+
+/* Lose the reference, and if heap based and last reference, recover the memory. */
+BLOCK_EXPORT void _Block_release(const void *aBlock);
+
+#if defined(__cplusplus)
+}
+#endif
+
+/* Type correct macros. */
+
+#define Block_copy(...) ((__typeof(__VA_ARGS__))_Block_copy((const void *)(__VA_ARGS__)))
+#define Block_release(...) _Block_release((const void *)(__VA_ARGS__))
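+
+/* Editor's note: a minimal usage sketch (assumption: compiled with a
+ * blocks-capable compiler, e.g. clang -fblocks, linking this runtime):
+ *
+ *   int x = 10;
+ *   int (^stackBlk)(void) = ^{ return x; };
+ *   int (^heapBlk)(void) = Block_copy(stackBlk); // heap copy, refcount 1
+ *   int r = heapBlk();                           // r == 10
+ *   Block_release(heapBlk);                      // drop the reference
+ */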
+
+
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/BlocksRuntime/Block.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/BlocksRuntime/Block_private.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/BlocksRuntime/Block_private.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/BlocksRuntime/Block_private.h (revision 351984)
@@ -0,0 +1,179 @@
+/*
+ * Block_private.h
+ *
+ * Copyright 2008-2010 Apple, Inc. Permission is hereby granted, free of charge,
+ * to any person obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to permit
+ * persons to whom the Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#ifndef _BLOCK_PRIVATE_H_
+#define _BLOCK_PRIVATE_H_
+
+#if !defined(BLOCK_EXPORT)
+# if defined(__cplusplus)
+# define BLOCK_EXPORT extern "C"
+# else
+# define BLOCK_EXPORT extern
+# endif
+#endif
+
+#ifndef _MSC_VER
+#include <stdbool.h>
+#else
+/* MSVC doesn't have <stdbool.h>. Compensate. */
+typedef char bool;
+#define true (bool)1
+#define false (bool)0
+#endif
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+
+enum {
+ BLOCK_REFCOUNT_MASK = (0xffff),
+ BLOCK_NEEDS_FREE = (1 << 24),
+ BLOCK_HAS_COPY_DISPOSE = (1 << 25),
+ BLOCK_HAS_CTOR = (1 << 26), /* Helpers have C++ code. */
+ BLOCK_IS_GC = (1 << 27),
+ BLOCK_IS_GLOBAL = (1 << 28),
+ BLOCK_HAS_DESCRIPTOR = (1 << 29)
+};
+
+
+/* Revised new layout. */
+struct Block_descriptor {
+ unsigned long int reserved;
+ unsigned long int size;
+ void (*copy)(void *dst, void *src);
+ void (*dispose)(void *);
+};
+
+
+struct Block_layout {
+ void *isa;
+ int flags;
+ int reserved;
+ void (*invoke)(void *, ...);
+ struct Block_descriptor *descriptor;
+ /* Imported variables. */
+};
+
+
+struct Block_byref {
+ void *isa;
+ struct Block_byref *forwarding;
+ int flags; /* refcount; */
+ int size;
+ void (*byref_keep)(struct Block_byref *dst, struct Block_byref *src);
+ void (*byref_destroy)(struct Block_byref *);
+ /* long shared[0]; */
+};
+
+
+struct Block_byref_header {
+ void *isa;
+ struct Block_byref *forwarding;
+ int flags;
+ int size;
+};
+
+
+/* Runtime support functions used by compiler when generating copy/dispose helpers. */
+
+enum {
+ /* See function implementation for a more complete description of these fields and combinations */
+ BLOCK_FIELD_IS_OBJECT = 3, /* id, NSObject, __attribute__((NSObject)), block, ... */
+ BLOCK_FIELD_IS_BLOCK = 7, /* a block variable */
+ BLOCK_FIELD_IS_BYREF = 8, /* the on stack structure holding the __block variable */
+ BLOCK_FIELD_IS_WEAK = 16, /* declared __weak, only used in byref copy helpers */
+ BLOCK_BYREF_CALLER = 128 /* called from __block (byref) copy/dispose support routines. */
+};
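+
+/* Editor's note: per the commentary in runtime.c in this same import, the
+ * Block copy/dispose helpers only ever pass 3, 7, 8, or 24 (8+16) here,
+ * while the __block (byref) helpers pass 128+3, 128+3+16, 128+7, or
+ * 128+7+16. */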
+
+/* Runtime entry point called by compiler when assigning objects inside copy helper routines */
+BLOCK_EXPORT void _Block_object_assign(void *destAddr, const void *object, const int flags);
+ /* BLOCK_FIELD_IS_BYREF is only used from within block copy helpers */
+
+
+/* runtime entry point called by the compiler when disposing of objects inside dispose helper routine */
+BLOCK_EXPORT void _Block_object_dispose(const void *object, const int flags);
+
+
+
+/* Other support functions */
+
+/* Runtime entry to get total size of a closure */
+BLOCK_EXPORT unsigned long int Block_size(void *block_basic);
+
+
+
+/* the raw data space for runtime classes for blocks */
+/* class+meta used for stack, malloc, and collectable based blocks */
+BLOCK_EXPORT void * _NSConcreteStackBlock[32];
+BLOCK_EXPORT void * _NSConcreteMallocBlock[32];
+BLOCK_EXPORT void * _NSConcreteAutoBlock[32];
+BLOCK_EXPORT void * _NSConcreteFinalizingBlock[32];
+BLOCK_EXPORT void * _NSConcreteGlobalBlock[32];
+BLOCK_EXPORT void * _NSConcreteWeakBlockVariable[32];
+
+
+/* the intercept routines that must be used under GC */
+BLOCK_EXPORT void _Block_use_GC( void *(*alloc)(const unsigned long, const bool isOne, const bool isObject),
+ void (*setHasRefcount)(const void *, const bool),
+ void (*gc_assign_strong)(void *, void **),
+ void (*gc_assign_weak)(const void *, void *),
+ void (*gc_memmove)(void *, void *, unsigned long));
+
+/* earlier version, now simply transitional */
+BLOCK_EXPORT void _Block_use_GC5( void *(*alloc)(const unsigned long, const bool isOne, const bool isObject),
+ void (*setHasRefcount)(const void *, const bool),
+ void (*gc_assign_strong)(void *, void **),
+ void (*gc_assign_weak)(const void *, void *));
+
+BLOCK_EXPORT void _Block_use_RR( void (*retain)(const void *),
+ void (*release)(const void *));
+
+/* make a collectable GC heap based Block. Not useful under non-GC. */
+BLOCK_EXPORT void *_Block_copy_collectable(const void *aBlock);
+
+/* thread-unsafe diagnostic */
+BLOCK_EXPORT const char *_Block_dump(const void *block);
+
+
+/* Obsolete */
+
+/* first layout */
+struct Block_basic {
+ void *isa;
+ int Block_flags; /* int32_t */
+ int Block_size; /* XXX should be packed into Block_flags */
+ void (*Block_invoke)(void *);
+ void (*Block_copy)(void *dst, void *src); /* iff BLOCK_HAS_COPY_DISPOSE */
+ void (*Block_dispose)(void *); /* iff BLOCK_HAS_COPY_DISPOSE */
+ /* long params[0]; // where const imports, __block storage references, etc. get laid down */
+};
+
+
+#if defined(__cplusplus)
+}
+#endif
+
+
+#endif /* _BLOCK_PRIVATE_H_ */
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/BlocksRuntime/Block_private.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/BlocksRuntime/data.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/BlocksRuntime/data.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/BlocksRuntime/data.c (revision 351984)
@@ -0,0 +1,41 @@
+/*
+ * data.c
+ *
+ * Copyright 2008-2010 Apple, Inc. Permission is hereby granted, free of charge,
+ * to any person obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to permit
+ * persons to whom the Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+/********************
+NSBlock support
+
+We allocate space and export a symbol to be used as the Class for the
+on-stack and malloc'ed copies until ObjC arrives on the scene. These data
+areas are set up by Foundation to link in as real classes post facto.
+
+We keep these in a separate file so that we can include the runtime code in
+test subprojects but not include the data, so that compiled code that sees
+the data in libSystem doesn't get confused by a second copy. Somehow these
+don't get unified in a common block.
+**********************/
+
+void * _NSConcreteStackBlock[32] = { 0 };
+void * _NSConcreteMallocBlock[32] = { 0 };
+void * _NSConcreteAutoBlock[32] = { 0 };
+void * _NSConcreteFinalizingBlock[32] = { 0 };
+void * _NSConcreteGlobalBlock[32] = { 0 };
+void * _NSConcreteWeakBlockVariable[32] = { 0 };
+
+void _Block_copy_error(void) {
+}
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/BlocksRuntime/data.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/BlocksRuntime/runtime.c
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/lib/BlocksRuntime/runtime.c (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/lib/BlocksRuntime/runtime.c (revision 351984)
@@ -0,0 +1,700 @@
+/*
+ * runtime.c
+ *
+ * Copyright 2008-2010 Apple, Inc. Permission is hereby granted, free of charge,
+ * to any person obtaining a copy of this software and associated documentation
+ * files (the "Software"), to deal in the Software without restriction,
+ * including without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to permit
+ * persons to whom the Software is furnished to do so, subject to the following
+ * conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ */
+
+#include "Block_private.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdint.h>
+
+#include "config.h"
+
+#ifdef HAVE_AVAILABILITY_MACROS_H
+#include <AvailabilityMacros.h>
+#endif /* HAVE_AVAILABILITY_MACROS_H */
+
+#ifdef HAVE_TARGET_CONDITIONALS_H
+#include <TargetConditionals.h>
+#endif /* HAVE_TARGET_CONDITIONALS_H */
+
+#if defined(HAVE_OSATOMIC_COMPARE_AND_SWAP_INT) && defined(HAVE_OSATOMIC_COMPARE_AND_SWAP_LONG)
+
+#ifdef HAVE_LIBKERN_OSATOMIC_H
+#include <libkern/OSAtomic.h>
+#endif /* HAVE_LIBKERN_OSATOMIC_H */
+
+#elif defined(__WIN32__) || defined(_WIN32)
+#define _CRT_SECURE_NO_WARNINGS 1
+#include <windows.h>
+
+static __inline bool OSAtomicCompareAndSwapLong(long oldl, long newl, long volatile *dst) {
+ /* fixme barrier is overkill -- see objc-os.h */
+ long original = InterlockedCompareExchange(dst, newl, oldl);
+ return (original == oldl);
+}
+
+static __inline bool OSAtomicCompareAndSwapInt(int oldi, int newi, int volatile *dst) {
+ /* fixme barrier is overkill -- see objc-os.h */
+ int original = InterlockedCompareExchange(dst, newi, oldi);
+ return (original == oldi);
+}
+
+/*
+ * Check to see if the GCC atomic built-ins are available. If we're on
+ * a 64-bit system, make sure we have an 8-byte atomic function
+ * available.
+ *
+ */
+
+#elif defined(HAVE_SYNC_BOOL_COMPARE_AND_SWAP_INT) && defined(HAVE_SYNC_BOOL_COMPARE_AND_SWAP_LONG)
+
+static __inline bool OSAtomicCompareAndSwapLong(long oldl, long newl, long volatile *dst) {
+ return __sync_bool_compare_and_swap(dst, oldl, newl);
+}
+
+static __inline bool OSAtomicCompareAndSwapInt(int oldi, int newi, int volatile *dst) {
+ return __sync_bool_compare_and_swap(dst, oldi, newi);
+}
+
+#else
+#error unknown atomic compare-and-swap primitive
+#endif /* HAVE_OSATOMIC_COMPARE_AND_SWAP_INT && HAVE_OSATOMIC_COMPARE_AND_SWAP_LONG */
+
+
+/*
+ * Globals:
+ */
+
+static void *_Block_copy_class = _NSConcreteMallocBlock;
+static void *_Block_copy_finalizing_class = _NSConcreteMallocBlock;
+static int _Block_copy_flag = BLOCK_NEEDS_FREE;
+static int _Byref_flag_initial_value = BLOCK_NEEDS_FREE | 2;
+
+static const int WANTS_ONE = (1 << 16);
+
+static bool isGC = false;
+
+/*
+ * Internal Utilities:
+ */
+
+#if 0
+static unsigned long int latching_incr_long(unsigned long int *where) {
+ while (1) {
+ unsigned long int old_value = *(volatile unsigned long int *)where;
+ if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
+ return BLOCK_REFCOUNT_MASK;
+ }
+ if (OSAtomicCompareAndSwapLong(old_value, old_value+1, (volatile long int *)where)) {
+ return old_value+1;
+ }
+ }
+}
+#endif /* if 0 */
+
+static int latching_incr_int(int *where) {
+ while (1) {
+ int old_value = *(volatile int *)where;
+ if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
+ return BLOCK_REFCOUNT_MASK;
+ }
+ if (OSAtomicCompareAndSwapInt(old_value, old_value+1, (volatile int *)where)) {
+ return old_value+1;
+ }
+ }
+}
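+
+/* Editor's note: "latching" means the refcount saturates rather than wraps:
+ * once the low 16 bits reach BLOCK_REFCOUNT_MASK (0xffff) the count sticks
+ * there, so e.g. incrementing a flags word of 0x0100ffff returns 0xffff and
+ * leaves the word unchanged. */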
+
+#if 0
+static int latching_decr_long(unsigned long int *where) {
+ while (1) {
+ unsigned long int old_value = *(volatile int *)where;
+ if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
+ return BLOCK_REFCOUNT_MASK;
+ }
+ if ((old_value & BLOCK_REFCOUNT_MASK) == 0) {
+ return 0;
+ }
+ if (OSAtomicCompareAndSwapLong(old_value, old_value-1, (volatile long int *)where)) {
+ return old_value-1;
+ }
+ }
+}
+#endif /* if 0 */
+
+static int latching_decr_int(int *where) {
+ while (1) {
+ int old_value = *(volatile int *)where;
+ if ((old_value & BLOCK_REFCOUNT_MASK) == BLOCK_REFCOUNT_MASK) {
+ return BLOCK_REFCOUNT_MASK;
+ }
+ if ((old_value & BLOCK_REFCOUNT_MASK) == 0) {
+ return 0;
+ }
+ if (OSAtomicCompareAndSwapInt(old_value, old_value-1, (volatile int *)where)) {
+ return old_value-1;
+ }
+ }
+}
+
+
+/*
+ * GC support stub routines:
+ */
+#if 0
+#pragma mark GC Support Routines
+#endif /* if 0 */
+
+
+static void *_Block_alloc_default(const unsigned long size, const bool initialCountIsOne, const bool isObject) {
+ return malloc(size);
+}
+
+static void _Block_assign_default(void *value, void **destptr) {
+ *destptr = value;
+}
+
+static void _Block_setHasRefcount_default(const void *ptr, const bool hasRefcount) {
+}
+
+static void _Block_do_nothing(const void *aBlock) { }
+
+static void _Block_retain_object_default(const void *ptr) {
+ if (!ptr) return;
+}
+
+static void _Block_release_object_default(const void *ptr) {
+ if (!ptr) return;
+}
+
+static void _Block_assign_weak_default(const void *ptr, void *dest) {
+ *(void **)dest = (void *)ptr;
+}
+
+static void _Block_memmove_default(void *dst, void *src, unsigned long size) {
+ memmove(dst, src, (size_t)size);
+}
+
+static void _Block_memmove_gc_broken(void *dest, void *src, unsigned long size) {
+ void **destp = (void **)dest;
+ void **srcp = (void **)src;
+ while (size) {
+ _Block_assign_default(*srcp, destp);
+ destp++;
+ srcp++;
+ size -= sizeof(void *);
+ }
+}
+
+/*
+ * GC support callout functions - initially set to stub routines:
+ */
+
+static void *(*_Block_allocator)(const unsigned long, const bool isOne, const bool isObject) = _Block_alloc_default;
+static void (*_Block_deallocator)(const void *) = (void (*)(const void *))free;
+static void (*_Block_assign)(void *value, void **destptr) = _Block_assign_default;
+static void (*_Block_setHasRefcount)(const void *ptr, const bool hasRefcount) = _Block_setHasRefcount_default;
+static void (*_Block_retain_object)(const void *ptr) = _Block_retain_object_default;
+static void (*_Block_release_object)(const void *ptr) = _Block_release_object_default;
+static void (*_Block_assign_weak)(const void *dest, void *ptr) = _Block_assign_weak_default;
+static void (*_Block_memmove)(void *dest, void *src, unsigned long size) = _Block_memmove_default;
+
+
+/*
+ * GC support SPI functions - called from ObjC runtime and CoreFoundation:
+ */
+
+/* Public SPI
+ * Called from objc-auto to turn on GC.
+ * version 3, 4 arg, but changed 1st arg
+ */
+void _Block_use_GC( void *(*alloc)(const unsigned long, const bool isOne, const bool isObject),
+ void (*setHasRefcount)(const void *, const bool),
+ void (*gc_assign)(void *, void **),
+ void (*gc_assign_weak)(const void *, void *),
+ void (*gc_memmove)(void *, void *, unsigned long)) {
+
+ isGC = true;
+ _Block_allocator = alloc;
+ _Block_deallocator = _Block_do_nothing;
+ _Block_assign = gc_assign;
+ _Block_copy_flag = BLOCK_IS_GC;
+ _Block_copy_class = _NSConcreteAutoBlock;
+ /* blocks with ctors & dtors need to have the dtor run from a class with a finalizer */
+ _Block_copy_finalizing_class = _NSConcreteFinalizingBlock;
+ _Block_setHasRefcount = setHasRefcount;
+ _Byref_flag_initial_value = BLOCK_IS_GC; // no refcount
+ _Block_retain_object = _Block_do_nothing;
+ _Block_release_object = _Block_do_nothing;
+ _Block_assign_weak = gc_assign_weak;
+ _Block_memmove = gc_memmove;
+}
+
+/* transitional */
+void _Block_use_GC5( void *(*alloc)(const unsigned long, const bool isOne, const bool isObject),
+ void (*setHasRefcount)(const void *, const bool),
+ void (*gc_assign)(void *, void **),
+ void (*gc_assign_weak)(const void *, void *)) {
+ /* until objc calls _Block_use_GC it will call us; supply a broken internal memmove implementation until then */
+ _Block_use_GC(alloc, setHasRefcount, gc_assign, gc_assign_weak, _Block_memmove_gc_broken);
+}
+
+
+/*
+ * Called from objc-auto to alternatively turn on retain/release.
+ * Prior to this the only "object" support we can provide is for those
+ * super special objects that live in libSystem, namely dispatch queues.
+ * Blocks and Block_byrefs have their own special entry points.
+ *
+ */
+void _Block_use_RR( void (*retain)(const void *),
+ void (*release)(const void *)) {
+ _Block_retain_object = retain;
+ _Block_release_object = release;
+}
+
+/*
+ * Internal Support routines for copying:
+ */
+
+#if 0
+#pragma mark Copy/Release support
+#endif /* if 0 */
+
+/* Copy, or bump refcount, of a block. If really copying, call the copy helper if present. */
+static void *_Block_copy_internal(const void *arg, const int flags) {
+ struct Block_layout *aBlock;
+ const bool wantsOne = (WANTS_ONE & flags) == WANTS_ONE;
+
+ //printf("_Block_copy_internal(%p, %x)\n", arg, flags);
+ if (!arg) return NULL;
+
+
+ // The following would be better done as a switch statement
+ aBlock = (struct Block_layout *)arg;
+ if (aBlock->flags & BLOCK_NEEDS_FREE) {
+ // latches on high
+ latching_incr_int(&aBlock->flags);
+ return aBlock;
+ }
+ else if (aBlock->flags & BLOCK_IS_GC) {
+ // GC refcounting is expensive so do most refcounting here.
+ if (wantsOne && ((latching_incr_int(&aBlock->flags) & BLOCK_REFCOUNT_MASK) == 1)) {
+ // Tell collector to hang on this - it will bump the GC refcount version
+ _Block_setHasRefcount(aBlock, true);
+ }
+ return aBlock;
+ }
+ else if (aBlock->flags & BLOCK_IS_GLOBAL) {
+ return aBlock;
+ }
+
+ // It's a stack block. Make a copy.
+ if (!isGC) {
+ struct Block_layout *result = malloc(aBlock->descriptor->size);
+ if (!result) return (void *)0;
+ memmove(result, aBlock, aBlock->descriptor->size); // bitcopy first
+ // reset refcount
+ result->flags &= ~(BLOCK_REFCOUNT_MASK); // XXX not needed
+ result->flags |= BLOCK_NEEDS_FREE | 1;
+ result->isa = _NSConcreteMallocBlock;
+ if (result->flags & BLOCK_HAS_COPY_DISPOSE) {
+ //printf("calling block copy helper %p(%p, %p)...\n", aBlock->descriptor->copy, result, aBlock);
+ (*aBlock->descriptor->copy)(result, aBlock); // do fixup
+ }
+ return result;
+ }
+ else {
+ // Under GC we want an allocation with refcount 1, so we ask for "true" if wantsOne.
+ // This allows the copy helper routines to make non-refcounted block copies under GC
+ unsigned long int flags = aBlock->flags;
+ bool hasCTOR = (flags & BLOCK_HAS_CTOR) != 0;
+ struct Block_layout *result = _Block_allocator(aBlock->descriptor->size, wantsOne, hasCTOR);
+ if (!result) return (void *)0;
+ memmove(result, aBlock, aBlock->descriptor->size); // bitcopy first
+ // reset refcount
+ // if we copy a malloc block to a GC block then we need to clear NEEDS_FREE.
+ flags &= ~(BLOCK_NEEDS_FREE|BLOCK_REFCOUNT_MASK); // XXX not needed
+ if (wantsOne)
+ flags |= BLOCK_IS_GC | 1;
+ else
+ flags |= BLOCK_IS_GC;
+ result->flags = flags;
+ if (flags & BLOCK_HAS_COPY_DISPOSE) {
+ //printf("calling block copy helper...\n");
+ (*aBlock->descriptor->copy)(result, aBlock); // do fixup
+ }
+ if (hasCTOR) {
+ result->isa = _NSConcreteFinalizingBlock;
+ }
+ else {
+ result->isa = _NSConcreteAutoBlock;
+ }
+ return result;
+ }
+}
+
+
+/*
+ * Runtime entry points for maintaining the sharing knowledge of byref data blocks.
+ *
+ * A closure has been copied and its fixup routine is asking us to fix up the reference to the shared byref data.
+ * Closures that aren't copied must still work, so everyone always accesses variables after dereferencing the forwarding ptr.
+ * We ask if the byref pointer that we know about has already been copied to the heap, and if so, increment it.
+ * Otherwise we need to copy it and update the stack forwarding pointer.
+ * XXX We need to account for weak/nonretained read-write barriers.
+ */
+
+static void _Block_byref_assign_copy(void *dest, const void *arg, const int flags) {
+ struct Block_byref **destp = (struct Block_byref **)dest;
+ struct Block_byref *src = (struct Block_byref *)arg;
+
+ //printf("_Block_byref_assign_copy called, byref destp %p, src %p, flags %x\n", destp, src, flags);
+ //printf("src dump: %s\n", _Block_byref_dump(src));
+ if (src->forwarding->flags & BLOCK_IS_GC) {
+ ; // don't need to do any more work
+ }
+ else if ((src->forwarding->flags & BLOCK_REFCOUNT_MASK) == 0) {
+ //printf("making copy\n");
+ // src points to stack
+ bool isWeak = ((flags & (BLOCK_FIELD_IS_BYREF|BLOCK_FIELD_IS_WEAK)) == (BLOCK_FIELD_IS_BYREF|BLOCK_FIELD_IS_WEAK));
+ // if it's weak, ask for an object (only matters under GC)
+ struct Block_byref *copy = (struct Block_byref *)_Block_allocator(src->size, false, isWeak);
+ copy->flags = src->flags | _Byref_flag_initial_value; // non-GC one for caller, one for stack
+ copy->forwarding = copy; // patch heap copy to point to itself (skip write-barrier)
+ src->forwarding = copy; // patch stack to point to heap copy
+ copy->size = src->size;
+ if (isWeak) {
+ copy->isa = &_NSConcreteWeakBlockVariable; // mark isa field so it gets weak scanning
+ }
+ if (src->flags & BLOCK_HAS_COPY_DISPOSE) {
+ // Trust copy helper to copy everything of interest
+ // If more than one field shows up in a byref block this is wrong XXX
+ copy->byref_keep = src->byref_keep;
+ copy->byref_destroy = src->byref_destroy;
+ (*src->byref_keep)(copy, src);
+ }
+ else {
+ // just bits. Blast 'em using _Block_memmove in case they're __strong
+ _Block_memmove(
+ (void *)&copy->byref_keep,
+ (void *)&src->byref_keep,
+ src->size - sizeof(struct Block_byref_header));
+ }
+ }
+ // already copied to heap
+ else if ((src->forwarding->flags & BLOCK_NEEDS_FREE) == BLOCK_NEEDS_FREE) {
+ latching_incr_int(&src->forwarding->flags);
+ }
+ // assign byref data block pointer into new Block
+ _Block_assign(src->forwarding, (void **)destp);
+}
+
+// Old compiler SPI
+static void _Block_byref_release(const void *arg) {
+ struct Block_byref *shared_struct = (struct Block_byref *)arg;
+ int refcount;
+
+ // dereference the forwarding pointer since the compiler isn't doing this anymore (ever?)
+ shared_struct = shared_struct->forwarding;
+
+ //printf("_Block_byref_release %p called, flags are %x\n", shared_struct, shared_struct->flags);
+ // To support C++ destructors under GC we arrange for there to be a finalizer for this
+ // by using an isa that directs the code to a finalizer that calls the byref_destroy method.
+ if ((shared_struct->flags & BLOCK_NEEDS_FREE) == 0) {
+ return; // stack or GC or global
+ }
+ refcount = shared_struct->flags & BLOCK_REFCOUNT_MASK;
+ if (refcount <= 0) {
+ printf("_Block_byref_release: Block byref data structure at %p underflowed\n", arg);
+ }
+ else if ((latching_decr_int(&shared_struct->flags) & BLOCK_REFCOUNT_MASK) == 0) {
+ //printf("disposing of heap based byref block\n");
+ if (shared_struct->flags & BLOCK_HAS_COPY_DISPOSE) {
+ //printf("calling out to helper\n");
+ (*shared_struct->byref_destroy)(shared_struct);
+ }
+ _Block_deallocator((struct Block_layout *)shared_struct);
+ }
+}
+
+
+/*
+ *
+ * API supporting SPI
+ * _Block_copy, _Block_release, and (old) _Block_destroy
+ *
+ */
+
+#if 0
+#pragma mark SPI/API
+#endif /* if 0 */
+
+void *_Block_copy(const void *arg) {
+ return _Block_copy_internal(arg, WANTS_ONE);
+}
+
+
+// API entry point to release a copied Block
+void _Block_release(void *arg) {
+ struct Block_layout *aBlock = (struct Block_layout *)arg;
+ int32_t newCount;
+ if (!aBlock) return;
+ newCount = latching_decr_int(&aBlock->flags) & BLOCK_REFCOUNT_MASK;
+ if (newCount > 0) return;
+ // Hit zero
+ if (aBlock->flags & BLOCK_IS_GC) {
+ // Tell GC we no longer have our own refcounts. GC will decr its refcount
+ // and unless someone has done a CFRetain or marked it uncollectable it will
+ // now be subject to GC reclamation.
+ _Block_setHasRefcount(aBlock, false);
+ }
+ else if (aBlock->flags & BLOCK_NEEDS_FREE) {
+ if (aBlock->flags & BLOCK_HAS_COPY_DISPOSE)(*aBlock->descriptor->dispose)(aBlock);
+ _Block_deallocator(aBlock);
+ }
+ else if (aBlock->flags & BLOCK_IS_GLOBAL) {
+ ;
+ }
+ else {
+ printf("Block_release called upon a stack Block: %p, ignored\n", (void *)aBlock);
+ }
+}
+
+
+
+// Old Compiler SPI point to release a copied Block used by the compiler in dispose helpers
+static void _Block_destroy(const void *arg) {
+ struct Block_layout *aBlock;
+ if (!arg) return;
+ aBlock = (struct Block_layout *)arg;
+ if (aBlock->flags & BLOCK_IS_GC) {
+ // assert(aBlock->Block_flags & BLOCK_HAS_CTOR);
+ return; // ignore, we are being called because of a DTOR
+ }
+ _Block_release(aBlock);
+}
+
+
+
+/*
+ *
+ * SPI used by other layers
+ *
+ */
+
+// SPI, also internal. Called from NSAutoBlock only under GC
+void *_Block_copy_collectable(const void *aBlock) {
+ return _Block_copy_internal(aBlock, 0);
+}
+
+
+// SPI
+unsigned long int Block_size(void *arg) {
+ return ((struct Block_layout *)arg)->descriptor->size;
+}
+
+
+#if 0
+#pragma mark Compiler SPI entry points
+#endif /* if 0 */
+
+
+/*******************************************************
+
+Entry points used by the compiler - the real API!
+
+
+A Block can reference four different kinds of things that require help when the Block is copied to the heap.
+1) C++ stack based objects
+2) References to Objective-C objects
+3) Other Blocks
+4) __block variables
+
+In these cases helper functions are synthesized by the compiler for use in Block_copy and Block_release, called the copy and dispose helpers. The copy helper emits a call to the C++ const copy constructor for C++ stack based objects and for the rest calls into the runtime support function _Block_object_assign. The dispose helper has a call to the C++ destructor for case 1 and a call into _Block_object_dispose for the rest.
+
+The flags parameter of _Block_object_assign and _Block_object_dispose is set to
+ * BLOCK_FIELD_IS_OBJECT (3), for the case of an Objective-C Object,
+ * BLOCK_FIELD_IS_BLOCK (7), for the case of another Block, and
+ * BLOCK_FIELD_IS_BYREF (8), for the case of a __block variable.
+If the __block variable is marked weak, the compiler also ORs in BLOCK_FIELD_IS_WEAK (16).
+
+So the Block copy/dispose helpers should only ever generate the four flag values of 3, 7, 8, and 24.
+
+When a __block variable is either a C++ object, an Objective-C object, or another Block then the compiler also generates copy/dispose helper functions. Similarly to the Block copy helper, the "__block" copy helper (formerly and still a.k.a. "byref" copy helper) will do a C++ copy constructor (not a const one though!) and the dispose helper will do the destructor. And similarly the helpers will call into the same two support functions with the same values for objects and Blocks with the additional BLOCK_BYREF_CALLER (128) bit of information supplied.
+
+So the __block copy/dispose helpers will generate flag values of 3 or 7 for objects and Blocks respectively, with BLOCK_FIELD_IS_WEAK (16) ORed in as appropriate and 128 always ORed in, for the following set of possibilities:
+ __block id 128+3
+ __weak __block id 128+3+16
+ __block (^Block) 128+7
+ __weak __block (^Block) 128+7+16
+
+The implementation of the two routines would be improved by switch statements enumerating the eight cases.
+
+********************************************************/
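+
+/* Editor's note: an illustrative sketch (hypothetical slot and helper names)
+ * of the compiler-synthesized helper pair for a Block capturing one ObjC
+ * object and one __block variable, per the flag table above:
+ *
+ *   static void __block_copy_helper(void *dst, void *src) {
+ *       _Block_object_assign(&dst_obj_slot, src_obj, BLOCK_FIELD_IS_OBJECT);    // 3
+ *       _Block_object_assign(&dst_byref_slot, src_byref, BLOCK_FIELD_IS_BYREF); // 8
+ *   }
+ *   static void __block_dispose_helper(void *src) {
+ *       _Block_object_dispose(src_obj, BLOCK_FIELD_IS_OBJECT);
+ *       _Block_object_dispose(src_byref, BLOCK_FIELD_IS_BYREF);
+ *   }
+ */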
+
+/*
+ * When Blocks or Block_byrefs hold objects then their copy routine helpers use this entry point
+ * to do the assignment.
+ */
+void _Block_object_assign(void *destAddr, const void *object, const int flags) {
+ //printf("_Block_object_assign(*%p, %p, %x)\n", destAddr, object, flags);
+ if ((flags & BLOCK_BYREF_CALLER) == BLOCK_BYREF_CALLER) {
+ if ((flags & BLOCK_FIELD_IS_WEAK) == BLOCK_FIELD_IS_WEAK) {
+ _Block_assign_weak(object, destAddr);
+ }
+ else {
+ // do *not* retain or *copy* __block variables whatever they are
+ _Block_assign((void *)object, destAddr);
+ }
+ }
+ else if ((flags & BLOCK_FIELD_IS_BYREF) == BLOCK_FIELD_IS_BYREF) {
+ // copying a __block reference from the stack Block to the heap
+ // flags will indicate if it holds a __weak reference and needs a special isa
+ _Block_byref_assign_copy(destAddr, object, flags);
+ }
+ // (this test must be before next one)
+ else if ((flags & BLOCK_FIELD_IS_BLOCK) == BLOCK_FIELD_IS_BLOCK) {
+ // copying a Block declared variable from the stack Block to the heap
+ _Block_assign(_Block_copy_internal(object, flags), destAddr);
+ }
+ // (this test must be after previous one)
+ else if ((flags & BLOCK_FIELD_IS_OBJECT) == BLOCK_FIELD_IS_OBJECT) {
+ //printf("retaining object at %p\n", object);
+ _Block_retain_object(object);
+ //printf("done retaining object at %p\n", object);
+ _Block_assign((void *)object, destAddr);
+ }
+}
+
+// When Blocks or Block_byrefs hold objects their destroy helper routines call this entry point
+// to help dispose of the contents
+// Used initially only for __attribute__((NSObject)) marked pointers.
+void _Block_object_dispose(const void *object, const int flags) {
+ //printf("_Block_object_dispose(%p, %x)\n", object, flags);
+ if (flags & BLOCK_FIELD_IS_BYREF) {
+ // get rid of the __block data structure held in a Block
+ _Block_byref_release(object);
+ }
+ else if ((flags & (BLOCK_FIELD_IS_BLOCK|BLOCK_BYREF_CALLER)) == BLOCK_FIELD_IS_BLOCK) {
+ // get rid of a referenced Block held by this Block
+ // (ignore __block Block variables, compiler doesn't need to call us)
+ _Block_destroy(object);
+ }
+ else if ((flags & (BLOCK_FIELD_IS_WEAK|BLOCK_FIELD_IS_BLOCK|BLOCK_BYREF_CALLER)) == BLOCK_FIELD_IS_OBJECT) {
+ // get rid of a referenced object held by this Block
+ // (ignore __block object variables, compiler doesn't need to call us)
+ _Block_release_object(object);
+ }
+}
+
+
+/*
+ * Debugging support:
+ */
+#if 0
+#pragma mark Debugging
+#endif /* if 0 */
+
+
+const char *_Block_dump(const void *block) {
+ struct Block_layout *closure = (struct Block_layout *)block;
+ static char buffer[512];
+ char *cp = buffer;
+ if (closure == NULL) {
+ sprintf(cp, "NULL passed to _Block_dump\n");
+ return buffer;
+ }
+ if (! (closure->flags & BLOCK_HAS_DESCRIPTOR)) {
+ printf("Block compiled by obsolete compiler, please recompile source for this Block\n");
+ exit(1);
+ }
+ cp += sprintf(cp, "^%p (new layout) =\n", (void *)closure);
+ if (closure->isa == NULL) {
+ cp += sprintf(cp, "isa: NULL\n");
+ }
+ else if (closure->isa == _NSConcreteStackBlock) {
+ cp += sprintf(cp, "isa: stack Block\n");
+ }
+ else if (closure->isa == _NSConcreteMallocBlock) {
+ cp += sprintf(cp, "isa: malloc heap Block\n");
+ }
+ else if (closure->isa == _NSConcreteAutoBlock) {
+ cp += sprintf(cp, "isa: GC heap Block\n");
+ }
+ else if (closure->isa == _NSConcreteGlobalBlock) {
+ cp += sprintf(cp, "isa: global Block\n");
+ }
+ else if (closure->isa == _NSConcreteFinalizingBlock) {
+ cp += sprintf(cp, "isa: finalizing Block\n");
+ }
+ else {
+ cp += sprintf(cp, "isa?: %p\n", (void *)closure->isa);
+ }
+ cp += sprintf(cp, "flags:");
+ if (closure->flags & BLOCK_HAS_DESCRIPTOR) {
+ cp += sprintf(cp, " HASDESCRIPTOR");
+ }
+ if (closure->flags & BLOCK_NEEDS_FREE) {
+ cp += sprintf(cp, " FREEME");
+ }
+ if (closure->flags & BLOCK_IS_GC) {
+ cp += sprintf(cp, " ISGC");
+ }
+ if (closure->flags & BLOCK_HAS_COPY_DISPOSE) {
+ cp += sprintf(cp, " HASHELP");
+ }
+ if (closure->flags & BLOCK_HAS_CTOR) {
+ cp += sprintf(cp, " HASCTOR");
+ }
+ cp += sprintf(cp, "\nrefcount: %u\n", closure->flags & BLOCK_REFCOUNT_MASK);
+ cp += sprintf(cp, "invoke: %p\n", (void *)(uintptr_t)closure->invoke);
+ {
+ struct Block_descriptor *dp = closure->descriptor;
+ cp += sprintf(cp, "descriptor: %p\n", (void *)dp);
+ cp += sprintf(cp, "descriptor->reserved: %lu\n", dp->reserved);
+ cp += sprintf(cp, "descriptor->size: %lu\n", dp->size);
+
+ if (closure->flags & BLOCK_HAS_COPY_DISPOSE) {
+ cp += sprintf(cp, "descriptor->copy helper: %p\n", (void *)(uintptr_t)dp->copy);
+ cp += sprintf(cp, "descriptor->dispose helper: %p\n", (void *)(uintptr_t)dp->dispose);
+ }
+ }
+ return buffer;
+}
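+
+/*
+ * Typical use (a sketch): from a debugger or test code, e.g.
+ *     printf("%s", _Block_dump(some_block));
+ * prints the Block's isa kind, flags, refcount, invoke pointer, and
+ * descriptor fields.
+ */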
+
+
+const char *_Block_byref_dump(struct Block_byref *src) {
+ static char buffer[256];
+ char *cp = buffer;
+ cp += sprintf(cp, "byref data block %p contents:\n", (void *)src);
+ cp += sprintf(cp, " forwarding: %p\n", (void *)src->forwarding);
+ cp += sprintf(cp, " flags: 0x%x\n", src->flags);
+ cp += sprintf(cp, " size: %d\n", src->size);
+ if (src->flags & BLOCK_HAS_COPY_DISPOSE) {
+ cp += sprintf(cp, " copy helper: %p\n", (void *)(uintptr_t)src->byref_keep);
+ cp += sprintf(cp, " dispose helper: %p\n", (void *)(uintptr_t)src->byref_destroy);
+ }
+ return buffer;
+}
+
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/lib/BlocksRuntime/runtime.c
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/LICENSE.TXT
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/LICENSE.TXT (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/LICENSE.TXT (revision 351984)
@@ -0,0 +1,311 @@
+==============================================================================
+The LLVM Project is under the Apache License v2.0 with LLVM Exceptions:
+==============================================================================
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+
+---- LLVM Exceptions to the Apache 2.0 License ----
+
+As an exception, if, as a result of your compiling your source code, portions
+of this Software are embedded into an Object form of such source code, you
+may redistribute such embedded portions in such Object form without complying
+with the conditions of Sections 4(a), 4(b) and 4(d) of the License.
+
+In addition, if you combine or link compiled forms of this Software with
+software that is licensed under the GPLv2 ("Combined Software") and if a
+court of competent jurisdiction determines that the patent provision (Section
+3), the indemnity provision (Section 9) or other Section of the License
+conflicts with the conditions of the GPLv2, you may retroactively and
+prospectively choose to deem waived or otherwise exclude such Section(s) of
+the License, but only in their entirety and only with respect to the Combined
+Software.
+
+==============================================================================
+Software from third parties included in the LLVM Project:
+==============================================================================
+The LLVM Project contains third party software which is under different license
+terms. All such code will be identified clearly using at least one of two
+mechanisms:
+1) It will be in a separate directory tree with its own `LICENSE.txt` or
+ `LICENSE` file at the top containing the specific license and restrictions
+ which apply to that software, or
+2) It will contain specific license and restriction terms at the top of every
+ file.
+
+==============================================================================
+Legacy LLVM License (https://llvm.org/docs/DeveloperPolicy.html#legacy):
+==============================================================================
+
+The compiler_rt library is dual licensed under both the University of Illinois
+"BSD-Like" license and the MIT license. As a user of this code you may choose
+to use it under either license. As a contributor, you agree to allow your code
+to be used under both.
+
+Full text of the relevant licenses is included below.
+
+==============================================================================
+
+University of Illinois/NCSA
+Open Source License
+
+Copyright (c) 2009-2019 by the contributors listed in CREDITS.TXT
+
+All rights reserved.
+
+Developed by:
+
+ LLVM Team
+
+ University of Illinois at Urbana-Champaign
+
+ http://llvm.org
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal with
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimers.
+
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimers in the
+ documentation and/or other materials provided with the distribution.
+
+ * Neither the names of the LLVM Team, University of Illinois at
+ Urbana-Champaign, nor the names of its contributors may be used to
+ endorse or promote products derived from this Software without specific
+ prior written permission.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE
+SOFTWARE.
+
+==============================================================================
+
+Copyright (c) 2009-2015 by the contributors listed in CREDITS.TXT
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/include/sanitizer/allocator_interface.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/include/sanitizer/allocator_interface.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/include/sanitizer/allocator_interface.h (revision 351984)
@@ -0,0 +1,88 @@
+//===-- allocator_interface.h ---------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Public interface header for allocator used in sanitizers (ASan/TSan/MSan).
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_ALLOCATOR_INTERFACE_H
+#define SANITIZER_ALLOCATOR_INTERFACE_H
+
+#include <stddef.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+ /* Returns the estimated number of bytes that will be reserved by the
+    allocator for a request of "size" bytes. If the allocator can't allocate
+    that much memory, returns the maximal possible allocation size;
+    otherwise returns "size". */
+ size_t __sanitizer_get_estimated_allocated_size(size_t size);
+
+ /* Returns true if p was returned by the allocator and
+ is not yet freed. */
+ int __sanitizer_get_ownership(const volatile void *p);
+
+ /* Returns the number of bytes reserved for the pointer p.
+ Requires (get_ownership(p) == true) or (p == 0). */
+ size_t __sanitizer_get_allocated_size(const volatile void *p);
+
+ /* Number of bytes allocated and not yet freed by the application. */
+ size_t __sanitizer_get_current_allocated_bytes(void);
+
+ /* Number of bytes mmap-ed by the allocator to fulfill allocation requests.
+    Generally, for a request of X bytes, the allocator can reserve and add to
+    free lists a large number of chunks of size X to use for future requests.
+    All these chunks count toward the heap size. Currently, the allocator
+    never releases memory to the OS (instead, it just puts freed chunks on
+    free lists). */
+ size_t __sanitizer_get_heap_size(void);
+
+ /* Number of bytes mmap-ed by the allocator that can be used to fulfill
+    allocation requests. When a user program frees a memory chunk, it can
+    first fall into quarantine and will count toward
+    __sanitizer_get_free_bytes() only later. */
+ size_t __sanitizer_get_free_bytes(void);
+
+ /* Number of bytes in unmapped pages that are released to the OS. Currently
+    always returns 0. */
+ size_t __sanitizer_get_unmapped_bytes(void);
+
+ /* Malloc hooks that may be optionally provided by the user.
+    __sanitizer_malloc_hook(ptr, size) is called immediately after an
+    allocation of "size" bytes that returned "ptr".
+    __sanitizer_free_hook(ptr) is called immediately before
+    deallocation of "ptr". */
+ void __sanitizer_malloc_hook(const volatile void *ptr, size_t size);
+ void __sanitizer_free_hook(const volatile void *ptr);
+
+ /* Installs a pair of hooks for malloc/free.
+    Several (currently, 5) hook pairs may be installed; they are executed
+    in the order they were installed and after calling
+    __sanitizer_malloc_hook/__sanitizer_free_hook.
+    Unlike __sanitizer_malloc_hook/__sanitizer_free_hook, these hooks can be
+    chained and do not rely on weak symbols working on the platform, but they
+    require __sanitizer_install_malloc_and_free_hooks to be called at startup
+    and thus will not be called on malloc/free very early in the process.
+    Returns the number of hooks currently installed, or 0 on failure.
+    Not thread-safe; should be called in the main thread before starting
+    other threads.
+  */
+ int __sanitizer_install_malloc_and_free_hooks(
+ void (*malloc_hook)(const volatile void *, size_t),
+ void (*free_hook)(const volatile void *));
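+
+ /* Example (an illustrative sketch; the hook names and counters below are
+    hypothetical, and the bodies are kept trivial because the hooks run
+    inside the allocator):
+
+      static size_t g_allocs, g_frees;
+      static void my_malloc_hook(const volatile void *ptr, size_t size) {
+        (void)ptr; (void)size; ++g_allocs;
+      }
+      static void my_free_hook(const volatile void *ptr) {
+        (void)ptr; ++g_frees;
+      }
+      // Early in main(), before spawning threads:
+      // __sanitizer_install_malloc_and_free_hooks(my_malloc_hook,
+      //                                           my_free_hook);
+  */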
+
+ /* Drains allocator quarantines (calling thread's and global ones), returns
+    freed memory back to the OS, and releases other non-essential internal
+    allocator resources in an attempt to reduce process RSS.
+    Currently available with ASan only.
+  */
+ void __sanitizer_purge_allocator(void);
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/include/sanitizer/allocator_interface.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/include/sanitizer/asan_interface.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/include/sanitizer/asan_interface.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/include/sanitizer/asan_interface.h (revision 351984)
@@ -0,0 +1,322 @@
+//===-- sanitizer/asan_interface.h ------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of AddressSanitizer (ASan).
+//
+// Public interface header.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_ASAN_INTERFACE_H
+#define SANITIZER_ASAN_INTERFACE_H
+
+#include <sanitizer/common_interface_defs.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+/// Marks a memory region (<c>[addr, addr+size)</c>) as unaddressable.
+///
+/// This memory must be previously allocated by your program. Instrumented
+/// code is forbidden from accessing addresses in this region until it is
+/// unpoisoned. This function is not guaranteed to poison the entire region -
+/// it could poison only a subregion of <c>[addr, addr+size)</c> due to ASan
+/// alignment restrictions.
+///
+/// \note This function is not thread-safe because no two threads can poison or
+/// unpoison memory in the same memory region simultaneously.
+///
+/// \param addr Start of memory region.
+/// \param size Size of memory region.
+void __asan_poison_memory_region(void const volatile *addr, size_t size);
+
+/// Marks a memory region (<c>[addr, addr+size)</c>) as addressable.
+///
+/// This memory must be previously allocated by your program. Accessing
+/// addresses in this region is allowed until this region is poisoned again.
+/// This function could unpoison a super-region of <c>[addr, addr+size)</c> due
+/// to ASan alignment restrictions.
+///
+/// \note This function is not thread-safe because no two threads can
+/// poison or unpoison memory in the same memory region simultaneously.
+///
+/// \param addr Start of memory region.
+/// \param size Size of memory region.
+void __asan_unpoison_memory_region(void const volatile *addr, size_t size);
+
+// Macros provided for convenience.
+#if __has_feature(address_sanitizer) || defined(__SANITIZE_ADDRESS__)
+/// Marks a memory region as unaddressable.
+///
+/// \note Macro provided for convenience; defined as a no-op if ASan is not
+/// enabled.
+///
+/// \param addr Start of memory region.
+/// \param size Size of memory region.
+#define ASAN_POISON_MEMORY_REGION(addr, size) \
+ __asan_poison_memory_region((addr), (size))
+
+/// Marks a memory region as addressable.
+///
+/// \note Macro provided for convenience; defined as a no-op if ASan is not
+/// enabled.
+///
+/// \param addr Start of memory region.
+/// \param size Size of memory region.
+#define ASAN_UNPOISON_MEMORY_REGION(addr, size) \
+ __asan_unpoison_memory_region((addr), (size))
+#else
+#define ASAN_POISON_MEMORY_REGION(addr, size) \
+ ((void)(addr), (void)(size))
+#define ASAN_UNPOISON_MEMORY_REGION(addr, size) \
+ ((void)(addr), (void)(size))
+#endif
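+
+/// Example (an illustrative sketch): a custom allocator can keep redzones
+/// around user chunks poisoned so that out-of-bounds accesses are reported.
+/// <c>internal_alloc()</c>, <c>kRedzone</c>, and <c>size</c> are hypothetical.
+/// \code
+///   char *raw = (char *)internal_alloc(size + 2 * kRedzone);
+///   ASAN_POISON_MEMORY_REGION(raw, kRedzone);
+///   ASAN_POISON_MEMORY_REGION(raw + kRedzone + size, kRedzone);
+///   char *user = raw + kRedzone;  // hand out only the middle region
+/// \endcode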
+
+/// Checks if an address is poisoned.
+///
+/// Returns 1 if <c><i>addr</i></c> is poisoned (that is, 1-byte read/write
+/// access to this address would result in an error report from ASan).
+/// Otherwise returns 0.
+///
+/// \param addr Address to check.
+///
+/// \retval 1 Address is poisoned.
+/// \retval 0 Address is not poisoned.
+int __asan_address_is_poisoned(void const volatile *addr);
+
+/// Checks if a region is poisoned.
+///
+/// If at least one byte in <c>[beg, beg+size)</c> is poisoned, returns the
+/// address of the first such byte. Otherwise returns 0.
+///
+/// \param beg Start of memory region.
+/// \param size Size of memory region.
+/// \returns Address of first poisoned byte.
+void *__asan_region_is_poisoned(void *beg, size_t size);
+
+/// Describes an address (useful for calling from the debugger).
+///
+/// Prints the description of <c><i>addr</i></c>.
+///
+/// \param addr Address to describe.
+void __asan_describe_address(void *addr);
+
+/// Checks if an error has been or is being reported (useful for calling from
+/// the debugger to get information about an ASan error).
+///
+/// Returns 1 if an error has been (or is being) reported. Otherwise returns 0.
+///
+/// \returns 1 if an error has been (or is being) reported. Otherwise returns
+/// 0.
+int __asan_report_present(void);
+
+/// Gets the PC (program counter) register value of an ASan error (useful for
+/// calling from the debugger).
+///
+/// Returns PC if an error has been (or is being) reported.
+/// Otherwise returns 0.
+///
+/// \returns PC value.
+void *__asan_get_report_pc(void);
+
+/// Gets the BP (base pointer) register value of an ASan error (useful for
+/// calling from the debugger).
+///
+/// Returns BP if an error has been (or is being) reported.
+/// Otherwise returns 0.
+///
+/// \returns BP value.
+void *__asan_get_report_bp(void);
+
+/// Gets the SP (stack pointer) register value of an ASan error (useful for
+/// calling from the debugger).
+///
+/// If an error has been (or is being) reported, returns SP.
+/// Otherwise returns 0.
+///
+/// \returns SP value.
+void *__asan_get_report_sp(void);
+
+/// Gets the address of the report buffer of an ASan error (useful for calling
+/// from the debugger).
+///
+/// Returns the address of the report buffer if an error has been (or is being)
+/// reported. Otherwise returns 0.
+///
+/// \returns Address of report buffer.
+void *__asan_get_report_address(void);
+
+/// Gets access type of an ASan error (useful for calling from the debugger).
+///
+/// Returns access type (read or write) if an error has been (or is being)
+/// reported. Otherwise returns 0.
+///
+/// \returns Access type (0 = read, 1 = write).
+int __asan_get_report_access_type(void);
+
+/// Gets access size of an ASan error (useful for calling from the debugger).
+///
+/// Returns access size if an error has been (or is being) reported. Otherwise
+/// returns 0.
+///
+/// \returns Access size in bytes.
+size_t __asan_get_report_access_size(void);
+
+/// Gets the bug description of an ASan error (useful for calling from a
+/// debugger).
+///
+/// \returns Returns a bug description if an error has been (or is being)
+/// reported - for example, "heap-use-after-free". Otherwise returns an empty
+/// string.
+const char *__asan_get_report_description(void);
+
+/// Gets information about a pointer (useful for calling from the debugger).
+///
+/// Returns the category of the given pointer as a constant string.
+/// Possible return values are <c>global</c>, <c>stack</c>, <c>stack-fake</c>,
+/// <c>heap</c>, <c>heap-invalid</c>, <c>shadow-low</c>, <c>shadow-gap</c>,
+/// <c>shadow-high</c>, and <c>unknown</c>.
+///
+/// If the return value is <c>global</c> or <c>stack</c>, tries to also return
+/// the variable name, address, and size. If the return value is <c>heap</c>,
+/// tries to return the chunk address and size. <c><i>name</i></c> should point
+/// to an allocated buffer of size <c><i>name_size</i></c>.
+///
+/// \param addr Address to locate.
+/// \param name Buffer to store the variable's name.
+/// \param name_size Size in bytes of the variable's name buffer.
+/// \param region_address [out] Address of the region.
+/// \param region_size [out] Size of the region in bytes.
+///
+/// \returns Returns the category of the given pointer as a constant string.
+const char *__asan_locate_address(void *addr, char *name, size_t name_size,
+ void **region_address, size_t *region_size);
+
+/// Gets the allocation stack trace and thread ID for a heap address (useful
+/// for calling from the debugger).
+///
+/// Stores up to <c><i>size</i></c> frames in <c><i>trace</i></c>. Returns
+/// the number of stored frames or 0 on error.
+///
+/// \param addr A heap address.
+/// \param trace A buffer to store the stack trace.
+/// \param size Maximum number of frames the trace buffer can hold.
+/// \param thread_id [out] The thread ID of the address.
+///
+/// \returns Returns the number of stored frames or 0 on error.
+size_t __asan_get_alloc_stack(void *addr, void **trace, size_t size,
+ int *thread_id);
+
+/// Gets the free stack trace and thread ID for a heap address (useful for
+/// calling from the debugger).
+///
+/// Stores up to <c><i>size</i></c> frames in <c><i>trace</i></c>. Returns
+/// the number of stored frames or 0 on error.
+///
+/// \param addr A heap address.
+/// \param trace A buffer to store the stack trace.
+/// \param size Maximum number of frames the trace buffer can hold.
+/// \param thread_id [out] The thread ID of the address.
+///
+/// \returns Returns the number of stored frames or 0 on error.
+size_t __asan_get_free_stack(void *addr, void **trace, size_t size,
+ int *thread_id);
+
+/// Gets the current shadow memory mapping (useful for calling from the
+/// debugger).
+///
+/// \param shadow_scale [out] Shadow scale value.
+/// \param shadow_offset [out] Offset value.
+void __asan_get_shadow_mapping(size_t *shadow_scale, size_t *shadow_offset);
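+
+/// Example (illustrative): with the returned values, the shadow byte for an
+/// application address <c>addr</c> (hypothetical) can be computed using
+/// ASan's mapping:
+/// \code
+///   size_t scale, offset;
+///   __asan_get_shadow_mapping(&scale, &offset);
+///   uintptr_t shadow = ((uintptr_t)addr >> scale) + offset;
+/// \endcode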
+
+/// This is an internal function that is called to report an error. However,
+/// it is still a part of the interface because you might want to set a
+/// breakpoint on this function in the debugger.
+///
+/// \param pc <c><i>pc</i></c> value of the ASan error.
+/// \param bp <c><i>bp</i></c> value of the ASan error.
+/// \param sp <c><i>sp</i></c> value of the ASan error.
+/// \param addr Address of the ASan error.
+/// \param is_write True if the error is a write error; false otherwise.
+/// \param access_size Size of the memory access of the ASan error.
+void __asan_report_error(void *pc, void *bp, void *sp,
+ void *addr, int is_write, size_t access_size);
+
+// Deprecated. Call __sanitizer_set_death_callback instead.
+void __asan_set_death_callback(void (*callback)(void));
+
+/// Sets the callback function to be called during ASan error reporting.
+///
+/// The callback provides a string pointer to the report.
+///
+/// \param callback User-provided function.
+void __asan_set_error_report_callback(void (*callback)(const char *));
+
+/// User-provided callback on ASan errors.
+///
+/// You can provide a function that would be called immediately when ASan
+/// detects an error. This is useful in cases when ASan detects an error but
+/// your program crashes before the ASan report is printed.
+void __asan_on_error(void);
+
+/// Prints accumulated statistics to <c>stderr</c> (useful for calling from the
+/// debugger).
+void __asan_print_accumulated_stats(void);
+
+/// User-provided default option settings.
+///
+/// You can provide your own implementation of this function to return a string
+/// containing ASan runtime options (for example,
+/// <c>verbosity=1:halt_on_error=0</c>).
+///
+/// \returns Default options string.
+const char* __asan_default_options(void);
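+
+/// Example (illustrative): a program can bake in its own defaults by defining
+/// \code
+///   const char *__asan_default_options(void) {
+///     return "verbosity=1:halt_on_error=0";
+///   }
+/// \endcode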
+
+// The following two functions facilitate garbage collection in presence of
+// ASan's fake stack.
+
+/// Gets an opaque handler to the current thread's fake stack.
+///
+/// Returns an opaque handler to be used by
+/// <c>__asan_addr_is_in_fake_stack()</c>. Returns NULL if the current thread
+/// does not have a fake stack.
+///
+/// \returns An opaque handler to the fake stack or NULL.
+void *__asan_get_current_fake_stack(void);
+
+/// Checks if an address belongs to a given fake stack.
+///
+/// If <c><i>fake_stack</i></c> is non-NULL and <c><i>addr</i></c> belongs to a
+/// fake frame in <c><i>fake_stack</i></c>, returns the address of the real
+/// stack that corresponds to the fake frame and sets <c><i>beg</i></c> and
+/// <c><i>end</i></c> to the boundaries of this fake frame. Otherwise returns
+/// NULL and does not touch <c><i>beg</i></c> and <c><i>end</i></c>.
+///
+/// If <c><i>beg</i></c> or <c><i>end</i></c> are NULL, they are not touched.
+///
+/// \note This function can be called from a thread other than the owner of
+/// <c><i>fake_stack</i></c>, but the owner thread needs to be alive.
+///
+/// \param fake_stack An opaque handler to a fake stack.
+/// \param addr Address to test.
+/// \param beg [out] Beginning of fake frame.
+/// \param end [out] End of fake frame.
+/// \returns Stack address or NULL.
+void *__asan_addr_is_in_fake_stack(void *fake_stack, void *addr, void **beg,
+ void **end);
+
+/// Performs shadow memory cleanup of the current thread's stack before a
+/// function marked with the <c>[[noreturn]]</c> attribute is called.
+///
+/// To avoid false positives on the stack, this function must be called before
+/// no-return functions such as <c>_exit()</c> and <c>execl()</c>.
+void __asan_handle_no_return(void);
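+
+/// Example (illustrative; <c>status</c> is hypothetical):
+/// \code
+///   __asan_handle_no_return();
+///   _exit(status);
+/// \endcode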
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // SANITIZER_ASAN_INTERFACE_H
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/include/sanitizer/common_interface_defs.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/include/sanitizer/common_interface_defs.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/include/sanitizer/common_interface_defs.h (revision 351984)
@@ -0,0 +1,354 @@
+//===-- sanitizer/common_interface_defs.h -----------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Common part of the public sanitizer interface.
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_COMMON_INTERFACE_DEFS_H
+#define SANITIZER_COMMON_INTERFACE_DEFS_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+// GCC does not understand __has_feature.
+#if !defined(__has_feature)
+#define __has_feature(x) 0
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+// Arguments for __sanitizer_sandbox_on_notify() below.
+typedef struct {
+ // Enable sandbox support in sanitizer coverage.
+ int coverage_sandboxed;
+ // File descriptor to write coverage data to. If -1 is passed, a file will
+ // be pre-opened by __sanitizer_sandbox_on_notify(). This field has no
+ // effect if coverage_sandboxed == 0.
+ intptr_t coverage_fd;
+ // If non-zero, split the coverage data into well-formed blocks. This is
+ // useful when coverage_fd is a socket descriptor. Each block will contain
+ // a header, allowing data from multiple processes to be sent over the same
+ // socket.
+ unsigned int coverage_max_block_size;
+} __sanitizer_sandbox_arguments;
+
+// Tell the tools to write their reports to "path.<pid>" instead of stderr.
+void __sanitizer_set_report_path(const char *path);
+// Tell the tools to write their reports to the provided file descriptor
+// (cast to void *).
+void __sanitizer_set_report_fd(void *fd);
+
+// Notify the tools that the sandbox is going to be turned on. The reserved
+// parameter will be used in the future to hold a structure with functions
+// that the tools may call to bypass the sandbox.
+void __sanitizer_sandbox_on_notify(__sanitizer_sandbox_arguments *args);
+
+// This function is called by the tool when it has just finished reporting
+// an error. 'error_summary' is a one-line string that summarizes
+// the error message. This function can be overridden by the client.
+void __sanitizer_report_error_summary(const char *error_summary);
+
+// Some of the sanitizers (for example ASan/TSan) could miss bugs that happen
+// in unaligned loads/stores. To find such bugs reliably, you need to replace
+// plain unaligned loads/stores with these calls.
+
+/// Loads a 16-bit unaligned value.
+///
+/// \param p Pointer to unaligned memory.
+///
+/// \returns Loaded value.
+uint16_t __sanitizer_unaligned_load16(const void *p);
+
+/// Loads a 32-bit unaligned value.
+///
+/// \param p Pointer to unaligned memory.
+///
+/// \returns Loaded value.
+uint32_t __sanitizer_unaligned_load32(const void *p);
+
+/// Loads a 64-bit unaligned value.
+///
+/// \param p Pointer to unaligned memory.
+///
+/// \returns Loaded value.
+uint64_t __sanitizer_unaligned_load64(const void *p);
+
+/// Stores a 16-bit unaligned value.
+///
+/// \param p Pointer to unaligned memory.
+/// \param x 16-bit value to store.
+void __sanitizer_unaligned_store16(void *p, uint16_t x);
+
+/// Stores a 32-bit unaligned value.
+///
+/// \param p Pointer to unaligned memory.
+/// \param x 32-bit value to store.
+void __sanitizer_unaligned_store32(void *p, uint32_t x);
+
+/// Stores a 64-bit unaligned value.
+///
+/// \param p Pointer to unaligned memory.
+/// \param x 64-bit value to store.
+void __sanitizer_unaligned_store64(void *p, uint64_t x);
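+
+/// Example (an illustrative sketch; <c>buf</c> and <c>offset</c> are
+/// hypothetical):
+/// \code
+///   uint32_t v = __sanitizer_unaligned_load32(buf + offset);
+///   __sanitizer_unaligned_store32(buf + offset, v + 1);
+/// \endcode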
+
+// Returns 1 on the first call, then returns 0 thereafter. Called by the tool
+// to ensure only one report is printed when multiple errors occur
+// simultaneously.
+int __sanitizer_acquire_crash_state(void);
+
+/// Annotates the current state of a contiguous container, such as
+/// <c>std::vector</c>, <c>std::string</c>, or similar.
+///
+/// A contiguous container is a container that keeps all of its elements
+/// in a contiguous region of memory. The container owns the region of memory
+/// <c>[beg, end)</c>; the memory <c>[beg, mid)</c> is used to store the
+/// current elements, and the memory <c>[mid, end)</c> is reserved for future
+/// elements (<c>beg <= mid <= end</c>). For example, in
+/// <c>std::vector<> v</c>:
+///
+/// \code
+/// beg = &v[0];
+/// end = beg + v.capacity() * sizeof(v[0]);
+/// mid = beg + v.size() * sizeof(v[0]);
+/// \endcode
+///
+/// This annotation tells the Sanitizer tool about the current state of the
+/// container so that the tool can report errors when memory from
+/// <c>[mid, end)</c> is accessed. Insert this annotation into methods like
+/// <c>push_back()</c> or <c>pop_back()</c>. Supply the old and new values of
+/// <c>mid</c>(<c><i>old_mid</i></c> and <c><i>new_mid</i></c>). In the initial
+/// state <c>mid == end</c>, so that should be the final state when the
+/// container is destroyed or when the container reallocates the storage.
+///
+/// For ASan, <c><i>beg</i></c> should be 8-aligned and <c><i>end</i></c>
+/// should be either 8-aligned or it should point to the end of a separate
+/// heap-, stack-, or global-allocated buffer. So the following example will
+/// not work:
+///
+/// \code
+/// int64_t x[2]; // 16 bytes, 8-aligned
+/// char *beg = (char *)&x[0];
+/// char *end = beg + 12; // Not 8-aligned, not the end of the buffer
+/// \endcode
+///
+/// The following, however, will work:
+/// \code
+/// int32_t x[3]; // 12 bytes, but 8-aligned under ASan.
+/// char *beg = (char*)&x[0];
+/// char *end = beg + 12; // Not 8-aligned, but is the end of the buffer
+/// \endcode
+///
+/// \note Use this function with caution and do not use for anything other
+/// than vector-like classes.
+///
+/// \param beg Beginning of memory region.
+/// \param end End of memory region.
+/// \param old_mid Old middle of memory region.
+/// \param new_mid New middle of memory region.
+void __sanitizer_annotate_contiguous_container(const void *beg,
+ const void *end,
+ const void *old_mid,
+ const void *new_mid);
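+
+/// Example (an illustrative sketch of a push_back-style grow on a
+/// vector-like class; <c>beg</c>, <c>end</c>, <c>size</c>, and <c>T</c> are
+/// as in the description above):
+/// \code
+///   char *old_mid = beg + size * sizeof(T);
+///   char *new_mid = old_mid + sizeof(T);
+///   __sanitizer_annotate_contiguous_container(beg, end, old_mid, new_mid);
+///   // ... construct the new element in [old_mid, new_mid) ...
+/// \endcode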
+
+/// Returns true if the contiguous container <c>[beg, end)</c> is properly
+/// poisoned.
+///
+/// Proper poisoning could occur, for example, with
+/// <c>__sanitizer_annotate_contiguous_container</c>, that is, if
+/// <c>[beg, mid)</c> is addressable and <c>[mid, end)</c> is unaddressable.
+/// Full verification requires O(<c>end - beg</c>) time; this function tries
+/// to avoid such complexity by touching only parts of the container around
+/// <c><i>beg</i></c>, <c><i>mid</i></c>, and <c><i>end</i></c>.
+///
+/// \param beg Beginning of memory region.
+/// \param mid Middle of memory region.
+/// \param end End of memory region.
+///
+/// \returns True if the contiguous container <c>[beg, end)</c> is properly
+/// poisoned.
+int __sanitizer_verify_contiguous_container(const void *beg, const void *mid,
+ const void *end);
+
+/// Similar to <c>__sanitizer_verify_contiguous_container()</c> but also
+/// returns the address of the first improperly poisoned byte.
+///
+/// Returns NULL if the area is poisoned properly.
+///
+/// \param beg Beginning of memory region.
+/// \param mid Middle of memory region.
+/// \param end End of memory region.
+///
+/// \returns The bad address or NULL.
+const void *__sanitizer_contiguous_container_find_bad_address(const void *beg,
+ const void *mid,
+ const void *end);
+
+/// Prints the stack trace leading to this call (useful for calling from the
+/// debugger).
+void __sanitizer_print_stack_trace(void);
+
+// Symbolizes the supplied 'pc' using the format string 'fmt'.
+// Outputs at most 'out_buf_size' bytes into 'out_buf'.
+// If 'out_buf' is not empty, the output is zero or more non-empty C strings
+// followed by a single empty C string. Multiple strings can be returned if the
+// PC corresponds to an inlined function. Inlined frames are printed in order
+// from "most-inlined" to "least-inlined", so the last frame is the
+// non-inlined function.
+// Inlined frames can be removed with 'symbolize_inline_frames=0'.
+// The format syntax is described in
+// lib/sanitizer_common/sanitizer_stacktrace_printer.h.
+void __sanitizer_symbolize_pc(void *pc, const char *fmt, char *out_buf,
+ size_t out_buf_size);
+// Same as __sanitizer_symbolize_pc, but for data section (i.e. globals).
+void __sanitizer_symbolize_global(void *data_ptr, const char *fmt,
+ char *out_buf, size_t out_buf_size);
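+
+// Example (illustrative): symbolize the caller of the current function using
+// the %p/%F/%L directives documented in sanitizer_stacktrace_printer.h:
+//   char buf[256];
+//   __sanitizer_symbolize_pc(__builtin_return_address(0), "%p %F %L",
+//                            buf, sizeof(buf));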
+
+/// Sets the callback to be called immediately before death on error.
+///
+/// Passing 0 will unset the callback.
+///
+/// \param callback User-provided callback.
+void __sanitizer_set_death_callback(void (*callback)(void));
+
+
+// Interceptor hooks.
+// Whenever a libc function interceptor is called, it checks if the
+// corresponding weak hook is defined, and calls it if it is indeed defined.
+// The primary use-case is data-flow-guided fuzzing, where the fuzzer needs
+// to know what is being passed to libc functions (for example memcmp).
+// FIXME: implement more hooks.
+
+/// Interceptor hook for <c>memcmp()</c>.
+///
+/// \param called_pc PC (program counter) address of the original call.
+/// \param s1 Pointer to block of memory.
+/// \param s2 Pointer to block of memory.
+/// \param n Number of bytes to compare.
+/// \param result Value returned by the intercepted function.
+void __sanitizer_weak_hook_memcmp(void *called_pc, const void *s1,
+ const void *s2, size_t n, int result);
+
+/// Interceptor hook for <c>strncmp()</c>.
+///
+/// \param called_pc PC (program counter) address of the original call.
+/// \param s1 Pointer to block of memory.
+/// \param s2 Pointer to block of memory.
+/// \param n Number of bytes to compare.
+/// \param result Value returned by the intercepted function.
+void __sanitizer_weak_hook_strncmp(void *called_pc, const char *s1,
+ const char *s2, size_t n, int result);
+
+/// Interceptor hook for <c>strncasecmp()</c>.
+///
+/// \param called_pc PC (program counter) address of the original call.
+/// \param s1 Pointer to block of memory.
+/// \param s2 Pointer to block of memory.
+/// \param n Number of bytes to compare.
+/// \param result Value returned by the intercepted function.
+void __sanitizer_weak_hook_strncasecmp(void *called_pc, const char *s1,
+ const char *s2, size_t n, int result);
+
+/// Interceptor hook for <c>strcmp()</c>.
+///
+/// \param called_pc PC (program counter) address of the original call.
+/// \param s1 Pointer to block of memory.
+/// \param s2 Pointer to block of memory.
+/// \param result Value returned by the intercepted function.
+void __sanitizer_weak_hook_strcmp(void *called_pc, const char *s1,
+ const char *s2, int result);
+
+/// Interceptor hook for <c>strcasecmp()</c>.
+///
+/// \param called_pc PC (program counter) address of the original call.
+/// \param s1 Pointer to block of memory.
+/// \param s2 Pointer to block of memory.
+/// \param result Value returned by the intercepted function.
+void __sanitizer_weak_hook_strcasecmp(void *called_pc, const char *s1,
+ const char *s2, int result);
+
+/// Interceptor hook for <c>strstr()</c>.
+///
+/// \param called_pc PC (program counter) address of the original call.
+/// \param s1 Pointer to block of memory.
+/// \param s2 Pointer to block of memory.
+/// \param result Value returned by the intercepted function.
+void __sanitizer_weak_hook_strstr(void *called_pc, const char *s1,
+ const char *s2, char *result);
+
+void __sanitizer_weak_hook_strcasestr(void *called_pc, const char *s1,
+ const char *s2, char *result);
+
+void __sanitizer_weak_hook_memmem(void *called_pc,
+ const void *s1, size_t len1,
+ const void *s2, size_t len2, void *result);
+
+// Prints stack traces for all live heap allocations ordered by total
+// allocation size until top_percent of total live heap is shown. top_percent
+// should be between 1 and 100. At most max_number_of_contexts contexts
+// (stack traces) are printed.
+// Experimental feature currently available only with ASan on Linux/x86_64.
+void __sanitizer_print_memory_profile(size_t top_percent,
+ size_t max_number_of_contexts);
+
+/// Notify ASan that a fiber switch has started (required only if implementing
+/// your own fiber library).
+///
+/// Before switching to a different stack, you must call
+/// <c>__sanitizer_start_switch_fiber()</c> with a pointer to the bottom of the
+/// destination stack and with its size. When code starts running on the new
+/// stack, it must call <c>__sanitizer_finish_switch_fiber()</c> to finalize
+/// the switch. The <c>__sanitizer_start_switch_fiber()</c> function takes a
+/// <c>void**</c> pointer argument to store the current fake stack if there is
+/// one (it is necessary when the runtime option
+/// <c>detect_stack_use_after_return</c> is enabled).
+///
+/// When restoring a stack, this <c>void**</c> pointer must be given to the
+/// <c>__sanitizer_finish_switch_fiber()</c> function. In most cases, this
+/// pointer can be stored on the stack immediately before switching. When
+/// leaving a fiber permanently, NULL must be passed as the first argument to
+/// the <c>__sanitizer_start_switch_fiber()</c> function so that the fake stack
+/// is destroyed. If your program does not need stack use-after-return
+/// detection, you can always pass NULL to these two functions.
+///
+/// \note The fake stack mechanism is disabled during fiber switch, so if a
+/// signal callback runs during the switch, it will not benefit from stack
+/// use-after-return detection.
+///
+/// \param fake_stack_save [out] Fake stack save location.
+/// \param bottom Bottom address of stack.
+/// \param size Size of stack in bytes.
+void __sanitizer_start_switch_fiber(void **fake_stack_save,
+ const void *bottom, size_t size);
+
+/// Notify ASan that a fiber switch has completed (required only if
+/// implementing your own fiber library).
+///
+/// When code starts running on the new stack, it must call
+/// <c>__sanitizer_finish_switch_fiber()</c> to finalize
+/// the switch. For usage details, see the description of
+/// <c>__sanitizer_start_switch_fiber()</c>.
+///
+/// \param fake_stack_save Fake stack save location.
+/// \param bottom_old [out] Bottom address of old stack.
+/// \param size_old [out] Size of old stack in bytes.
+void __sanitizer_finish_switch_fiber(void *fake_stack_save,
+ const void **bottom_old,
+ size_t *size_old);
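+
+/// Example (an illustrative sketch; the fiber structures and the
+/// <c>switch_to()</c> context switch are hypothetical):
+/// \code
+///   void *fake_stack_save = NULL;
+///   const void *old_bottom;
+///   size_t old_size;
+///   __sanitizer_start_switch_fiber(&fake_stack_save,
+///                                  target->stack_bottom, target->stack_size);
+///   switch_to(target);
+///   // Execution resumes here once this fiber is switched back to:
+///   __sanitizer_finish_switch_fiber(fake_stack_save, &old_bottom, &old_size);
+/// \endcode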
+
+// Get full module name and calculate pc offset within it.
+// Returns 1 if pc belongs to some module, 0 if module was not found.
+int __sanitizer_get_module_and_offset_for_pc(void *pc, char *module_path,
+ size_t module_path_len,
+ void **pc_offset);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // SANITIZER_COMMON_INTERFACE_DEFS_H
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/include/sanitizer/coverage_interface.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/include/sanitizer/coverage_interface.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/include/sanitizer/coverage_interface.h (revision 351984)
@@ -0,0 +1,35 @@
+//===-- sanitizer/coverage_interface.h --------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Public interface for sanitizer coverage.
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_COVERAGE_INTERFACE_H
+#define SANITIZER_COVERAGE_INTERFACE_H
+
+#include <sanitizer/common_interface_defs.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+ // Record and dump coverage info.
+ void __sanitizer_cov_dump(void);
+
+ // Clear collected coverage info.
+ void __sanitizer_cov_reset(void);
+
+ // Dump collected coverage info. Sorts pcs by module into individual .sancov
+ // files.
+ void __sanitizer_dump_coverage(const uintptr_t *pcs, uintptr_t len);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // SANITIZER_COVERAGE_INTERFACE_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/include/sanitizer/coverage_interface.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/include/sanitizer/dfsan_interface.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/include/sanitizer/dfsan_interface.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/include/sanitizer/dfsan_interface.h (revision 351984)
@@ -0,0 +1,121 @@
+//===-- dfsan_interface.h -------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of DataFlowSanitizer.
+//
+// Public interface header.
+//===----------------------------------------------------------------------===//
+#ifndef DFSAN_INTERFACE_H
+#define DFSAN_INTERFACE_H
+
+#include <stddef.h>
+#include <stdint.h>
+#include <sanitizer/common_interface_defs.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef uint16_t dfsan_label;
+
+/// Stores information associated with a specific label identifier. A label
+/// may be a base label created using dfsan_create_label, with associated
+/// text description and user data, or an automatically created union label,
+/// which represents the union of two label identifiers (which may themselves
+/// be base or union labels).
+struct dfsan_label_info {
+ // Fields for union labels, set to 0 for base labels.
+ dfsan_label l1;
+ dfsan_label l2;
+
+ // Fields for base labels.
+ const char *desc;
+ void *userdata;
+};
+
+/// Signature of the callback argument to dfsan_set_write_callback().
+typedef void (*dfsan_write_callback_t)(int fd, const void *buf, size_t count);
+
+/// Computes the union of \c l1 and \c l2, possibly creating a union label in
+/// the process.
+dfsan_label dfsan_union(dfsan_label l1, dfsan_label l2);
+
+/// Creates and returns a base label with the given description and user data.
+dfsan_label dfsan_create_label(const char *desc, void *userdata);
+
+/// Sets the label for each address in [addr,addr+size) to \c label.
+void dfsan_set_label(dfsan_label label, void *addr, size_t size);
+
+/// Sets the label for each address in [addr,addr+size) to the union of the
+/// current label for that address and \c label.
+void dfsan_add_label(dfsan_label label, void *addr, size_t size);
+
+/// Retrieves the label associated with the given data.
+///
+/// The type of 'data' is arbitrary. The function accepts a value of any type,
+/// which can be truncated or extended (implicitly or explicitly) as necessary.
+/// The truncation/extension operations will preserve the label of the original
+/// value.
+dfsan_label dfsan_get_label(long data);
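+
+// Example (an illustrative sketch of the propagation semantics documented
+// above; the label l is hypothetical):
+//
+//   dfsan_label l = dfsan_create_label("input", NULL);
+//   int x = 42;
+//   dfsan_set_label(l, &x, sizeof(x));
+//   int y = x;                       // the label propagates with the data
+//   assert(dfsan_get_label(y) == l);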
+
+/// Retrieves the label associated with the data at the given address.
+dfsan_label dfsan_read_label(const void *addr, size_t size);
+
+/// Retrieves a pointer to the dfsan_label_info struct for the given label.
+const struct dfsan_label_info *dfsan_get_label_info(dfsan_label label);
+
+/// Returns whether the given label \c label contains the label \c elem.
+int dfsan_has_label(dfsan_label label, dfsan_label elem);
+
+/// If the given label \c label contains a label with the description
+/// \c desc, returns that label, else returns 0.
+dfsan_label dfsan_has_label_with_desc(dfsan_label label, const char *desc);
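+
+// Example (illustrative): for labels a and b created with dfsan_create_label
+// and u = dfsan_union(a, b), both dfsan_has_label(u, a) and
+// dfsan_has_label(u, b) are non-zero, and dfsan_has_label_with_desc(u, "a")
+// returns a if "a" was the description passed when creating that label.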
+
+/// Returns the number of labels allocated.
+size_t dfsan_get_label_count(void);
+
+/// Flushes the DFSan shadow, i.e. forgets about all labels currently
+/// associated with the application memory. Will work only if there are no
+/// other threads executing DFSan-instrumented code concurrently.
+/// Use this call to start over the taint tracking within the same process.
+void dfsan_flush(void);
+
+/// Sets a callback to be invoked on calls to write(). The callback is invoked
+/// before the write is done. The write is not guaranteed to succeed when the
+/// callback executes. Pass in NULL to remove any callback.
+void dfsan_set_write_callback(dfsan_write_callback_t labeled_write_callback);
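+
+// Example (an illustrative sketch; my_write_callback is hypothetical):
+//
+//   static void my_write_callback(int fd, const void *buf, size_t count) {
+//     dfsan_label l = dfsan_read_label(buf, count);
+//     /* inspect fd, count, and l as needed */
+//   }
+//   ...
+//   dfsan_set_write_callback(my_write_callback);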
+
+/// Writes the labels currently used by the program to the given file
+/// descriptor. The lines of the output have the following format:
+///
+/// <label> <parent label 1> <parent label 2> <label description if any>
+void dfsan_dump_labels(int fd);
+
+/// Interceptor hooks.
+/// Whenever one of dfsan's custom functions is called, the corresponding
+/// hook is called if it is non-zero. The hooks should be defined by the user.
+/// The primary use case is taint-guided fuzzing, where the fuzzer
+/// needs to see the parameters of the function and the labels.
+/// FIXME: implement more hooks.
+void dfsan_weak_hook_memcmp(void *caller_pc, const void *s1, const void *s2,
+ size_t n, dfsan_label s1_label,
+ dfsan_label s2_label, dfsan_label n_label);
+void dfsan_weak_hook_strncmp(void *caller_pc, const char *s1, const char *s2,
+ size_t n, dfsan_label s1_label,
+ dfsan_label s2_label, dfsan_label n_label);
+#ifdef __cplusplus
+} // extern "C"
+
+template <typename T>
+void dfsan_set_label(dfsan_label label, T &data) { // NOLINT
+ dfsan_set_label(label, (void *)&data, sizeof(T));
+}
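+
+// Example (illustrative): the overload above deduces the size, so for an
+// int x, dfsan_set_label(l, x) is equivalent to
+// dfsan_set_label(l, (void *)&x, sizeof(int)).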
+
+#endif
+
+#endif // DFSAN_INTERFACE_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/include/sanitizer/dfsan_interface.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/include/sanitizer/hwasan_interface.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/include/sanitizer/hwasan_interface.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/include/sanitizer/hwasan_interface.h (revision 351984)
@@ -0,0 +1,96 @@
+//===-- sanitizer/hwasan_interface.h ----------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of HWAddressSanitizer.
+//
+// Public interface header.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_HWASAN_INTERFACE_H
+#define SANITIZER_HWASAN_INTERFACE_H
+
+#include <sanitizer/common_interface_defs.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+ // Libc hook for program startup in statically linked executables.
+ // Initializes enough of the runtime to run instrumented code. This function
+ // should only be called in statically linked executables because it modifies
+ // the GOT, which won't work in regular binaries because RELRO will already
+ // have been applied by the time the function is called. This also means that
+ // the function should be called before libc applies RELRO.
+ // Does not call libc unless there is an error.
+ // Can be called multiple times.
+ void __hwasan_init_static(void);
+
+ // This function may be optionally provided by the user and should return
+ // a string containing HWASan runtime options. See asan_flags.h for details.
+ const char* __hwasan_default_options(void);
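+
+ // Example (illustrative): a program may define, e.g.,
+ //
+ //   const char *__hwasan_default_options(void) { return "verbosity=1"; }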
+
+ void __hwasan_enable_allocator_tagging(void);
+ void __hwasan_disable_allocator_tagging(void);
+
+ // Mark a region of memory with the given tag. Both the address and the size
+ // need to be 16-byte aligned.
+ void __hwasan_tag_memory(const volatile void *p, unsigned char tag,
+ size_t size);
+
+ // Set the pointer tag. The previous tag is lost.
+ void *__hwasan_tag_pointer(const volatile void *p, unsigned char tag);
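+
+ // Example (an illustrative sketch; p is a hypothetical 16-byte-aligned,
+ // 32-byte allocation):
+ //
+ //   __hwasan_tag_memory(p, 0x2a, 32);
+ //   int *q = (int *)__hwasan_tag_pointer(p, 0x2a);
+ //   // accesses through q now match the memory tag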
+
+ // Set the memory tag for the region from the current SP address up to the
+ // given address to zero. This is meant to annotate longjmp and other
+ // non-local jumps.
+ // This function needs to know the (almost) exact destination frame address;
+ // clearing shadow for the entire thread stack like __asan_handle_no_return
+ // does would cause false reports.
+ void __hwasan_handle_longjmp(const void *sp_dst);
+
+ // Set memory tag for the part of the current thread stack below sp_dst to
+ // zero. Call this in vfork() before returning in the parent process.
+ void __hwasan_handle_vfork(const void *sp_dst);
+
+ // Libc hook for thread creation. Should be called in the child thread before
+ // any instrumented code.
+ void __hwasan_thread_enter(void);
+
+ // Libc hook for thread destruction. No instrumented code should run after
+ // this call.
+ void __hwasan_thread_exit(void);
+
+ // Print shadow and origin for the memory range to stderr in a human-readable
+ // format.
+ void __hwasan_print_shadow(const volatile void *x, size_t size);
+
+ // Print one-line report about the memory usage of the current process.
+ void __hwasan_print_memory_usage(void);
+
+ // Returns the offset of the first byte in the memory range that cannot be
+ // accessed through the pointer in x, or -1 if the whole range is good.
+ intptr_t __hwasan_test_shadow(const volatile void *x, size_t size);
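+
+ // Example (illustrative): for a pointer q whose tag matches a 32-byte
+ // tagged region, as in the sketch above, __hwasan_test_shadow(q, 32)
+ // returns -1, while probing beyond the region returns the offset of the
+ // first byte whose tag does not match the pointer.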
+
+ int __sanitizer_posix_memalign(void **memptr, size_t alignment, size_t size);
+ void * __sanitizer_memalign(size_t alignment, size_t size);
+ void * __sanitizer_aligned_alloc(size_t alignment, size_t size);
+ void * __sanitizer___libc_memalign(size_t alignment, size_t size);
+ void * __sanitizer_valloc(size_t size);
+ void * __sanitizer_pvalloc(size_t size);
+ void __sanitizer_free(void *ptr);
+ void __sanitizer_cfree(void *ptr);
+ size_t __sanitizer_malloc_usable_size(const void *ptr);
+ struct mallinfo __sanitizer_mallinfo(void);
+ int __sanitizer_mallopt(int cmd, int value);
+ void __sanitizer_malloc_stats(void);
+ void * __sanitizer_calloc(size_t nmemb, size_t size);
+ void * __sanitizer_realloc(void *ptr, size_t size);
+ void * __sanitizer_reallocarray(void *ptr, size_t nmemb, size_t size);
+ void * __sanitizer_malloc(size_t size);
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // SANITIZER_HWASAN_INTERFACE_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/include/sanitizer/hwasan_interface.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/include/sanitizer/linux_syscall_hooks.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/include/sanitizer/linux_syscall_hooks.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/include/sanitizer/linux_syscall_hooks.h (revision 351984)
@@ -0,0 +1,3082 @@
+//===-- linux_syscall_hooks.h ---------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of public sanitizer interface.
+//
+// System call handlers.
+//
+// Interface methods declared in this header implement pre- and post- syscall
+// actions for the active sanitizer.
+// Usage:
+// __sanitizer_syscall_pre_getfoo(...args...);
+// long res = syscall(__NR_getfoo, ...args...);
+// __sanitizer_syscall_post_getfoo(res, ...args...);
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_LINUX_SYSCALL_HOOKS_H
+#define SANITIZER_LINUX_SYSCALL_HOOKS_H
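+
+// Concrete instance of the pattern above (a sketch; assumes Linux and
+// <sys/syscall.h> for syscall() and __NR_gettimeofday):
+//
+//   struct timeval tv;
+//   struct timezone tz;
+//   __sanitizer_syscall_pre_gettimeofday(&tv, &tz);
+//   long res = syscall(__NR_gettimeofday, &tv, &tz);
+//   __sanitizer_syscall_post_gettimeofday(res, &tv, &tz);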
+
+#define __sanitizer_syscall_pre_time(tloc) \
+ __sanitizer_syscall_pre_impl_time((long)(tloc))
+#define __sanitizer_syscall_post_time(res, tloc) \
+ __sanitizer_syscall_post_impl_time(res, (long)(tloc))
+#define __sanitizer_syscall_pre_stime(tptr) \
+ __sanitizer_syscall_pre_impl_stime((long)(tptr))
+#define __sanitizer_syscall_post_stime(res, tptr) \
+ __sanitizer_syscall_post_impl_stime(res, (long)(tptr))
+#define __sanitizer_syscall_pre_gettimeofday(tv, tz) \
+ __sanitizer_syscall_pre_impl_gettimeofday((long)(tv), (long)(tz))
+#define __sanitizer_syscall_post_gettimeofday(res, tv, tz) \
+ __sanitizer_syscall_post_impl_gettimeofday(res, (long)(tv), (long)(tz))
+#define __sanitizer_syscall_pre_settimeofday(tv, tz) \
+ __sanitizer_syscall_pre_impl_settimeofday((long)(tv), (long)(tz))
+#define __sanitizer_syscall_post_settimeofday(res, tv, tz) \
+ __sanitizer_syscall_post_impl_settimeofday(res, (long)(tv), (long)(tz))
+#define __sanitizer_syscall_pre_adjtimex(txc_p) \
+ __sanitizer_syscall_pre_impl_adjtimex((long)(txc_p))
+#define __sanitizer_syscall_post_adjtimex(res, txc_p) \
+ __sanitizer_syscall_post_impl_adjtimex(res, (long)(txc_p))
+#define __sanitizer_syscall_pre_times(tbuf) \
+ __sanitizer_syscall_pre_impl_times((long)(tbuf))
+#define __sanitizer_syscall_post_times(res, tbuf) \
+ __sanitizer_syscall_post_impl_times(res, (long)(tbuf))
+#define __sanitizer_syscall_pre_gettid() __sanitizer_syscall_pre_impl_gettid()
+#define __sanitizer_syscall_post_gettid(res) \
+ __sanitizer_syscall_post_impl_gettid(res)
+#define __sanitizer_syscall_pre_nanosleep(rqtp, rmtp) \
+ __sanitizer_syscall_pre_impl_nanosleep((long)(rqtp), (long)(rmtp))
+#define __sanitizer_syscall_post_nanosleep(res, rqtp, rmtp) \
+ __sanitizer_syscall_post_impl_nanosleep(res, (long)(rqtp), (long)(rmtp))
+#define __sanitizer_syscall_pre_alarm(seconds) \
+ __sanitizer_syscall_pre_impl_alarm((long)(seconds))
+#define __sanitizer_syscall_post_alarm(res, seconds) \
+ __sanitizer_syscall_post_impl_alarm(res, (long)(seconds))
+#define __sanitizer_syscall_pre_getpid() __sanitizer_syscall_pre_impl_getpid()
+#define __sanitizer_syscall_post_getpid(res) \
+ __sanitizer_syscall_post_impl_getpid(res)
+#define __sanitizer_syscall_pre_getppid() __sanitizer_syscall_pre_impl_getppid()
+#define __sanitizer_syscall_post_getppid(res) \
+ __sanitizer_syscall_post_impl_getppid(res)
+#define __sanitizer_syscall_pre_getuid() __sanitizer_syscall_pre_impl_getuid()
+#define __sanitizer_syscall_post_getuid(res) \
+ __sanitizer_syscall_post_impl_getuid(res)
+#define __sanitizer_syscall_pre_geteuid() __sanitizer_syscall_pre_impl_geteuid()
+#define __sanitizer_syscall_post_geteuid(res) \
+ __sanitizer_syscall_post_impl_geteuid(res)
+#define __sanitizer_syscall_pre_getgid() __sanitizer_syscall_pre_impl_getgid()
+#define __sanitizer_syscall_post_getgid(res) \
+ __sanitizer_syscall_post_impl_getgid(res)
+#define __sanitizer_syscall_pre_getegid() __sanitizer_syscall_pre_impl_getegid()
+#define __sanitizer_syscall_post_getegid(res) \
+ __sanitizer_syscall_post_impl_getegid(res)
+#define __sanitizer_syscall_pre_getresuid(ruid, euid, suid) \
+ __sanitizer_syscall_pre_impl_getresuid((long)(ruid), (long)(euid), \
+ (long)(suid))
+#define __sanitizer_syscall_post_getresuid(res, ruid, euid, suid) \
+ __sanitizer_syscall_post_impl_getresuid(res, (long)(ruid), (long)(euid), \
+ (long)(suid))
+#define __sanitizer_syscall_pre_getresgid(rgid, egid, sgid) \
+ __sanitizer_syscall_pre_impl_getresgid((long)(rgid), (long)(egid), \
+ (long)(sgid))
+#define __sanitizer_syscall_post_getresgid(res, rgid, egid, sgid) \
+ __sanitizer_syscall_post_impl_getresgid(res, (long)(rgid), (long)(egid), \
+ (long)(sgid))
+#define __sanitizer_syscall_pre_getpgid(pid) \
+ __sanitizer_syscall_pre_impl_getpgid((long)(pid))
+#define __sanitizer_syscall_post_getpgid(res, pid) \
+ __sanitizer_syscall_post_impl_getpgid(res, (long)(pid))
+#define __sanitizer_syscall_pre_getpgrp() __sanitizer_syscall_pre_impl_getpgrp()
+#define __sanitizer_syscall_post_getpgrp(res) \
+ __sanitizer_syscall_post_impl_getpgrp(res)
+#define __sanitizer_syscall_pre_getsid(pid) \
+ __sanitizer_syscall_pre_impl_getsid((long)(pid))
+#define __sanitizer_syscall_post_getsid(res, pid) \
+ __sanitizer_syscall_post_impl_getsid(res, (long)(pid))
+#define __sanitizer_syscall_pre_getgroups(gidsetsize, grouplist) \
+ __sanitizer_syscall_pre_impl_getgroups((long)(gidsetsize), (long)(grouplist))
+#define __sanitizer_syscall_post_getgroups(res, gidsetsize, grouplist) \
+ __sanitizer_syscall_post_impl_getgroups(res, (long)(gidsetsize), \
+ (long)(grouplist))
+#define __sanitizer_syscall_pre_setregid(rgid, egid) \
+ __sanitizer_syscall_pre_impl_setregid((long)(rgid), (long)(egid))
+#define __sanitizer_syscall_post_setregid(res, rgid, egid) \
+ __sanitizer_syscall_post_impl_setregid(res, (long)(rgid), (long)(egid))
+#define __sanitizer_syscall_pre_setgid(gid) \
+ __sanitizer_syscall_pre_impl_setgid((long)(gid))
+#define __sanitizer_syscall_post_setgid(res, gid) \
+ __sanitizer_syscall_post_impl_setgid(res, (long)(gid))
+#define __sanitizer_syscall_pre_setreuid(ruid, euid) \
+ __sanitizer_syscall_pre_impl_setreuid((long)(ruid), (long)(euid))
+#define __sanitizer_syscall_post_setreuid(res, ruid, euid) \
+ __sanitizer_syscall_post_impl_setreuid(res, (long)(ruid), (long)(euid))
+#define __sanitizer_syscall_pre_setuid(uid) \
+ __sanitizer_syscall_pre_impl_setuid((long)(uid))
+#define __sanitizer_syscall_post_setuid(res, uid) \
+ __sanitizer_syscall_post_impl_setuid(res, (long)(uid))
+#define __sanitizer_syscall_pre_setresuid(ruid, euid, suid) \
+ __sanitizer_syscall_pre_impl_setresuid((long)(ruid), (long)(euid), \
+ (long)(suid))
+#define __sanitizer_syscall_post_setresuid(res, ruid, euid, suid) \
+ __sanitizer_syscall_post_impl_setresuid(res, (long)(ruid), (long)(euid), \
+ (long)(suid))
+#define __sanitizer_syscall_pre_setresgid(rgid, egid, sgid) \
+ __sanitizer_syscall_pre_impl_setresgid((long)(rgid), (long)(egid), \
+ (long)(sgid))
+#define __sanitizer_syscall_post_setresgid(res, rgid, egid, sgid) \
+ __sanitizer_syscall_post_impl_setresgid(res, (long)(rgid), (long)(egid), \
+ (long)(sgid))
+#define __sanitizer_syscall_pre_setfsuid(uid) \
+ __sanitizer_syscall_pre_impl_setfsuid((long)(uid))
+#define __sanitizer_syscall_post_setfsuid(res, uid) \
+ __sanitizer_syscall_post_impl_setfsuid(res, (long)(uid))
+#define __sanitizer_syscall_pre_setfsgid(gid) \
+ __sanitizer_syscall_pre_impl_setfsgid((long)(gid))
+#define __sanitizer_syscall_post_setfsgid(res, gid) \
+ __sanitizer_syscall_post_impl_setfsgid(res, (long)(gid))
+#define __sanitizer_syscall_pre_setpgid(pid, pgid) \
+ __sanitizer_syscall_pre_impl_setpgid((long)(pid), (long)(pgid))
+#define __sanitizer_syscall_post_setpgid(res, pid, pgid) \
+ __sanitizer_syscall_post_impl_setpgid(res, (long)(pid), (long)(pgid))
+#define __sanitizer_syscall_pre_setsid() __sanitizer_syscall_pre_impl_setsid()
+#define __sanitizer_syscall_post_setsid(res) \
+ __sanitizer_syscall_post_impl_setsid(res)
+#define __sanitizer_syscall_pre_setgroups(gidsetsize, grouplist) \
+ __sanitizer_syscall_pre_impl_setgroups((long)(gidsetsize), (long)(grouplist))
+#define __sanitizer_syscall_post_setgroups(res, gidsetsize, grouplist) \
+ __sanitizer_syscall_post_impl_setgroups(res, (long)(gidsetsize), \
+ (long)(grouplist))
+#define __sanitizer_syscall_pre_acct(name) \
+ __sanitizer_syscall_pre_impl_acct((long)(name))
+#define __sanitizer_syscall_post_acct(res, name) \
+ __sanitizer_syscall_post_impl_acct(res, (long)(name))
+#define __sanitizer_syscall_pre_capget(header, dataptr) \
+ __sanitizer_syscall_pre_impl_capget((long)(header), (long)(dataptr))
+#define __sanitizer_syscall_post_capget(res, header, dataptr) \
+ __sanitizer_syscall_post_impl_capget(res, (long)(header), (long)(dataptr))
+#define __sanitizer_syscall_pre_capset(header, data) \
+ __sanitizer_syscall_pre_impl_capset((long)(header), (long)(data))
+#define __sanitizer_syscall_post_capset(res, header, data) \
+ __sanitizer_syscall_post_impl_capset(res, (long)(header), (long)(data))
+#define __sanitizer_syscall_pre_personality(personality) \
+ __sanitizer_syscall_pre_impl_personality((long)(personality))
+#define __sanitizer_syscall_post_personality(res, personality) \
+ __sanitizer_syscall_post_impl_personality(res, (long)(personality))
+#define __sanitizer_syscall_pre_sigpending(set) \
+ __sanitizer_syscall_pre_impl_sigpending((long)(set))
+#define __sanitizer_syscall_post_sigpending(res, set) \
+ __sanitizer_syscall_post_impl_sigpending(res, (long)(set))
+#define __sanitizer_syscall_pre_sigprocmask(how, set, oset) \
+ __sanitizer_syscall_pre_impl_sigprocmask((long)(how), (long)(set), \
+ (long)(oset))
+#define __sanitizer_syscall_post_sigprocmask(res, how, set, oset) \
+ __sanitizer_syscall_post_impl_sigprocmask(res, (long)(how), (long)(set), \
+ (long)(oset))
+#define __sanitizer_syscall_pre_getitimer(which, value) \
+ __sanitizer_syscall_pre_impl_getitimer((long)(which), (long)(value))
+#define __sanitizer_syscall_post_getitimer(res, which, value) \
+ __sanitizer_syscall_post_impl_getitimer(res, (long)(which), (long)(value))
+#define __sanitizer_syscall_pre_setitimer(which, value, ovalue) \
+ __sanitizer_syscall_pre_impl_setitimer((long)(which), (long)(value), \
+ (long)(ovalue))
+#define __sanitizer_syscall_post_setitimer(res, which, value, ovalue) \
+ __sanitizer_syscall_post_impl_setitimer(res, (long)(which), (long)(value), \
+ (long)(ovalue))
+#define __sanitizer_syscall_pre_timer_create(which_clock, timer_event_spec, \
+ created_timer_id) \
+ __sanitizer_syscall_pre_impl_timer_create( \
+ (long)(which_clock), (long)(timer_event_spec), (long)(created_timer_id))
+#define __sanitizer_syscall_post_timer_create( \
+ res, which_clock, timer_event_spec, created_timer_id) \
+ __sanitizer_syscall_post_impl_timer_create(res, (long)(which_clock), \
+ (long)(timer_event_spec), \
+ (long)(created_timer_id))
+#define __sanitizer_syscall_pre_timer_gettime(timer_id, setting) \
+ __sanitizer_syscall_pre_impl_timer_gettime((long)(timer_id), (long)(setting))
+#define __sanitizer_syscall_post_timer_gettime(res, timer_id, setting) \
+ __sanitizer_syscall_post_impl_timer_gettime(res, (long)(timer_id), \
+ (long)(setting))
+#define __sanitizer_syscall_pre_timer_getoverrun(timer_id) \
+ __sanitizer_syscall_pre_impl_timer_getoverrun((long)(timer_id))
+#define __sanitizer_syscall_post_timer_getoverrun(res, timer_id) \
+ __sanitizer_syscall_post_impl_timer_getoverrun(res, (long)(timer_id))
+#define __sanitizer_syscall_pre_timer_settime(timer_id, flags, new_setting, \
+ old_setting) \
+ __sanitizer_syscall_pre_impl_timer_settime((long)(timer_id), (long)(flags), \
+ (long)(new_setting), \
+ (long)(old_setting))
+#define __sanitizer_syscall_post_timer_settime(res, timer_id, flags, \
+ new_setting, old_setting) \
+ __sanitizer_syscall_post_impl_timer_settime( \
+ res, (long)(timer_id), (long)(flags), (long)(new_setting), \
+ (long)(old_setting))
+#define __sanitizer_syscall_pre_timer_delete(timer_id) \
+ __sanitizer_syscall_pre_impl_timer_delete((long)(timer_id))
+#define __sanitizer_syscall_post_timer_delete(res, timer_id) \
+ __sanitizer_syscall_post_impl_timer_delete(res, (long)(timer_id))
+#define __sanitizer_syscall_pre_clock_settime(which_clock, tp) \
+ __sanitizer_syscall_pre_impl_clock_settime((long)(which_clock), (long)(tp))
+#define __sanitizer_syscall_post_clock_settime(res, which_clock, tp) \
+ __sanitizer_syscall_post_impl_clock_settime(res, (long)(which_clock), \
+ (long)(tp))
+#define __sanitizer_syscall_pre_clock_gettime(which_clock, tp) \
+ __sanitizer_syscall_pre_impl_clock_gettime((long)(which_clock), (long)(tp))
+#define __sanitizer_syscall_post_clock_gettime(res, which_clock, tp) \
+ __sanitizer_syscall_post_impl_clock_gettime(res, (long)(which_clock), \
+ (long)(tp))
+#define __sanitizer_syscall_pre_clock_adjtime(which_clock, tx) \
+ __sanitizer_syscall_pre_impl_clock_adjtime((long)(which_clock), (long)(tx))
+#define __sanitizer_syscall_post_clock_adjtime(res, which_clock, tx) \
+ __sanitizer_syscall_post_impl_clock_adjtime(res, (long)(which_clock), \
+ (long)(tx))
+#define __sanitizer_syscall_pre_clock_getres(which_clock, tp) \
+ __sanitizer_syscall_pre_impl_clock_getres((long)(which_clock), (long)(tp))
+#define __sanitizer_syscall_post_clock_getres(res, which_clock, tp) \
+ __sanitizer_syscall_post_impl_clock_getres(res, (long)(which_clock), \
+ (long)(tp))
+#define __sanitizer_syscall_pre_clock_nanosleep(which_clock, flags, rqtp, \
+ rmtp) \
+ __sanitizer_syscall_pre_impl_clock_nanosleep( \
+ (long)(which_clock), (long)(flags), (long)(rqtp), (long)(rmtp))
+#define __sanitizer_syscall_post_clock_nanosleep(res, which_clock, flags, \
+ rqtp, rmtp) \
+ __sanitizer_syscall_post_impl_clock_nanosleep( \
+ res, (long)(which_clock), (long)(flags), (long)(rqtp), (long)(rmtp))
+#define __sanitizer_syscall_pre_nice(increment) \
+ __sanitizer_syscall_pre_impl_nice((long)(increment))
+#define __sanitizer_syscall_post_nice(res, increment) \
+ __sanitizer_syscall_post_impl_nice(res, (long)(increment))
+#define __sanitizer_syscall_pre_sched_setscheduler(pid, policy, param) \
+ __sanitizer_syscall_pre_impl_sched_setscheduler((long)(pid), (long)(policy), \
+ (long)(param))
+#define __sanitizer_syscall_post_sched_setscheduler(res, pid, policy, param) \
+ __sanitizer_syscall_post_impl_sched_setscheduler( \
+ res, (long)(pid), (long)(policy), (long)(param))
+#define __sanitizer_syscall_pre_sched_setparam(pid, param) \
+ __sanitizer_syscall_pre_impl_sched_setparam((long)(pid), (long)(param))
+#define __sanitizer_syscall_post_sched_setparam(res, pid, param) \
+ __sanitizer_syscall_post_impl_sched_setparam(res, (long)(pid), (long)(param))
+#define __sanitizer_syscall_pre_sched_getscheduler(pid) \
+ __sanitizer_syscall_pre_impl_sched_getscheduler((long)(pid))
+#define __sanitizer_syscall_post_sched_getscheduler(res, pid) \
+ __sanitizer_syscall_post_impl_sched_getscheduler(res, (long)(pid))
+#define __sanitizer_syscall_pre_sched_getparam(pid, param) \
+ __sanitizer_syscall_pre_impl_sched_getparam((long)(pid), (long)(param))
+#define __sanitizer_syscall_post_sched_getparam(res, pid, param) \
+ __sanitizer_syscall_post_impl_sched_getparam(res, (long)(pid), (long)(param))
+#define __sanitizer_syscall_pre_sched_setaffinity(pid, len, user_mask_ptr) \
+ __sanitizer_syscall_pre_impl_sched_setaffinity((long)(pid), (long)(len), \
+ (long)(user_mask_ptr))
+#define __sanitizer_syscall_post_sched_setaffinity(res, pid, len, \
+ user_mask_ptr) \
+ __sanitizer_syscall_post_impl_sched_setaffinity( \
+ res, (long)(pid), (long)(len), (long)(user_mask_ptr))
+#define __sanitizer_syscall_pre_sched_getaffinity(pid, len, user_mask_ptr) \
+ __sanitizer_syscall_pre_impl_sched_getaffinity((long)(pid), (long)(len), \
+ (long)(user_mask_ptr))
+#define __sanitizer_syscall_post_sched_getaffinity(res, pid, len, \
+ user_mask_ptr) \
+ __sanitizer_syscall_post_impl_sched_getaffinity( \
+ res, (long)(pid), (long)(len), (long)(user_mask_ptr))
+#define __sanitizer_syscall_pre_sched_yield() \
+ __sanitizer_syscall_pre_impl_sched_yield()
+#define __sanitizer_syscall_post_sched_yield(res) \
+ __sanitizer_syscall_post_impl_sched_yield(res)
+#define __sanitizer_syscall_pre_sched_get_priority_max(policy) \
+ __sanitizer_syscall_pre_impl_sched_get_priority_max((long)(policy))
+#define __sanitizer_syscall_post_sched_get_priority_max(res, policy) \
+ __sanitizer_syscall_post_impl_sched_get_priority_max(res, (long)(policy))
+#define __sanitizer_syscall_pre_sched_get_priority_min(policy) \
+ __sanitizer_syscall_pre_impl_sched_get_priority_min((long)(policy))
+#define __sanitizer_syscall_post_sched_get_priority_min(res, policy) \
+ __sanitizer_syscall_post_impl_sched_get_priority_min(res, (long)(policy))
+#define __sanitizer_syscall_pre_sched_rr_get_interval(pid, interval) \
+ __sanitizer_syscall_pre_impl_sched_rr_get_interval((long)(pid), \
+ (long)(interval))
+#define __sanitizer_syscall_post_sched_rr_get_interval(res, pid, interval) \
+ __sanitizer_syscall_post_impl_sched_rr_get_interval(res, (long)(pid), \
+ (long)(interval))
+#define __sanitizer_syscall_pre_setpriority(which, who, niceval) \
+ __sanitizer_syscall_pre_impl_setpriority((long)(which), (long)(who), \
+ (long)(niceval))
+#define __sanitizer_syscall_post_setpriority(res, which, who, niceval) \
+ __sanitizer_syscall_post_impl_setpriority(res, (long)(which), (long)(who), \
+ (long)(niceval))
+#define __sanitizer_syscall_pre_getpriority(which, who) \
+ __sanitizer_syscall_pre_impl_getpriority((long)(which), (long)(who))
+#define __sanitizer_syscall_post_getpriority(res, which, who) \
+ __sanitizer_syscall_post_impl_getpriority(res, (long)(which), (long)(who))
+#define __sanitizer_syscall_pre_shutdown(arg0, arg1) \
+ __sanitizer_syscall_pre_impl_shutdown((long)(arg0), (long)(arg1))
+#define __sanitizer_syscall_post_shutdown(res, arg0, arg1) \
+ __sanitizer_syscall_post_impl_shutdown(res, (long)(arg0), (long)(arg1))
+#define __sanitizer_syscall_pre_reboot(magic1, magic2, cmd, arg) \
+ __sanitizer_syscall_pre_impl_reboot((long)(magic1), (long)(magic2), \
+ (long)(cmd), (long)(arg))
+#define __sanitizer_syscall_post_reboot(res, magic1, magic2, cmd, arg) \
+ __sanitizer_syscall_post_impl_reboot(res, (long)(magic1), (long)(magic2), \
+ (long)(cmd), (long)(arg))
+#define __sanitizer_syscall_pre_restart_syscall() \
+ __sanitizer_syscall_pre_impl_restart_syscall()
+#define __sanitizer_syscall_post_restart_syscall(res) \
+ __sanitizer_syscall_post_impl_restart_syscall(res)
+#define __sanitizer_syscall_pre_kexec_load(entry, nr_segments, segments, \
+ flags) \
+ __sanitizer_syscall_pre_impl_kexec_load((long)(entry), (long)(nr_segments), \
+ (long)(segments), (long)(flags))
+#define __sanitizer_syscall_post_kexec_load(res, entry, nr_segments, segments, \
+ flags) \
+ __sanitizer_syscall_post_impl_kexec_load(res, (long)(entry), \
+ (long)(nr_segments), \
+ (long)(segments), (long)(flags))
+#define __sanitizer_syscall_pre_exit(error_code) \
+ __sanitizer_syscall_pre_impl_exit((long)(error_code))
+#define __sanitizer_syscall_post_exit(res, error_code) \
+ __sanitizer_syscall_post_impl_exit(res, (long)(error_code))
+#define __sanitizer_syscall_pre_exit_group(error_code) \
+ __sanitizer_syscall_pre_impl_exit_group((long)(error_code))
+#define __sanitizer_syscall_post_exit_group(res, error_code) \
+ __sanitizer_syscall_post_impl_exit_group(res, (long)(error_code))
+#define __sanitizer_syscall_pre_wait4(pid, stat_addr, options, ru) \
+ __sanitizer_syscall_pre_impl_wait4((long)(pid), (long)(stat_addr), \
+ (long)(options), (long)(ru))
+#define __sanitizer_syscall_post_wait4(res, pid, stat_addr, options, ru) \
+ __sanitizer_syscall_post_impl_wait4(res, (long)(pid), (long)(stat_addr), \
+ (long)(options), (long)(ru))
+#define __sanitizer_syscall_pre_waitid(which, pid, infop, options, ru) \
+ __sanitizer_syscall_pre_impl_waitid( \
+ (long)(which), (long)(pid), (long)(infop), (long)(options), (long)(ru))
+#define __sanitizer_syscall_post_waitid(res, which, pid, infop, options, ru) \
+ __sanitizer_syscall_post_impl_waitid(res, (long)(which), (long)(pid), \
+ (long)(infop), (long)(options), \
+ (long)(ru))
+#define __sanitizer_syscall_pre_waitpid(pid, stat_addr, options) \
+ __sanitizer_syscall_pre_impl_waitpid((long)(pid), (long)(stat_addr), \
+ (long)(options))
+#define __sanitizer_syscall_post_waitpid(res, pid, stat_addr, options) \
+ __sanitizer_syscall_post_impl_waitpid(res, (long)(pid), (long)(stat_addr), \
+ (long)(options))
+#define __sanitizer_syscall_pre_set_tid_address(tidptr) \
+ __sanitizer_syscall_pre_impl_set_tid_address((long)(tidptr))
+#define __sanitizer_syscall_post_set_tid_address(res, tidptr) \
+ __sanitizer_syscall_post_impl_set_tid_address(res, (long)(tidptr))
+#define __sanitizer_syscall_pre_init_module(umod, len, uargs) \
+ __sanitizer_syscall_pre_impl_init_module((long)(umod), (long)(len), \
+ (long)(uargs))
+#define __sanitizer_syscall_post_init_module(res, umod, len, uargs) \
+ __sanitizer_syscall_post_impl_init_module(res, (long)(umod), (long)(len), \
+ (long)(uargs))
+#define __sanitizer_syscall_pre_delete_module(name_user, flags) \
+ __sanitizer_syscall_pre_impl_delete_module((long)(name_user), (long)(flags))
+#define __sanitizer_syscall_post_delete_module(res, name_user, flags) \
+ __sanitizer_syscall_post_impl_delete_module(res, (long)(name_user), \
+ (long)(flags))
+#define __sanitizer_syscall_pre_rt_sigprocmask(how, set, oset, sigsetsize) \
+ __sanitizer_syscall_pre_impl_rt_sigprocmask( \
+ (long)(how), (long)(set), (long)(oset), (long)(sigsetsize))
+#define __sanitizer_syscall_post_rt_sigprocmask(res, how, set, oset, \
+ sigsetsize) \
+ __sanitizer_syscall_post_impl_rt_sigprocmask( \
+ res, (long)(how), (long)(set), (long)(oset), (long)(sigsetsize))
+#define __sanitizer_syscall_pre_rt_sigpending(set, sigsetsize) \
+ __sanitizer_syscall_pre_impl_rt_sigpending((long)(set), (long)(sigsetsize))
+#define __sanitizer_syscall_post_rt_sigpending(res, set, sigsetsize) \
+ __sanitizer_syscall_post_impl_rt_sigpending(res, (long)(set), \
+ (long)(sigsetsize))
+#define __sanitizer_syscall_pre_rt_sigtimedwait(uthese, uinfo, uts, \
+ sigsetsize) \
+ __sanitizer_syscall_pre_impl_rt_sigtimedwait( \
+ (long)(uthese), (long)(uinfo), (long)(uts), (long)(sigsetsize))
+#define __sanitizer_syscall_post_rt_sigtimedwait(res, uthese, uinfo, uts, \
+ sigsetsize) \
+ __sanitizer_syscall_post_impl_rt_sigtimedwait( \
+ res, (long)(uthese), (long)(uinfo), (long)(uts), (long)(sigsetsize))
+#define __sanitizer_syscall_pre_rt_tgsigqueueinfo(tgid, pid, sig, uinfo) \
+ __sanitizer_syscall_pre_impl_rt_tgsigqueueinfo((long)(tgid), (long)(pid), \
+ (long)(sig), (long)(uinfo))
+#define __sanitizer_syscall_post_rt_tgsigqueueinfo(res, tgid, pid, sig, uinfo) \
+ __sanitizer_syscall_post_impl_rt_tgsigqueueinfo( \
+ res, (long)(tgid), (long)(pid), (long)(sig), (long)(uinfo))
+#define __sanitizer_syscall_pre_kill(pid, sig) \
+ __sanitizer_syscall_pre_impl_kill((long)(pid), (long)(sig))
+#define __sanitizer_syscall_post_kill(res, pid, sig) \
+ __sanitizer_syscall_post_impl_kill(res, (long)(pid), (long)(sig))
+#define __sanitizer_syscall_pre_tgkill(tgid, pid, sig) \
+ __sanitizer_syscall_pre_impl_tgkill((long)(tgid), (long)(pid), (long)(sig))
+#define __sanitizer_syscall_post_tgkill(res, tgid, pid, sig) \
+ __sanitizer_syscall_post_impl_tgkill(res, (long)(tgid), (long)(pid), \
+ (long)(sig))
+#define __sanitizer_syscall_pre_tkill(pid, sig) \
+ __sanitizer_syscall_pre_impl_tkill((long)(pid), (long)(sig))
+#define __sanitizer_syscall_post_tkill(res, pid, sig) \
+ __sanitizer_syscall_post_impl_tkill(res, (long)(pid), (long)(sig))
+#define __sanitizer_syscall_pre_rt_sigqueueinfo(pid, sig, uinfo) \
+ __sanitizer_syscall_pre_impl_rt_sigqueueinfo((long)(pid), (long)(sig), \
+ (long)(uinfo))
+#define __sanitizer_syscall_post_rt_sigqueueinfo(res, pid, sig, uinfo) \
+ __sanitizer_syscall_post_impl_rt_sigqueueinfo(res, (long)(pid), (long)(sig), \
+ (long)(uinfo))
+#define __sanitizer_syscall_pre_sgetmask() \
+ __sanitizer_syscall_pre_impl_sgetmask()
+#define __sanitizer_syscall_post_sgetmask(res) \
+ __sanitizer_syscall_post_impl_sgetmask(res)
+#define __sanitizer_syscall_pre_ssetmask(newmask) \
+ __sanitizer_syscall_pre_impl_ssetmask((long)(newmask))
+#define __sanitizer_syscall_post_ssetmask(res, newmask) \
+ __sanitizer_syscall_post_impl_ssetmask(res, (long)(newmask))
+#define __sanitizer_syscall_pre_signal(sig, handler) \
+ __sanitizer_syscall_pre_impl_signal((long)(sig), (long)(handler))
+#define __sanitizer_syscall_post_signal(res, sig, handler) \
+ __sanitizer_syscall_post_impl_signal(res, (long)(sig), (long)(handler))
+#define __sanitizer_syscall_pre_pause() __sanitizer_syscall_pre_impl_pause()
+#define __sanitizer_syscall_post_pause(res) \
+ __sanitizer_syscall_post_impl_pause(res)
+#define __sanitizer_syscall_pre_sync() __sanitizer_syscall_pre_impl_sync()
+#define __sanitizer_syscall_post_sync(res) \
+ __sanitizer_syscall_post_impl_sync(res)
+#define __sanitizer_syscall_pre_fsync(fd) \
+ __sanitizer_syscall_pre_impl_fsync((long)(fd))
+#define __sanitizer_syscall_post_fsync(res, fd) \
+ __sanitizer_syscall_post_impl_fsync(res, (long)(fd))
+#define __sanitizer_syscall_pre_fdatasync(fd) \
+ __sanitizer_syscall_pre_impl_fdatasync((long)(fd))
+#define __sanitizer_syscall_post_fdatasync(res, fd) \
+ __sanitizer_syscall_post_impl_fdatasync(res, (long)(fd))
+#define __sanitizer_syscall_pre_bdflush(func, data) \
+ __sanitizer_syscall_pre_impl_bdflush((long)(func), (long)(data))
+#define __sanitizer_syscall_post_bdflush(res, func, data) \
+ __sanitizer_syscall_post_impl_bdflush(res, (long)(func), (long)(data))
+#define __sanitizer_syscall_pre_mount(dev_name, dir_name, type, flags, data) \
+ __sanitizer_syscall_pre_impl_mount((long)(dev_name), (long)(dir_name), \
+ (long)(type), (long)(flags), \
+ (long)(data))
+#define __sanitizer_syscall_post_mount(res, dev_name, dir_name, type, flags, \
+ data) \
+ __sanitizer_syscall_post_impl_mount(res, (long)(dev_name), (long)(dir_name), \
+ (long)(type), (long)(flags), \
+ (long)(data))
+#define __sanitizer_syscall_pre_umount(name, flags) \
+ __sanitizer_syscall_pre_impl_umount((long)(name), (long)(flags))
+#define __sanitizer_syscall_post_umount(res, name, flags) \
+ __sanitizer_syscall_post_impl_umount(res, (long)(name), (long)(flags))
+#define __sanitizer_syscall_pre_oldumount(name) \
+ __sanitizer_syscall_pre_impl_oldumount((long)(name))
+#define __sanitizer_syscall_post_oldumount(res, name) \
+ __sanitizer_syscall_post_impl_oldumount(res, (long)(name))
+#define __sanitizer_syscall_pre_truncate(path, length) \
+ __sanitizer_syscall_pre_impl_truncate((long)(path), (long)(length))
+#define __sanitizer_syscall_post_truncate(res, path, length) \
+ __sanitizer_syscall_post_impl_truncate(res, (long)(path), (long)(length))
+#define __sanitizer_syscall_pre_ftruncate(fd, length) \
+ __sanitizer_syscall_pre_impl_ftruncate((long)(fd), (long)(length))
+#define __sanitizer_syscall_post_ftruncate(res, fd, length) \
+ __sanitizer_syscall_post_impl_ftruncate(res, (long)(fd), (long)(length))
+#define __sanitizer_syscall_pre_stat(filename, statbuf) \
+ __sanitizer_syscall_pre_impl_stat((long)(filename), (long)(statbuf))
+#define __sanitizer_syscall_post_stat(res, filename, statbuf) \
+ __sanitizer_syscall_post_impl_stat(res, (long)(filename), (long)(statbuf))
+#define __sanitizer_syscall_pre_statfs(path, buf) \
+ __sanitizer_syscall_pre_impl_statfs((long)(path), (long)(buf))
+#define __sanitizer_syscall_post_statfs(res, path, buf) \
+ __sanitizer_syscall_post_impl_statfs(res, (long)(path), (long)(buf))
+#define __sanitizer_syscall_pre_statfs64(path, sz, buf) \
+ __sanitizer_syscall_pre_impl_statfs64((long)(path), (long)(sz), (long)(buf))
+#define __sanitizer_syscall_post_statfs64(res, path, sz, buf) \
+ __sanitizer_syscall_post_impl_statfs64(res, (long)(path), (long)(sz), \
+ (long)(buf))
+#define __sanitizer_syscall_pre_fstatfs(fd, buf) \
+ __sanitizer_syscall_pre_impl_fstatfs((long)(fd), (long)(buf))
+#define __sanitizer_syscall_post_fstatfs(res, fd, buf) \
+ __sanitizer_syscall_post_impl_fstatfs(res, (long)(fd), (long)(buf))
+#define __sanitizer_syscall_pre_fstatfs64(fd, sz, buf) \
+ __sanitizer_syscall_pre_impl_fstatfs64((long)(fd), (long)(sz), (long)(buf))
+#define __sanitizer_syscall_post_fstatfs64(res, fd, sz, buf) \
+ __sanitizer_syscall_post_impl_fstatfs64(res, (long)(fd), (long)(sz), \
+ (long)(buf))
+#define __sanitizer_syscall_pre_lstat(filename, statbuf) \
+ __sanitizer_syscall_pre_impl_lstat((long)(filename), (long)(statbuf))
+#define __sanitizer_syscall_post_lstat(res, filename, statbuf) \
+ __sanitizer_syscall_post_impl_lstat(res, (long)(filename), (long)(statbuf))
+#define __sanitizer_syscall_pre_fstat(fd, statbuf) \
+ __sanitizer_syscall_pre_impl_fstat((long)(fd), (long)(statbuf))
+#define __sanitizer_syscall_post_fstat(res, fd, statbuf) \
+ __sanitizer_syscall_post_impl_fstat(res, (long)(fd), (long)(statbuf))
+#define __sanitizer_syscall_pre_newstat(filename, statbuf) \
+ __sanitizer_syscall_pre_impl_newstat((long)(filename), (long)(statbuf))
+#define __sanitizer_syscall_post_newstat(res, filename, statbuf) \
+ __sanitizer_syscall_post_impl_newstat(res, (long)(filename), (long)(statbuf))
+#define __sanitizer_syscall_pre_newlstat(filename, statbuf) \
+ __sanitizer_syscall_pre_impl_newlstat((long)(filename), (long)(statbuf))
+#define __sanitizer_syscall_post_newlstat(res, filename, statbuf) \
+ __sanitizer_syscall_post_impl_newlstat(res, (long)(filename), (long)(statbuf))
+#define __sanitizer_syscall_pre_newfstat(fd, statbuf) \
+ __sanitizer_syscall_pre_impl_newfstat((long)(fd), (long)(statbuf))
+#define __sanitizer_syscall_post_newfstat(res, fd, statbuf) \
+ __sanitizer_syscall_post_impl_newfstat(res, (long)(fd), (long)(statbuf))
+#define __sanitizer_syscall_pre_ustat(dev, ubuf) \
+ __sanitizer_syscall_pre_impl_ustat((long)(dev), (long)(ubuf))
+#define __sanitizer_syscall_post_ustat(res, dev, ubuf) \
+ __sanitizer_syscall_post_impl_ustat(res, (long)(dev), (long)(ubuf))
+#define __sanitizer_syscall_pre_stat64(filename, statbuf) \
+ __sanitizer_syscall_pre_impl_stat64((long)(filename), (long)(statbuf))
+#define __sanitizer_syscall_post_stat64(res, filename, statbuf) \
+ __sanitizer_syscall_post_impl_stat64(res, (long)(filename), (long)(statbuf))
+#define __sanitizer_syscall_pre_fstat64(fd, statbuf) \
+ __sanitizer_syscall_pre_impl_fstat64((long)(fd), (long)(statbuf))
+#define __sanitizer_syscall_post_fstat64(res, fd, statbuf) \
+ __sanitizer_syscall_post_impl_fstat64(res, (long)(fd), (long)(statbuf))
+#define __sanitizer_syscall_pre_lstat64(filename, statbuf) \
+ __sanitizer_syscall_pre_impl_lstat64((long)(filename), (long)(statbuf))
+#define __sanitizer_syscall_post_lstat64(res, filename, statbuf) \
+ __sanitizer_syscall_post_impl_lstat64(res, (long)(filename), (long)(statbuf))
+#define __sanitizer_syscall_pre_setxattr(path, name, value, size, flags) \
+ __sanitizer_syscall_pre_impl_setxattr( \
+ (long)(path), (long)(name), (long)(value), (long)(size), (long)(flags))
+#define __sanitizer_syscall_post_setxattr(res, path, name, value, size, flags) \
+ __sanitizer_syscall_post_impl_setxattr(res, (long)(path), (long)(name), \
+ (long)(value), (long)(size), \
+ (long)(flags))
+#define __sanitizer_syscall_pre_lsetxattr(path, name, value, size, flags) \
+ __sanitizer_syscall_pre_impl_lsetxattr( \
+ (long)(path), (long)(name), (long)(value), (long)(size), (long)(flags))
+#define __sanitizer_syscall_post_lsetxattr(res, path, name, value, size, \
+ flags) \
+ __sanitizer_syscall_post_impl_lsetxattr(res, (long)(path), (long)(name), \
+ (long)(value), (long)(size), \
+ (long)(flags))
+#define __sanitizer_syscall_pre_fsetxattr(fd, name, value, size, flags) \
+ __sanitizer_syscall_pre_impl_fsetxattr( \
+ (long)(fd), (long)(name), (long)(value), (long)(size), (long)(flags))
+#define __sanitizer_syscall_post_fsetxattr(res, fd, name, value, size, flags) \
+ __sanitizer_syscall_post_impl_fsetxattr(res, (long)(fd), (long)(name), \
+ (long)(value), (long)(size), \
+ (long)(flags))
+#define __sanitizer_syscall_pre_getxattr(path, name, value, size) \
+ __sanitizer_syscall_pre_impl_getxattr((long)(path), (long)(name), \
+ (long)(value), (long)(size))
+#define __sanitizer_syscall_post_getxattr(res, path, name, value, size) \
+ __sanitizer_syscall_post_impl_getxattr(res, (long)(path), (long)(name), \
+ (long)(value), (long)(size))
+#define __sanitizer_syscall_pre_lgetxattr(path, name, value, size) \
+ __sanitizer_syscall_pre_impl_lgetxattr((long)(path), (long)(name), \
+ (long)(value), (long)(size))
+#define __sanitizer_syscall_post_lgetxattr(res, path, name, value, size) \
+ __sanitizer_syscall_post_impl_lgetxattr(res, (long)(path), (long)(name), \
+ (long)(value), (long)(size))
+#define __sanitizer_syscall_pre_fgetxattr(fd, name, value, size) \
+ __sanitizer_syscall_pre_impl_fgetxattr((long)(fd), (long)(name), \
+ (long)(value), (long)(size))
+#define __sanitizer_syscall_post_fgetxattr(res, fd, name, value, size) \
+ __sanitizer_syscall_post_impl_fgetxattr(res, (long)(fd), (long)(name), \
+ (long)(value), (long)(size))
+#define __sanitizer_syscall_pre_listxattr(path, list, size) \
+ __sanitizer_syscall_pre_impl_listxattr((long)(path), (long)(list), \
+ (long)(size))
+#define __sanitizer_syscall_post_listxattr(res, path, list, size) \
+ __sanitizer_syscall_post_impl_listxattr(res, (long)(path), (long)(list), \
+ (long)(size))
+#define __sanitizer_syscall_pre_llistxattr(path, list, size) \
+ __sanitizer_syscall_pre_impl_llistxattr((long)(path), (long)(list), \
+ (long)(size))
+#define __sanitizer_syscall_post_llistxattr(res, path, list, size) \
+ __sanitizer_syscall_post_impl_llistxattr(res, (long)(path), (long)(list), \
+ (long)(size))
+#define __sanitizer_syscall_pre_flistxattr(fd, list, size) \
+ __sanitizer_syscall_pre_impl_flistxattr((long)(fd), (long)(list), \
+ (long)(size))
+#define __sanitizer_syscall_post_flistxattr(res, fd, list, size) \
+ __sanitizer_syscall_post_impl_flistxattr(res, (long)(fd), (long)(list), \
+ (long)(size))
+#define __sanitizer_syscall_pre_removexattr(path, name) \
+ __sanitizer_syscall_pre_impl_removexattr((long)(path), (long)(name))
+#define __sanitizer_syscall_post_removexattr(res, path, name) \
+ __sanitizer_syscall_post_impl_removexattr(res, (long)(path), (long)(name))
+#define __sanitizer_syscall_pre_lremovexattr(path, name) \
+ __sanitizer_syscall_pre_impl_lremovexattr((long)(path), (long)(name))
+#define __sanitizer_syscall_post_lremovexattr(res, path, name) \
+ __sanitizer_syscall_post_impl_lremovexattr(res, (long)(path), (long)(name))
+#define __sanitizer_syscall_pre_fremovexattr(fd, name) \
+ __sanitizer_syscall_pre_impl_fremovexattr((long)(fd), (long)(name))
+#define __sanitizer_syscall_post_fremovexattr(res, fd, name) \
+ __sanitizer_syscall_post_impl_fremovexattr(res, (long)(fd), (long)(name))
+#define __sanitizer_syscall_pre_brk(brk) \
+ __sanitizer_syscall_pre_impl_brk((long)(brk))
+#define __sanitizer_syscall_post_brk(res, brk) \
+ __sanitizer_syscall_post_impl_brk(res, (long)(brk))
+#define __sanitizer_syscall_pre_mprotect(start, len, prot) \
+ __sanitizer_syscall_pre_impl_mprotect((long)(start), (long)(len), \
+ (long)(prot))
+#define __sanitizer_syscall_post_mprotect(res, start, len, prot) \
+ __sanitizer_syscall_post_impl_mprotect(res, (long)(start), (long)(len), \
+ (long)(prot))
+#define __sanitizer_syscall_pre_mremap(addr, old_len, new_len, flags, \
+ new_addr) \
+ __sanitizer_syscall_pre_impl_mremap((long)(addr), (long)(old_len), \
+ (long)(new_len), (long)(flags), \
+ (long)(new_addr))
+#define __sanitizer_syscall_post_mremap(res, addr, old_len, new_len, flags, \
+ new_addr) \
+ __sanitizer_syscall_post_impl_mremap(res, (long)(addr), (long)(old_len), \
+ (long)(new_len), (long)(flags), \
+ (long)(new_addr))
+#define __sanitizer_syscall_pre_remap_file_pages(start, size, prot, pgoff, \
+ flags) \
+ __sanitizer_syscall_pre_impl_remap_file_pages( \
+ (long)(start), (long)(size), (long)(prot), (long)(pgoff), (long)(flags))
+#define __sanitizer_syscall_post_remap_file_pages(res, start, size, prot, \
+ pgoff, flags) \
+ __sanitizer_syscall_post_impl_remap_file_pages(res, (long)(start), \
+ (long)(size), (long)(prot), \
+ (long)(pgoff), (long)(flags))
+#define __sanitizer_syscall_pre_msync(start, len, flags) \
+ __sanitizer_syscall_pre_impl_msync((long)(start), (long)(len), (long)(flags))
+#define __sanitizer_syscall_post_msync(res, start, len, flags) \
+ __sanitizer_syscall_post_impl_msync(res, (long)(start), (long)(len), \
+ (long)(flags))
+#define __sanitizer_syscall_pre_munmap(addr, len) \
+ __sanitizer_syscall_pre_impl_munmap((long)(addr), (long)(len))
+#define __sanitizer_syscall_post_munmap(res, addr, len) \
+ __sanitizer_syscall_post_impl_munmap(res, (long)(addr), (long)(len))
+#define __sanitizer_syscall_pre_mlock(start, len) \
+ __sanitizer_syscall_pre_impl_mlock((long)(start), (long)(len))
+#define __sanitizer_syscall_post_mlock(res, start, len) \
+ __sanitizer_syscall_post_impl_mlock(res, (long)(start), (long)(len))
+#define __sanitizer_syscall_pre_munlock(start, len) \
+ __sanitizer_syscall_pre_impl_munlock((long)(start), (long)(len))
+#define __sanitizer_syscall_post_munlock(res, start, len) \
+ __sanitizer_syscall_post_impl_munlock(res, (long)(start), (long)(len))
+#define __sanitizer_syscall_pre_mlockall(flags) \
+ __sanitizer_syscall_pre_impl_mlockall((long)(flags))
+#define __sanitizer_syscall_post_mlockall(res, flags) \
+ __sanitizer_syscall_post_impl_mlockall(res, (long)(flags))
+#define __sanitizer_syscall_pre_munlockall() \
+ __sanitizer_syscall_pre_impl_munlockall()
+#define __sanitizer_syscall_post_munlockall(res) \
+ __sanitizer_syscall_post_impl_munlockall(res)
+#define __sanitizer_syscall_pre_madvise(start, len, behavior) \
+ __sanitizer_syscall_pre_impl_madvise((long)(start), (long)(len), \
+ (long)(behavior))
+#define __sanitizer_syscall_post_madvise(res, start, len, behavior) \
+ __sanitizer_syscall_post_impl_madvise(res, (long)(start), (long)(len), \
+ (long)(behavior))
+#define __sanitizer_syscall_pre_mincore(start, len, vec) \
+ __sanitizer_syscall_pre_impl_mincore((long)(start), (long)(len), (long)(vec))
+#define __sanitizer_syscall_post_mincore(res, start, len, vec) \
+ __sanitizer_syscall_post_impl_mincore(res, (long)(start), (long)(len), \
+ (long)(vec))
+#define __sanitizer_syscall_pre_pivot_root(new_root, put_old) \
+ __sanitizer_syscall_pre_impl_pivot_root((long)(new_root), (long)(put_old))
+#define __sanitizer_syscall_post_pivot_root(res, new_root, put_old) \
+ __sanitizer_syscall_post_impl_pivot_root(res, (long)(new_root), \
+ (long)(put_old))
+#define __sanitizer_syscall_pre_chroot(filename) \
+ __sanitizer_syscall_pre_impl_chroot((long)(filename))
+#define __sanitizer_syscall_post_chroot(res, filename) \
+ __sanitizer_syscall_post_impl_chroot(res, (long)(filename))
+#define __sanitizer_syscall_pre_mknod(filename, mode, dev) \
+ __sanitizer_syscall_pre_impl_mknod((long)(filename), (long)(mode), \
+ (long)(dev))
+#define __sanitizer_syscall_post_mknod(res, filename, mode, dev) \
+ __sanitizer_syscall_post_impl_mknod(res, (long)(filename), (long)(mode), \
+ (long)(dev))
+#define __sanitizer_syscall_pre_link(oldname, newname) \
+ __sanitizer_syscall_pre_impl_link((long)(oldname), (long)(newname))
+#define __sanitizer_syscall_post_link(res, oldname, newname) \
+ __sanitizer_syscall_post_impl_link(res, (long)(oldname), (long)(newname))
+#define __sanitizer_syscall_pre_symlink(old, new_) \
+ __sanitizer_syscall_pre_impl_symlink((long)(old), (long)(new_))
+#define __sanitizer_syscall_post_symlink(res, old, new_) \
+ __sanitizer_syscall_post_impl_symlink(res, (long)(old), (long)(new_))
+#define __sanitizer_syscall_pre_unlink(pathname) \
+ __sanitizer_syscall_pre_impl_unlink((long)(pathname))
+#define __sanitizer_syscall_post_unlink(res, pathname) \
+ __sanitizer_syscall_post_impl_unlink(res, (long)(pathname))
+#define __sanitizer_syscall_pre_rename(oldname, newname) \
+ __sanitizer_syscall_pre_impl_rename((long)(oldname), (long)(newname))
+#define __sanitizer_syscall_post_rename(res, oldname, newname) \
+ __sanitizer_syscall_post_impl_rename(res, (long)(oldname), (long)(newname))
+#define __sanitizer_syscall_pre_chmod(filename, mode) \
+ __sanitizer_syscall_pre_impl_chmod((long)(filename), (long)(mode))
+#define __sanitizer_syscall_post_chmod(res, filename, mode) \
+ __sanitizer_syscall_post_impl_chmod(res, (long)(filename), (long)(mode))
+#define __sanitizer_syscall_pre_fchmod(fd, mode) \
+ __sanitizer_syscall_pre_impl_fchmod((long)(fd), (long)(mode))
+#define __sanitizer_syscall_post_fchmod(res, fd, mode) \
+ __sanitizer_syscall_post_impl_fchmod(res, (long)(fd), (long)(mode))
+#define __sanitizer_syscall_pre_fcntl(fd, cmd, arg) \
+ __sanitizer_syscall_pre_impl_fcntl((long)(fd), (long)(cmd), (long)(arg))
+#define __sanitizer_syscall_post_fcntl(res, fd, cmd, arg) \
+ __sanitizer_syscall_post_impl_fcntl(res, (long)(fd), (long)(cmd), (long)(arg))
+#define __sanitizer_syscall_pre_fcntl64(fd, cmd, arg) \
+ __sanitizer_syscall_pre_impl_fcntl64((long)(fd), (long)(cmd), (long)(arg))
+#define __sanitizer_syscall_post_fcntl64(res, fd, cmd, arg) \
+ __sanitizer_syscall_post_impl_fcntl64(res, (long)(fd), (long)(cmd), \
+ (long)(arg))
+#define __sanitizer_syscall_pre_pipe(fildes) \
+ __sanitizer_syscall_pre_impl_pipe((long)(fildes))
+#define __sanitizer_syscall_post_pipe(res, fildes) \
+ __sanitizer_syscall_post_impl_pipe(res, (long)(fildes))
+#define __sanitizer_syscall_pre_pipe2(fildes, flags) \
+ __sanitizer_syscall_pre_impl_pipe2((long)(fildes), (long)(flags))
+#define __sanitizer_syscall_post_pipe2(res, fildes, flags) \
+ __sanitizer_syscall_post_impl_pipe2(res, (long)(fildes), (long)(flags))
+#define __sanitizer_syscall_pre_dup(fildes) \
+ __sanitizer_syscall_pre_impl_dup((long)(fildes))
+#define __sanitizer_syscall_post_dup(res, fildes) \
+ __sanitizer_syscall_post_impl_dup(res, (long)(fildes))
+#define __sanitizer_syscall_pre_dup2(oldfd, newfd) \
+ __sanitizer_syscall_pre_impl_dup2((long)(oldfd), (long)(newfd))
+#define __sanitizer_syscall_post_dup2(res, oldfd, newfd) \
+ __sanitizer_syscall_post_impl_dup2(res, (long)(oldfd), (long)(newfd))
+#define __sanitizer_syscall_pre_dup3(oldfd, newfd, flags) \
+ __sanitizer_syscall_pre_impl_dup3((long)(oldfd), (long)(newfd), (long)(flags))
+#define __sanitizer_syscall_post_dup3(res, oldfd, newfd, flags) \
+ __sanitizer_syscall_post_impl_dup3(res, (long)(oldfd), (long)(newfd), \
+ (long)(flags))
+#define __sanitizer_syscall_pre_ioperm(from, num, on) \
+ __sanitizer_syscall_pre_impl_ioperm((long)(from), (long)(num), (long)(on))
+#define __sanitizer_syscall_post_ioperm(res, from, num, on) \
+ __sanitizer_syscall_post_impl_ioperm(res, (long)(from), (long)(num), \
+ (long)(on))
+#define __sanitizer_syscall_pre_ioctl(fd, cmd, arg) \
+ __sanitizer_syscall_pre_impl_ioctl((long)(fd), (long)(cmd), (long)(arg))
+#define __sanitizer_syscall_post_ioctl(res, fd, cmd, arg) \
+ __sanitizer_syscall_post_impl_ioctl(res, (long)(fd), (long)(cmd), (long)(arg))
+#define __sanitizer_syscall_pre_flock(fd, cmd) \
+ __sanitizer_syscall_pre_impl_flock((long)(fd), (long)(cmd))
+#define __sanitizer_syscall_post_flock(res, fd, cmd) \
+ __sanitizer_syscall_post_impl_flock(res, (long)(fd), (long)(cmd))
+#define __sanitizer_syscall_pre_io_setup(nr_reqs, ctx) \
+ __sanitizer_syscall_pre_impl_io_setup((long)(nr_reqs), (long)(ctx))
+#define __sanitizer_syscall_post_io_setup(res, nr_reqs, ctx) \
+ __sanitizer_syscall_post_impl_io_setup(res, (long)(nr_reqs), (long)(ctx))
+#define __sanitizer_syscall_pre_io_destroy(ctx) \
+ __sanitizer_syscall_pre_impl_io_destroy((long)(ctx))
+#define __sanitizer_syscall_post_io_destroy(res, ctx) \
+ __sanitizer_syscall_post_impl_io_destroy(res, (long)(ctx))
+#define __sanitizer_syscall_pre_io_getevents(ctx_id, min_nr, nr, events, \
+ timeout) \
+ __sanitizer_syscall_pre_impl_io_getevents((long)(ctx_id), (long)(min_nr), \
+ (long)(nr), (long)(events), \
+ (long)(timeout))
+#define __sanitizer_syscall_post_io_getevents(res, ctx_id, min_nr, nr, events, \
+ timeout) \
+ __sanitizer_syscall_post_impl_io_getevents(res, (long)(ctx_id), \
+ (long)(min_nr), (long)(nr), \
+ (long)(events), (long)(timeout))
+#define __sanitizer_syscall_pre_io_submit(ctx_id, arg1, arg2) \
+ __sanitizer_syscall_pre_impl_io_submit((long)(ctx_id), (long)(arg1), \
+ (long)(arg2))
+#define __sanitizer_syscall_post_io_submit(res, ctx_id, arg1, arg2) \
+ __sanitizer_syscall_post_impl_io_submit(res, (long)(ctx_id), (long)(arg1), \
+ (long)(arg2))
+#define __sanitizer_syscall_pre_io_cancel(ctx_id, iocb, result) \
+ __sanitizer_syscall_pre_impl_io_cancel((long)(ctx_id), (long)(iocb), \
+ (long)(result))
+#define __sanitizer_syscall_post_io_cancel(res, ctx_id, iocb, result) \
+ __sanitizer_syscall_post_impl_io_cancel(res, (long)(ctx_id), (long)(iocb), \
+ (long)(result))
+#define __sanitizer_syscall_pre_sendfile(out_fd, in_fd, offset, count) \
+ __sanitizer_syscall_pre_impl_sendfile((long)(out_fd), (long)(in_fd), \
+ (long)(offset), (long)(count))
+#define __sanitizer_syscall_post_sendfile(res, out_fd, in_fd, offset, count) \
+ __sanitizer_syscall_post_impl_sendfile(res, (long)(out_fd), (long)(in_fd), \
+ (long)(offset), (long)(count))
+#define __sanitizer_syscall_pre_sendfile64(out_fd, in_fd, offset, count) \
+ __sanitizer_syscall_pre_impl_sendfile64((long)(out_fd), (long)(in_fd), \
+ (long)(offset), (long)(count))
+#define __sanitizer_syscall_post_sendfile64(res, out_fd, in_fd, offset, count) \
+ __sanitizer_syscall_post_impl_sendfile64(res, (long)(out_fd), (long)(in_fd), \
+ (long)(offset), (long)(count))
+#define __sanitizer_syscall_pre_readlink(path, buf, bufsiz) \
+ __sanitizer_syscall_pre_impl_readlink((long)(path), (long)(buf), \
+ (long)(bufsiz))
+#define __sanitizer_syscall_post_readlink(res, path, buf, bufsiz) \
+ __sanitizer_syscall_post_impl_readlink(res, (long)(path), (long)(buf), \
+ (long)(bufsiz))
+#define __sanitizer_syscall_pre_creat(pathname, mode) \
+ __sanitizer_syscall_pre_impl_creat((long)(pathname), (long)(mode))
+#define __sanitizer_syscall_post_creat(res, pathname, mode) \
+ __sanitizer_syscall_post_impl_creat(res, (long)(pathname), (long)(mode))
+#define __sanitizer_syscall_pre_open(filename, flags, mode) \
+ __sanitizer_syscall_pre_impl_open((long)(filename), (long)(flags), \
+ (long)(mode))
+#define __sanitizer_syscall_post_open(res, filename, flags, mode) \
+ __sanitizer_syscall_post_impl_open(res, (long)(filename), (long)(flags), \
+ (long)(mode))
+#define __sanitizer_syscall_pre_close(fd) \
+ __sanitizer_syscall_pre_impl_close((long)(fd))
+#define __sanitizer_syscall_post_close(res, fd) \
+ __sanitizer_syscall_post_impl_close(res, (long)(fd))
+#define __sanitizer_syscall_pre_access(filename, mode) \
+ __sanitizer_syscall_pre_impl_access((long)(filename), (long)(mode))
+#define __sanitizer_syscall_post_access(res, filename, mode) \
+ __sanitizer_syscall_post_impl_access(res, (long)(filename), (long)(mode))
+#define __sanitizer_syscall_pre_vhangup() __sanitizer_syscall_pre_impl_vhangup()
+#define __sanitizer_syscall_post_vhangup(res) \
+ __sanitizer_syscall_post_impl_vhangup(res)
+#define __sanitizer_syscall_pre_chown(filename, user, group) \
+ __sanitizer_syscall_pre_impl_chown((long)(filename), (long)(user), \
+ (long)(group))
+#define __sanitizer_syscall_post_chown(res, filename, user, group) \
+ __sanitizer_syscall_post_impl_chown(res, (long)(filename), (long)(user), \
+ (long)(group))
+#define __sanitizer_syscall_pre_lchown(filename, user, group) \
+ __sanitizer_syscall_pre_impl_lchown((long)(filename), (long)(user), \
+ (long)(group))
+#define __sanitizer_syscall_post_lchown(res, filename, user, group) \
+ __sanitizer_syscall_post_impl_lchown(res, (long)(filename), (long)(user), \
+ (long)(group))
+#define __sanitizer_syscall_pre_fchown(fd, user, group) \
+ __sanitizer_syscall_pre_impl_fchown((long)(fd), (long)(user), (long)(group))
+#define __sanitizer_syscall_post_fchown(res, fd, user, group) \
+ __sanitizer_syscall_post_impl_fchown(res, (long)(fd), (long)(user), \
+ (long)(group))
+#define __sanitizer_syscall_pre_chown16(filename, user, group) \
+  __sanitizer_syscall_pre_impl_chown16((long)(filename), (long)(user), \
+                                       (long)(group))
+#define __sanitizer_syscall_post_chown16(res, filename, user, group) \
+  __sanitizer_syscall_post_impl_chown16(res, (long)(filename), (long)(user), \
+                                        (long)(group))
+#define __sanitizer_syscall_pre_lchown16(filename, user, group) \
+  __sanitizer_syscall_pre_impl_lchown16((long)(filename), (long)(user), \
+                                        (long)(group))
+#define __sanitizer_syscall_post_lchown16(res, filename, user, group) \
+  __sanitizer_syscall_post_impl_lchown16(res, (long)(filename), (long)(user), \
+                                         (long)(group))
+#define __sanitizer_syscall_pre_fchown16(fd, user, group) \
+  __sanitizer_syscall_pre_impl_fchown16((long)(fd), (long)(user), \
+                                        (long)(group))
+#define __sanitizer_syscall_post_fchown16(res, fd, user, group) \
+  __sanitizer_syscall_post_impl_fchown16(res, (long)(fd), (long)(user), \
+                                         (long)(group))
+#define __sanitizer_syscall_pre_setregid16(rgid, egid) \
+  __sanitizer_syscall_pre_impl_setregid16((long)(rgid), (long)(egid))
+#define __sanitizer_syscall_post_setregid16(res, rgid, egid) \
+  __sanitizer_syscall_post_impl_setregid16(res, (long)(rgid), (long)(egid))
+#define __sanitizer_syscall_pre_setgid16(gid) \
+  __sanitizer_syscall_pre_impl_setgid16((long)(gid))
+#define __sanitizer_syscall_post_setgid16(res, gid) \
+  __sanitizer_syscall_post_impl_setgid16(res, (long)(gid))
+#define __sanitizer_syscall_pre_setreuid16(ruid, euid) \
+  __sanitizer_syscall_pre_impl_setreuid16((long)(ruid), (long)(euid))
+#define __sanitizer_syscall_post_setreuid16(res, ruid, euid) \
+  __sanitizer_syscall_post_impl_setreuid16(res, (long)(ruid), (long)(euid))
+#define __sanitizer_syscall_pre_setuid16(uid) \
+  __sanitizer_syscall_pre_impl_setuid16((long)(uid))
+#define __sanitizer_syscall_post_setuid16(res, uid) \
+  __sanitizer_syscall_post_impl_setuid16(res, (long)(uid))
+#define __sanitizer_syscall_pre_setresuid16(ruid, euid, suid) \
+  __sanitizer_syscall_pre_impl_setresuid16((long)(ruid), (long)(euid), \
+                                           (long)(suid))
+#define __sanitizer_syscall_post_setresuid16(res, ruid, euid, suid) \
+  __sanitizer_syscall_post_impl_setresuid16(res, (long)(ruid), (long)(euid), \
+                                            (long)(suid))
+#define __sanitizer_syscall_pre_getresuid16(ruid, euid, suid) \
+ __sanitizer_syscall_pre_impl_getresuid16((long)(ruid), (long)(euid), \
+ (long)(suid))
+#define __sanitizer_syscall_post_getresuid16(res, ruid, euid, suid) \
+ __sanitizer_syscall_post_impl_getresuid16(res, (long)(ruid), (long)(euid), \
+ (long)(suid))
+#define __sanitizer_syscall_pre_setresgid16(rgid, egid, sgid) \
+  __sanitizer_syscall_pre_impl_setresgid16((long)(rgid), (long)(egid), \
+                                           (long)(sgid))
+#define __sanitizer_syscall_post_setresgid16(res, rgid, egid, sgid) \
+  __sanitizer_syscall_post_impl_setresgid16(res, (long)(rgid), (long)(egid), \
+                                            (long)(sgid))
+#define __sanitizer_syscall_pre_getresgid16(rgid, egid, sgid) \
+ __sanitizer_syscall_pre_impl_getresgid16((long)(rgid), (long)(egid), \
+ (long)(sgid))
+#define __sanitizer_syscall_post_getresgid16(res, rgid, egid, sgid) \
+ __sanitizer_syscall_post_impl_getresgid16(res, (long)(rgid), (long)(egid), \
+ (long)(sgid))
+#define __sanitizer_syscall_pre_setfsuid16(uid) \
+  __sanitizer_syscall_pre_impl_setfsuid16((long)(uid))
+#define __sanitizer_syscall_post_setfsuid16(res, uid) \
+  __sanitizer_syscall_post_impl_setfsuid16(res, (long)(uid))
+#define __sanitizer_syscall_pre_setfsgid16(gid) \
+  __sanitizer_syscall_pre_impl_setfsgid16((long)(gid))
+#define __sanitizer_syscall_post_setfsgid16(res, gid) \
+  __sanitizer_syscall_post_impl_setfsgid16(res, (long)(gid))
+#define __sanitizer_syscall_pre_getgroups16(gidsetsize, grouplist) \
+ __sanitizer_syscall_pre_impl_getgroups16((long)(gidsetsize), \
+ (long)(grouplist))
+#define __sanitizer_syscall_post_getgroups16(res, gidsetsize, grouplist) \
+ __sanitizer_syscall_post_impl_getgroups16(res, (long)(gidsetsize), \
+ (long)(grouplist))
+#define __sanitizer_syscall_pre_setgroups16(gidsetsize, grouplist) \
+ __sanitizer_syscall_pre_impl_setgroups16((long)(gidsetsize), \
+ (long)(grouplist))
+#define __sanitizer_syscall_post_setgroups16(res, gidsetsize, grouplist) \
+ __sanitizer_syscall_post_impl_setgroups16(res, (long)(gidsetsize), \
+ (long)(grouplist))
+#define __sanitizer_syscall_pre_getuid16() \
+ __sanitizer_syscall_pre_impl_getuid16()
+#define __sanitizer_syscall_post_getuid16(res) \
+ __sanitizer_syscall_post_impl_getuid16(res)
+#define __sanitizer_syscall_pre_geteuid16() \
+ __sanitizer_syscall_pre_impl_geteuid16()
+#define __sanitizer_syscall_post_geteuid16(res) \
+ __sanitizer_syscall_post_impl_geteuid16(res)
+#define __sanitizer_syscall_pre_getgid16() \
+ __sanitizer_syscall_pre_impl_getgid16()
+#define __sanitizer_syscall_post_getgid16(res) \
+ __sanitizer_syscall_post_impl_getgid16(res)
+#define __sanitizer_syscall_pre_getegid16() \
+ __sanitizer_syscall_pre_impl_getegid16()
+#define __sanitizer_syscall_post_getegid16(res) \
+ __sanitizer_syscall_post_impl_getegid16(res)
+#define __sanitizer_syscall_pre_utime(filename, times) \
+ __sanitizer_syscall_pre_impl_utime((long)(filename), (long)(times))
+#define __sanitizer_syscall_post_utime(res, filename, times) \
+ __sanitizer_syscall_post_impl_utime(res, (long)(filename), (long)(times))
+#define __sanitizer_syscall_pre_utimes(filename, utimes) \
+ __sanitizer_syscall_pre_impl_utimes((long)(filename), (long)(utimes))
+#define __sanitizer_syscall_post_utimes(res, filename, utimes) \
+ __sanitizer_syscall_post_impl_utimes(res, (long)(filename), (long)(utimes))
+#define __sanitizer_syscall_pre_lseek(fd, offset, origin) \
+ __sanitizer_syscall_pre_impl_lseek((long)(fd), (long)(offset), (long)(origin))
+#define __sanitizer_syscall_post_lseek(res, fd, offset, origin) \
+ __sanitizer_syscall_post_impl_lseek(res, (long)(fd), (long)(offset), \
+ (long)(origin))
+#define __sanitizer_syscall_pre_llseek(fd, offset_high, offset_low, result, \
+ origin) \
+ __sanitizer_syscall_pre_impl_llseek((long)(fd), (long)(offset_high), \
+ (long)(offset_low), (long)(result), \
+ (long)(origin))
+#define __sanitizer_syscall_post_llseek(res, fd, offset_high, offset_low, \
+ result, origin) \
+ __sanitizer_syscall_post_impl_llseek(res, (long)(fd), (long)(offset_high), \
+ (long)(offset_low), (long)(result), \
+ (long)(origin))
+#define __sanitizer_syscall_pre_read(fd, buf, count) \
+ __sanitizer_syscall_pre_impl_read((long)(fd), (long)(buf), (long)(count))
+#define __sanitizer_syscall_post_read(res, fd, buf, count) \
+ __sanitizer_syscall_post_impl_read(res, (long)(fd), (long)(buf), \
+ (long)(count))
+#define __sanitizer_syscall_pre_readv(fd, vec, vlen) \
+ __sanitizer_syscall_pre_impl_readv((long)(fd), (long)(vec), (long)(vlen))
+#define __sanitizer_syscall_post_readv(res, fd, vec, vlen) \
+ __sanitizer_syscall_post_impl_readv(res, (long)(fd), (long)(vec), \
+ (long)(vlen))
+#define __sanitizer_syscall_pre_write(fd, buf, count) \
+ __sanitizer_syscall_pre_impl_write((long)(fd), (long)(buf), (long)(count))
+#define __sanitizer_syscall_post_write(res, fd, buf, count) \
+ __sanitizer_syscall_post_impl_write(res, (long)(fd), (long)(buf), \
+ (long)(count))
+#define __sanitizer_syscall_pre_writev(fd, vec, vlen) \
+ __sanitizer_syscall_pre_impl_writev((long)(fd), (long)(vec), (long)(vlen))
+#define __sanitizer_syscall_post_writev(res, fd, vec, vlen) \
+ __sanitizer_syscall_post_impl_writev(res, (long)(fd), (long)(vec), \
+ (long)(vlen))
+
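+// Note: pread64/pwrite64 take a 64-bit file offset. On _LP64 targets it fits
+// in a single long argument (pos); on 32-bit targets the kernel receives it
+// as two long-sized halves (pos0/pos1), so the hooks mirror both signatures.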
+#ifdef _LP64
+#define __sanitizer_syscall_pre_pread64(fd, buf, count, pos) \
+ __sanitizer_syscall_pre_impl_pread64((long)(fd), (long)(buf), (long)(count), \
+ (long)(pos))
+#define __sanitizer_syscall_post_pread64(res, fd, buf, count, pos) \
+ __sanitizer_syscall_post_impl_pread64(res, (long)(fd), (long)(buf), \
+ (long)(count), (long)(pos))
+#define __sanitizer_syscall_pre_pwrite64(fd, buf, count, pos) \
+ __sanitizer_syscall_pre_impl_pwrite64((long)(fd), (long)(buf), \
+ (long)(count), (long)(pos))
+#define __sanitizer_syscall_post_pwrite64(res, fd, buf, count, pos) \
+ __sanitizer_syscall_post_impl_pwrite64(res, (long)(fd), (long)(buf), \
+ (long)(count), (long)(pos))
+#else
+#define __sanitizer_syscall_pre_pread64(fd, buf, count, pos0, pos1) \
+ __sanitizer_syscall_pre_impl_pread64((long)(fd), (long)(buf), (long)(count), \
+ (long)(pos0), (long)(pos1))
+#define __sanitizer_syscall_post_pread64(res, fd, buf, count, pos0, pos1) \
+ __sanitizer_syscall_post_impl_pread64(res, (long)(fd), (long)(buf), \
+ (long)(count), (long)(pos0), \
+ (long)(pos1))
+#define __sanitizer_syscall_pre_pwrite64(fd, buf, count, pos0, pos1) \
+ __sanitizer_syscall_pre_impl_pwrite64( \
+ (long)(fd), (long)(buf), (long)(count), (long)(pos0), (long)(pos1))
+#define __sanitizer_syscall_post_pwrite64(res, fd, buf, count, pos0, pos1) \
+ __sanitizer_syscall_post_impl_pwrite64( \
+ res, (long)(fd), (long)(buf), (long)(count), (long)(pos0), (long)(pos1))
+#endif
+
+#define __sanitizer_syscall_pre_preadv(fd, vec, vlen, pos_l, pos_h) \
+ __sanitizer_syscall_pre_impl_preadv((long)(fd), (long)(vec), (long)(vlen), \
+ (long)(pos_l), (long)(pos_h))
+#define __sanitizer_syscall_post_preadv(res, fd, vec, vlen, pos_l, pos_h) \
+ __sanitizer_syscall_post_impl_preadv(res, (long)(fd), (long)(vec), \
+ (long)(vlen), (long)(pos_l), \
+ (long)(pos_h))
+#define __sanitizer_syscall_pre_pwritev(fd, vec, vlen, pos_l, pos_h) \
+ __sanitizer_syscall_pre_impl_pwritev((long)(fd), (long)(vec), (long)(vlen), \
+ (long)(pos_l), (long)(pos_h))
+#define __sanitizer_syscall_post_pwritev(res, fd, vec, vlen, pos_l, pos_h) \
+ __sanitizer_syscall_post_impl_pwritev(res, (long)(fd), (long)(vec), \
+ (long)(vlen), (long)(pos_l), \
+ (long)(pos_h))
+#define __sanitizer_syscall_pre_getcwd(buf, size) \
+ __sanitizer_syscall_pre_impl_getcwd((long)(buf), (long)(size))
+#define __sanitizer_syscall_post_getcwd(res, buf, size) \
+ __sanitizer_syscall_post_impl_getcwd(res, (long)(buf), (long)(size))
+#define __sanitizer_syscall_pre_mkdir(pathname, mode) \
+ __sanitizer_syscall_pre_impl_mkdir((long)(pathname), (long)(mode))
+#define __sanitizer_syscall_post_mkdir(res, pathname, mode) \
+ __sanitizer_syscall_post_impl_mkdir(res, (long)(pathname), (long)(mode))
+#define __sanitizer_syscall_pre_chdir(filename) \
+ __sanitizer_syscall_pre_impl_chdir((long)(filename))
+#define __sanitizer_syscall_post_chdir(res, filename) \
+ __sanitizer_syscall_post_impl_chdir(res, (long)(filename))
+#define __sanitizer_syscall_pre_fchdir(fd) \
+ __sanitizer_syscall_pre_impl_fchdir((long)(fd))
+#define __sanitizer_syscall_post_fchdir(res, fd) \
+ __sanitizer_syscall_post_impl_fchdir(res, (long)(fd))
+#define __sanitizer_syscall_pre_rmdir(pathname) \
+ __sanitizer_syscall_pre_impl_rmdir((long)(pathname))
+#define __sanitizer_syscall_post_rmdir(res, pathname) \
+ __sanitizer_syscall_post_impl_rmdir(res, (long)(pathname))
+#define __sanitizer_syscall_pre_lookup_dcookie(cookie64, buf, len) \
+ __sanitizer_syscall_pre_impl_lookup_dcookie((long)(cookie64), (long)(buf), \
+ (long)(len))
+#define __sanitizer_syscall_post_lookup_dcookie(res, cookie64, buf, len) \
+ __sanitizer_syscall_post_impl_lookup_dcookie(res, (long)(cookie64), \
+ (long)(buf), (long)(len))
+#define __sanitizer_syscall_pre_quotactl(cmd, special, id, addr) \
+ __sanitizer_syscall_pre_impl_quotactl((long)(cmd), (long)(special), \
+ (long)(id), (long)(addr))
+#define __sanitizer_syscall_post_quotactl(res, cmd, special, id, addr) \
+ __sanitizer_syscall_post_impl_quotactl(res, (long)(cmd), (long)(special), \
+ (long)(id), (long)(addr))
+#define __sanitizer_syscall_pre_getdents(fd, dirent, count) \
+ __sanitizer_syscall_pre_impl_getdents((long)(fd), (long)(dirent), \
+ (long)(count))
+#define __sanitizer_syscall_post_getdents(res, fd, dirent, count) \
+ __sanitizer_syscall_post_impl_getdents(res, (long)(fd), (long)(dirent), \
+ (long)(count))
+#define __sanitizer_syscall_pre_getdents64(fd, dirent, count) \
+ __sanitizer_syscall_pre_impl_getdents64((long)(fd), (long)(dirent), \
+ (long)(count))
+#define __sanitizer_syscall_post_getdents64(res, fd, dirent, count) \
+ __sanitizer_syscall_post_impl_getdents64(res, (long)(fd), (long)(dirent), \
+ (long)(count))
+#define __sanitizer_syscall_pre_setsockopt(fd, level, optname, optval, optlen) \
+ __sanitizer_syscall_pre_impl_setsockopt((long)(fd), (long)(level), \
+ (long)(optname), (long)(optval), \
+ (long)(optlen))
+#define __sanitizer_syscall_post_setsockopt(res, fd, level, optname, optval, \
+ optlen) \
+ __sanitizer_syscall_post_impl_setsockopt(res, (long)(fd), (long)(level), \
+ (long)(optname), (long)(optval), \
+ (long)(optlen))
+#define __sanitizer_syscall_pre_getsockopt(fd, level, optname, optval, optlen) \
+ __sanitizer_syscall_pre_impl_getsockopt((long)(fd), (long)(level), \
+ (long)(optname), (long)(optval), \
+ (long)(optlen))
+#define __sanitizer_syscall_post_getsockopt(res, fd, level, optname, optval, \
+ optlen) \
+ __sanitizer_syscall_post_impl_getsockopt(res, (long)(fd), (long)(level), \
+ (long)(optname), (long)(optval), \
+ (long)(optlen))
+#define __sanitizer_syscall_pre_bind(arg0, arg1, arg2) \
+ __sanitizer_syscall_pre_impl_bind((long)(arg0), (long)(arg1), (long)(arg2))
+#define __sanitizer_syscall_post_bind(res, arg0, arg1, arg2) \
+ __sanitizer_syscall_post_impl_bind(res, (long)(arg0), (long)(arg1), \
+ (long)(arg2))
+#define __sanitizer_syscall_pre_connect(arg0, arg1, arg2) \
+ __sanitizer_syscall_pre_impl_connect((long)(arg0), (long)(arg1), (long)(arg2))
+#define __sanitizer_syscall_post_connect(res, arg0, arg1, arg2) \
+ __sanitizer_syscall_post_impl_connect(res, (long)(arg0), (long)(arg1), \
+ (long)(arg2))
+#define __sanitizer_syscall_pre_accept(arg0, arg1, arg2) \
+ __sanitizer_syscall_pre_impl_accept((long)(arg0), (long)(arg1), (long)(arg2))
+#define __sanitizer_syscall_post_accept(res, arg0, arg1, arg2) \
+ __sanitizer_syscall_post_impl_accept(res, (long)(arg0), (long)(arg1), \
+ (long)(arg2))
+#define __sanitizer_syscall_pre_accept4(arg0, arg1, arg2, arg3) \
+ __sanitizer_syscall_pre_impl_accept4((long)(arg0), (long)(arg1), \
+ (long)(arg2), (long)(arg3))
+#define __sanitizer_syscall_post_accept4(res, arg0, arg1, arg2, arg3) \
+ __sanitizer_syscall_post_impl_accept4(res, (long)(arg0), (long)(arg1), \
+ (long)(arg2), (long)(arg3))
+#define __sanitizer_syscall_pre_getsockname(arg0, arg1, arg2) \
+ __sanitizer_syscall_pre_impl_getsockname((long)(arg0), (long)(arg1), \
+ (long)(arg2))
+#define __sanitizer_syscall_post_getsockname(res, arg0, arg1, arg2) \
+ __sanitizer_syscall_post_impl_getsockname(res, (long)(arg0), (long)(arg1), \
+ (long)(arg2))
+#define __sanitizer_syscall_pre_getpeername(arg0, arg1, arg2) \
+ __sanitizer_syscall_pre_impl_getpeername((long)(arg0), (long)(arg1), \
+ (long)(arg2))
+#define __sanitizer_syscall_post_getpeername(res, arg0, arg1, arg2) \
+ __sanitizer_syscall_post_impl_getpeername(res, (long)(arg0), (long)(arg1), \
+ (long)(arg2))
+#define __sanitizer_syscall_pre_send(arg0, arg1, arg2, arg3) \
+ __sanitizer_syscall_pre_impl_send((long)(arg0), (long)(arg1), (long)(arg2), \
+ (long)(arg3))
+#define __sanitizer_syscall_post_send(res, arg0, arg1, arg2, arg3) \
+ __sanitizer_syscall_post_impl_send(res, (long)(arg0), (long)(arg1), \
+ (long)(arg2), (long)(arg3))
+#define __sanitizer_syscall_pre_sendto(arg0, arg1, arg2, arg3, arg4, arg5) \
+ __sanitizer_syscall_pre_impl_sendto((long)(arg0), (long)(arg1), \
+ (long)(arg2), (long)(arg3), \
+ (long)(arg4), (long)(arg5))
+#define __sanitizer_syscall_post_sendto(res, arg0, arg1, arg2, arg3, arg4, \
+ arg5) \
+ __sanitizer_syscall_post_impl_sendto(res, (long)(arg0), (long)(arg1), \
+ (long)(arg2), (long)(arg3), \
+ (long)(arg4), (long)(arg5))
+#define __sanitizer_syscall_pre_sendmsg(fd, msg, flags) \
+ __sanitizer_syscall_pre_impl_sendmsg((long)(fd), (long)(msg), (long)(flags))
+#define __sanitizer_syscall_post_sendmsg(res, fd, msg, flags) \
+ __sanitizer_syscall_post_impl_sendmsg(res, (long)(fd), (long)(msg), \
+ (long)(flags))
+#define __sanitizer_syscall_pre_sendmmsg(fd, msg, vlen, flags) \
+ __sanitizer_syscall_pre_impl_sendmmsg((long)(fd), (long)(msg), (long)(vlen), \
+ (long)(flags))
+#define __sanitizer_syscall_post_sendmmsg(res, fd, msg, vlen, flags) \
+ __sanitizer_syscall_post_impl_sendmmsg(res, (long)(fd), (long)(msg), \
+ (long)(vlen), (long)(flags))
+#define __sanitizer_syscall_pre_recv(arg0, arg1, arg2, arg3) \
+ __sanitizer_syscall_pre_impl_recv((long)(arg0), (long)(arg1), (long)(arg2), \
+ (long)(arg3))
+#define __sanitizer_syscall_post_recv(res, arg0, arg1, arg2, arg3) \
+ __sanitizer_syscall_post_impl_recv(res, (long)(arg0), (long)(arg1), \
+ (long)(arg2), (long)(arg3))
+#define __sanitizer_syscall_pre_recvfrom(arg0, arg1, arg2, arg3, arg4, arg5) \
+ __sanitizer_syscall_pre_impl_recvfrom((long)(arg0), (long)(arg1), \
+ (long)(arg2), (long)(arg3), \
+ (long)(arg4), (long)(arg5))
+#define __sanitizer_syscall_post_recvfrom(res, arg0, arg1, arg2, arg3, arg4, \
+ arg5) \
+ __sanitizer_syscall_post_impl_recvfrom(res, (long)(arg0), (long)(arg1), \
+ (long)(arg2), (long)(arg3), \
+ (long)(arg4), (long)(arg5))
+#define __sanitizer_syscall_pre_recvmsg(fd, msg, flags) \
+ __sanitizer_syscall_pre_impl_recvmsg((long)(fd), (long)(msg), (long)(flags))
+#define __sanitizer_syscall_post_recvmsg(res, fd, msg, flags) \
+ __sanitizer_syscall_post_impl_recvmsg(res, (long)(fd), (long)(msg), \
+ (long)(flags))
+#define __sanitizer_syscall_pre_recvmmsg(fd, msg, vlen, flags, timeout) \
+ __sanitizer_syscall_pre_impl_recvmmsg((long)(fd), (long)(msg), (long)(vlen), \
+ (long)(flags), (long)(timeout))
+#define __sanitizer_syscall_post_recvmmsg(res, fd, msg, vlen, flags, timeout) \
+ __sanitizer_syscall_post_impl_recvmmsg(res, (long)(fd), (long)(msg), \
+ (long)(vlen), (long)(flags), \
+ (long)(timeout))
+#define __sanitizer_syscall_pre_socket(arg0, arg1, arg2) \
+ __sanitizer_syscall_pre_impl_socket((long)(arg0), (long)(arg1), (long)(arg2))
+#define __sanitizer_syscall_post_socket(res, arg0, arg1, arg2) \
+ __sanitizer_syscall_post_impl_socket(res, (long)(arg0), (long)(arg1), \
+ (long)(arg2))
+#define __sanitizer_syscall_pre_socketpair(arg0, arg1, arg2, arg3) \
+ __sanitizer_syscall_pre_impl_socketpair((long)(arg0), (long)(arg1), \
+ (long)(arg2), (long)(arg3))
+#define __sanitizer_syscall_post_socketpair(res, arg0, arg1, arg2, arg3) \
+ __sanitizer_syscall_post_impl_socketpair(res, (long)(arg0), (long)(arg1), \
+ (long)(arg2), (long)(arg3))
+#define __sanitizer_syscall_pre_socketcall(call, args) \
+ __sanitizer_syscall_pre_impl_socketcall((long)(call), (long)(args))
+#define __sanitizer_syscall_post_socketcall(res, call, args) \
+ __sanitizer_syscall_post_impl_socketcall(res, (long)(call), (long)(args))
+#define __sanitizer_syscall_pre_listen(arg0, arg1) \
+ __sanitizer_syscall_pre_impl_listen((long)(arg0), (long)(arg1))
+#define __sanitizer_syscall_post_listen(res, arg0, arg1) \
+ __sanitizer_syscall_post_impl_listen(res, (long)(arg0), (long)(arg1))
+#define __sanitizer_syscall_pre_poll(ufds, nfds, timeout) \
+ __sanitizer_syscall_pre_impl_poll((long)(ufds), (long)(nfds), (long)(timeout))
+#define __sanitizer_syscall_post_poll(res, ufds, nfds, timeout) \
+ __sanitizer_syscall_post_impl_poll(res, (long)(ufds), (long)(nfds), \
+ (long)(timeout))
+#define __sanitizer_syscall_pre_select(n, inp, outp, exp, tvp) \
+ __sanitizer_syscall_pre_impl_select((long)(n), (long)(inp), (long)(outp), \
+ (long)(exp), (long)(tvp))
+#define __sanitizer_syscall_post_select(res, n, inp, outp, exp, tvp) \
+ __sanitizer_syscall_post_impl_select(res, (long)(n), (long)(inp), \
+ (long)(outp), (long)(exp), (long)(tvp))
+#define __sanitizer_syscall_pre_old_select(arg) \
+ __sanitizer_syscall_pre_impl_old_select((long)(arg))
+#define __sanitizer_syscall_post_old_select(res, arg) \
+ __sanitizer_syscall_post_impl_old_select(res, (long)(arg))
+#define __sanitizer_syscall_pre_epoll_create(size) \
+ __sanitizer_syscall_pre_impl_epoll_create((long)(size))
+#define __sanitizer_syscall_post_epoll_create(res, size) \
+ __sanitizer_syscall_post_impl_epoll_create(res, (long)(size))
+#define __sanitizer_syscall_pre_epoll_create1(flags) \
+ __sanitizer_syscall_pre_impl_epoll_create1((long)(flags))
+#define __sanitizer_syscall_post_epoll_create1(res, flags) \
+ __sanitizer_syscall_post_impl_epoll_create1(res, (long)(flags))
+#define __sanitizer_syscall_pre_epoll_ctl(epfd, op, fd, event) \
+ __sanitizer_syscall_pre_impl_epoll_ctl((long)(epfd), (long)(op), (long)(fd), \
+ (long)(event))
+#define __sanitizer_syscall_post_epoll_ctl(res, epfd, op, fd, event) \
+ __sanitizer_syscall_post_impl_epoll_ctl(res, (long)(epfd), (long)(op), \
+ (long)(fd), (long)(event))
+#define __sanitizer_syscall_pre_epoll_wait(epfd, events, maxevents, timeout) \
+ __sanitizer_syscall_pre_impl_epoll_wait((long)(epfd), (long)(events), \
+ (long)(maxevents), (long)(timeout))
+#define __sanitizer_syscall_post_epoll_wait(res, epfd, events, maxevents, \
+ timeout) \
+ __sanitizer_syscall_post_impl_epoll_wait(res, (long)(epfd), (long)(events), \
+ (long)(maxevents), (long)(timeout))
+#define __sanitizer_syscall_pre_epoll_pwait(epfd, events, maxevents, timeout, \
+ sigmask, sigsetsize) \
+ __sanitizer_syscall_pre_impl_epoll_pwait( \
+ (long)(epfd), (long)(events), (long)(maxevents), (long)(timeout), \
+ (long)(sigmask), (long)(sigsetsize))
+#define __sanitizer_syscall_post_epoll_pwait(res, epfd, events, maxevents, \
+ timeout, sigmask, sigsetsize) \
+ __sanitizer_syscall_post_impl_epoll_pwait( \
+ res, (long)(epfd), (long)(events), (long)(maxevents), (long)(timeout), \
+ (long)(sigmask), (long)(sigsetsize))
+#define __sanitizer_syscall_pre_gethostname(name, len) \
+ __sanitizer_syscall_pre_impl_gethostname((long)(name), (long)(len))
+#define __sanitizer_syscall_post_gethostname(res, name, len) \
+ __sanitizer_syscall_post_impl_gethostname(res, (long)(name), (long)(len))
+#define __sanitizer_syscall_pre_sethostname(name, len) \
+ __sanitizer_syscall_pre_impl_sethostname((long)(name), (long)(len))
+#define __sanitizer_syscall_post_sethostname(res, name, len) \
+ __sanitizer_syscall_post_impl_sethostname(res, (long)(name), (long)(len))
+#define __sanitizer_syscall_pre_setdomainname(name, len) \
+ __sanitizer_syscall_pre_impl_setdomainname((long)(name), (long)(len))
+#define __sanitizer_syscall_post_setdomainname(res, name, len) \
+ __sanitizer_syscall_post_impl_setdomainname(res, (long)(name), (long)(len))
+#define __sanitizer_syscall_pre_newuname(name) \
+ __sanitizer_syscall_pre_impl_newuname((long)(name))
+#define __sanitizer_syscall_post_newuname(res, name) \
+ __sanitizer_syscall_post_impl_newuname(res, (long)(name))
+#define __sanitizer_syscall_pre_uname(arg0) \
+ __sanitizer_syscall_pre_impl_uname((long)(arg0))
+#define __sanitizer_syscall_post_uname(res, arg0) \
+ __sanitizer_syscall_post_impl_uname(res, (long)(arg0))
+#define __sanitizer_syscall_pre_olduname(arg0) \
+ __sanitizer_syscall_pre_impl_olduname((long)(arg0))
+#define __sanitizer_syscall_post_olduname(res, arg0) \
+ __sanitizer_syscall_post_impl_olduname(res, (long)(arg0))
+#define __sanitizer_syscall_pre_getrlimit(resource, rlim) \
+ __sanitizer_syscall_pre_impl_getrlimit((long)(resource), (long)(rlim))
+#define __sanitizer_syscall_post_getrlimit(res, resource, rlim) \
+ __sanitizer_syscall_post_impl_getrlimit(res, (long)(resource), (long)(rlim))
+#define __sanitizer_syscall_pre_old_getrlimit(resource, rlim) \
+ __sanitizer_syscall_pre_impl_old_getrlimit((long)(resource), (long)(rlim))
+#define __sanitizer_syscall_post_old_getrlimit(res, resource, rlim) \
+ __sanitizer_syscall_post_impl_old_getrlimit(res, (long)(resource), \
+ (long)(rlim))
+#define __sanitizer_syscall_pre_setrlimit(resource, rlim) \
+ __sanitizer_syscall_pre_impl_setrlimit((long)(resource), (long)(rlim))
+#define __sanitizer_syscall_post_setrlimit(res, resource, rlim) \
+ __sanitizer_syscall_post_impl_setrlimit(res, (long)(resource), (long)(rlim))
+#define __sanitizer_syscall_pre_prlimit64(pid, resource, new_rlim, old_rlim) \
+ __sanitizer_syscall_pre_impl_prlimit64((long)(pid), (long)(resource), \
+ (long)(new_rlim), (long)(old_rlim))
+#define __sanitizer_syscall_post_prlimit64(res, pid, resource, new_rlim, \
+ old_rlim) \
+ __sanitizer_syscall_post_impl_prlimit64(res, (long)(pid), (long)(resource), \
+ (long)(new_rlim), (long)(old_rlim))
+#define __sanitizer_syscall_pre_getrusage(who, ru) \
+ __sanitizer_syscall_pre_impl_getrusage((long)(who), (long)(ru))
+#define __sanitizer_syscall_post_getrusage(res, who, ru) \
+ __sanitizer_syscall_post_impl_getrusage(res, (long)(who), (long)(ru))
+#define __sanitizer_syscall_pre_umask(mask) \
+ __sanitizer_syscall_pre_impl_umask((long)(mask))
+#define __sanitizer_syscall_post_umask(res, mask) \
+ __sanitizer_syscall_post_impl_umask(res, (long)(mask))
+#define __sanitizer_syscall_pre_msgget(key, msgflg) \
+ __sanitizer_syscall_pre_impl_msgget((long)(key), (long)(msgflg))
+#define __sanitizer_syscall_post_msgget(res, key, msgflg) \
+ __sanitizer_syscall_post_impl_msgget(res, (long)(key), (long)(msgflg))
+#define __sanitizer_syscall_pre_msgsnd(msqid, msgp, msgsz, msgflg) \
+ __sanitizer_syscall_pre_impl_msgsnd((long)(msqid), (long)(msgp), \
+ (long)(msgsz), (long)(msgflg))
+#define __sanitizer_syscall_post_msgsnd(res, msqid, msgp, msgsz, msgflg) \
+ __sanitizer_syscall_post_impl_msgsnd(res, (long)(msqid), (long)(msgp), \
+ (long)(msgsz), (long)(msgflg))
+#define __sanitizer_syscall_pre_msgrcv(msqid, msgp, msgsz, msgtyp, msgflg) \
+ __sanitizer_syscall_pre_impl_msgrcv((long)(msqid), (long)(msgp), \
+ (long)(msgsz), (long)(msgtyp), \
+ (long)(msgflg))
+#define __sanitizer_syscall_post_msgrcv(res, msqid, msgp, msgsz, msgtyp, \
+ msgflg) \
+ __sanitizer_syscall_post_impl_msgrcv(res, (long)(msqid), (long)(msgp), \
+ (long)(msgsz), (long)(msgtyp), \
+ (long)(msgflg))
+#define __sanitizer_syscall_pre_msgctl(msqid, cmd, buf) \
+ __sanitizer_syscall_pre_impl_msgctl((long)(msqid), (long)(cmd), (long)(buf))
+#define __sanitizer_syscall_post_msgctl(res, msqid, cmd, buf) \
+ __sanitizer_syscall_post_impl_msgctl(res, (long)(msqid), (long)(cmd), \
+ (long)(buf))
+#define __sanitizer_syscall_pre_semget(key, nsems, semflg) \
+ __sanitizer_syscall_pre_impl_semget((long)(key), (long)(nsems), \
+ (long)(semflg))
+#define __sanitizer_syscall_post_semget(res, key, nsems, semflg) \
+ __sanitizer_syscall_post_impl_semget(res, (long)(key), (long)(nsems), \
+ (long)(semflg))
+#define __sanitizer_syscall_pre_semop(semid, sops, nsops) \
+ __sanitizer_syscall_pre_impl_semop((long)(semid), (long)(sops), (long)(nsops))
+#define __sanitizer_syscall_post_semop(res, semid, sops, nsops) \
+ __sanitizer_syscall_post_impl_semop(res, (long)(semid), (long)(sops), \
+ (long)(nsops))
+#define __sanitizer_syscall_pre_semctl(semid, semnum, cmd, arg) \
+ __sanitizer_syscall_pre_impl_semctl((long)(semid), (long)(semnum), \
+ (long)(cmd), (long)(arg))
+#define __sanitizer_syscall_post_semctl(res, semid, semnum, cmd, arg) \
+ __sanitizer_syscall_post_impl_semctl(res, (long)(semid), (long)(semnum), \
+ (long)(cmd), (long)(arg))
+#define __sanitizer_syscall_pre_semtimedop(semid, sops, nsops, timeout) \
+ __sanitizer_syscall_pre_impl_semtimedop((long)(semid), (long)(sops), \
+ (long)(nsops), (long)(timeout))
+#define __sanitizer_syscall_post_semtimedop(res, semid, sops, nsops, timeout) \
+ __sanitizer_syscall_post_impl_semtimedop(res, (long)(semid), (long)(sops), \
+ (long)(nsops), (long)(timeout))
+#define __sanitizer_syscall_pre_shmat(shmid, shmaddr, shmflg) \
+ __sanitizer_syscall_pre_impl_shmat((long)(shmid), (long)(shmaddr), \
+ (long)(shmflg))
+#define __sanitizer_syscall_post_shmat(res, shmid, shmaddr, shmflg) \
+ __sanitizer_syscall_post_impl_shmat(res, (long)(shmid), (long)(shmaddr), \
+ (long)(shmflg))
+#define __sanitizer_syscall_pre_shmget(key, size, flag) \
+ __sanitizer_syscall_pre_impl_shmget((long)(key), (long)(size), (long)(flag))
+#define __sanitizer_syscall_post_shmget(res, key, size, flag) \
+ __sanitizer_syscall_post_impl_shmget(res, (long)(key), (long)(size), \
+ (long)(flag))
+#define __sanitizer_syscall_pre_shmdt(shmaddr) \
+ __sanitizer_syscall_pre_impl_shmdt((long)(shmaddr))
+#define __sanitizer_syscall_post_shmdt(res, shmaddr) \
+ __sanitizer_syscall_post_impl_shmdt(res, (long)(shmaddr))
+#define __sanitizer_syscall_pre_shmctl(shmid, cmd, buf) \
+ __sanitizer_syscall_pre_impl_shmctl((long)(shmid), (long)(cmd), (long)(buf))
+#define __sanitizer_syscall_post_shmctl(res, shmid, cmd, buf) \
+ __sanitizer_syscall_post_impl_shmctl(res, (long)(shmid), (long)(cmd), \
+ (long)(buf))
+#define __sanitizer_syscall_pre_ipc(call, first, second, third, ptr, fifth) \
+ __sanitizer_syscall_pre_impl_ipc((long)(call), (long)(first), \
+ (long)(second), (long)(third), (long)(ptr), \
+ (long)(fifth))
+#define __sanitizer_syscall_post_ipc(res, call, first, second, third, ptr, \
+ fifth) \
+ __sanitizer_syscall_post_impl_ipc(res, (long)(call), (long)(first), \
+ (long)(second), (long)(third), \
+ (long)(ptr), (long)(fifth))
+#define __sanitizer_syscall_pre_mq_open(name, oflag, mode, attr) \
+ __sanitizer_syscall_pre_impl_mq_open((long)(name), (long)(oflag), \
+ (long)(mode), (long)(attr))
+#define __sanitizer_syscall_post_mq_open(res, name, oflag, mode, attr) \
+ __sanitizer_syscall_post_impl_mq_open(res, (long)(name), (long)(oflag), \
+ (long)(mode), (long)(attr))
+#define __sanitizer_syscall_pre_mq_unlink(name) \
+ __sanitizer_syscall_pre_impl_mq_unlink((long)(name))
+#define __sanitizer_syscall_post_mq_unlink(res, name) \
+ __sanitizer_syscall_post_impl_mq_unlink(res, (long)(name))
+#define __sanitizer_syscall_pre_mq_timedsend(mqdes, msg_ptr, msg_len, \
+ msg_prio, abs_timeout) \
+ __sanitizer_syscall_pre_impl_mq_timedsend((long)(mqdes), (long)(msg_ptr), \
+ (long)(msg_len), (long)(msg_prio), \
+ (long)(abs_timeout))
+#define __sanitizer_syscall_post_mq_timedsend(res, mqdes, msg_ptr, msg_len, \
+ msg_prio, abs_timeout) \
+ __sanitizer_syscall_post_impl_mq_timedsend( \
+ res, (long)(mqdes), (long)(msg_ptr), (long)(msg_len), (long)(msg_prio), \
+ (long)(abs_timeout))
+#define __sanitizer_syscall_pre_mq_timedreceive(mqdes, msg_ptr, msg_len, \
+ msg_prio, abs_timeout) \
+ __sanitizer_syscall_pre_impl_mq_timedreceive( \
+ (long)(mqdes), (long)(msg_ptr), (long)(msg_len), (long)(msg_prio), \
+ (long)(abs_timeout))
+#define __sanitizer_syscall_post_mq_timedreceive(res, mqdes, msg_ptr, msg_len, \
+ msg_prio, abs_timeout) \
+ __sanitizer_syscall_post_impl_mq_timedreceive( \
+ res, (long)(mqdes), (long)(msg_ptr), (long)(msg_len), (long)(msg_prio), \
+ (long)(abs_timeout))
+#define __sanitizer_syscall_pre_mq_notify(mqdes, notification) \
+ __sanitizer_syscall_pre_impl_mq_notify((long)(mqdes), (long)(notification))
+#define __sanitizer_syscall_post_mq_notify(res, mqdes, notification) \
+ __sanitizer_syscall_post_impl_mq_notify(res, (long)(mqdes), \
+ (long)(notification))
+#define __sanitizer_syscall_pre_mq_getsetattr(mqdes, mqstat, omqstat) \
+ __sanitizer_syscall_pre_impl_mq_getsetattr((long)(mqdes), (long)(mqstat), \
+ (long)(omqstat))
+#define __sanitizer_syscall_post_mq_getsetattr(res, mqdes, mqstat, omqstat) \
+ __sanitizer_syscall_post_impl_mq_getsetattr(res, (long)(mqdes), \
+ (long)(mqstat), (long)(omqstat))
+#define __sanitizer_syscall_pre_pciconfig_iobase(which, bus, devfn) \
+ __sanitizer_syscall_pre_impl_pciconfig_iobase((long)(which), (long)(bus), \
+ (long)(devfn))
+#define __sanitizer_syscall_post_pciconfig_iobase(res, which, bus, devfn) \
+ __sanitizer_syscall_post_impl_pciconfig_iobase(res, (long)(which), \
+ (long)(bus), (long)(devfn))
+#define __sanitizer_syscall_pre_pciconfig_read(bus, dfn, off, len, buf) \
+ __sanitizer_syscall_pre_impl_pciconfig_read( \
+ (long)(bus), (long)(dfn), (long)(off), (long)(len), (long)(buf))
+#define __sanitizer_syscall_post_pciconfig_read(res, bus, dfn, off, len, buf) \
+ __sanitizer_syscall_post_impl_pciconfig_read( \
+ res, (long)(bus), (long)(dfn), (long)(off), (long)(len), (long)(buf))
+#define __sanitizer_syscall_pre_pciconfig_write(bus, dfn, off, len, buf) \
+ __sanitizer_syscall_pre_impl_pciconfig_write( \
+ (long)(bus), (long)(dfn), (long)(off), (long)(len), (long)(buf))
+#define __sanitizer_syscall_post_pciconfig_write(res, bus, dfn, off, len, buf) \
+ __sanitizer_syscall_post_impl_pciconfig_write( \
+ res, (long)(bus), (long)(dfn), (long)(off), (long)(len), (long)(buf))
+#define __sanitizer_syscall_pre_swapon(specialfile, swap_flags) \
+ __sanitizer_syscall_pre_impl_swapon((long)(specialfile), (long)(swap_flags))
+#define __sanitizer_syscall_post_swapon(res, specialfile, swap_flags) \
+ __sanitizer_syscall_post_impl_swapon(res, (long)(specialfile), \
+ (long)(swap_flags))
+#define __sanitizer_syscall_pre_swapoff(specialfile) \
+ __sanitizer_syscall_pre_impl_swapoff((long)(specialfile))
+#define __sanitizer_syscall_post_swapoff(res, specialfile) \
+ __sanitizer_syscall_post_impl_swapoff(res, (long)(specialfile))
+#define __sanitizer_syscall_pre_sysctl(args) \
+ __sanitizer_syscall_pre_impl_sysctl((long)(args))
+#define __sanitizer_syscall_post_sysctl(res, args) \
+ __sanitizer_syscall_post_impl_sysctl(res, (long)(args))
+#define __sanitizer_syscall_pre_sysinfo(info) \
+ __sanitizer_syscall_pre_impl_sysinfo((long)(info))
+#define __sanitizer_syscall_post_sysinfo(res, info) \
+ __sanitizer_syscall_post_impl_sysinfo(res, (long)(info))
+#define __sanitizer_syscall_pre_sysfs(option, arg1, arg2) \
+ __sanitizer_syscall_pre_impl_sysfs((long)(option), (long)(arg1), (long)(arg2))
+#define __sanitizer_syscall_post_sysfs(res, option, arg1, arg2) \
+ __sanitizer_syscall_post_impl_sysfs(res, (long)(option), (long)(arg1), \
+ (long)(arg2))
+#define __sanitizer_syscall_pre_syslog(type, buf, len) \
+ __sanitizer_syscall_pre_impl_syslog((long)(type), (long)(buf), (long)(len))
+#define __sanitizer_syscall_post_syslog(res, type, buf, len) \
+ __sanitizer_syscall_post_impl_syslog(res, (long)(type), (long)(buf), \
+ (long)(len))
+#define __sanitizer_syscall_pre_uselib(library) \
+ __sanitizer_syscall_pre_impl_uselib((long)(library))
+#define __sanitizer_syscall_post_uselib(res, library) \
+ __sanitizer_syscall_post_impl_uselib(res, (long)(library))
+#define __sanitizer_syscall_pre_ni_syscall() \
+ __sanitizer_syscall_pre_impl_ni_syscall()
+#define __sanitizer_syscall_post_ni_syscall(res) \
+ __sanitizer_syscall_post_impl_ni_syscall(res)
+#define __sanitizer_syscall_pre_ptrace(request, pid, addr, data) \
+ __sanitizer_syscall_pre_impl_ptrace((long)(request), (long)(pid), \
+ (long)(addr), (long)(data))
+#define __sanitizer_syscall_post_ptrace(res, request, pid, addr, data) \
+ __sanitizer_syscall_post_impl_ptrace(res, (long)(request), (long)(pid), \
+ (long)(addr), (long)(data))
+#define __sanitizer_syscall_pre_add_key(_type, _description, _payload, plen, \
+ destringid) \
+ __sanitizer_syscall_pre_impl_add_key((long)(_type), (long)(_description), \
+ (long)(_payload), (long)(plen), \
+ (long)(destringid))
+#define __sanitizer_syscall_post_add_key(res, _type, _description, _payload, \
+ plen, destringid) \
+ __sanitizer_syscall_post_impl_add_key( \
+ res, (long)(_type), (long)(_description), (long)(_payload), \
+ (long)(plen), (long)(destringid))
+#define __sanitizer_syscall_pre_request_key(_type, _description, \
+ _callout_info, destringid) \
+ __sanitizer_syscall_pre_impl_request_key( \
+ (long)(_type), (long)(_description), (long)(_callout_info), \
+ (long)(destringid))
+#define __sanitizer_syscall_post_request_key(res, _type, _description, \
+ _callout_info, destringid) \
+ __sanitizer_syscall_post_impl_request_key( \
+ res, (long)(_type), (long)(_description), (long)(_callout_info), \
+ (long)(destringid))
+#define __sanitizer_syscall_pre_keyctl(cmd, arg2, arg3, arg4, arg5) \
+ __sanitizer_syscall_pre_impl_keyctl((long)(cmd), (long)(arg2), (long)(arg3), \
+ (long)(arg4), (long)(arg5))
+#define __sanitizer_syscall_post_keyctl(res, cmd, arg2, arg3, arg4, arg5) \
+ __sanitizer_syscall_post_impl_keyctl(res, (long)(cmd), (long)(arg2), \
+ (long)(arg3), (long)(arg4), \
+ (long)(arg5))
+#define __sanitizer_syscall_pre_ioprio_set(which, who, ioprio) \
+ __sanitizer_syscall_pre_impl_ioprio_set((long)(which), (long)(who), \
+ (long)(ioprio))
+#define __sanitizer_syscall_post_ioprio_set(res, which, who, ioprio) \
+ __sanitizer_syscall_post_impl_ioprio_set(res, (long)(which), (long)(who), \
+ (long)(ioprio))
+#define __sanitizer_syscall_pre_ioprio_get(which, who) \
+ __sanitizer_syscall_pre_impl_ioprio_get((long)(which), (long)(who))
+#define __sanitizer_syscall_post_ioprio_get(res, which, who) \
+ __sanitizer_syscall_post_impl_ioprio_get(res, (long)(which), (long)(who))
+#define __sanitizer_syscall_pre_set_mempolicy(mode, nmask, maxnode) \
+ __sanitizer_syscall_pre_impl_set_mempolicy((long)(mode), (long)(nmask), \
+ (long)(maxnode))
+#define __sanitizer_syscall_post_set_mempolicy(res, mode, nmask, maxnode) \
+ __sanitizer_syscall_post_impl_set_mempolicy(res, (long)(mode), \
+ (long)(nmask), (long)(maxnode))
+#define __sanitizer_syscall_pre_migrate_pages(pid, maxnode, from, to) \
+ __sanitizer_syscall_pre_impl_migrate_pages((long)(pid), (long)(maxnode), \
+ (long)(from), (long)(to))
+#define __sanitizer_syscall_post_migrate_pages(res, pid, maxnode, from, to) \
+ __sanitizer_syscall_post_impl_migrate_pages( \
+ res, (long)(pid), (long)(maxnode), (long)(from), (long)(to))
+#define __sanitizer_syscall_pre_move_pages(pid, nr_pages, pages, nodes, \
+ status, flags) \
+ __sanitizer_syscall_pre_impl_move_pages((long)(pid), (long)(nr_pages), \
+ (long)(pages), (long)(nodes), \
+ (long)(status), (long)(flags))
+#define __sanitizer_syscall_post_move_pages(res, pid, nr_pages, pages, nodes, \
+ status, flags) \
+ __sanitizer_syscall_post_impl_move_pages(res, (long)(pid), (long)(nr_pages), \
+ (long)(pages), (long)(nodes), \
+ (long)(status), (long)(flags))
+#define __sanitizer_syscall_pre_mbind(start, len, mode, nmask, maxnode, flags) \
+ __sanitizer_syscall_pre_impl_mbind((long)(start), (long)(len), (long)(mode), \
+ (long)(nmask), (long)(maxnode), \
+ (long)(flags))
+#define __sanitizer_syscall_post_mbind(res, start, len, mode, nmask, maxnode, \
+ flags) \
+ __sanitizer_syscall_post_impl_mbind(res, (long)(start), (long)(len), \
+ (long)(mode), (long)(nmask), \
+ (long)(maxnode), (long)(flags))
+#define __sanitizer_syscall_pre_get_mempolicy(policy, nmask, maxnode, addr, \
+ flags) \
+ __sanitizer_syscall_pre_impl_get_mempolicy((long)(policy), (long)(nmask), \
+ (long)(maxnode), (long)(addr), \
+ (long)(flags))
+#define __sanitizer_syscall_post_get_mempolicy(res, policy, nmask, maxnode, \
+ addr, flags) \
+ __sanitizer_syscall_post_impl_get_mempolicy(res, (long)(policy), \
+ (long)(nmask), (long)(maxnode), \
+ (long)(addr), (long)(flags))
+#define __sanitizer_syscall_pre_inotify_init() \
+ __sanitizer_syscall_pre_impl_inotify_init()
+#define __sanitizer_syscall_post_inotify_init(res) \
+ __sanitizer_syscall_post_impl_inotify_init(res)
+#define __sanitizer_syscall_pre_inotify_init1(flags) \
+ __sanitizer_syscall_pre_impl_inotify_init1((long)(flags))
+#define __sanitizer_syscall_post_inotify_init1(res, flags) \
+ __sanitizer_syscall_post_impl_inotify_init1(res, (long)(flags))
+#define __sanitizer_syscall_pre_inotify_add_watch(fd, path, mask) \
+ __sanitizer_syscall_pre_impl_inotify_add_watch((long)(fd), (long)(path), \
+ (long)(mask))
+#define __sanitizer_syscall_post_inotify_add_watch(res, fd, path, mask) \
+ __sanitizer_syscall_post_impl_inotify_add_watch(res, (long)(fd), \
+ (long)(path), (long)(mask))
+#define __sanitizer_syscall_pre_inotify_rm_watch(fd, wd) \
+ __sanitizer_syscall_pre_impl_inotify_rm_watch((long)(fd), (long)(wd))
+#define __sanitizer_syscall_post_inotify_rm_watch(res, fd, wd) \
+ __sanitizer_syscall_post_impl_inotify_rm_watch(res, (long)(fd), (long)(wd))
+#define __sanitizer_syscall_pre_spu_run(fd, unpc, ustatus) \
+ __sanitizer_syscall_pre_impl_spu_run((long)(fd), (long)(unpc), \
+ (long)(ustatus))
+#define __sanitizer_syscall_post_spu_run(res, fd, unpc, ustatus) \
+ __sanitizer_syscall_post_impl_spu_run(res, (long)(fd), (long)(unpc), \
+ (long)(ustatus))
+#define __sanitizer_syscall_pre_spu_create(name, flags, mode, fd) \
+ __sanitizer_syscall_pre_impl_spu_create((long)(name), (long)(flags), \
+ (long)(mode), (long)(fd))
+#define __sanitizer_syscall_post_spu_create(res, name, flags, mode, fd) \
+ __sanitizer_syscall_post_impl_spu_create(res, (long)(name), (long)(flags), \
+ (long)(mode), (long)(fd))
+#define __sanitizer_syscall_pre_mknodat(dfd, filename, mode, dev) \
+ __sanitizer_syscall_pre_impl_mknodat((long)(dfd), (long)(filename), \
+ (long)(mode), (long)(dev))
+#define __sanitizer_syscall_post_mknodat(res, dfd, filename, mode, dev) \
+ __sanitizer_syscall_post_impl_mknodat(res, (long)(dfd), (long)(filename), \
+ (long)(mode), (long)(dev))
+#define __sanitizer_syscall_pre_mkdirat(dfd, pathname, mode) \
+ __sanitizer_syscall_pre_impl_mkdirat((long)(dfd), (long)(pathname), \
+ (long)(mode))
+#define __sanitizer_syscall_post_mkdirat(res, dfd, pathname, mode) \
+ __sanitizer_syscall_post_impl_mkdirat(res, (long)(dfd), (long)(pathname), \
+ (long)(mode))
+#define __sanitizer_syscall_pre_unlinkat(dfd, pathname, flag) \
+ __sanitizer_syscall_pre_impl_unlinkat((long)(dfd), (long)(pathname), \
+ (long)(flag))
+#define __sanitizer_syscall_post_unlinkat(res, dfd, pathname, flag) \
+ __sanitizer_syscall_post_impl_unlinkat(res, (long)(dfd), (long)(pathname), \
+ (long)(flag))
+#define __sanitizer_syscall_pre_symlinkat(oldname, newdfd, newname) \
+ __sanitizer_syscall_pre_impl_symlinkat((long)(oldname), (long)(newdfd), \
+ (long)(newname))
+#define __sanitizer_syscall_post_symlinkat(res, oldname, newdfd, newname) \
+ __sanitizer_syscall_post_impl_symlinkat(res, (long)(oldname), \
+ (long)(newdfd), (long)(newname))
+#define __sanitizer_syscall_pre_linkat(olddfd, oldname, newdfd, newname, \
+ flags) \
+ __sanitizer_syscall_pre_impl_linkat((long)(olddfd), (long)(oldname), \
+ (long)(newdfd), (long)(newname), \
+ (long)(flags))
+#define __sanitizer_syscall_post_linkat(res, olddfd, oldname, newdfd, newname, \
+ flags) \
+ __sanitizer_syscall_post_impl_linkat(res, (long)(olddfd), (long)(oldname), \
+ (long)(newdfd), (long)(newname), \
+ (long)(flags))
+#define __sanitizer_syscall_pre_renameat(olddfd, oldname, newdfd, newname) \
+ __sanitizer_syscall_pre_impl_renameat((long)(olddfd), (long)(oldname), \
+ (long)(newdfd), (long)(newname))
+#define __sanitizer_syscall_post_renameat(res, olddfd, oldname, newdfd, \
+ newname) \
+ __sanitizer_syscall_post_impl_renameat(res, (long)(olddfd), (long)(oldname), \
+ (long)(newdfd), (long)(newname))
+#define __sanitizer_syscall_pre_futimesat(dfd, filename, utimes) \
+ __sanitizer_syscall_pre_impl_futimesat((long)(dfd), (long)(filename), \
+ (long)(utimes))
+#define __sanitizer_syscall_post_futimesat(res, dfd, filename, utimes) \
+ __sanitizer_syscall_post_impl_futimesat(res, (long)(dfd), (long)(filename), \
+ (long)(utimes))
+#define __sanitizer_syscall_pre_faccessat(dfd, filename, mode) \
+ __sanitizer_syscall_pre_impl_faccessat((long)(dfd), (long)(filename), \
+ (long)(mode))
+#define __sanitizer_syscall_post_faccessat(res, dfd, filename, mode) \
+ __sanitizer_syscall_post_impl_faccessat(res, (long)(dfd), (long)(filename), \
+ (long)(mode))
+#define __sanitizer_syscall_pre_fchmodat(dfd, filename, mode) \
+ __sanitizer_syscall_pre_impl_fchmodat((long)(dfd), (long)(filename), \
+ (long)(mode))
+#define __sanitizer_syscall_post_fchmodat(res, dfd, filename, mode) \
+ __sanitizer_syscall_post_impl_fchmodat(res, (long)(dfd), (long)(filename), \
+ (long)(mode))
+#define __sanitizer_syscall_pre_fchownat(dfd, filename, user, group, flag) \
+ __sanitizer_syscall_pre_impl_fchownat((long)(dfd), (long)(filename), \
+ (long)(user), (long)(group), \
+ (long)(flag))
+#define __sanitizer_syscall_post_fchownat(res, dfd, filename, user, group, \
+ flag) \
+ __sanitizer_syscall_post_impl_fchownat(res, (long)(dfd), (long)(filename), \
+ (long)(user), (long)(group), \
+ (long)(flag))
+#define __sanitizer_syscall_pre_openat(dfd, filename, flags, mode) \
+ __sanitizer_syscall_pre_impl_openat((long)(dfd), (long)(filename), \
+ (long)(flags), (long)(mode))
+#define __sanitizer_syscall_post_openat(res, dfd, filename, flags, mode) \
+ __sanitizer_syscall_post_impl_openat(res, (long)(dfd), (long)(filename), \
+ (long)(flags), (long)(mode))
+#define __sanitizer_syscall_pre_newfstatat(dfd, filename, statbuf, flag) \
+ __sanitizer_syscall_pre_impl_newfstatat((long)(dfd), (long)(filename), \
+ (long)(statbuf), (long)(flag))
+#define __sanitizer_syscall_post_newfstatat(res, dfd, filename, statbuf, flag) \
+ __sanitizer_syscall_post_impl_newfstatat(res, (long)(dfd), (long)(filename), \
+ (long)(statbuf), (long)(flag))
+#define __sanitizer_syscall_pre_fstatat64(dfd, filename, statbuf, flag) \
+ __sanitizer_syscall_pre_impl_fstatat64((long)(dfd), (long)(filename), \
+ (long)(statbuf), (long)(flag))
+#define __sanitizer_syscall_post_fstatat64(res, dfd, filename, statbuf, flag) \
+ __sanitizer_syscall_post_impl_fstatat64(res, (long)(dfd), (long)(filename), \
+ (long)(statbuf), (long)(flag))
+#define __sanitizer_syscall_pre_readlinkat(dfd, path, buf, bufsiz) \
+ __sanitizer_syscall_pre_impl_readlinkat((long)(dfd), (long)(path), \
+ (long)(buf), (long)(bufsiz))
+#define __sanitizer_syscall_post_readlinkat(res, dfd, path, buf, bufsiz) \
+ __sanitizer_syscall_post_impl_readlinkat(res, (long)(dfd), (long)(path), \
+ (long)(buf), (long)(bufsiz))
+#define __sanitizer_syscall_pre_utimensat(dfd, filename, utimes, flags) \
+ __sanitizer_syscall_pre_impl_utimensat((long)(dfd), (long)(filename), \
+ (long)(utimes), (long)(flags))
+#define __sanitizer_syscall_post_utimensat(res, dfd, filename, utimes, flags) \
+ __sanitizer_syscall_post_impl_utimensat(res, (long)(dfd), (long)(filename), \
+ (long)(utimes), (long)(flags))
+#define __sanitizer_syscall_pre_unshare(unshare_flags) \
+ __sanitizer_syscall_pre_impl_unshare((long)(unshare_flags))
+#define __sanitizer_syscall_post_unshare(res, unshare_flags) \
+ __sanitizer_syscall_post_impl_unshare(res, (long)(unshare_flags))
+#define __sanitizer_syscall_pre_splice(fd_in, off_in, fd_out, off_out, len, \
+ flags) \
+ __sanitizer_syscall_pre_impl_splice((long)(fd_in), (long)(off_in), \
+ (long)(fd_out), (long)(off_out), \
+ (long)(len), (long)(flags))
+#define __sanitizer_syscall_post_splice(res, fd_in, off_in, fd_out, off_out, \
+ len, flags) \
+ __sanitizer_syscall_post_impl_splice(res, (long)(fd_in), (long)(off_in), \
+ (long)(fd_out), (long)(off_out), \
+ (long)(len), (long)(flags))
+#define __sanitizer_syscall_pre_vmsplice(fd, iov, nr_segs, flags) \
+ __sanitizer_syscall_pre_impl_vmsplice((long)(fd), (long)(iov), \
+ (long)(nr_segs), (long)(flags))
+#define __sanitizer_syscall_post_vmsplice(res, fd, iov, nr_segs, flags) \
+ __sanitizer_syscall_post_impl_vmsplice(res, (long)(fd), (long)(iov), \
+ (long)(nr_segs), (long)(flags))
+#define __sanitizer_syscall_pre_tee(fdin, fdout, len, flags) \
+ __sanitizer_syscall_pre_impl_tee((long)(fdin), (long)(fdout), (long)(len), \
+ (long)(flags))
+#define __sanitizer_syscall_post_tee(res, fdin, fdout, len, flags) \
+ __sanitizer_syscall_post_impl_tee(res, (long)(fdin), (long)(fdout), \
+ (long)(len), (long)(flags))
+#define __sanitizer_syscall_pre_get_robust_list(pid, head_ptr, len_ptr) \
+ __sanitizer_syscall_pre_impl_get_robust_list((long)(pid), (long)(head_ptr), \
+ (long)(len_ptr))
+#define __sanitizer_syscall_post_get_robust_list(res, pid, head_ptr, len_ptr) \
+ __sanitizer_syscall_post_impl_get_robust_list( \
+ res, (long)(pid), (long)(head_ptr), (long)(len_ptr))
+#define __sanitizer_syscall_pre_set_robust_list(head, len) \
+ __sanitizer_syscall_pre_impl_set_robust_list((long)(head), (long)(len))
+#define __sanitizer_syscall_post_set_robust_list(res, head, len) \
+ __sanitizer_syscall_post_impl_set_robust_list(res, (long)(head), (long)(len))
+#define __sanitizer_syscall_pre_getcpu(cpu, node, cache) \
+ __sanitizer_syscall_pre_impl_getcpu((long)(cpu), (long)(node), (long)(cache))
+#define __sanitizer_syscall_post_getcpu(res, cpu, node, cache) \
+ __sanitizer_syscall_post_impl_getcpu(res, (long)(cpu), (long)(node), \
+ (long)(cache))
+#define __sanitizer_syscall_pre_signalfd(ufd, user_mask, sizemask) \
+ __sanitizer_syscall_pre_impl_signalfd((long)(ufd), (long)(user_mask), \
+ (long)(sizemask))
+#define __sanitizer_syscall_post_signalfd(res, ufd, user_mask, sizemask) \
+ __sanitizer_syscall_post_impl_signalfd(res, (long)(ufd), (long)(user_mask), \
+ (long)(sizemask))
+#define __sanitizer_syscall_pre_signalfd4(ufd, user_mask, sizemask, flags) \
+ __sanitizer_syscall_pre_impl_signalfd4((long)(ufd), (long)(user_mask), \
+ (long)(sizemask), (long)(flags))
+#define __sanitizer_syscall_post_signalfd4(res, ufd, user_mask, sizemask, \
+ flags) \
+ __sanitizer_syscall_post_impl_signalfd4(res, (long)(ufd), (long)(user_mask), \
+ (long)(sizemask), (long)(flags))
+#define __sanitizer_syscall_pre_timerfd_create(clockid, flags) \
+ __sanitizer_syscall_pre_impl_timerfd_create((long)(clockid), (long)(flags))
+#define __sanitizer_syscall_post_timerfd_create(res, clockid, flags) \
+ __sanitizer_syscall_post_impl_timerfd_create(res, (long)(clockid), \
+ (long)(flags))
+#define __sanitizer_syscall_pre_timerfd_settime(ufd, flags, utmr, otmr) \
+ __sanitizer_syscall_pre_impl_timerfd_settime((long)(ufd), (long)(flags), \
+ (long)(utmr), (long)(otmr))
+#define __sanitizer_syscall_post_timerfd_settime(res, ufd, flags, utmr, otmr) \
+ __sanitizer_syscall_post_impl_timerfd_settime( \
+ res, (long)(ufd), (long)(flags), (long)(utmr), (long)(otmr))
+#define __sanitizer_syscall_pre_timerfd_gettime(ufd, otmr) \
+ __sanitizer_syscall_pre_impl_timerfd_gettime((long)(ufd), (long)(otmr))
+#define __sanitizer_syscall_post_timerfd_gettime(res, ufd, otmr) \
+ __sanitizer_syscall_post_impl_timerfd_gettime(res, (long)(ufd), (long)(otmr))
+#define __sanitizer_syscall_pre_eventfd(count) \
+ __sanitizer_syscall_pre_impl_eventfd((long)(count))
+#define __sanitizer_syscall_post_eventfd(res, count) \
+ __sanitizer_syscall_post_impl_eventfd(res, (long)(count))
+#define __sanitizer_syscall_pre_eventfd2(count, flags) \
+ __sanitizer_syscall_pre_impl_eventfd2((long)(count), (long)(flags))
+#define __sanitizer_syscall_post_eventfd2(res, count, flags) \
+ __sanitizer_syscall_post_impl_eventfd2(res, (long)(count), (long)(flags))
+#define __sanitizer_syscall_pre_old_readdir(arg0, arg1, arg2) \
+ __sanitizer_syscall_pre_impl_old_readdir((long)(arg0), (long)(arg1), \
+ (long)(arg2))
+#define __sanitizer_syscall_post_old_readdir(res, arg0, arg1, arg2) \
+ __sanitizer_syscall_post_impl_old_readdir(res, (long)(arg0), (long)(arg1), \
+ (long)(arg2))
+#define __sanitizer_syscall_pre_pselect6(arg0, arg1, arg2, arg3, arg4, arg5) \
+ __sanitizer_syscall_pre_impl_pselect6((long)(arg0), (long)(arg1), \
+ (long)(arg2), (long)(arg3), \
+ (long)(arg4), (long)(arg5))
+#define __sanitizer_syscall_post_pselect6(res, arg0, arg1, arg2, arg3, arg4, \
+ arg5) \
+ __sanitizer_syscall_post_impl_pselect6(res, (long)(arg0), (long)(arg1), \
+ (long)(arg2), (long)(arg3), \
+ (long)(arg4), (long)(arg5))
+#define __sanitizer_syscall_pre_ppoll(arg0, arg1, arg2, arg3, arg4) \
+ __sanitizer_syscall_pre_impl_ppoll((long)(arg0), (long)(arg1), (long)(arg2), \
+ (long)(arg3), (long)(arg4))
+#define __sanitizer_syscall_post_ppoll(res, arg0, arg1, arg2, arg3, arg4) \
+ __sanitizer_syscall_post_impl_ppoll(res, (long)(arg0), (long)(arg1), \
+ (long)(arg2), (long)(arg3), \
+ (long)(arg4))
+#define __sanitizer_syscall_pre_syncfs(fd) \
+ __sanitizer_syscall_pre_impl_syncfs((long)(fd))
+#define __sanitizer_syscall_post_syncfs(res, fd) \
+ __sanitizer_syscall_post_impl_syncfs(res, (long)(fd))
+#define __sanitizer_syscall_pre_perf_event_open(attr_uptr, pid, cpu, group_fd, \
+ flags) \
+ __sanitizer_syscall_pre_impl_perf_event_open((long)(attr_uptr), (long)(pid), \
+ (long)(cpu), (long)(group_fd), \
+ (long)(flags))
+#define __sanitizer_syscall_post_perf_event_open(res, attr_uptr, pid, cpu, \
+ group_fd, flags) \
+ __sanitizer_syscall_post_impl_perf_event_open( \
+ res, (long)(attr_uptr), (long)(pid), (long)(cpu), (long)(group_fd), \
+ (long)(flags))
+#define __sanitizer_syscall_pre_mmap_pgoff(addr, len, prot, flags, fd, pgoff) \
+ __sanitizer_syscall_pre_impl_mmap_pgoff((long)(addr), (long)(len), \
+ (long)(prot), (long)(flags), \
+ (long)(fd), (long)(pgoff))
+#define __sanitizer_syscall_post_mmap_pgoff(res, addr, len, prot, flags, fd, \
+ pgoff) \
+ __sanitizer_syscall_post_impl_mmap_pgoff(res, (long)(addr), (long)(len), \
+ (long)(prot), (long)(flags), \
+ (long)(fd), (long)(pgoff))
+#define __sanitizer_syscall_pre_old_mmap(arg) \
+ __sanitizer_syscall_pre_impl_old_mmap((long)(arg))
+#define __sanitizer_syscall_post_old_mmap(res, arg) \
+ __sanitizer_syscall_post_impl_old_mmap(res, (long)(arg))
+#define __sanitizer_syscall_pre_name_to_handle_at(dfd, name, handle, mnt_id, \
+ flag) \
+ __sanitizer_syscall_pre_impl_name_to_handle_at( \
+ (long)(dfd), (long)(name), (long)(handle), (long)(mnt_id), (long)(flag))
+#define __sanitizer_syscall_post_name_to_handle_at(res, dfd, name, handle, \
+ mnt_id, flag) \
+ __sanitizer_syscall_post_impl_name_to_handle_at( \
+ res, (long)(dfd), (long)(name), (long)(handle), (long)(mnt_id), \
+ (long)(flag))
+#define __sanitizer_syscall_pre_open_by_handle_at(mountdirfd, handle, flags) \
+ __sanitizer_syscall_pre_impl_open_by_handle_at( \
+ (long)(mountdirfd), (long)(handle), (long)(flags))
+#define __sanitizer_syscall_post_open_by_handle_at(res, mountdirfd, handle, \
+ flags) \
+ __sanitizer_syscall_post_impl_open_by_handle_at( \
+ res, (long)(mountdirfd), (long)(handle), (long)(flags))
+#define __sanitizer_syscall_pre_setns(fd, nstype) \
+ __sanitizer_syscall_pre_impl_setns((long)(fd), (long)(nstype))
+#define __sanitizer_syscall_post_setns(res, fd, nstype) \
+ __sanitizer_syscall_post_impl_setns(res, (long)(fd), (long)(nstype))
+#define __sanitizer_syscall_pre_process_vm_readv(pid, lvec, liovcnt, rvec, \
+ riovcnt, flags) \
+ __sanitizer_syscall_pre_impl_process_vm_readv( \
+ (long)(pid), (long)(lvec), (long)(liovcnt), (long)(rvec), \
+ (long)(riovcnt), (long)(flags))
+#define __sanitizer_syscall_post_process_vm_readv(res, pid, lvec, liovcnt, \
+ rvec, riovcnt, flags) \
+ __sanitizer_syscall_post_impl_process_vm_readv( \
+ res, (long)(pid), (long)(lvec), (long)(liovcnt), (long)(rvec), \
+ (long)(riovcnt), (long)(flags))
+#define __sanitizer_syscall_pre_process_vm_writev(pid, lvec, liovcnt, rvec, \
+ riovcnt, flags) \
+ __sanitizer_syscall_pre_impl_process_vm_writev( \
+ (long)(pid), (long)(lvec), (long)(liovcnt), (long)(rvec), \
+ (long)(riovcnt), (long)(flags))
+#define __sanitizer_syscall_post_process_vm_writev(res, pid, lvec, liovcnt, \
+ rvec, riovcnt, flags) \
+ __sanitizer_syscall_post_impl_process_vm_writev( \
+ res, (long)(pid), (long)(lvec), (long)(liovcnt), (long)(rvec), \
+ (long)(riovcnt), (long)(flags))
+#define __sanitizer_syscall_pre_fork() \
+ __sanitizer_syscall_pre_impl_fork()
+#define __sanitizer_syscall_post_fork(res) \
+ __sanitizer_syscall_post_impl_fork(res)
+#define __sanitizer_syscall_pre_vfork() \
+ __sanitizer_syscall_pre_impl_vfork()
+#define __sanitizer_syscall_post_vfork(res) \
+ __sanitizer_syscall_post_impl_vfork(res)
+#define __sanitizer_syscall_pre_sigaction(signum, act, oldact) \
+  __sanitizer_syscall_pre_impl_sigaction((long)(signum), (long)(act), \
+                                         (long)(oldact))
+#define __sanitizer_syscall_post_sigaction(res, signum, act, oldact) \
+  __sanitizer_syscall_post_impl_sigaction(res, (long)(signum), (long)(act), \
+                                          (long)(oldact))
+#define __sanitizer_syscall_pre_rt_sigaction(signum, act, oldact, sz) \
+  __sanitizer_syscall_pre_impl_rt_sigaction((long)(signum), (long)(act), \
+                                            (long)(oldact), (long)(sz))
+#define __sanitizer_syscall_post_rt_sigaction(res, signum, act, oldact, sz) \
+  __sanitizer_syscall_post_impl_rt_sigaction(res, (long)(signum), (long)(act), \
+                                             (long)(oldact), (long)(sz))
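+
+// Illustrative call-site pattern: invoke the pre hook, issue the raw
+// syscall, then report the result to the matching post hook. The
+// __sanitizer_syscall_{pre,post}_impl_* functions that these macros expand
+// to are provided by the sanitizer run-time.
+//
+//   __sanitizer_syscall_pre_recvmsg(fd, msg, flags);
+//   long res = syscall(__NR_recvmsg, fd, msg, flags);
+//   __sanitizer_syscall_post_recvmsg(res, fd, msg, flags);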
+
+// And now a few syscalls we don't handle yet.
+#define __sanitizer_syscall_pre_afs_syscall(...)
+#define __sanitizer_syscall_pre_arch_prctl(...)
+#define __sanitizer_syscall_pre_break(...)
+#define __sanitizer_syscall_pre_chown32(...)
+#define __sanitizer_syscall_pre_clone(...)
+#define __sanitizer_syscall_pre_create_module(...)
+#define __sanitizer_syscall_pre_epoll_ctl_old(...)
+#define __sanitizer_syscall_pre_epoll_wait_old(...)
+#define __sanitizer_syscall_pre_execve(...)
+#define __sanitizer_syscall_pre_fadvise64(...)
+#define __sanitizer_syscall_pre_fadvise64_64(...)
+#define __sanitizer_syscall_pre_fallocate(...)
+#define __sanitizer_syscall_pre_fanotify_init(...)
+#define __sanitizer_syscall_pre_fanotify_mark(...)
+#define __sanitizer_syscall_pre_fchown32(...)
+#define __sanitizer_syscall_pre_ftime(...)
+#define __sanitizer_syscall_pre_ftruncate64(...)
+#define __sanitizer_syscall_pre_futex(...)
+#define __sanitizer_syscall_pre_getegid32(...)
+#define __sanitizer_syscall_pre_geteuid32(...)
+#define __sanitizer_syscall_pre_getgid32(...)
+#define __sanitizer_syscall_pre_getgroups32(...)
+#define __sanitizer_syscall_pre_get_kernel_syms(...)
+#define __sanitizer_syscall_pre_getpmsg(...)
+#define __sanitizer_syscall_pre_getresgid32(...)
+#define __sanitizer_syscall_pre_getresuid32(...)
+#define __sanitizer_syscall_pre_get_thread_area(...)
+#define __sanitizer_syscall_pre_getuid32(...)
+#define __sanitizer_syscall_pre_gtty(...)
+#define __sanitizer_syscall_pre_idle(...)
+#define __sanitizer_syscall_pre_iopl(...)
+#define __sanitizer_syscall_pre_lchown32(...)
+#define __sanitizer_syscall_pre__llseek(...)
+#define __sanitizer_syscall_pre_lock(...)
+#define __sanitizer_syscall_pre_madvise1(...)
+#define __sanitizer_syscall_pre_mmap(...)
+#define __sanitizer_syscall_pre_mmap2(...)
+#define __sanitizer_syscall_pre_modify_ldt(...)
+#define __sanitizer_syscall_pre_mpx(...)
+#define __sanitizer_syscall_pre__newselect(...)
+#define __sanitizer_syscall_pre_nfsservctl(...)
+#define __sanitizer_syscall_pre_oldfstat(...)
+#define __sanitizer_syscall_pre_oldlstat(...)
+#define __sanitizer_syscall_pre_oldolduname(...)
+#define __sanitizer_syscall_pre_oldstat(...)
+#define __sanitizer_syscall_pre_prctl(...)
+#define __sanitizer_syscall_pre_prof(...)
+#define __sanitizer_syscall_pre_profil(...)
+#define __sanitizer_syscall_pre_putpmsg(...)
+#define __sanitizer_syscall_pre_query_module(...)
+#define __sanitizer_syscall_pre_readahead(...)
+#define __sanitizer_syscall_pre_readdir(...)
+#define __sanitizer_syscall_pre_rt_sigreturn(...)
+#define __sanitizer_syscall_pre_rt_sigsuspend(...)
+#define __sanitizer_syscall_pre_security(...)
+#define __sanitizer_syscall_pre_setfsgid32(...)
+#define __sanitizer_syscall_pre_setfsuid32(...)
+#define __sanitizer_syscall_pre_setgid32(...)
+#define __sanitizer_syscall_pre_setgroups32(...)
+#define __sanitizer_syscall_pre_setregid32(...)
+#define __sanitizer_syscall_pre_setresgid32(...)
+#define __sanitizer_syscall_pre_setresuid32(...)
+#define __sanitizer_syscall_pre_setreuid32(...)
+#define __sanitizer_syscall_pre_set_thread_area(...)
+#define __sanitizer_syscall_pre_setuid32(...)
+#define __sanitizer_syscall_pre_sigaltstack(...)
+#define __sanitizer_syscall_pre_sigreturn(...)
+#define __sanitizer_syscall_pre_sigsuspend(...)
+#define __sanitizer_syscall_pre_stty(...)
+#define __sanitizer_syscall_pre_sync_file_range(...)
+#define __sanitizer_syscall_pre__sysctl(...)
+#define __sanitizer_syscall_pre_truncate64(...)
+#define __sanitizer_syscall_pre_tuxcall(...)
+#define __sanitizer_syscall_pre_ugetrlimit(...)
+#define __sanitizer_syscall_pre_ulimit(...)
+#define __sanitizer_syscall_pre_umount2(...)
+#define __sanitizer_syscall_pre_vm86(...)
+#define __sanitizer_syscall_pre_vm86old(...)
+#define __sanitizer_syscall_pre_vserver(...)
+
+#define __sanitizer_syscall_post_afs_syscall(res, ...)
+#define __sanitizer_syscall_post_arch_prctl(res, ...)
+#define __sanitizer_syscall_post_break(res, ...)
+#define __sanitizer_syscall_post_chown32(res, ...)
+#define __sanitizer_syscall_post_clone(res, ...)
+#define __sanitizer_syscall_post_create_module(res, ...)
+#define __sanitizer_syscall_post_epoll_ctl_old(res, ...)
+#define __sanitizer_syscall_post_epoll_wait_old(res, ...)
+#define __sanitizer_syscall_post_execve(res, ...)
+#define __sanitizer_syscall_post_fadvise64(res, ...)
+#define __sanitizer_syscall_post_fadvise64_64(res, ...)
+#define __sanitizer_syscall_post_fallocate(res, ...)
+#define __sanitizer_syscall_post_fanotify_init(res, ...)
+#define __sanitizer_syscall_post_fanotify_mark(res, ...)
+#define __sanitizer_syscall_post_fchown32(res, ...)
+#define __sanitizer_syscall_post_ftime(res, ...)
+#define __sanitizer_syscall_post_ftruncate64(res, ...)
+#define __sanitizer_syscall_post_futex(res, ...)
+#define __sanitizer_syscall_post_getegid32(res, ...)
+#define __sanitizer_syscall_post_geteuid32(res, ...)
+#define __sanitizer_syscall_post_getgid32(res, ...)
+#define __sanitizer_syscall_post_getgroups32(res, ...)
+#define __sanitizer_syscall_post_get_kernel_syms(res, ...)
+#define __sanitizer_syscall_post_getpmsg(res, ...)
+#define __sanitizer_syscall_post_getresgid32(res, ...)
+#define __sanitizer_syscall_post_getresuid32(res, ...)
+#define __sanitizer_syscall_post_get_thread_area(res, ...)
+#define __sanitizer_syscall_post_getuid32(res, ...)
+#define __sanitizer_syscall_post_gtty(res, ...)
+#define __sanitizer_syscall_post_idle(res, ...)
+#define __sanitizer_syscall_post_iopl(res, ...)
+#define __sanitizer_syscall_post_lchown32(res, ...)
+#define __sanitizer_syscall_post__llseek(res, ...)
+#define __sanitizer_syscall_post_lock(res, ...)
+#define __sanitizer_syscall_post_madvise1(res, ...)
+#define __sanitizer_syscall_post_mmap2(res, ...)
+#define __sanitizer_syscall_post_mmap(res, ...)
+#define __sanitizer_syscall_post_modify_ldt(res, ...)
+#define __sanitizer_syscall_post_mpx(res, ...)
+#define __sanitizer_syscall_post__newselect(res, ...)
+#define __sanitizer_syscall_post_nfsservctl(res, ...)
+#define __sanitizer_syscall_post_oldfstat(res, ...)
+#define __sanitizer_syscall_post_oldlstat(res, ...)
+#define __sanitizer_syscall_post_oldolduname(res, ...)
+#define __sanitizer_syscall_post_oldstat(res, ...)
+#define __sanitizer_syscall_post_prctl(res, ...)
+#define __sanitizer_syscall_post_profil(res, ...)
+#define __sanitizer_syscall_post_prof(res, ...)
+#define __sanitizer_syscall_post_putpmsg(res, ...)
+#define __sanitizer_syscall_post_query_module(res, ...)
+#define __sanitizer_syscall_post_readahead(res, ...)
+#define __sanitizer_syscall_post_readdir(res, ...)
+#define __sanitizer_syscall_post_rt_sigreturn(res, ...)
+#define __sanitizer_syscall_post_rt_sigsuspend(res, ...)
+#define __sanitizer_syscall_post_security(res, ...)
+#define __sanitizer_syscall_post_setfsgid32(res, ...)
+#define __sanitizer_syscall_post_setfsuid32(res, ...)
+#define __sanitizer_syscall_post_setgid32(res, ...)
+#define __sanitizer_syscall_post_setgroups32(res, ...)
+#define __sanitizer_syscall_post_setregid32(res, ...)
+#define __sanitizer_syscall_post_setresgid32(res, ...)
+#define __sanitizer_syscall_post_setresuid32(res, ...)
+#define __sanitizer_syscall_post_setreuid32(res, ...)
+#define __sanitizer_syscall_post_set_thread_area(res, ...)
+#define __sanitizer_syscall_post_setuid32(res, ...)
+#define __sanitizer_syscall_post_sigaltstack(res, ...)
+#define __sanitizer_syscall_post_sigreturn(res, ...)
+#define __sanitizer_syscall_post_sigsuspend(res, ...)
+#define __sanitizer_syscall_post_stty(res, ...)
+#define __sanitizer_syscall_post_sync_file_range(res, ...)
+#define __sanitizer_syscall_post__sysctl(res, ...)
+#define __sanitizer_syscall_post_truncate64(res, ...)
+#define __sanitizer_syscall_post_tuxcall(res, ...)
+#define __sanitizer_syscall_post_ugetrlimit(res, ...)
+#define __sanitizer_syscall_post_ulimit(res, ...)
+#define __sanitizer_syscall_post_umount2(res, ...)
+#define __sanitizer_syscall_post_vm86old(res, ...)
+#define __sanitizer_syscall_post_vm86(res, ...)
+#define __sanitizer_syscall_post_vserver(res, ...)
+
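+// A minimal sketch, assuming an instrumented wrapper brackets a raw
+// syscall(2) invocation with the hooks above; because the unhandled hooks
+// expand to nothing, no per-syscall guards are needed (the wrapper name and
+// the raw call below are illustrative, not part of this interface):
+//
+//   long wrapped_clone(long flags, long sp) {
+//     __sanitizer_syscall_pre_clone(flags, sp, 0, 0, 0);        // no-op today
+//     long res = syscall(SYS_clone, flags, sp, 0, 0, 0);
+//     __sanitizer_syscall_post_clone(res, flags, sp, 0, 0, 0);  // no-op today
+//     return res;
+//   }
+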
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// Private declarations. Do not call directly from user code. Use macros above.
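+//
+// A minimal usage sketch, assuming user code brackets a raw syscall with the
+// matching pre/post macro pair defined above (the syscall(2) invocation and
+// the SYS_read constant are illustrative, not part of this interface):
+//
+//   __sanitizer_syscall_pre_read(fd, buf, count);
+//   long res = syscall(SYS_read, fd, buf, count);
+//   __sanitizer_syscall_post_read(res, fd, buf, count);
+//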
+void __sanitizer_syscall_pre_impl_time(long tloc);
+void __sanitizer_syscall_post_impl_time(long res, long tloc);
+void __sanitizer_syscall_pre_impl_stime(long tptr);
+void __sanitizer_syscall_post_impl_stime(long res, long tptr);
+void __sanitizer_syscall_pre_impl_gettimeofday(long tv, long tz);
+void __sanitizer_syscall_post_impl_gettimeofday(long res, long tv, long tz);
+void __sanitizer_syscall_pre_impl_settimeofday(long tv, long tz);
+void __sanitizer_syscall_post_impl_settimeofday(long res, long tv, long tz);
+void __sanitizer_syscall_pre_impl_adjtimex(long txc_p);
+void __sanitizer_syscall_post_impl_adjtimex(long res, long txc_p);
+void __sanitizer_syscall_pre_impl_times(long tbuf);
+void __sanitizer_syscall_post_impl_times(long res, long tbuf);
+void __sanitizer_syscall_pre_impl_gettid();
+void __sanitizer_syscall_post_impl_gettid(long res);
+void __sanitizer_syscall_pre_impl_nanosleep(long rqtp, long rmtp);
+void __sanitizer_syscall_post_impl_nanosleep(long res, long rqtp, long rmtp);
+void __sanitizer_syscall_pre_impl_alarm(long seconds);
+void __sanitizer_syscall_post_impl_alarm(long res, long seconds);
+void __sanitizer_syscall_pre_impl_getpid();
+void __sanitizer_syscall_post_impl_getpid(long res);
+void __sanitizer_syscall_pre_impl_getppid();
+void __sanitizer_syscall_post_impl_getppid(long res);
+void __sanitizer_syscall_pre_impl_getuid();
+void __sanitizer_syscall_post_impl_getuid(long res);
+void __sanitizer_syscall_pre_impl_geteuid();
+void __sanitizer_syscall_post_impl_geteuid(long res);
+void __sanitizer_syscall_pre_impl_getgid();
+void __sanitizer_syscall_post_impl_getgid(long res);
+void __sanitizer_syscall_pre_impl_getegid();
+void __sanitizer_syscall_post_impl_getegid(long res);
+void __sanitizer_syscall_pre_impl_getresuid(long ruid, long euid, long suid);
+void __sanitizer_syscall_post_impl_getresuid(long res, long ruid, long euid,
+ long suid);
+void __sanitizer_syscall_pre_impl_getresgid(long rgid, long egid, long sgid);
+void __sanitizer_syscall_post_impl_getresgid(long res, long rgid, long egid,
+ long sgid);
+void __sanitizer_syscall_pre_impl_getpgid(long pid);
+void __sanitizer_syscall_post_impl_getpgid(long res, long pid);
+void __sanitizer_syscall_pre_impl_getpgrp();
+void __sanitizer_syscall_post_impl_getpgrp(long res);
+void __sanitizer_syscall_pre_impl_getsid(long pid);
+void __sanitizer_syscall_post_impl_getsid(long res, long pid);
+void __sanitizer_syscall_pre_impl_getgroups(long gidsetsize, long grouplist);
+void __sanitizer_syscall_post_impl_getgroups(long res, long gidsetsize,
+ long grouplist);
+void __sanitizer_syscall_pre_impl_setregid(long rgid, long egid);
+void __sanitizer_syscall_post_impl_setregid(long res, long rgid, long egid);
+void __sanitizer_syscall_pre_impl_setgid(long gid);
+void __sanitizer_syscall_post_impl_setgid(long res, long gid);
+void __sanitizer_syscall_pre_impl_setreuid(long ruid, long euid);
+void __sanitizer_syscall_post_impl_setreuid(long res, long ruid, long euid);
+void __sanitizer_syscall_pre_impl_setuid(long uid);
+void __sanitizer_syscall_post_impl_setuid(long res, long uid);
+void __sanitizer_syscall_pre_impl_setresuid(long ruid, long euid, long suid);
+void __sanitizer_syscall_post_impl_setresuid(long res, long ruid, long euid,
+ long suid);
+void __sanitizer_syscall_pre_impl_setresgid(long rgid, long egid, long sgid);
+void __sanitizer_syscall_post_impl_setresgid(long res, long rgid, long egid,
+ long sgid);
+void __sanitizer_syscall_pre_impl_setfsuid(long uid);
+void __sanitizer_syscall_post_impl_setfsuid(long res, long uid);
+void __sanitizer_syscall_pre_impl_setfsgid(long gid);
+void __sanitizer_syscall_post_impl_setfsgid(long res, long gid);
+void __sanitizer_syscall_pre_impl_setpgid(long pid, long pgid);
+void __sanitizer_syscall_post_impl_setpgid(long res, long pid, long pgid);
+void __sanitizer_syscall_pre_impl_setsid();
+void __sanitizer_syscall_post_impl_setsid(long res);
+void __sanitizer_syscall_pre_impl_setgroups(long gidsetsize, long grouplist);
+void __sanitizer_syscall_post_impl_setgroups(long res, long gidsetsize,
+ long grouplist);
+void __sanitizer_syscall_pre_impl_acct(long name);
+void __sanitizer_syscall_post_impl_acct(long res, long name);
+void __sanitizer_syscall_pre_impl_capget(long header, long dataptr);
+void __sanitizer_syscall_post_impl_capget(long res, long header, long dataptr);
+void __sanitizer_syscall_pre_impl_capset(long header, long data);
+void __sanitizer_syscall_post_impl_capset(long res, long header, long data);
+void __sanitizer_syscall_pre_impl_personality(long personality);
+void __sanitizer_syscall_post_impl_personality(long res, long personality);
+void __sanitizer_syscall_pre_impl_sigpending(long set);
+void __sanitizer_syscall_post_impl_sigpending(long res, long set);
+void __sanitizer_syscall_pre_impl_sigprocmask(long how, long set, long oset);
+void __sanitizer_syscall_post_impl_sigprocmask(long res, long how, long set,
+ long oset);
+void __sanitizer_syscall_pre_impl_getitimer(long which, long value);
+void __sanitizer_syscall_post_impl_getitimer(long res, long which, long value);
+void __sanitizer_syscall_pre_impl_setitimer(long which, long value,
+ long ovalue);
+void __sanitizer_syscall_post_impl_setitimer(long res, long which, long value,
+ long ovalue);
+void __sanitizer_syscall_pre_impl_timer_create(long which_clock,
+ long timer_event_spec,
+ long created_timer_id);
+void __sanitizer_syscall_post_impl_timer_create(long res, long which_clock,
+ long timer_event_spec,
+ long created_timer_id);
+void __sanitizer_syscall_pre_impl_timer_gettime(long timer_id, long setting);
+void __sanitizer_syscall_post_impl_timer_gettime(long res, long timer_id,
+ long setting);
+void __sanitizer_syscall_pre_impl_timer_getoverrun(long timer_id);
+void __sanitizer_syscall_post_impl_timer_getoverrun(long res, long timer_id);
+void __sanitizer_syscall_pre_impl_timer_settime(long timer_id, long flags,
+ long new_setting,
+ long old_setting);
+void __sanitizer_syscall_post_impl_timer_settime(long res, long timer_id,
+ long flags, long new_setting,
+ long old_setting);
+void __sanitizer_syscall_pre_impl_timer_delete(long timer_id);
+void __sanitizer_syscall_post_impl_timer_delete(long res, long timer_id);
+void __sanitizer_syscall_pre_impl_clock_settime(long which_clock, long tp);
+void __sanitizer_syscall_post_impl_clock_settime(long res, long which_clock,
+ long tp);
+void __sanitizer_syscall_pre_impl_clock_gettime(long which_clock, long tp);
+void __sanitizer_syscall_post_impl_clock_gettime(long res, long which_clock,
+ long tp);
+void __sanitizer_syscall_pre_impl_clock_adjtime(long which_clock, long tx);
+void __sanitizer_syscall_post_impl_clock_adjtime(long res, long which_clock,
+ long tx);
+void __sanitizer_syscall_pre_impl_clock_getres(long which_clock, long tp);
+void __sanitizer_syscall_post_impl_clock_getres(long res, long which_clock,
+ long tp);
+void __sanitizer_syscall_pre_impl_clock_nanosleep(long which_clock, long flags,
+ long rqtp, long rmtp);
+void __sanitizer_syscall_post_impl_clock_nanosleep(long res, long which_clock,
+ long flags, long rqtp,
+ long rmtp);
+void __sanitizer_syscall_pre_impl_nice(long increment);
+void __sanitizer_syscall_post_impl_nice(long res, long increment);
+void __sanitizer_syscall_pre_impl_sched_setscheduler(long pid, long policy,
+ long param);
+void __sanitizer_syscall_post_impl_sched_setscheduler(long res, long pid,
+ long policy, long param);
+void __sanitizer_syscall_pre_impl_sched_setparam(long pid, long param);
+void __sanitizer_syscall_post_impl_sched_setparam(long res, long pid,
+ long param);
+void __sanitizer_syscall_pre_impl_sched_getscheduler(long pid);
+void __sanitizer_syscall_post_impl_sched_getscheduler(long res, long pid);
+void __sanitizer_syscall_pre_impl_sched_getparam(long pid, long param);
+void __sanitizer_syscall_post_impl_sched_getparam(long res, long pid,
+ long param);
+void __sanitizer_syscall_pre_impl_sched_setaffinity(long pid, long len,
+ long user_mask_ptr);
+void __sanitizer_syscall_post_impl_sched_setaffinity(long res, long pid,
+ long len,
+ long user_mask_ptr);
+void __sanitizer_syscall_pre_impl_sched_getaffinity(long pid, long len,
+ long user_mask_ptr);
+void __sanitizer_syscall_post_impl_sched_getaffinity(long res, long pid,
+ long len,
+ long user_mask_ptr);
+void __sanitizer_syscall_pre_impl_sched_yield();
+void __sanitizer_syscall_post_impl_sched_yield(long res);
+void __sanitizer_syscall_pre_impl_sched_get_priority_max(long policy);
+void __sanitizer_syscall_post_impl_sched_get_priority_max(long res,
+ long policy);
+void __sanitizer_syscall_pre_impl_sched_get_priority_min(long policy);
+void __sanitizer_syscall_post_impl_sched_get_priority_min(long res,
+ long policy);
+void __sanitizer_syscall_pre_impl_sched_rr_get_interval(long pid,
+ long interval);
+void __sanitizer_syscall_post_impl_sched_rr_get_interval(long res, long pid,
+ long interval);
+void __sanitizer_syscall_pre_impl_setpriority(long which, long who,
+ long niceval);
+void __sanitizer_syscall_post_impl_setpriority(long res, long which, long who,
+ long niceval);
+void __sanitizer_syscall_pre_impl_getpriority(long which, long who);
+void __sanitizer_syscall_post_impl_getpriority(long res, long which, long who);
+void __sanitizer_syscall_pre_impl_shutdown(long arg0, long arg1);
+void __sanitizer_syscall_post_impl_shutdown(long res, long arg0, long arg1);
+void __sanitizer_syscall_pre_impl_reboot(long magic1, long magic2, long cmd,
+ long arg);
+void __sanitizer_syscall_post_impl_reboot(long res, long magic1, long magic2,
+ long cmd, long arg);
+void __sanitizer_syscall_pre_impl_restart_syscall();
+void __sanitizer_syscall_post_impl_restart_syscall(long res);
+void __sanitizer_syscall_pre_impl_kexec_load(long entry, long nr_segments,
+ long segments, long flags);
+void __sanitizer_syscall_post_impl_kexec_load(long res, long entry,
+ long nr_segments, long segments,
+ long flags);
+void __sanitizer_syscall_pre_impl_exit(long error_code);
+void __sanitizer_syscall_post_impl_exit(long res, long error_code);
+void __sanitizer_syscall_pre_impl_exit_group(long error_code);
+void __sanitizer_syscall_post_impl_exit_group(long res, long error_code);
+void __sanitizer_syscall_pre_impl_wait4(long pid, long stat_addr, long options,
+ long ru);
+void __sanitizer_syscall_post_impl_wait4(long res, long pid, long stat_addr,
+ long options, long ru);
+void __sanitizer_syscall_pre_impl_waitid(long which, long pid, long infop,
+ long options, long ru);
+void __sanitizer_syscall_post_impl_waitid(long res, long which, long pid,
+ long infop, long options, long ru);
+void __sanitizer_syscall_pre_impl_waitpid(long pid, long stat_addr,
+ long options);
+void __sanitizer_syscall_post_impl_waitpid(long res, long pid, long stat_addr,
+ long options);
+void __sanitizer_syscall_pre_impl_set_tid_address(long tidptr);
+void __sanitizer_syscall_post_impl_set_tid_address(long res, long tidptr);
+void __sanitizer_syscall_pre_impl_init_module(long umod, long len, long uargs);
+void __sanitizer_syscall_post_impl_init_module(long res, long umod, long len,
+ long uargs);
+void __sanitizer_syscall_pre_impl_delete_module(long name_user, long flags);
+void __sanitizer_syscall_post_impl_delete_module(long res, long name_user,
+ long flags);
+void __sanitizer_syscall_pre_impl_rt_sigprocmask(long how, long set, long oset,
+ long sigsetsize);
+void __sanitizer_syscall_post_impl_rt_sigprocmask(long res, long how, long set,
+ long oset, long sigsetsize);
+void __sanitizer_syscall_pre_impl_rt_sigpending(long set, long sigsetsize);
+void __sanitizer_syscall_post_impl_rt_sigpending(long res, long set,
+ long sigsetsize);
+void __sanitizer_syscall_pre_impl_rt_sigtimedwait(long uthese, long uinfo,
+ long uts, long sigsetsize);
+void __sanitizer_syscall_post_impl_rt_sigtimedwait(long res, long uthese,
+ long uinfo, long uts,
+ long sigsetsize);
+void __sanitizer_syscall_pre_impl_rt_tgsigqueueinfo(long tgid, long pid,
+ long sig, long uinfo);
+void __sanitizer_syscall_post_impl_rt_tgsigqueueinfo(long res, long tgid,
+ long pid, long sig,
+ long uinfo);
+void __sanitizer_syscall_pre_impl_kill(long pid, long sig);
+void __sanitizer_syscall_post_impl_kill(long res, long pid, long sig);
+void __sanitizer_syscall_pre_impl_tgkill(long tgid, long pid, long sig);
+void __sanitizer_syscall_post_impl_tgkill(long res, long tgid, long pid,
+ long sig);
+void __sanitizer_syscall_pre_impl_tkill(long pid, long sig);
+void __sanitizer_syscall_post_impl_tkill(long res, long pid, long sig);
+void __sanitizer_syscall_pre_impl_rt_sigqueueinfo(long pid, long sig,
+ long uinfo);
+void __sanitizer_syscall_post_impl_rt_sigqueueinfo(long res, long pid, long sig,
+ long uinfo);
+void __sanitizer_syscall_pre_impl_sgetmask();
+void __sanitizer_syscall_post_impl_sgetmask(long res);
+void __sanitizer_syscall_pre_impl_ssetmask(long newmask);
+void __sanitizer_syscall_post_impl_ssetmask(long res, long newmask);
+void __sanitizer_syscall_pre_impl_signal(long sig, long handler);
+void __sanitizer_syscall_post_impl_signal(long res, long sig, long handler);
+void __sanitizer_syscall_pre_impl_pause();
+void __sanitizer_syscall_post_impl_pause(long res);
+void __sanitizer_syscall_pre_impl_sync();
+void __sanitizer_syscall_post_impl_sync(long res);
+void __sanitizer_syscall_pre_impl_fsync(long fd);
+void __sanitizer_syscall_post_impl_fsync(long res, long fd);
+void __sanitizer_syscall_pre_impl_fdatasync(long fd);
+void __sanitizer_syscall_post_impl_fdatasync(long res, long fd);
+void __sanitizer_syscall_pre_impl_bdflush(long func, long data);
+void __sanitizer_syscall_post_impl_bdflush(long res, long func, long data);
+void __sanitizer_syscall_pre_impl_mount(long dev_name, long dir_name, long type,
+ long flags, long data);
+void __sanitizer_syscall_post_impl_mount(long res, long dev_name, long dir_name,
+ long type, long flags, long data);
+void __sanitizer_syscall_pre_impl_umount(long name, long flags);
+void __sanitizer_syscall_post_impl_umount(long res, long name, long flags);
+void __sanitizer_syscall_pre_impl_oldumount(long name);
+void __sanitizer_syscall_post_impl_oldumount(long res, long name);
+void __sanitizer_syscall_pre_impl_truncate(long path, long length);
+void __sanitizer_syscall_post_impl_truncate(long res, long path, long length);
+void __sanitizer_syscall_pre_impl_ftruncate(long fd, long length);
+void __sanitizer_syscall_post_impl_ftruncate(long res, long fd, long length);
+void __sanitizer_syscall_pre_impl_stat(long filename, long statbuf);
+void __sanitizer_syscall_post_impl_stat(long res, long filename, long statbuf);
+void __sanitizer_syscall_pre_impl_statfs(long path, long buf);
+void __sanitizer_syscall_post_impl_statfs(long res, long path, long buf);
+void __sanitizer_syscall_pre_impl_statfs64(long path, long sz, long buf);
+void __sanitizer_syscall_post_impl_statfs64(long res, long path, long sz,
+ long buf);
+void __sanitizer_syscall_pre_impl_fstatfs(long fd, long buf);
+void __sanitizer_syscall_post_impl_fstatfs(long res, long fd, long buf);
+void __sanitizer_syscall_pre_impl_fstatfs64(long fd, long sz, long buf);
+void __sanitizer_syscall_post_impl_fstatfs64(long res, long fd, long sz,
+ long buf);
+void __sanitizer_syscall_pre_impl_lstat(long filename, long statbuf);
+void __sanitizer_syscall_post_impl_lstat(long res, long filename, long statbuf);
+void __sanitizer_syscall_pre_impl_fstat(long fd, long statbuf);
+void __sanitizer_syscall_post_impl_fstat(long res, long fd, long statbuf);
+void __sanitizer_syscall_pre_impl_newstat(long filename, long statbuf);
+void __sanitizer_syscall_post_impl_newstat(long res, long filename,
+ long statbuf);
+void __sanitizer_syscall_pre_impl_newlstat(long filename, long statbuf);
+void __sanitizer_syscall_post_impl_newlstat(long res, long filename,
+ long statbuf);
+void __sanitizer_syscall_pre_impl_newfstat(long fd, long statbuf);
+void __sanitizer_syscall_post_impl_newfstat(long res, long fd, long statbuf);
+void __sanitizer_syscall_pre_impl_ustat(long dev, long ubuf);
+void __sanitizer_syscall_post_impl_ustat(long res, long dev, long ubuf);
+void __sanitizer_syscall_pre_impl_stat64(long filename, long statbuf);
+void __sanitizer_syscall_post_impl_stat64(long res, long filename,
+ long statbuf);
+void __sanitizer_syscall_pre_impl_fstat64(long fd, long statbuf);
+void __sanitizer_syscall_post_impl_fstat64(long res, long fd, long statbuf);
+void __sanitizer_syscall_pre_impl_lstat64(long filename, long statbuf);
+void __sanitizer_syscall_post_impl_lstat64(long res, long filename,
+ long statbuf);
+void __sanitizer_syscall_pre_impl_setxattr(long path, long name, long value,
+ long size, long flags);
+void __sanitizer_syscall_post_impl_setxattr(long res, long path, long name,
+ long value, long size, long flags);
+void __sanitizer_syscall_pre_impl_lsetxattr(long path, long name, long value,
+ long size, long flags);
+void __sanitizer_syscall_post_impl_lsetxattr(long res, long path, long name,
+ long value, long size, long flags);
+void __sanitizer_syscall_pre_impl_fsetxattr(long fd, long name, long value,
+ long size, long flags);
+void __sanitizer_syscall_post_impl_fsetxattr(long res, long fd, long name,
+ long value, long size, long flags);
+void __sanitizer_syscall_pre_impl_getxattr(long path, long name, long value,
+ long size);
+void __sanitizer_syscall_post_impl_getxattr(long res, long path, long name,
+ long value, long size);
+void __sanitizer_syscall_pre_impl_lgetxattr(long path, long name, long value,
+ long size);
+void __sanitizer_syscall_post_impl_lgetxattr(long res, long path, long name,
+ long value, long size);
+void __sanitizer_syscall_pre_impl_fgetxattr(long fd, long name, long value,
+ long size);
+void __sanitizer_syscall_post_impl_fgetxattr(long res, long fd, long name,
+ long value, long size);
+void __sanitizer_syscall_pre_impl_listxattr(long path, long list, long size);
+void __sanitizer_syscall_post_impl_listxattr(long res, long path, long list,
+ long size);
+void __sanitizer_syscall_pre_impl_llistxattr(long path, long list, long size);
+void __sanitizer_syscall_post_impl_llistxattr(long res, long path, long list,
+ long size);
+void __sanitizer_syscall_pre_impl_flistxattr(long fd, long list, long size);
+void __sanitizer_syscall_post_impl_flistxattr(long res, long fd, long list,
+ long size);
+void __sanitizer_syscall_pre_impl_removexattr(long path, long name);
+void __sanitizer_syscall_post_impl_removexattr(long res, long path, long name);
+void __sanitizer_syscall_pre_impl_lremovexattr(long path, long name);
+void __sanitizer_syscall_post_impl_lremovexattr(long res, long path, long name);
+void __sanitizer_syscall_pre_impl_fremovexattr(long fd, long name);
+void __sanitizer_syscall_post_impl_fremovexattr(long res, long fd, long name);
+void __sanitizer_syscall_pre_impl_brk(long brk);
+void __sanitizer_syscall_post_impl_brk(long res, long brk);
+void __sanitizer_syscall_pre_impl_mprotect(long start, long len, long prot);
+void __sanitizer_syscall_post_impl_mprotect(long res, long start, long len,
+ long prot);
+void __sanitizer_syscall_pre_impl_mremap(long addr, long old_len, long new_len,
+ long flags, long new_addr);
+void __sanitizer_syscall_post_impl_mremap(long res, long addr, long old_len,
+ long new_len, long flags,
+ long new_addr);
+void __sanitizer_syscall_pre_impl_remap_file_pages(long start, long size,
+ long prot, long pgoff,
+ long flags);
+void __sanitizer_syscall_post_impl_remap_file_pages(long res, long start,
+ long size, long prot,
+ long pgoff, long flags);
+void __sanitizer_syscall_pre_impl_msync(long start, long len, long flags);
+void __sanitizer_syscall_post_impl_msync(long res, long start, long len,
+ long flags);
+void __sanitizer_syscall_pre_impl_munmap(long addr, long len);
+void __sanitizer_syscall_post_impl_munmap(long res, long addr, long len);
+void __sanitizer_syscall_pre_impl_mlock(long start, long len);
+void __sanitizer_syscall_post_impl_mlock(long res, long start, long len);
+void __sanitizer_syscall_pre_impl_munlock(long start, long len);
+void __sanitizer_syscall_post_impl_munlock(long res, long start, long len);
+void __sanitizer_syscall_pre_impl_mlockall(long flags);
+void __sanitizer_syscall_post_impl_mlockall(long res, long flags);
+void __sanitizer_syscall_pre_impl_munlockall();
+void __sanitizer_syscall_post_impl_munlockall(long res);
+void __sanitizer_syscall_pre_impl_madvise(long start, long len, long behavior);
+void __sanitizer_syscall_post_impl_madvise(long res, long start, long len,
+ long behavior);
+void __sanitizer_syscall_pre_impl_mincore(long start, long len, long vec);
+void __sanitizer_syscall_post_impl_mincore(long res, long start, long len,
+ long vec);
+void __sanitizer_syscall_pre_impl_pivot_root(long new_root, long put_old);
+void __sanitizer_syscall_post_impl_pivot_root(long res, long new_root,
+ long put_old);
+void __sanitizer_syscall_pre_impl_chroot(long filename);
+void __sanitizer_syscall_post_impl_chroot(long res, long filename);
+void __sanitizer_syscall_pre_impl_mknod(long filename, long mode, long dev);
+void __sanitizer_syscall_post_impl_mknod(long res, long filename, long mode,
+ long dev);
+void __sanitizer_syscall_pre_impl_link(long oldname, long newname);
+void __sanitizer_syscall_post_impl_link(long res, long oldname, long newname);
+void __sanitizer_syscall_pre_impl_symlink(long old, long new_);
+void __sanitizer_syscall_post_impl_symlink(long res, long old, long new_);
+void __sanitizer_syscall_pre_impl_unlink(long pathname);
+void __sanitizer_syscall_post_impl_unlink(long res, long pathname);
+void __sanitizer_syscall_pre_impl_rename(long oldname, long newname);
+void __sanitizer_syscall_post_impl_rename(long res, long oldname, long newname);
+void __sanitizer_syscall_pre_impl_chmod(long filename, long mode);
+void __sanitizer_syscall_post_impl_chmod(long res, long filename, long mode);
+void __sanitizer_syscall_pre_impl_fchmod(long fd, long mode);
+void __sanitizer_syscall_post_impl_fchmod(long res, long fd, long mode);
+void __sanitizer_syscall_pre_impl_fcntl(long fd, long cmd, long arg);
+void __sanitizer_syscall_post_impl_fcntl(long res, long fd, long cmd, long arg);
+void __sanitizer_syscall_pre_impl_fcntl64(long fd, long cmd, long arg);
+void __sanitizer_syscall_post_impl_fcntl64(long res, long fd, long cmd,
+ long arg);
+void __sanitizer_syscall_pre_impl_pipe(long fildes);
+void __sanitizer_syscall_post_impl_pipe(long res, long fildes);
+void __sanitizer_syscall_pre_impl_pipe2(long fildes, long flags);
+void __sanitizer_syscall_post_impl_pipe2(long res, long fildes, long flags);
+void __sanitizer_syscall_pre_impl_dup(long fildes);
+void __sanitizer_syscall_post_impl_dup(long res, long fildes);
+void __sanitizer_syscall_pre_impl_dup2(long oldfd, long newfd);
+void __sanitizer_syscall_post_impl_dup2(long res, long oldfd, long newfd);
+void __sanitizer_syscall_pre_impl_dup3(long oldfd, long newfd, long flags);
+void __sanitizer_syscall_post_impl_dup3(long res, long oldfd, long newfd,
+ long flags);
+void __sanitizer_syscall_pre_impl_ioperm(long from, long num, long on);
+void __sanitizer_syscall_post_impl_ioperm(long res, long from, long num,
+ long on);
+void __sanitizer_syscall_pre_impl_ioctl(long fd, long cmd, long arg);
+void __sanitizer_syscall_post_impl_ioctl(long res, long fd, long cmd, long arg);
+void __sanitizer_syscall_pre_impl_flock(long fd, long cmd);
+void __sanitizer_syscall_post_impl_flock(long res, long fd, long cmd);
+void __sanitizer_syscall_pre_impl_io_setup(long nr_reqs, long ctx);
+void __sanitizer_syscall_post_impl_io_setup(long res, long nr_reqs, long ctx);
+void __sanitizer_syscall_pre_impl_io_destroy(long ctx);
+void __sanitizer_syscall_post_impl_io_destroy(long res, long ctx);
+void __sanitizer_syscall_pre_impl_io_getevents(long ctx_id, long min_nr,
+ long nr, long events,
+ long timeout);
+void __sanitizer_syscall_post_impl_io_getevents(long res, long ctx_id,
+ long min_nr, long nr,
+ long events, long timeout);
+void __sanitizer_syscall_pre_impl_io_submit(long ctx_id, long arg1, long arg2);
+void __sanitizer_syscall_post_impl_io_submit(long res, long ctx_id, long arg1,
+ long arg2);
+void __sanitizer_syscall_pre_impl_io_cancel(long ctx_id, long iocb,
+ long result);
+void __sanitizer_syscall_post_impl_io_cancel(long res, long ctx_id, long iocb,
+ long result);
+void __sanitizer_syscall_pre_impl_sendfile(long out_fd, long in_fd, long offset,
+ long count);
+void __sanitizer_syscall_post_impl_sendfile(long res, long out_fd, long in_fd,
+ long offset, long count);
+void __sanitizer_syscall_pre_impl_sendfile64(long out_fd, long in_fd,
+ long offset, long count);
+void __sanitizer_syscall_post_impl_sendfile64(long res, long out_fd, long in_fd,
+ long offset, long count);
+void __sanitizer_syscall_pre_impl_readlink(long path, long buf, long bufsiz);
+void __sanitizer_syscall_post_impl_readlink(long res, long path, long buf,
+ long bufsiz);
+void __sanitizer_syscall_pre_impl_creat(long pathname, long mode);
+void __sanitizer_syscall_post_impl_creat(long res, long pathname, long mode);
+void __sanitizer_syscall_pre_impl_open(long filename, long flags, long mode);
+void __sanitizer_syscall_post_impl_open(long res, long filename, long flags,
+ long mode);
+void __sanitizer_syscall_pre_impl_close(long fd);
+void __sanitizer_syscall_post_impl_close(long res, long fd);
+void __sanitizer_syscall_pre_impl_access(long filename, long mode);
+void __sanitizer_syscall_post_impl_access(long res, long filename, long mode);
+void __sanitizer_syscall_pre_impl_vhangup();
+void __sanitizer_syscall_post_impl_vhangup(long res);
+void __sanitizer_syscall_pre_impl_chown(long filename, long user, long group);
+void __sanitizer_syscall_post_impl_chown(long res, long filename, long user,
+ long group);
+void __sanitizer_syscall_pre_impl_lchown(long filename, long user, long group);
+void __sanitizer_syscall_post_impl_lchown(long res, long filename, long user,
+ long group);
+void __sanitizer_syscall_pre_impl_fchown(long fd, long user, long group);
+void __sanitizer_syscall_post_impl_fchown(long res, long fd, long user,
+ long group);
+void __sanitizer_syscall_pre_impl_chown16(long filename, long user, long group);
+void __sanitizer_syscall_post_impl_chown16(long res, long filename, long user,
+ long group);
+void __sanitizer_syscall_pre_impl_lchown16(long filename, long user,
+ long group);
+void __sanitizer_syscall_post_impl_lchown16(long res, long filename, long user,
+ long group);
+void __sanitizer_syscall_pre_impl_fchown16(long fd, long user, long group);
+void __sanitizer_syscall_post_impl_fchown16(long res, long fd, long user,
+ long group);
+void __sanitizer_syscall_pre_impl_setregid16(long rgid, long egid);
+void __sanitizer_syscall_post_impl_setregid16(long res, long rgid, long egid);
+void __sanitizer_syscall_pre_impl_setgid16(long gid);
+void __sanitizer_syscall_post_impl_setgid16(long res, long gid);
+void __sanitizer_syscall_pre_impl_setreuid16(long ruid, long euid);
+void __sanitizer_syscall_post_impl_setreuid16(long res, long ruid, long euid);
+void __sanitizer_syscall_pre_impl_setuid16(long uid);
+void __sanitizer_syscall_post_impl_setuid16(long res, long uid);
+void __sanitizer_syscall_pre_impl_setresuid16(long ruid, long euid, long suid);
+void __sanitizer_syscall_post_impl_setresuid16(long res, long ruid, long euid,
+ long suid);
+void __sanitizer_syscall_pre_impl_getresuid16(long ruid, long euid, long suid);
+void __sanitizer_syscall_post_impl_getresuid16(long res, long ruid, long euid,
+ long suid);
+void __sanitizer_syscall_pre_impl_setresgid16(long rgid, long egid, long sgid);
+void __sanitizer_syscall_post_impl_setresgid16(long res, long rgid, long egid,
+ long sgid);
+void __sanitizer_syscall_pre_impl_getresgid16(long rgid, long egid, long sgid);
+void __sanitizer_syscall_post_impl_getresgid16(long res, long rgid, long egid,
+ long sgid);
+void __sanitizer_syscall_pre_impl_setfsuid16(long uid);
+void __sanitizer_syscall_post_impl_setfsuid16(long res, long uid);
+void __sanitizer_syscall_pre_impl_setfsgid16(long gid);
+void __sanitizer_syscall_post_impl_setfsgid16(long res, long gid);
+void __sanitizer_syscall_pre_impl_getgroups16(long gidsetsize, long grouplist);
+void __sanitizer_syscall_post_impl_getgroups16(long res, long gidsetsize,
+ long grouplist);
+void __sanitizer_syscall_pre_impl_setgroups16(long gidsetsize, long grouplist);
+void __sanitizer_syscall_post_impl_setgroups16(long res, long gidsetsize,
+ long grouplist);
+void __sanitizer_syscall_pre_impl_getuid16();
+void __sanitizer_syscall_post_impl_getuid16(long res);
+void __sanitizer_syscall_pre_impl_geteuid16();
+void __sanitizer_syscall_post_impl_geteuid16(long res);
+void __sanitizer_syscall_pre_impl_getgid16();
+void __sanitizer_syscall_post_impl_getgid16(long res);
+void __sanitizer_syscall_pre_impl_getegid16();
+void __sanitizer_syscall_post_impl_getegid16(long res);
+void __sanitizer_syscall_pre_impl_utime(long filename, long times);
+void __sanitizer_syscall_post_impl_utime(long res, long filename, long times);
+void __sanitizer_syscall_pre_impl_utimes(long filename, long utimes);
+void __sanitizer_syscall_post_impl_utimes(long res, long filename, long utimes);
+void __sanitizer_syscall_pre_impl_lseek(long fd, long offset, long origin);
+void __sanitizer_syscall_post_impl_lseek(long res, long fd, long offset,
+ long origin);
+void __sanitizer_syscall_pre_impl_llseek(long fd, long offset_high,
+ long offset_low, long result,
+ long origin);
+void __sanitizer_syscall_post_impl_llseek(long res, long fd, long offset_high,
+ long offset_low, long result,
+ long origin);
+void __sanitizer_syscall_pre_impl_read(long fd, long buf, long count);
+void __sanitizer_syscall_post_impl_read(long res, long fd, long buf,
+ long count);
+void __sanitizer_syscall_pre_impl_readv(long fd, long vec, long vlen);
+void __sanitizer_syscall_post_impl_readv(long res, long fd, long vec,
+ long vlen);
+void __sanitizer_syscall_pre_impl_write(long fd, long buf, long count);
+void __sanitizer_syscall_post_impl_write(long res, long fd, long buf,
+ long count);
+void __sanitizer_syscall_pre_impl_writev(long fd, long vec, long vlen);
+void __sanitizer_syscall_post_impl_writev(long res, long fd, long vec,
+ long vlen);
+
+#ifdef _LP64
+void __sanitizer_syscall_pre_impl_pread64(long fd, long buf, long count,
+ long pos);
+void __sanitizer_syscall_post_impl_pread64(long res, long fd, long buf,
+ long count, long pos);
+void __sanitizer_syscall_pre_impl_pwrite64(long fd, long buf, long count,
+ long pos);
+void __sanitizer_syscall_post_impl_pwrite64(long res, long fd, long buf,
+ long count, long pos);
+#else
+void __sanitizer_syscall_pre_impl_pread64(long fd, long buf, long count,
+ long pos0, long pos1);
+void __sanitizer_syscall_post_impl_pread64(long res, long fd, long buf,
+ long count, long pos0, long pos1);
+void __sanitizer_syscall_pre_impl_pwrite64(long fd, long buf, long count,
+ long pos0, long pos1);
+void __sanitizer_syscall_post_impl_pwrite64(long res, long fd, long buf,
+ long count, long pos0, long pos1);
+#endif
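+
+// Note: without _LP64 the 64-bit file position is split across two long
+// arguments (pos0/pos1); which half comes first follows the 32-bit syscall
+// ABI of the target. A hedged sketch, assuming the low half is passed first:
+//
+//   __sanitizer_syscall_pre_pread64(fd, buf, count,
+//                                   (long)(pos & 0xffffffffu),  // low half
+//                                   (long)(pos >> 32));         // high half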
+
+void __sanitizer_syscall_pre_impl_preadv(long fd, long vec, long vlen,
+ long pos_l, long pos_h);
+void __sanitizer_syscall_post_impl_preadv(long res, long fd, long vec,
+ long vlen, long pos_l, long pos_h);
+void __sanitizer_syscall_pre_impl_pwritev(long fd, long vec, long vlen,
+ long pos_l, long pos_h);
+void __sanitizer_syscall_post_impl_pwritev(long res, long fd, long vec,
+ long vlen, long pos_l, long pos_h);
+void __sanitizer_syscall_pre_impl_getcwd(long buf, long size);
+void __sanitizer_syscall_post_impl_getcwd(long res, long buf, long size);
+void __sanitizer_syscall_pre_impl_mkdir(long pathname, long mode);
+void __sanitizer_syscall_post_impl_mkdir(long res, long pathname, long mode);
+void __sanitizer_syscall_pre_impl_chdir(long filename);
+void __sanitizer_syscall_post_impl_chdir(long res, long filename);
+void __sanitizer_syscall_pre_impl_fchdir(long fd);
+void __sanitizer_syscall_post_impl_fchdir(long res, long fd);
+void __sanitizer_syscall_pre_impl_rmdir(long pathname);
+void __sanitizer_syscall_post_impl_rmdir(long res, long pathname);
+void __sanitizer_syscall_pre_impl_lookup_dcookie(long cookie64, long buf,
+ long len);
+void __sanitizer_syscall_post_impl_lookup_dcookie(long res, long cookie64,
+ long buf, long len);
+void __sanitizer_syscall_pre_impl_quotactl(long cmd, long special, long id,
+ long addr);
+void __sanitizer_syscall_post_impl_quotactl(long res, long cmd, long special,
+ long id, long addr);
+void __sanitizer_syscall_pre_impl_getdents(long fd, long dirent, long count);
+void __sanitizer_syscall_post_impl_getdents(long res, long fd, long dirent,
+ long count);
+void __sanitizer_syscall_pre_impl_getdents64(long fd, long dirent, long count);
+void __sanitizer_syscall_post_impl_getdents64(long res, long fd, long dirent,
+ long count);
+void __sanitizer_syscall_pre_impl_setsockopt(long fd, long level, long optname,
+ long optval, long optlen);
+void __sanitizer_syscall_post_impl_setsockopt(long res, long fd, long level,
+ long optname, long optval,
+ long optlen);
+void __sanitizer_syscall_pre_impl_getsockopt(long fd, long level, long optname,
+ long optval, long optlen);
+void __sanitizer_syscall_post_impl_getsockopt(long res, long fd, long level,
+ long optname, long optval,
+ long optlen);
+void __sanitizer_syscall_pre_impl_bind(long arg0, long arg1, long arg2);
+void __sanitizer_syscall_post_impl_bind(long res, long arg0, long arg1,
+ long arg2);
+void __sanitizer_syscall_pre_impl_connect(long arg0, long arg1, long arg2);
+void __sanitizer_syscall_post_impl_connect(long res, long arg0, long arg1,
+ long arg2);
+void __sanitizer_syscall_pre_impl_accept(long arg0, long arg1, long arg2);
+void __sanitizer_syscall_post_impl_accept(long res, long arg0, long arg1,
+ long arg2);
+void __sanitizer_syscall_pre_impl_accept4(long arg0, long arg1, long arg2,
+ long arg3);
+void __sanitizer_syscall_post_impl_accept4(long res, long arg0, long arg1,
+ long arg2, long arg3);
+void __sanitizer_syscall_pre_impl_getsockname(long arg0, long arg1, long arg2);
+void __sanitizer_syscall_post_impl_getsockname(long res, long arg0, long arg1,
+ long arg2);
+void __sanitizer_syscall_pre_impl_getpeername(long arg0, long arg1, long arg2);
+void __sanitizer_syscall_post_impl_getpeername(long res, long arg0, long arg1,
+ long arg2);
+void __sanitizer_syscall_pre_impl_send(long arg0, long arg1, long arg2,
+ long arg3);
+void __sanitizer_syscall_post_impl_send(long res, long arg0, long arg1,
+ long arg2, long arg3);
+void __sanitizer_syscall_pre_impl_sendto(long arg0, long arg1, long arg2,
+ long arg3, long arg4, long arg5);
+void __sanitizer_syscall_post_impl_sendto(long res, long arg0, long arg1,
+ long arg2, long arg3, long arg4,
+ long arg5);
+void __sanitizer_syscall_pre_impl_sendmsg(long fd, long msg, long flags);
+void __sanitizer_syscall_post_impl_sendmsg(long res, long fd, long msg,
+ long flags);
+void __sanitizer_syscall_pre_impl_sendmmsg(long fd, long msg, long vlen,
+ long flags);
+void __sanitizer_syscall_post_impl_sendmmsg(long res, long fd, long msg,
+ long vlen, long flags);
+void __sanitizer_syscall_pre_impl_recv(long arg0, long arg1, long arg2,
+ long arg3);
+void __sanitizer_syscall_post_impl_recv(long res, long arg0, long arg1,
+ long arg2, long arg3);
+void __sanitizer_syscall_pre_impl_recvfrom(long arg0, long arg1, long arg2,
+ long arg3, long arg4, long arg5);
+void __sanitizer_syscall_post_impl_recvfrom(long res, long arg0, long arg1,
+ long arg2, long arg3, long arg4,
+ long arg5);
+void __sanitizer_syscall_pre_impl_recvmsg(long fd, long msg, long flags);
+void __sanitizer_syscall_post_impl_recvmsg(long res, long fd, long msg,
+ long flags);
+void __sanitizer_syscall_pre_impl_recvmmsg(long fd, long msg, long vlen,
+ long flags, long timeout);
+void __sanitizer_syscall_post_impl_recvmmsg(long res, long fd, long msg,
+ long vlen, long flags,
+ long timeout);
+void __sanitizer_syscall_pre_impl_socket(long arg0, long arg1, long arg2);
+void __sanitizer_syscall_post_impl_socket(long res, long arg0, long arg1,
+ long arg2);
+void __sanitizer_syscall_pre_impl_socketpair(long arg0, long arg1, long arg2,
+ long arg3);
+void __sanitizer_syscall_post_impl_socketpair(long res, long arg0, long arg1,
+ long arg2, long arg3);
+void __sanitizer_syscall_pre_impl_socketcall(long call, long args);
+void __sanitizer_syscall_post_impl_socketcall(long res, long call, long args);
+void __sanitizer_syscall_pre_impl_listen(long arg0, long arg1);
+void __sanitizer_syscall_post_impl_listen(long res, long arg0, long arg1);
+void __sanitizer_syscall_pre_impl_poll(long ufds, long nfds, long timeout);
+void __sanitizer_syscall_post_impl_poll(long res, long ufds, long nfds,
+ long timeout);
+void __sanitizer_syscall_pre_impl_select(long n, long inp, long outp, long exp,
+ long tvp);
+void __sanitizer_syscall_post_impl_select(long res, long n, long inp, long outp,
+ long exp, long tvp);
+void __sanitizer_syscall_pre_impl_old_select(long arg);
+void __sanitizer_syscall_post_impl_old_select(long res, long arg);
+void __sanitizer_syscall_pre_impl_epoll_create(long size);
+void __sanitizer_syscall_post_impl_epoll_create(long res, long size);
+void __sanitizer_syscall_pre_impl_epoll_create1(long flags);
+void __sanitizer_syscall_post_impl_epoll_create1(long res, long flags);
+void __sanitizer_syscall_pre_impl_epoll_ctl(long epfd, long op, long fd,
+ long event);
+void __sanitizer_syscall_post_impl_epoll_ctl(long res, long epfd, long op,
+ long fd, long event);
+void __sanitizer_syscall_pre_impl_epoll_wait(long epfd, long events,
+ long maxevents, long timeout);
+void __sanitizer_syscall_post_impl_epoll_wait(long res, long epfd, long events,
+ long maxevents, long timeout);
+void __sanitizer_syscall_pre_impl_epoll_pwait(long epfd, long events,
+ long maxevents, long timeout,
+ long sigmask, long sigsetsize);
+void __sanitizer_syscall_post_impl_epoll_pwait(long res, long epfd, long events,
+ long maxevents, long timeout,
+ long sigmask, long sigsetsize);
+void __sanitizer_syscall_pre_impl_gethostname(long name, long len);
+void __sanitizer_syscall_post_impl_gethostname(long res, long name, long len);
+void __sanitizer_syscall_pre_impl_sethostname(long name, long len);
+void __sanitizer_syscall_post_impl_sethostname(long res, long name, long len);
+void __sanitizer_syscall_pre_impl_setdomainname(long name, long len);
+void __sanitizer_syscall_post_impl_setdomainname(long res, long name, long len);
+void __sanitizer_syscall_pre_impl_newuname(long name);
+void __sanitizer_syscall_post_impl_newuname(long res, long name);
+void __sanitizer_syscall_pre_impl_uname(long arg0);
+void __sanitizer_syscall_post_impl_uname(long res, long arg0);
+void __sanitizer_syscall_pre_impl_olduname(long arg0);
+void __sanitizer_syscall_post_impl_olduname(long res, long arg0);
+void __sanitizer_syscall_pre_impl_getrlimit(long resource, long rlim);
+void __sanitizer_syscall_post_impl_getrlimit(long res, long resource,
+ long rlim);
+void __sanitizer_syscall_pre_impl_old_getrlimit(long resource, long rlim);
+void __sanitizer_syscall_post_impl_old_getrlimit(long res, long resource,
+ long rlim);
+void __sanitizer_syscall_pre_impl_setrlimit(long resource, long rlim);
+void __sanitizer_syscall_post_impl_setrlimit(long res, long resource,
+ long rlim);
+void __sanitizer_syscall_pre_impl_prlimit64(long pid, long resource,
+ long new_rlim, long old_rlim);
+void __sanitizer_syscall_post_impl_prlimit64(long res, long pid, long resource,
+ long new_rlim, long old_rlim);
+void __sanitizer_syscall_pre_impl_getrusage(long who, long ru);
+void __sanitizer_syscall_post_impl_getrusage(long res, long who, long ru);
+void __sanitizer_syscall_pre_impl_umask(long mask);
+void __sanitizer_syscall_post_impl_umask(long res, long mask);
+void __sanitizer_syscall_pre_impl_msgget(long key, long msgflg);
+void __sanitizer_syscall_post_impl_msgget(long res, long key, long msgflg);
+void __sanitizer_syscall_pre_impl_msgsnd(long msqid, long msgp, long msgsz,
+ long msgflg);
+void __sanitizer_syscall_post_impl_msgsnd(long res, long msqid, long msgp,
+ long msgsz, long msgflg);
+void __sanitizer_syscall_pre_impl_msgrcv(long msqid, long msgp, long msgsz,
+ long msgtyp, long msgflg);
+void __sanitizer_syscall_post_impl_msgrcv(long res, long msqid, long msgp,
+ long msgsz, long msgtyp, long msgflg);
+void __sanitizer_syscall_pre_impl_msgctl(long msqid, long cmd, long buf);
+void __sanitizer_syscall_post_impl_msgctl(long res, long msqid, long cmd,
+ long buf);
+void __sanitizer_syscall_pre_impl_semget(long key, long nsems, long semflg);
+void __sanitizer_syscall_post_impl_semget(long res, long key, long nsems,
+ long semflg);
+void __sanitizer_syscall_pre_impl_semop(long semid, long sops, long nsops);
+void __sanitizer_syscall_post_impl_semop(long res, long semid, long sops,
+ long nsops);
+void __sanitizer_syscall_pre_impl_semctl(long semid, long semnum, long cmd,
+ long arg);
+void __sanitizer_syscall_post_impl_semctl(long res, long semid, long semnum,
+ long cmd, long arg);
+void __sanitizer_syscall_pre_impl_semtimedop(long semid, long sops, long nsops,
+ long timeout);
+void __sanitizer_syscall_post_impl_semtimedop(long res, long semid, long sops,
+ long nsops, long timeout);
+void __sanitizer_syscall_pre_impl_shmat(long shmid, long shmaddr, long shmflg);
+void __sanitizer_syscall_post_impl_shmat(long res, long shmid, long shmaddr,
+ long shmflg);
+void __sanitizer_syscall_pre_impl_shmget(long key, long size, long flag);
+void __sanitizer_syscall_post_impl_shmget(long res, long key, long size,
+ long flag);
+void __sanitizer_syscall_pre_impl_shmdt(long shmaddr);
+void __sanitizer_syscall_post_impl_shmdt(long res, long shmaddr);
+void __sanitizer_syscall_pre_impl_shmctl(long shmid, long cmd, long buf);
+void __sanitizer_syscall_post_impl_shmctl(long res, long shmid, long cmd,
+ long buf);
+void __sanitizer_syscall_pre_impl_ipc(long call, long first, long second,
+ long third, long ptr, long fifth);
+void __sanitizer_syscall_post_impl_ipc(long res, long call, long first,
+ long second, long third, long ptr,
+ long fifth);
+void __sanitizer_syscall_pre_impl_mq_open(long name, long oflag, long mode,
+ long attr);
+void __sanitizer_syscall_post_impl_mq_open(long res, long name, long oflag,
+ long mode, long attr);
+void __sanitizer_syscall_pre_impl_mq_unlink(long name);
+void __sanitizer_syscall_post_impl_mq_unlink(long res, long name);
+void __sanitizer_syscall_pre_impl_mq_timedsend(long mqdes, long msg_ptr,
+ long msg_len, long msg_prio,
+ long abs_timeout);
+void __sanitizer_syscall_post_impl_mq_timedsend(long res, long mqdes,
+ long msg_ptr, long msg_len,
+ long msg_prio,
+ long abs_timeout);
+void __sanitizer_syscall_pre_impl_mq_timedreceive(long mqdes, long msg_ptr,
+ long msg_len, long msg_prio,
+ long abs_timeout);
+void __sanitizer_syscall_post_impl_mq_timedreceive(long res, long mqdes,
+ long msg_ptr, long msg_len,
+ long msg_prio,
+ long abs_timeout);
+void __sanitizer_syscall_pre_impl_mq_notify(long mqdes, long notification);
+void __sanitizer_syscall_post_impl_mq_notify(long res, long mqdes,
+ long notification);
+void __sanitizer_syscall_pre_impl_mq_getsetattr(long mqdes, long mqstat,
+ long omqstat);
+void __sanitizer_syscall_post_impl_mq_getsetattr(long res, long mqdes,
+ long mqstat, long omqstat);
+void __sanitizer_syscall_pre_impl_pciconfig_iobase(long which, long bus,
+ long devfn);
+void __sanitizer_syscall_post_impl_pciconfig_iobase(long res, long which,
+ long bus, long devfn);
+void __sanitizer_syscall_pre_impl_pciconfig_read(long bus, long dfn, long off,
+ long len, long buf);
+void __sanitizer_syscall_post_impl_pciconfig_read(long res, long bus, long dfn,
+ long off, long len, long buf);
+void __sanitizer_syscall_pre_impl_pciconfig_write(long bus, long dfn, long off,
+ long len, long buf);
+void __sanitizer_syscall_post_impl_pciconfig_write(long res, long bus, long dfn,
+ long off, long len,
+ long buf);
+void __sanitizer_syscall_pre_impl_swapon(long specialfile, long swap_flags);
+void __sanitizer_syscall_post_impl_swapon(long res, long specialfile,
+ long swap_flags);
+void __sanitizer_syscall_pre_impl_swapoff(long specialfile);
+void __sanitizer_syscall_post_impl_swapoff(long res, long specialfile);
+void __sanitizer_syscall_pre_impl_sysctl(long args);
+void __sanitizer_syscall_post_impl_sysctl(long res, long args);
+void __sanitizer_syscall_pre_impl_sysinfo(long info);
+void __sanitizer_syscall_post_impl_sysinfo(long res, long info);
+void __sanitizer_syscall_pre_impl_sysfs(long option, long arg1, long arg2);
+void __sanitizer_syscall_post_impl_sysfs(long res, long option, long arg1,
+ long arg2);
+void __sanitizer_syscall_pre_impl_syslog(long type, long buf, long len);
+void __sanitizer_syscall_post_impl_syslog(long res, long type, long buf,
+ long len);
+void __sanitizer_syscall_pre_impl_uselib(long library);
+void __sanitizer_syscall_post_impl_uselib(long res, long library);
+void __sanitizer_syscall_pre_impl_ni_syscall();
+void __sanitizer_syscall_post_impl_ni_syscall(long res);
+void __sanitizer_syscall_pre_impl_ptrace(long request, long pid, long addr,
+ long data);
+void __sanitizer_syscall_post_impl_ptrace(long res, long request, long pid,
+ long addr, long data);
+void __sanitizer_syscall_pre_impl_add_key(long _type, long _description,
+ long _payload, long plen,
+ long destringid);
+void __sanitizer_syscall_post_impl_add_key(long res, long _type,
+ long _description, long _payload,
+ long plen, long destringid);
+void __sanitizer_syscall_pre_impl_request_key(long _type, long _description,
+ long _callout_info,
+ long destringid);
+void __sanitizer_syscall_post_impl_request_key(long res, long _type,
+ long _description,
+ long _callout_info,
+ long destringid);
+void __sanitizer_syscall_pre_impl_keyctl(long cmd, long arg2, long arg3,
+ long arg4, long arg5);
+void __sanitizer_syscall_post_impl_keyctl(long res, long cmd, long arg2,
+ long arg3, long arg4, long arg5);
+void __sanitizer_syscall_pre_impl_ioprio_set(long which, long who, long ioprio);
+void __sanitizer_syscall_post_impl_ioprio_set(long res, long which, long who,
+ long ioprio);
+void __sanitizer_syscall_pre_impl_ioprio_get(long which, long who);
+void __sanitizer_syscall_post_impl_ioprio_get(long res, long which, long who);
+void __sanitizer_syscall_pre_impl_set_mempolicy(long mode, long nmask,
+ long maxnode);
+void __sanitizer_syscall_post_impl_set_mempolicy(long res, long mode,
+ long nmask, long maxnode);
+void __sanitizer_syscall_pre_impl_migrate_pages(long pid, long maxnode,
+ long from, long to);
+void __sanitizer_syscall_post_impl_migrate_pages(long res, long pid,
+ long maxnode, long from,
+ long to);
+void __sanitizer_syscall_pre_impl_move_pages(long pid, long nr_pages,
+ long pages, long nodes,
+ long status, long flags);
+void __sanitizer_syscall_post_impl_move_pages(long res, long pid, long nr_pages,
+ long pages, long nodes,
+ long status, long flags);
+void __sanitizer_syscall_pre_impl_mbind(long start, long len, long mode,
+ long nmask, long maxnode, long flags);
+void __sanitizer_syscall_post_impl_mbind(long res, long start, long len,
+ long mode, long nmask, long maxnode,
+ long flags);
+void __sanitizer_syscall_pre_impl_get_mempolicy(long policy, long nmask,
+ long maxnode, long addr,
+ long flags);
+void __sanitizer_syscall_post_impl_get_mempolicy(long res, long policy,
+ long nmask, long maxnode,
+ long addr, long flags);
+void __sanitizer_syscall_pre_impl_inotify_init();
+void __sanitizer_syscall_post_impl_inotify_init(long res);
+void __sanitizer_syscall_pre_impl_inotify_init1(long flags);
+void __sanitizer_syscall_post_impl_inotify_init1(long res, long flags);
+void __sanitizer_syscall_pre_impl_inotify_add_watch(long fd, long path,
+ long mask);
+void __sanitizer_syscall_post_impl_inotify_add_watch(long res, long fd,
+ long path, long mask);
+void __sanitizer_syscall_pre_impl_inotify_rm_watch(long fd, long wd);
+void __sanitizer_syscall_post_impl_inotify_rm_watch(long res, long fd, long wd);
+void __sanitizer_syscall_pre_impl_spu_run(long fd, long unpc, long ustatus);
+void __sanitizer_syscall_post_impl_spu_run(long res, long fd, long unpc,
+ long ustatus);
+void __sanitizer_syscall_pre_impl_spu_create(long name, long flags, long mode,
+ long fd);
+void __sanitizer_syscall_post_impl_spu_create(long res, long name, long flags,
+ long mode, long fd);
+void __sanitizer_syscall_pre_impl_mknodat(long dfd, long filename, long mode,
+ long dev);
+void __sanitizer_syscall_post_impl_mknodat(long res, long dfd, long filename,
+ long mode, long dev);
+void __sanitizer_syscall_pre_impl_mkdirat(long dfd, long pathname, long mode);
+void __sanitizer_syscall_post_impl_mkdirat(long res, long dfd, long pathname,
+ long mode);
+void __sanitizer_syscall_pre_impl_unlinkat(long dfd, long pathname, long flag);
+void __sanitizer_syscall_post_impl_unlinkat(long res, long dfd, long pathname,
+ long flag);
+void __sanitizer_syscall_pre_impl_symlinkat(long oldname, long newdfd,
+ long newname);
+void __sanitizer_syscall_post_impl_symlinkat(long res, long oldname,
+ long newdfd, long newname);
+void __sanitizer_syscall_pre_impl_linkat(long olddfd, long oldname, long newdfd,
+ long newname, long flags);
+void __sanitizer_syscall_post_impl_linkat(long res, long olddfd, long oldname,
+ long newdfd, long newname,
+ long flags);
+void __sanitizer_syscall_pre_impl_renameat(long olddfd, long oldname,
+ long newdfd, long newname);
+void __sanitizer_syscall_post_impl_renameat(long res, long olddfd, long oldname,
+ long newdfd, long newname);
+void __sanitizer_syscall_pre_impl_futimesat(long dfd, long filename,
+ long utimes);
+void __sanitizer_syscall_post_impl_futimesat(long res, long dfd, long filename,
+ long utimes);
+void __sanitizer_syscall_pre_impl_faccessat(long dfd, long filename, long mode);
+void __sanitizer_syscall_post_impl_faccessat(long res, long dfd, long filename,
+ long mode);
+void __sanitizer_syscall_pre_impl_fchmodat(long dfd, long filename, long mode);
+void __sanitizer_syscall_post_impl_fchmodat(long res, long dfd, long filename,
+ long mode);
+void __sanitizer_syscall_pre_impl_fchownat(long dfd, long filename, long user,
+ long group, long flag);
+void __sanitizer_syscall_post_impl_fchownat(long res, long dfd, long filename,
+ long user, long group, long flag);
+void __sanitizer_syscall_pre_impl_openat(long dfd, long filename, long flags,
+ long mode);
+void __sanitizer_syscall_post_impl_openat(long res, long dfd, long filename,
+ long flags, long mode);
+void __sanitizer_syscall_pre_impl_newfstatat(long dfd, long filename,
+ long statbuf, long flag);
+void __sanitizer_syscall_post_impl_newfstatat(long res, long dfd, long filename,
+ long statbuf, long flag);
+void __sanitizer_syscall_pre_impl_fstatat64(long dfd, long filename,
+ long statbuf, long flag);
+void __sanitizer_syscall_post_impl_fstatat64(long res, long dfd, long filename,
+ long statbuf, long flag);
+void __sanitizer_syscall_pre_impl_readlinkat(long dfd, long path, long buf,
+ long bufsiz);
+void __sanitizer_syscall_post_impl_readlinkat(long res, long dfd, long path,
+ long buf, long bufsiz);
+void __sanitizer_syscall_pre_impl_utimensat(long dfd, long filename,
+ long utimes, long flags);
+void __sanitizer_syscall_post_impl_utimensat(long res, long dfd, long filename,
+ long utimes, long flags);
+void __sanitizer_syscall_pre_impl_unshare(long unshare_flags);
+void __sanitizer_syscall_post_impl_unshare(long res, long unshare_flags);
+void __sanitizer_syscall_pre_impl_splice(long fd_in, long off_in, long fd_out,
+ long off_out, long len, long flags);
+void __sanitizer_syscall_post_impl_splice(long res, long fd_in, long off_in,
+ long fd_out, long off_out, long len,
+ long flags);
+void __sanitizer_syscall_pre_impl_vmsplice(long fd, long iov, long nr_segs,
+ long flags);
+void __sanitizer_syscall_post_impl_vmsplice(long res, long fd, long iov,
+ long nr_segs, long flags);
+void __sanitizer_syscall_pre_impl_tee(long fdin, long fdout, long len,
+ long flags);
+void __sanitizer_syscall_post_impl_tee(long res, long fdin, long fdout,
+ long len, long flags);
+void __sanitizer_syscall_pre_impl_get_robust_list(long pid, long head_ptr,
+ long len_ptr);
+void __sanitizer_syscall_post_impl_get_robust_list(long res, long pid,
+ long head_ptr, long len_ptr);
+void __sanitizer_syscall_pre_impl_set_robust_list(long head, long len);
+void __sanitizer_syscall_post_impl_set_robust_list(long res, long head,
+ long len);
+void __sanitizer_syscall_pre_impl_getcpu(long cpu, long node, long cache);
+void __sanitizer_syscall_post_impl_getcpu(long res, long cpu, long node,
+ long cache);
+void __sanitizer_syscall_pre_impl_signalfd(long ufd, long user_mask,
+ long sizemask);
+void __sanitizer_syscall_post_impl_signalfd(long res, long ufd, long user_mask,
+ long sizemask);
+void __sanitizer_syscall_pre_impl_signalfd4(long ufd, long user_mask,
+ long sizemask, long flags);
+void __sanitizer_syscall_post_impl_signalfd4(long res, long ufd, long user_mask,
+ long sizemask, long flags);
+void __sanitizer_syscall_pre_impl_timerfd_create(long clockid, long flags);
+void __sanitizer_syscall_post_impl_timerfd_create(long res, long clockid,
+ long flags);
+void __sanitizer_syscall_pre_impl_timerfd_settime(long ufd, long flags,
+ long utmr, long otmr);
+void __sanitizer_syscall_post_impl_timerfd_settime(long res, long ufd,
+ long flags, long utmr,
+ long otmr);
+void __sanitizer_syscall_pre_impl_timerfd_gettime(long ufd, long otmr);
+void __sanitizer_syscall_post_impl_timerfd_gettime(long res, long ufd,
+ long otmr);
+void __sanitizer_syscall_pre_impl_eventfd(long count);
+void __sanitizer_syscall_post_impl_eventfd(long res, long count);
+void __sanitizer_syscall_pre_impl_eventfd2(long count, long flags);
+void __sanitizer_syscall_post_impl_eventfd2(long res, long count, long flags);
+void __sanitizer_syscall_pre_impl_old_readdir(long arg0, long arg1, long arg2);
+void __sanitizer_syscall_post_impl_old_readdir(long res, long arg0, long arg1,
+ long arg2);
+void __sanitizer_syscall_pre_impl_pselect6(long arg0, long arg1, long arg2,
+ long arg3, long arg4, long arg5);
+void __sanitizer_syscall_post_impl_pselect6(long res, long arg0, long arg1,
+ long arg2, long arg3, long arg4,
+ long arg5);
+void __sanitizer_syscall_pre_impl_ppoll(long arg0, long arg1, long arg2,
+ long arg3, long arg4);
+void __sanitizer_syscall_post_impl_ppoll(long res, long arg0, long arg1,
+ long arg2, long arg3, long arg4);
+void __sanitizer_syscall_pre_impl_fanotify_init(long flags, long event_f_flags);
+void __sanitizer_syscall_post_impl_fanotify_init(long res, long flags,
+ long event_f_flags);
+void __sanitizer_syscall_pre_impl_fanotify_mark(long fanotify_fd, long flags,
+ long mask, long fd,
+ long pathname);
+void __sanitizer_syscall_post_impl_fanotify_mark(long res, long fanotify_fd,
+ long flags, long mask, long fd,
+ long pathname);
+void __sanitizer_syscall_pre_impl_syncfs(long fd);
+void __sanitizer_syscall_post_impl_syncfs(long res, long fd);
+void __sanitizer_syscall_pre_impl_perf_event_open(long attr_uptr, long pid,
+ long cpu, long group_fd,
+ long flags);
+void __sanitizer_syscall_post_impl_perf_event_open(long res, long attr_uptr,
+ long pid, long cpu,
+ long group_fd, long flags);
+void __sanitizer_syscall_pre_impl_mmap_pgoff(long addr, long len, long prot,
+ long flags, long fd, long pgoff);
+void __sanitizer_syscall_post_impl_mmap_pgoff(long res, long addr, long len,
+ long prot, long flags, long fd,
+ long pgoff);
+void __sanitizer_syscall_pre_impl_old_mmap(long arg);
+void __sanitizer_syscall_post_impl_old_mmap(long res, long arg);
+void __sanitizer_syscall_pre_impl_name_to_handle_at(long dfd, long name,
+ long handle, long mnt_id,
+ long flag);
+void __sanitizer_syscall_post_impl_name_to_handle_at(long res, long dfd,
+ long name, long handle,
+ long mnt_id, long flag);
+void __sanitizer_syscall_pre_impl_open_by_handle_at(long mountdirfd,
+ long handle, long flags);
+void __sanitizer_syscall_post_impl_open_by_handle_at(long res, long mountdirfd,
+ long handle, long flags);
+void __sanitizer_syscall_pre_impl_setns(long fd, long nstype);
+void __sanitizer_syscall_post_impl_setns(long res, long fd, long nstype);
+void __sanitizer_syscall_pre_impl_process_vm_readv(long pid, long lvec,
+ long liovcnt, long rvec,
+ long riovcnt, long flags);
+void __sanitizer_syscall_post_impl_process_vm_readv(long res, long pid,
+ long lvec, long liovcnt,
+ long rvec, long riovcnt,
+ long flags);
+void __sanitizer_syscall_pre_impl_process_vm_writev(long pid, long lvec,
+ long liovcnt, long rvec,
+ long riovcnt, long flags);
+void __sanitizer_syscall_post_impl_process_vm_writev(long res, long pid,
+ long lvec, long liovcnt,
+ long rvec, long riovcnt,
+ long flags);
+void __sanitizer_syscall_pre_impl_fork();
+void __sanitizer_syscall_post_impl_fork(long res);
+void __sanitizer_syscall_pre_impl_vfork();
+void __sanitizer_syscall_post_impl_vfork(long res);
+void __sanitizer_syscall_pre_impl_sigaction(long signum, long act, long oldact);
+void __sanitizer_syscall_post_impl_sigaction(long res, long signum, long act,
+ long oldact);
+void __sanitizer_syscall_pre_impl_rt_sigaction(long signum, long act,
+ long oldact, long sz);
+void __sanitizer_syscall_post_impl_rt_sigaction(long res, long signum, long act,
+ long oldact, long sz);
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // SANITIZER_LINUX_SYSCALL_HOOKS_H
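A minimal usage sketch (editorial note, not part of the vendor file): the _impl declarations above are normally reached through the __sanitizer_syscall_pre_<name>/__sanitizer_syscall_post_<name> wrapper macros that the earlier part of this header defines, which cast each argument to long and bracket the raw syscall(2). Assuming Linux and the readlinkat hooks declared above:

    #include <sanitizer/linux_syscall_hooks.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    // Bracket the raw syscall so the active sanitizer can check `path` and
    // mark `buf` initialized on success.
    static long checked_readlinkat(int dfd, const char *path, char *buf,
                                   unsigned long bufsiz) {
      __sanitizer_syscall_pre_readlinkat(dfd, path, buf, bufsiz);
      long res = syscall(SYS_readlinkat, dfd, path, buf, bufsiz);
      __sanitizer_syscall_post_readlinkat(res, dfd, path, buf, bufsiz);
      return res;
    }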
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/include/sanitizer/lsan_interface.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/include/sanitizer/lsan_interface.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/include/sanitizer/lsan_interface.h (revision 351984)
@@ -0,0 +1,89 @@
+//===-- sanitizer/lsan_interface.h ------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of LeakSanitizer.
+//
+// Public interface header.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_LSAN_INTERFACE_H
+#define SANITIZER_LSAN_INTERFACE_H
+
+#include <sanitizer/common_interface_defs.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+ // Allocations made between calls to __lsan_disable() and __lsan_enable() will
+ // be treated as non-leaks. Disable/enable pairs may be nested.
+ void __lsan_disable(void);
+ void __lsan_enable(void);
+
+ // The heap object into which p points will be treated as a non-leak.
+ void __lsan_ignore_object(const void *p);
+
+ // Memory regions registered through this interface will be treated as sources
+ // of live pointers during leak checking. Useful if you store pointers in
+ // mapped memory.
+ // Points of note:
+ // - __lsan_unregister_root_region() must be called with the same pointer and
+ // size that have earlier been passed to __lsan_register_root_region()
+ // - LSan will skip any inaccessible memory when scanning a root region. E.g.,
+ // if you map memory within a larger region that you have mprotect'ed, you can
+ // register the entire large region.
+ // - the implementation is not optimized for performance. This interface is
+ // intended to be used for a small number of relatively static regions.
+ void __lsan_register_root_region(const void *p, size_t size);
+ void __lsan_unregister_root_region(const void *p, size_t size);
+
+ // Check for leaks now. This function behaves identically to the default
+ // end-of-process leak check. In particular, it will terminate the process if
+ // leaks are found and the exitcode runtime flag is non-zero.
+ // Subsequent calls to this function will have no effect and end-of-process
+ // leak check will not run. Effectively, end-of-process leak check is moved to
+ // the time of first invocation of this function.
+ // By calling this function early during process shutdown, you can instruct
+ // LSan to ignore shutdown-only leaks which happen later on.
+ void __lsan_do_leak_check(void);
+
+ // Check for leaks now. Returns zero if no leaks have been found or if leak
+ // detection is disabled, non-zero otherwise.
+ // This function may be called repeatedly, e.g. to periodically check a
+ // long-running process. It prints a leak report if appropriate, but does not
+ // terminate the process. It does not affect the behavior of
+ // __lsan_do_leak_check() or the end-of-process leak check, and is not
+ // affected by them.
+ int __lsan_do_recoverable_leak_check(void);
+
+ // The user may optionally provide this function to disallow leak checking
+ // for the program it is linked into (if the return value is non-zero). This
+ // function must be defined as returning a constant value; any behavior beyond
+ // that is unsupported.
+ // To avoid dead stripping, you may need to define this function with
+ // __attribute__((used)).
+ int __lsan_is_turned_off(void);
+
+ // This function may be optionally provided by the user and should return
+ // a string containing LSan runtime options. See lsan_flags.inc for details.
+ const char *__lsan_default_options(void);
+
+ // This function may be optionally provided by the user and should return
+ // a string containing LSan suppressions.
+ const char *__lsan_default_suppressions(void);
+#ifdef __cplusplus
+} // extern "C"
+
+namespace __lsan {
+class ScopedDisabler {
+ public:
+ ScopedDisabler() { __lsan_disable(); }
+ ~ScopedDisabler() { __lsan_enable(); }
+};
+} // namespace __lsan
+#endif
+
+#endif // SANITIZER_LSAN_INTERFACE_H
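A short usage sketch of the interface above (editorial note; assumes the program is linked against an LSan-enabled runtime, e.g. built with -fsanitize=address, and all names below are illustrative):

    #include <sanitizer/lsan_interface.h>
    #include <cstddef>
    #include <cstdlib>

    static void *g_pool;  // hypothetical pointer arena
    static const std::size_t kPoolSize = 1 << 20;

    // Optional override; must return a constant string of LSan flags.
    extern "C" const char *__lsan_default_options() { return "verbosity=0"; }

    int main() {
      {
        __lsan::ScopedDisabler disabler;  // RAII __lsan_disable()/__lsan_enable()
        g_pool = std::malloc(kPoolSize);  // treated as a non-leak
      }
      // Treat the pool as a source of live pointers during leak checks.
      __lsan_register_root_region(g_pool, kPoolSize);
      // ... application work ...
      if (__lsan_do_recoverable_leak_check())  // reports, but never terminates
        std::abort();                          // hypothetical policy on leaks
      __lsan_unregister_root_region(g_pool, kPoolSize);
      return 0;
    }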
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/include/sanitizer/lsan_interface.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/include/sanitizer/msan_interface.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/include/sanitizer/msan_interface.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/include/sanitizer/msan_interface.h (revision 351984)
@@ -0,0 +1,121 @@
+//===-- msan_interface.h --------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of MemorySanitizer.
+//
+// Public interface header.
+//===----------------------------------------------------------------------===//
+#ifndef MSAN_INTERFACE_H
+#define MSAN_INTERFACE_H
+
+#include <sanitizer/common_interface_defs.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+ /* Set raw origin for the memory range. */
+ void __msan_set_origin(const volatile void *a, size_t size, uint32_t origin);
+
+ /* Get raw origin for an address. */
+ uint32_t __msan_get_origin(const volatile void *a);
+
+ /* Test that this_id is a descendant of prev_id (or they are simply equal).
+ * "descendant" here means they are part of the same chain, created with
+ * __msan_chain_origin. */
+ int __msan_origin_is_descendant_or_same(uint32_t this_id, uint32_t prev_id);
+
+ /* Returns non-zero if tracking origins. */
+ int __msan_get_track_origins(void);
+
+ /* Returns the origin id of the latest UMR in the calling thread. */
+ uint32_t __msan_get_umr_origin(void);
+
+ /* Make memory region fully initialized (without changing its contents). */
+ void __msan_unpoison(const volatile void *a, size_t size);
+
+ /* Make a null-terminated string fully initialized (without changing its
+ contents). */
+ void __msan_unpoison_string(const volatile char *a);
+
+ /* Make first n parameters of the next function call fully initialized. */
+ void __msan_unpoison_param(size_t n);
+
+ /* Make memory region fully uninitialized (without changing its contents).
+ This is a legacy interface that does not update origin information. Use
+ __msan_allocated_memory() instead. */
+ void __msan_poison(const volatile void *a, size_t size);
+
+ /* Make memory region partially uninitialized (without changing its contents).
+ */
+ void __msan_partial_poison(const volatile void *data, void *shadow,
+ size_t size);
+
+ /* Returns the offset of the first (at least partially) poisoned byte in the
+ memory range, or -1 if the whole range is good. */
+ intptr_t __msan_test_shadow(const volatile void *x, size_t size);
+
+ /* Checks that memory range is fully initialized, and reports an error if it
+ * is not. */
+ void __msan_check_mem_is_initialized(const volatile void *x, size_t size);
+
+ /* For testing:
+ __msan_set_expect_umr(1);
+ ... some buggy code ...
+ __msan_set_expect_umr(0);
+ The last line will verify that a UMR happened. */
+ void __msan_set_expect_umr(int expect_umr);
+
+ /* Change the value of the keep_going flag. A non-zero value means don't
+ terminate program execution when an error is detected. This does not affect
+ errors in modules that were compiled without the corresponding compiler
+ flag. */
+ void __msan_set_keep_going(int keep_going);
+
+ /* Print shadow and origin for the memory range to stderr in a human-readable
+ format. */
+ void __msan_print_shadow(const volatile void *x, size_t size);
+
+ /* Print shadow for the memory range to stderr in a minimalistic
+ human-readable format. */
+ void __msan_dump_shadow(const volatile void *x, size_t size);
+
+ /* Returns true if running under a dynamic tool (DynamoRIO-based). */
+ int __msan_has_dynamic_component(void);
+
+ /* Tell MSan about newly allocated memory (e.g., from a custom allocator).
+ Memory will be marked uninitialized, with origin at the call site. */
+ void __msan_allocated_memory(const volatile void* data, size_t size);
+
+ /* Tell MSan about newly destroyed memory. Mark memory as uninitialized. */
+ void __sanitizer_dtor_callback(const volatile void* data, size_t size);
+
+ /* This function may be optionally provided by the user and should return
+ a string containing MSan runtime options. See msan_flags.h for details. */
+ const char* __msan_default_options(void);
+
+ /* Deprecated. Call __sanitizer_set_death_callback instead. */
+ void __msan_set_death_callback(void (*callback)(void));
+
+ /* Update shadow for the application copy of size bytes from src to dst.
+ Src and dst are application addresses. This function does not copy the
+ actual application memory; it only updates shadow and origin for such a
+ copy. Source and destination regions can overlap. */
+ void __msan_copy_shadow(const volatile void *dst, const volatile void *src,
+ size_t size);
+
+ /* Disables uninitialized memory checks in interceptors. */
+ void __msan_scoped_disable_interceptor_checks(void);
+
+ /* Re-enables uninitialized memory checks in interceptors after a previous
+ call to __msan_scoped_disable_interceptor_checks. */
+ void __msan_scoped_enable_interceptor_checks(void);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif
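A brief sketch of the custom-allocator flow described above (editorial note; assumes a build with -fsanitize=memory, and the arena names are illustrative):

    #include <sanitizer/msan_interface.h>
    #include <cstdint>
    #include <cstring>

    static char g_arena[64];  // hypothetical bump arena
    static unsigned g_used;

    static void *arena_alloc(unsigned n) {
      void *p = g_arena + g_used;
      g_used += n;
      __msan_allocated_memory(p, n);  // shadow: all n bytes uninitialized
      return p;
    }

    int main() {
      char *p = static_cast<char *>(arena_alloc(16));
      std::memset(p, 0, 8);                      // initialize the first half
      intptr_t off = __msan_test_shadow(p, 16);  // -> 8, first poisoned byte
      __msan_unpoison(p + 8, 8);                 // declare the tail initialized
      __msan_check_mem_is_initialized(p, 16);    // passes; would report before
      return off == 8 ? 0 : 1;
    }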
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/include/sanitizer/netbsd_syscall_hooks.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/include/sanitizer/netbsd_syscall_hooks.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/include/sanitizer/netbsd_syscall_hooks.h (revision 351984)
@@ -0,0 +1,4731 @@
+//===-- netbsd_syscall_hooks.h --------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of the public sanitizer interface.
+//
+// System call handlers.
+//
+// Interface methods declared in this header implement pre- and post-syscall
+// actions for the active sanitizer.
+// Usage:
+// __sanitizer_syscall_pre_getfoo(...args...);
+// long long res = syscall(SYS_getfoo, ...args...);
+// __sanitizer_syscall_post_getfoo(res, ...args...);
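+// For instance (illustrative only), bracketing read(2) with the hooks that
+// this header defines below:
+//   __sanitizer_syscall_pre_read(fd, buf, nbyte);
+//   long long res = syscall(SYS_read, fd, buf, nbyte);
+//   __sanitizer_syscall_post_read(res, fd, buf, nbyte);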
+//
+// DO NOT EDIT! THIS FILE HAS BEEN GENERATED!
+//
+// Generated with: generate_netbsd_syscalls.awk
+// Generated date: 2018-10-30
+// Generated from: syscalls.master,v 1.293 2018/07/31 13:00:13 rjs Exp
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_NETBSD_SYSCALL_HOOKS_H
+#define SANITIZER_NETBSD_SYSCALL_HOOKS_H
+
+#define __sanitizer_syscall_pre_syscall(code, arg0, arg1, arg2, arg3, arg4, \
+ arg5, arg6, arg7) \
+ __sanitizer_syscall_pre_impl_syscall( \
+ (long long)(code), (long long)(arg0), (long long)(arg1), \
+ (long long)(arg2), (long long)(arg3), (long long)(arg4), \
+ (long long)(arg5), (long long)(arg6), (long long)(arg7))
+#define __sanitizer_syscall_post_syscall(res, code, arg0, arg1, arg2, arg3, \
+ arg4, arg5, arg6, arg7) \
+ __sanitizer_syscall_post_impl_syscall( \
+ res, (long long)(code), (long long)(arg0), (long long)(arg1), \
+ (long long)(arg2), (long long)(arg3), (long long)(arg4), \
+ (long long)(arg5), (long long)(arg6), (long long)(arg7))
+#define __sanitizer_syscall_pre_exit(rval) \
+ __sanitizer_syscall_pre_impl_exit((long long)(rval))
+#define __sanitizer_syscall_post_exit(res, rval) \
+ __sanitizer_syscall_post_impl_exit(res, (long long)(rval))
+#define __sanitizer_syscall_pre_fork() __sanitizer_syscall_pre_impl_fork()
+#define __sanitizer_syscall_post_fork(res) \
+ __sanitizer_syscall_post_impl_fork(res)
+#define __sanitizer_syscall_pre_read(fd, buf, nbyte) \
+ __sanitizer_syscall_pre_impl_read((long long)(fd), (long long)(buf), \
+ (long long)(nbyte))
+#define __sanitizer_syscall_post_read(res, fd, buf, nbyte) \
+ __sanitizer_syscall_post_impl_read(res, (long long)(fd), (long long)(buf), \
+ (long long)(nbyte))
+#define __sanitizer_syscall_pre_write(fd, buf, nbyte) \
+ __sanitizer_syscall_pre_impl_write((long long)(fd), (long long)(buf), \
+ (long long)(nbyte))
+#define __sanitizer_syscall_post_write(res, fd, buf, nbyte) \
+ __sanitizer_syscall_post_impl_write(res, (long long)(fd), (long long)(buf), \
+ (long long)(nbyte))
+#define __sanitizer_syscall_pre_open(path, flags, mode) \
+ __sanitizer_syscall_pre_impl_open((long long)(path), (long long)(flags), \
+ (long long)(mode))
+#define __sanitizer_syscall_post_open(res, path, flags, mode) \
+ __sanitizer_syscall_post_impl_open(res, (long long)(path), \
+ (long long)(flags), (long long)(mode))
+#define __sanitizer_syscall_pre_close(fd) \
+ __sanitizer_syscall_pre_impl_close((long long)(fd))
+#define __sanitizer_syscall_post_close(res, fd) \
+ __sanitizer_syscall_post_impl_close(res, (long long)(fd))
+#define __sanitizer_syscall_pre_compat_50_wait4(pid, status, options, rusage) \
+ __sanitizer_syscall_pre_impl_compat_50_wait4( \
+ (long long)(pid), (long long)(status), (long long)(options), \
+ (long long)(rusage))
+#define __sanitizer_syscall_post_compat_50_wait4(res, pid, status, options, \
+ rusage) \
+ __sanitizer_syscall_post_impl_compat_50_wait4( \
+ res, (long long)(pid), (long long)(status), (long long)(options), \
+ (long long)(rusage))
+#define __sanitizer_syscall_pre_compat_43_ocreat(path, mode) \
+ __sanitizer_syscall_pre_impl_compat_43_ocreat((long long)(path), \
+ (long long)(mode))
+#define __sanitizer_syscall_post_compat_43_ocreat(res, path, mode) \
+ __sanitizer_syscall_post_impl_compat_43_ocreat(res, (long long)(path), \
+ (long long)(mode))
+#define __sanitizer_syscall_pre_link(path, link) \
+ __sanitizer_syscall_pre_impl_link((long long)(path), (long long)(link))
+#define __sanitizer_syscall_post_link(res, path, link) \
+ __sanitizer_syscall_post_impl_link(res, (long long)(path), (long long)(link))
+#define __sanitizer_syscall_pre_unlink(path) \
+ __sanitizer_syscall_pre_impl_unlink((long long)(path))
+#define __sanitizer_syscall_post_unlink(res, path) \
+ __sanitizer_syscall_post_impl_unlink(res, (long long)(path))
+/* syscall 11 has been skipped */
+#define __sanitizer_syscall_pre_chdir(path) \
+ __sanitizer_syscall_pre_impl_chdir((long long)(path))
+#define __sanitizer_syscall_post_chdir(res, path) \
+ __sanitizer_syscall_post_impl_chdir(res, (long long)(path))
+#define __sanitizer_syscall_pre_fchdir(fd) \
+ __sanitizer_syscall_pre_impl_fchdir((long long)(fd))
+#define __sanitizer_syscall_post_fchdir(res, fd) \
+ __sanitizer_syscall_post_impl_fchdir(res, (long long)(fd))
+#define __sanitizer_syscall_pre_compat_50_mknod(path, mode, dev) \
+ __sanitizer_syscall_pre_impl_compat_50_mknod( \
+ (long long)(path), (long long)(mode), (long long)(dev))
+#define __sanitizer_syscall_post_compat_50_mknod(res, path, mode, dev) \
+ __sanitizer_syscall_post_impl_compat_50_mknod( \
+ res, (long long)(path), (long long)(mode), (long long)(dev))
+#define __sanitizer_syscall_pre_chmod(path, mode) \
+ __sanitizer_syscall_pre_impl_chmod((long long)(path), (long long)(mode))
+#define __sanitizer_syscall_post_chmod(res, path, mode) \
+ __sanitizer_syscall_post_impl_chmod(res, (long long)(path), (long long)(mode))
+#define __sanitizer_syscall_pre_chown(path, uid, gid) \
+ __sanitizer_syscall_pre_impl_chown((long long)(path), (long long)(uid), \
+ (long long)(gid))
+#define __sanitizer_syscall_post_chown(res, path, uid, gid) \
+ __sanitizer_syscall_post_impl_chown(res, (long long)(path), \
+ (long long)(uid), (long long)(gid))
+#define __sanitizer_syscall_pre_break(nsize) \
+ __sanitizer_syscall_pre_impl_break((long long)(nsize))
+#define __sanitizer_syscall_post_break(res, nsize) \
+ __sanitizer_syscall_post_impl_break(res, (long long)(nsize))
+#define __sanitizer_syscall_pre_compat_20_getfsstat(buf, bufsize, flags) \
+ __sanitizer_syscall_pre_impl_compat_20_getfsstat( \
+ (long long)(buf), (long long)(bufsize), (long long)(flags))
+#define __sanitizer_syscall_post_compat_20_getfsstat(res, buf, bufsize, flags) \
+ __sanitizer_syscall_post_impl_compat_20_getfsstat( \
+ res, (long long)(buf), (long long)(bufsize), (long long)(flags))
+#define __sanitizer_syscall_pre_compat_43_olseek(fd, offset, whence) \
+ __sanitizer_syscall_pre_impl_compat_43_olseek( \
+ (long long)(fd), (long long)(offset), (long long)(whence))
+#define __sanitizer_syscall_post_compat_43_olseek(res, fd, offset, whence) \
+ __sanitizer_syscall_post_impl_compat_43_olseek( \
+ res, (long long)(fd), (long long)(offset), (long long)(whence))
+#define __sanitizer_syscall_pre_getpid() __sanitizer_syscall_pre_impl_getpid()
+#define __sanitizer_syscall_post_getpid(res) \
+ __sanitizer_syscall_post_impl_getpid(res)
+#define __sanitizer_syscall_pre_compat_40_mount(type, path, flags, data) \
+ __sanitizer_syscall_pre_impl_compat_40_mount( \
+ (long long)(type), (long long)(path), (long long)(flags), \
+ (long long)(data))
+#define __sanitizer_syscall_post_compat_40_mount(res, type, path, flags, data) \
+ __sanitizer_syscall_post_impl_compat_40_mount( \
+ res, (long long)(type), (long long)(path), (long long)(flags), \
+ (long long)(data))
+#define __sanitizer_syscall_pre_unmount(path, flags) \
+ __sanitizer_syscall_pre_impl_unmount((long long)(path), (long long)(flags))
+#define __sanitizer_syscall_post_unmount(res, path, flags) \
+ __sanitizer_syscall_post_impl_unmount(res, (long long)(path), \
+ (long long)(flags))
+#define __sanitizer_syscall_pre_setuid(uid) \
+ __sanitizer_syscall_pre_impl_setuid((long long)(uid))
+#define __sanitizer_syscall_post_setuid(res, uid) \
+ __sanitizer_syscall_post_impl_setuid(res, (long long)(uid))
+#define __sanitizer_syscall_pre_getuid() __sanitizer_syscall_pre_impl_getuid()
+#define __sanitizer_syscall_post_getuid(res) \
+ __sanitizer_syscall_post_impl_getuid(res)
+#define __sanitizer_syscall_pre_geteuid() __sanitizer_syscall_pre_impl_geteuid()
+#define __sanitizer_syscall_post_geteuid(res) \
+ __sanitizer_syscall_post_impl_geteuid(res)
+#define __sanitizer_syscall_pre_ptrace(req, pid, addr, data) \
+ __sanitizer_syscall_pre_impl_ptrace((long long)(req), (long long)(pid), \
+ (long long)(addr), (long long)(data))
+#define __sanitizer_syscall_post_ptrace(res, req, pid, addr, data) \
+ __sanitizer_syscall_post_impl_ptrace(res, (long long)(req), \
+ (long long)(pid), (long long)(addr), \
+ (long long)(data))
+#define __sanitizer_syscall_pre_recvmsg(s, msg, flags) \
+ __sanitizer_syscall_pre_impl_recvmsg((long long)(s), (long long)(msg), \
+ (long long)(flags))
+#define __sanitizer_syscall_post_recvmsg(res, s, msg, flags) \
+ __sanitizer_syscall_post_impl_recvmsg(res, (long long)(s), (long long)(msg), \
+ (long long)(flags))
+#define __sanitizer_syscall_pre_sendmsg(s, msg, flags) \
+ __sanitizer_syscall_pre_impl_sendmsg((long long)(s), (long long)(msg), \
+ (long long)(flags))
+#define __sanitizer_syscall_post_sendmsg(res, s, msg, flags) \
+ __sanitizer_syscall_post_impl_sendmsg(res, (long long)(s), (long long)(msg), \
+ (long long)(flags))
+#define __sanitizer_syscall_pre_recvfrom(s, buf, len, flags, from, \
+ fromlenaddr) \
+ __sanitizer_syscall_pre_impl_recvfrom( \
+ (long long)(s), (long long)(buf), (long long)(len), (long long)(flags), \
+ (long long)(from), (long long)(fromlenaddr))
+#define __sanitizer_syscall_post_recvfrom(res, s, buf, len, flags, from, \
+ fromlenaddr) \
+ __sanitizer_syscall_post_impl_recvfrom( \
+ res, (long long)(s), (long long)(buf), (long long)(len), \
+ (long long)(flags), (long long)(from), (long long)(fromlenaddr))
+#define __sanitizer_syscall_pre_accept(s, name, anamelen) \
+ __sanitizer_syscall_pre_impl_accept((long long)(s), (long long)(name), \
+ (long long)(anamelen))
+#define __sanitizer_syscall_post_accept(res, s, name, anamelen) \
+ __sanitizer_syscall_post_impl_accept(res, (long long)(s), (long long)(name), \
+ (long long)(anamelen))
+#define __sanitizer_syscall_pre_getpeername(fdes, asa, alen) \
+ __sanitizer_syscall_pre_impl_getpeername( \
+ (long long)(fdes), (long long)(asa), (long long)(alen))
+#define __sanitizer_syscall_post_getpeername(res, fdes, asa, alen) \
+ __sanitizer_syscall_post_impl_getpeername( \
+ res, (long long)(fdes), (long long)(asa), (long long)(alen))
+#define __sanitizer_syscall_pre_getsockname(fdes, asa, alen) \
+ __sanitizer_syscall_pre_impl_getsockname( \
+ (long long)(fdes), (long long)(asa), (long long)(alen))
+#define __sanitizer_syscall_post_getsockname(res, fdes, asa, alen) \
+ __sanitizer_syscall_post_impl_getsockname( \
+ res, (long long)(fdes), (long long)(asa), (long long)(alen))
+#define __sanitizer_syscall_pre_access(path, flags) \
+ __sanitizer_syscall_pre_impl_access((long long)(path), (long long)(flags))
+#define __sanitizer_syscall_post_access(res, path, flags) \
+ __sanitizer_syscall_post_impl_access(res, (long long)(path), \
+ (long long)(flags))
+#define __sanitizer_syscall_pre_chflags(path, flags) \
+ __sanitizer_syscall_pre_impl_chflags((long long)(path), (long long)(flags))
+#define __sanitizer_syscall_post_chflags(res, path, flags) \
+ __sanitizer_syscall_post_impl_chflags(res, (long long)(path), \
+ (long long)(flags))
+#define __sanitizer_syscall_pre_fchflags(fd, flags) \
+ __sanitizer_syscall_pre_impl_fchflags((long long)(fd), (long long)(flags))
+#define __sanitizer_syscall_post_fchflags(res, fd, flags) \
+ __sanitizer_syscall_post_impl_fchflags(res, (long long)(fd), \
+ (long long)(flags))
+#define __sanitizer_syscall_pre_sync() __sanitizer_syscall_pre_impl_sync()
+#define __sanitizer_syscall_post_sync(res) \
+ __sanitizer_syscall_post_impl_sync(res)
+#define __sanitizer_syscall_pre_kill(pid, signum) \
+ __sanitizer_syscall_pre_impl_kill((long long)(pid), (long long)(signum))
+#define __sanitizer_syscall_post_kill(res, pid, signum) \
+ __sanitizer_syscall_post_impl_kill(res, (long long)(pid), (long long)(signum))
+#define __sanitizer_syscall_pre_compat_43_stat43(path, ub) \
+ __sanitizer_syscall_pre_impl_compat_43_stat43((long long)(path), \
+ (long long)(ub))
+#define __sanitizer_syscall_post_compat_43_stat43(res, path, ub) \
+ __sanitizer_syscall_post_impl_compat_43_stat43(res, (long long)(path), \
+ (long long)(ub))
+#define __sanitizer_syscall_pre_getppid() __sanitizer_syscall_pre_impl_getppid()
+#define __sanitizer_syscall_post_getppid(res) \
+ __sanitizer_syscall_post_impl_getppid(res)
+#define __sanitizer_syscall_pre_compat_43_lstat43(path, ub) \
+ __sanitizer_syscall_pre_impl_compat_43_lstat43((long long)(path), \
+ (long long)(ub))
+#define __sanitizer_syscall_post_compat_43_lstat43(res, path, ub) \
+ __sanitizer_syscall_post_impl_compat_43_lstat43(res, (long long)(path), \
+ (long long)(ub))
+#define __sanitizer_syscall_pre_dup(fd) \
+ __sanitizer_syscall_pre_impl_dup((long long)(fd))
+#define __sanitizer_syscall_post_dup(res, fd) \
+ __sanitizer_syscall_post_impl_dup(res, (long long)(fd))
+#define __sanitizer_syscall_pre_pipe() __sanitizer_syscall_pre_impl_pipe()
+#define __sanitizer_syscall_post_pipe(res) \
+ __sanitizer_syscall_post_impl_pipe(res)
+#define __sanitizer_syscall_pre_getegid() __sanitizer_syscall_pre_impl_getegid()
+#define __sanitizer_syscall_post_getegid(res) \
+ __sanitizer_syscall_post_impl_getegid(res)
+#define __sanitizer_syscall_pre_profil(samples, size, offset, scale) \
+ __sanitizer_syscall_pre_impl_profil((long long)(samples), (long long)(size), \
+ (long long)(offset), (long long)(scale))
+#define __sanitizer_syscall_post_profil(res, samples, size, offset, scale) \
+ __sanitizer_syscall_post_impl_profil(res, (long long)(samples), \
+ (long long)(size), (long long)(offset), \
+ (long long)(scale))
+#define __sanitizer_syscall_pre_ktrace(fname, ops, facs, pid) \
+ __sanitizer_syscall_pre_impl_ktrace((long long)(fname), (long long)(ops), \
+ (long long)(facs), (long long)(pid))
+#define __sanitizer_syscall_post_ktrace(res, fname, ops, facs, pid) \
+ __sanitizer_syscall_post_impl_ktrace(res, (long long)(fname), \
+ (long long)(ops), (long long)(facs), \
+ (long long)(pid))
+#define __sanitizer_syscall_pre_compat_13_sigaction13(signum, nsa, osa) \
+ __sanitizer_syscall_pre_impl_compat_13_sigaction13( \
+ (long long)(signum), (long long)(nsa), (long long)(osa))
+#define __sanitizer_syscall_post_compat_13_sigaction13(res, signum, nsa, osa) \
+ __sanitizer_syscall_post_impl_compat_13_sigaction13( \
+ res, (long long)(signum), (long long)(nsa), (long long)(osa))
+#define __sanitizer_syscall_pre_getgid() __sanitizer_syscall_pre_impl_getgid()
+#define __sanitizer_syscall_post_getgid(res) \
+ __sanitizer_syscall_post_impl_getgid(res)
+#define __sanitizer_syscall_pre_compat_13_sigprocmask13(how, mask) \
+ __sanitizer_syscall_pre_impl_compat_13_sigprocmask13((long long)(how), \
+ (long long)(mask))
+#define __sanitizer_syscall_post_compat_13_sigprocmask13(res, how, mask) \
+ __sanitizer_syscall_post_impl_compat_13_sigprocmask13(res, (long long)(how), \
+ (long long)(mask))
+#define __sanitizer_syscall_pre___getlogin(namebuf, namelen) \
+ __sanitizer_syscall_pre_impl___getlogin((long long)(namebuf), \
+ (long long)(namelen))
+#define __sanitizer_syscall_post___getlogin(res, namebuf, namelen) \
+ __sanitizer_syscall_post_impl___getlogin(res, (long long)(namebuf), \
+ (long long)(namelen))
+#define __sanitizer_syscall_pre___setlogin(namebuf) \
+ __sanitizer_syscall_pre_impl___setlogin((long long)(namebuf))
+#define __sanitizer_syscall_post___setlogin(res, namebuf) \
+ __sanitizer_syscall_post_impl___setlogin(res, (long long)(namebuf))
+#define __sanitizer_syscall_pre_acct(path) \
+ __sanitizer_syscall_pre_impl_acct((long long)(path))
+#define __sanitizer_syscall_post_acct(res, path) \
+ __sanitizer_syscall_post_impl_acct(res, (long long)(path))
+#define __sanitizer_syscall_pre_compat_13_sigpending13() \
+ __sanitizer_syscall_pre_impl_compat_13_sigpending13()
+#define __sanitizer_syscall_post_compat_13_sigpending13(res) \
+ __sanitizer_syscall_post_impl_compat_13_sigpending13(res)
+#define __sanitizer_syscall_pre_compat_13_sigaltstack13(nss, oss) \
+ __sanitizer_syscall_pre_impl_compat_13_sigaltstack13((long long)(nss), \
+ (long long)(oss))
+#define __sanitizer_syscall_post_compat_13_sigaltstack13(res, nss, oss) \
+ __sanitizer_syscall_post_impl_compat_13_sigaltstack13(res, (long long)(nss), \
+ (long long)(oss))
+#define __sanitizer_syscall_pre_ioctl(fd, com, data) \
+ __sanitizer_syscall_pre_impl_ioctl((long long)(fd), (long long)(com), \
+ (long long)(data))
+#define __sanitizer_syscall_post_ioctl(res, fd, com, data) \
+ __sanitizer_syscall_post_impl_ioctl(res, (long long)(fd), (long long)(com), \
+ (long long)(data))
+#define __sanitizer_syscall_pre_compat_12_oreboot(opt) \
+ __sanitizer_syscall_pre_impl_compat_12_oreboot((long long)(opt))
+#define __sanitizer_syscall_post_compat_12_oreboot(res, opt) \
+ __sanitizer_syscall_post_impl_compat_12_oreboot(res, (long long)(opt))
+#define __sanitizer_syscall_pre_revoke(path) \
+ __sanitizer_syscall_pre_impl_revoke((long long)(path))
+#define __sanitizer_syscall_post_revoke(res, path) \
+ __sanitizer_syscall_post_impl_revoke(res, (long long)(path))
+#define __sanitizer_syscall_pre_symlink(path, link) \
+ __sanitizer_syscall_pre_impl_symlink((long long)(path), (long long)(link))
+#define __sanitizer_syscall_post_symlink(res, path, link) \
+ __sanitizer_syscall_post_impl_symlink(res, (long long)(path), \
+ (long long)(link))
+#define __sanitizer_syscall_pre_readlink(path, buf, count) \
+ __sanitizer_syscall_pre_impl_readlink((long long)(path), (long long)(buf), \
+ (long long)(count))
+#define __sanitizer_syscall_post_readlink(res, path, buf, count) \
+ __sanitizer_syscall_post_impl_readlink(res, (long long)(path), \
+ (long long)(buf), (long long)(count))
+#define __sanitizer_syscall_pre_execve(path, argp, envp) \
+ __sanitizer_syscall_pre_impl_execve((long long)(path), (long long)(argp), \
+ (long long)(envp))
+#define __sanitizer_syscall_post_execve(res, path, argp, envp) \
+ __sanitizer_syscall_post_impl_execve(res, (long long)(path), \
+ (long long)(argp), (long long)(envp))
+#define __sanitizer_syscall_pre_umask(newmask) \
+ __sanitizer_syscall_pre_impl_umask((long long)(newmask))
+#define __sanitizer_syscall_post_umask(res, newmask) \
+ __sanitizer_syscall_post_impl_umask(res, (long long)(newmask))
+#define __sanitizer_syscall_pre_chroot(path) \
+ __sanitizer_syscall_pre_impl_chroot((long long)(path))
+#define __sanitizer_syscall_post_chroot(res, path) \
+ __sanitizer_syscall_post_impl_chroot(res, (long long)(path))
+#define __sanitizer_syscall_pre_compat_43_fstat43(fd, sb) \
+ __sanitizer_syscall_pre_impl_compat_43_fstat43((long long)(fd), \
+ (long long)(sb))
+#define __sanitizer_syscall_post_compat_43_fstat43(res, fd, sb) \
+ __sanitizer_syscall_post_impl_compat_43_fstat43(res, (long long)(fd), \
+ (long long)(sb))
+#define __sanitizer_syscall_pre_compat_43_ogetkerninfo(op, where, size, arg) \
+ __sanitizer_syscall_pre_impl_compat_43_ogetkerninfo( \
+ (long long)(op), (long long)(where), (long long)(size), \
+ (long long)(arg))
+#define __sanitizer_syscall_post_compat_43_ogetkerninfo(res, op, where, size, \
+ arg) \
+ __sanitizer_syscall_post_impl_compat_43_ogetkerninfo( \
+ res, (long long)(op), (long long)(where), (long long)(size), \
+ (long long)(arg))
+#define __sanitizer_syscall_pre_compat_43_ogetpagesize() \
+ __sanitizer_syscall_pre_impl_compat_43_ogetpagesize()
+#define __sanitizer_syscall_post_compat_43_ogetpagesize(res) \
+ __sanitizer_syscall_post_impl_compat_43_ogetpagesize(res)
+#define __sanitizer_syscall_pre_compat_12_msync(addr, len) \
+ __sanitizer_syscall_pre_impl_compat_12_msync((long long)(addr), \
+ (long long)(len))
+#define __sanitizer_syscall_post_compat_12_msync(res, addr, len) \
+ __sanitizer_syscall_post_impl_compat_12_msync(res, (long long)(addr), \
+ (long long)(len))
+#define __sanitizer_syscall_pre_vfork() __sanitizer_syscall_pre_impl_vfork()
+#define __sanitizer_syscall_post_vfork(res) \
+ __sanitizer_syscall_post_impl_vfork(res)
+/* syscall 67 has been skipped */
+/* syscall 68 has been skipped */
+/* syscall 69 has been skipped */
+/* syscall 70 has been skipped */
+#define __sanitizer_syscall_pre_compat_43_ommap(addr, len, prot, flags, fd, \
+ pos) \
+ __sanitizer_syscall_pre_impl_compat_43_ommap( \
+ (long long)(addr), (long long)(len), (long long)(prot), \
+ (long long)(flags), (long long)(fd), (long long)(pos))
+#define __sanitizer_syscall_post_compat_43_ommap(res, addr, len, prot, flags, \
+ fd, pos) \
+ __sanitizer_syscall_post_impl_compat_43_ommap( \
+ res, (long long)(addr), (long long)(len), (long long)(prot), \
+ (long long)(flags), (long long)(fd), (long long)(pos))
+#define __sanitizer_syscall_pre_vadvise(anom) \
+ __sanitizer_syscall_pre_impl_vadvise((long long)(anom))
+#define __sanitizer_syscall_post_vadvise(res, anom) \
+ __sanitizer_syscall_post_impl_vadvise(res, (long long)(anom))
+#define __sanitizer_syscall_pre_munmap(addr, len) \
+ __sanitizer_syscall_pre_impl_munmap((long long)(addr), (long long)(len))
+#define __sanitizer_syscall_post_munmap(res, addr, len) \
+ __sanitizer_syscall_post_impl_munmap(res, (long long)(addr), (long long)(len))
+#define __sanitizer_syscall_pre_mprotect(addr, len, prot) \
+ __sanitizer_syscall_pre_impl_mprotect((long long)(addr), (long long)(len), \
+ (long long)(prot))
+#define __sanitizer_syscall_post_mprotect(res, addr, len, prot) \
+ __sanitizer_syscall_post_impl_mprotect(res, (long long)(addr), \
+ (long long)(len), (long long)(prot))
+#define __sanitizer_syscall_pre_madvise(addr, len, behav) \
+ __sanitizer_syscall_pre_impl_madvise((long long)(addr), (long long)(len), \
+ (long long)(behav))
+#define __sanitizer_syscall_post_madvise(res, addr, len, behav) \
+ __sanitizer_syscall_post_impl_madvise(res, (long long)(addr), \
+ (long long)(len), (long long)(behav))
+/* syscall 76 has been skipped */
+/* syscall 77 has been skipped */
+#define __sanitizer_syscall_pre_mincore(addr, len, vec) \
+ __sanitizer_syscall_pre_impl_mincore((long long)(addr), (long long)(len), \
+ (long long)(vec))
+#define __sanitizer_syscall_post_mincore(res, addr, len, vec) \
+ __sanitizer_syscall_post_impl_mincore(res, (long long)(addr), \
+ (long long)(len), (long long)(vec))
+#define __sanitizer_syscall_pre_getgroups(gidsetsize, gidset) \
+ __sanitizer_syscall_pre_impl_getgroups((long long)(gidsetsize), \
+ (long long)(gidset))
+#define __sanitizer_syscall_post_getgroups(res, gidsetsize, gidset) \
+ __sanitizer_syscall_post_impl_getgroups(res, (long long)(gidsetsize), \
+ (long long)(gidset))
+#define __sanitizer_syscall_pre_setgroups(gidsetsize, gidset) \
+ __sanitizer_syscall_pre_impl_setgroups((long long)(gidsetsize), \
+ (long long)(gidset))
+#define __sanitizer_syscall_post_setgroups(res, gidsetsize, gidset) \
+ __sanitizer_syscall_post_impl_setgroups(res, (long long)(gidsetsize), \
+ (long long)(gidset))
+#define __sanitizer_syscall_pre_getpgrp() __sanitizer_syscall_pre_impl_getpgrp()
+#define __sanitizer_syscall_post_getpgrp(res) \
+ __sanitizer_syscall_post_impl_getpgrp(res)
+#define __sanitizer_syscall_pre_setpgid(pid, pgid) \
+ __sanitizer_syscall_pre_impl_setpgid((long long)(pid), (long long)(pgid))
+#define __sanitizer_syscall_post_setpgid(res, pid, pgid) \
+ __sanitizer_syscall_post_impl_setpgid(res, (long long)(pid), \
+ (long long)(pgid))
+#define __sanitizer_syscall_pre_compat_50_setitimer(which, itv, oitv) \
+ __sanitizer_syscall_pre_impl_compat_50_setitimer( \
+ (long long)(which), (long long)(itv), (long long)(oitv))
+#define __sanitizer_syscall_post_compat_50_setitimer(res, which, itv, oitv) \
+ __sanitizer_syscall_post_impl_compat_50_setitimer( \
+ res, (long long)(which), (long long)(itv), (long long)(oitv))
+#define __sanitizer_syscall_pre_compat_43_owait() \
+ __sanitizer_syscall_pre_impl_compat_43_owait()
+#define __sanitizer_syscall_post_compat_43_owait(res) \
+ __sanitizer_syscall_post_impl_compat_43_owait(res)
+#define __sanitizer_syscall_pre_compat_12_oswapon(name) \
+ __sanitizer_syscall_pre_impl_compat_12_oswapon((long long)(name))
+#define __sanitizer_syscall_post_compat_12_oswapon(res, name) \
+ __sanitizer_syscall_post_impl_compat_12_oswapon(res, (long long)(name))
+#define __sanitizer_syscall_pre_compat_50_getitimer(which, itv) \
+ __sanitizer_syscall_pre_impl_compat_50_getitimer((long long)(which), \
+ (long long)(itv))
+#define __sanitizer_syscall_post_compat_50_getitimer(res, which, itv) \
+ __sanitizer_syscall_post_impl_compat_50_getitimer(res, (long long)(which), \
+ (long long)(itv))
+#define __sanitizer_syscall_pre_compat_43_ogethostname(hostname, len) \
+ __sanitizer_syscall_pre_impl_compat_43_ogethostname((long long)(hostname), \
+ (long long)(len))
+#define __sanitizer_syscall_post_compat_43_ogethostname(res, hostname, len) \
+ __sanitizer_syscall_post_impl_compat_43_ogethostname( \
+ res, (long long)(hostname), (long long)(len))
+#define __sanitizer_syscall_pre_compat_43_osethostname(hostname, len) \
+ __sanitizer_syscall_pre_impl_compat_43_osethostname((long long)(hostname), \
+ (long long)(len))
+#define __sanitizer_syscall_post_compat_43_osethostname(res, hostname, len) \
+ __sanitizer_syscall_post_impl_compat_43_osethostname( \
+ res, (long long)(hostname), (long long)(len))
+#define __sanitizer_syscall_pre_compat_43_ogetdtablesize() \
+ __sanitizer_syscall_pre_impl_compat_43_ogetdtablesize()
+#define __sanitizer_syscall_post_compat_43_ogetdtablesize(res) \
+ __sanitizer_syscall_post_impl_compat_43_ogetdtablesize(res)
+#define __sanitizer_syscall_pre_dup2(from, to) \
+ __sanitizer_syscall_pre_impl_dup2((long long)(from), (long long)(to))
+#define __sanitizer_syscall_post_dup2(res, from, to) \
+ __sanitizer_syscall_post_impl_dup2(res, (long long)(from), (long long)(to))
+/* syscall 91 has been skipped */
+#define __sanitizer_syscall_pre_fcntl(fd, cmd, arg) \
+ __sanitizer_syscall_pre_impl_fcntl((long long)(fd), (long long)(cmd), \
+ (long long)(arg))
+#define __sanitizer_syscall_post_fcntl(res, fd, cmd, arg) \
+ __sanitizer_syscall_post_impl_fcntl(res, (long long)(fd), (long long)(cmd), \
+ (long long)(arg))
+#define __sanitizer_syscall_pre_compat_50_select(nd, in, ou, ex, tv) \
+ __sanitizer_syscall_pre_impl_compat_50_select( \
+ (long long)(nd), (long long)(in), (long long)(ou), (long long)(ex), \
+ (long long)(tv))
+#define __sanitizer_syscall_post_compat_50_select(res, nd, in, ou, ex, tv) \
+ __sanitizer_syscall_post_impl_compat_50_select( \
+ res, (long long)(nd), (long long)(in), (long long)(ou), (long long)(ex), \
+ (long long)(tv))
+/* syscall 94 has been skipped */
+#define __sanitizer_syscall_pre_fsync(fd) \
+ __sanitizer_syscall_pre_impl_fsync((long long)(fd))
+#define __sanitizer_syscall_post_fsync(res, fd) \
+ __sanitizer_syscall_post_impl_fsync(res, (long long)(fd))
+#define __sanitizer_syscall_pre_setpriority(which, who, prio) \
+ __sanitizer_syscall_pre_impl_setpriority( \
+ (long long)(which), (long long)(who), (long long)(prio))
+#define __sanitizer_syscall_post_setpriority(res, which, who, prio) \
+ __sanitizer_syscall_post_impl_setpriority( \
+ res, (long long)(which), (long long)(who), (long long)(prio))
+#define __sanitizer_syscall_pre_compat_30_socket(domain, type, protocol) \
+ __sanitizer_syscall_pre_impl_compat_30_socket( \
+ (long long)(domain), (long long)(type), (long long)(protocol))
+#define __sanitizer_syscall_post_compat_30_socket(res, domain, type, protocol) \
+ __sanitizer_syscall_post_impl_compat_30_socket( \
+ res, (long long)(domain), (long long)(type), (long long)(protocol))
+#define __sanitizer_syscall_pre_connect(s, name, namelen) \
+ __sanitizer_syscall_pre_impl_connect((long long)(s), (long long)(name), \
+ (long long)(namelen))
+#define __sanitizer_syscall_post_connect(res, s, name, namelen) \
+ __sanitizer_syscall_post_impl_connect( \
+ res, (long long)(s), (long long)(name), (long long)(namelen))
+#define __sanitizer_syscall_pre_compat_43_oaccept(s, name, anamelen) \
+ __sanitizer_syscall_pre_impl_compat_43_oaccept( \
+ (long long)(s), (long long)(name), (long long)(anamelen))
+#define __sanitizer_syscall_post_compat_43_oaccept(res, s, name, anamelen) \
+ __sanitizer_syscall_post_impl_compat_43_oaccept( \
+ res, (long long)(s), (long long)(name), (long long)(anamelen))
+#define __sanitizer_syscall_pre_getpriority(which, who) \
+ __sanitizer_syscall_pre_impl_getpriority((long long)(which), (long long)(who))
+#define __sanitizer_syscall_post_getpriority(res, which, who) \
+ __sanitizer_syscall_post_impl_getpriority(res, (long long)(which), \
+ (long long)(who))
+#define __sanitizer_syscall_pre_compat_43_osend(s, buf, len, flags) \
+ __sanitizer_syscall_pre_impl_compat_43_osend( \
+ (long long)(s), (long long)(buf), (long long)(len), (long long)(flags))
+#define __sanitizer_syscall_post_compat_43_osend(res, s, buf, len, flags) \
+ __sanitizer_syscall_post_impl_compat_43_osend( \
+ res, (long long)(s), (long long)(buf), (long long)(len), \
+ (long long)(flags))
+#define __sanitizer_syscall_pre_compat_43_orecv(s, buf, len, flags) \
+ __sanitizer_syscall_pre_impl_compat_43_orecv( \
+ (long long)(s), (long long)(buf), (long long)(len), (long long)(flags))
+#define __sanitizer_syscall_post_compat_43_orecv(res, s, buf, len, flags) \
+ __sanitizer_syscall_post_impl_compat_43_orecv( \
+ res, (long long)(s), (long long)(buf), (long long)(len), \
+ (long long)(flags))
+#define __sanitizer_syscall_pre_compat_13_sigreturn13(sigcntxp) \
+ __sanitizer_syscall_pre_impl_compat_13_sigreturn13((long long)(sigcntxp))
+#define __sanitizer_syscall_post_compat_13_sigreturn13(res, sigcntxp) \
+ __sanitizer_syscall_post_impl_compat_13_sigreturn13(res, \
+ (long long)(sigcntxp))
+#define __sanitizer_syscall_pre_bind(s, name, namelen) \
+ __sanitizer_syscall_pre_impl_bind((long long)(s), (long long)(name), \
+ (long long)(namelen))
+#define __sanitizer_syscall_post_bind(res, s, name, namelen) \
+ __sanitizer_syscall_post_impl_bind(res, (long long)(s), (long long)(name), \
+ (long long)(namelen))
+#define __sanitizer_syscall_pre_setsockopt(s, level, name, val, valsize) \
+ __sanitizer_syscall_pre_impl_setsockopt((long long)(s), (long long)(level), \
+ (long long)(name), (long long)(val), \
+ (long long)(valsize))
+#define __sanitizer_syscall_post_setsockopt(res, s, level, name, val, valsize) \
+ __sanitizer_syscall_post_impl_setsockopt( \
+ res, (long long)(s), (long long)(level), (long long)(name), \
+ (long long)(val), (long long)(valsize))
+#define __sanitizer_syscall_pre_listen(s, backlog) \
+ __sanitizer_syscall_pre_impl_listen((long long)(s), (long long)(backlog))
+#define __sanitizer_syscall_post_listen(res, s, backlog) \
+ __sanitizer_syscall_post_impl_listen(res, (long long)(s), \
+ (long long)(backlog))
+/* syscall 107 has been skipped */
+#define __sanitizer_syscall_pre_compat_43_osigvec(signum, nsv, osv) \
+ __sanitizer_syscall_pre_impl_compat_43_osigvec( \
+ (long long)(signum), (long long)(nsv), (long long)(osv))
+#define __sanitizer_syscall_post_compat_43_osigvec(res, signum, nsv, osv) \
+ __sanitizer_syscall_post_impl_compat_43_osigvec( \
+ res, (long long)(signum), (long long)(nsv), (long long)(osv))
+#define __sanitizer_syscall_pre_compat_43_osigblock(mask) \
+ __sanitizer_syscall_pre_impl_compat_43_osigblock((long long)(mask))
+#define __sanitizer_syscall_post_compat_43_osigblock(res, mask) \
+ __sanitizer_syscall_post_impl_compat_43_osigblock(res, (long long)(mask))
+#define __sanitizer_syscall_pre_compat_43_osigsetmask(mask) \
+ __sanitizer_syscall_pre_impl_compat_43_osigsetmask((long long)(mask))
+#define __sanitizer_syscall_post_compat_43_osigsetmask(res, mask) \
+ __sanitizer_syscall_post_impl_compat_43_osigsetmask(res, (long long)(mask))
+#define __sanitizer_syscall_pre_compat_13_sigsuspend13(mask) \
+ __sanitizer_syscall_pre_impl_compat_13_sigsuspend13((long long)(mask))
+#define __sanitizer_syscall_post_compat_13_sigsuspend13(res, mask) \
+ __sanitizer_syscall_post_impl_compat_13_sigsuspend13(res, (long long)(mask))
+#define __sanitizer_syscall_pre_compat_43_osigstack(nss, oss) \
+ __sanitizer_syscall_pre_impl_compat_43_osigstack((long long)(nss), \
+ (long long)(oss))
+#define __sanitizer_syscall_post_compat_43_osigstack(res, nss, oss) \
+ __sanitizer_syscall_post_impl_compat_43_osigstack(res, (long long)(nss), \
+ (long long)(oss))
+#define __sanitizer_syscall_pre_compat_43_orecvmsg(s, msg, flags) \
+ __sanitizer_syscall_pre_impl_compat_43_orecvmsg( \
+ (long long)(s), (long long)(msg), (long long)(flags))
+#define __sanitizer_syscall_post_compat_43_orecvmsg(res, s, msg, flags) \
+ __sanitizer_syscall_post_impl_compat_43_orecvmsg( \
+ res, (long long)(s), (long long)(msg), (long long)(flags))
+#define __sanitizer_syscall_pre_compat_43_osendmsg(s, msg, flags) \
+ __sanitizer_syscall_pre_impl_compat_43_osendmsg( \
+ (long long)(s), (long long)(msg), (long long)(flags))
+#define __sanitizer_syscall_post_compat_43_osendmsg(res, s, msg, flags) \
+ __sanitizer_syscall_post_impl_compat_43_osendmsg( \
+ res, (long long)(s), (long long)(msg), (long long)(flags))
+/* syscall 115 has been skipped */
+#define __sanitizer_syscall_pre_compat_50_gettimeofday(tp, tzp) \
+ __sanitizer_syscall_pre_impl_compat_50_gettimeofday((long long)(tp), \
+ (long long)(tzp))
+#define __sanitizer_syscall_post_compat_50_gettimeofday(res, tp, tzp) \
+ __sanitizer_syscall_post_impl_compat_50_gettimeofday(res, (long long)(tp), \
+ (long long)(tzp))
+#define __sanitizer_syscall_pre_compat_50_getrusage(who, rusage) \
+ __sanitizer_syscall_pre_impl_compat_50_getrusage((long long)(who), \
+ (long long)(rusage))
+#define __sanitizer_syscall_post_compat_50_getrusage(res, who, rusage) \
+ __sanitizer_syscall_post_impl_compat_50_getrusage(res, (long long)(who), \
+ (long long)(rusage))
+#define __sanitizer_syscall_pre_getsockopt(s, level, name, val, avalsize) \
+ __sanitizer_syscall_pre_impl_getsockopt((long long)(s), (long long)(level), \
+ (long long)(name), (long long)(val), \
+ (long long)(avalsize))
+#define __sanitizer_syscall_post_getsockopt(res, s, level, name, val, \
+ avalsize) \
+ __sanitizer_syscall_post_impl_getsockopt( \
+ res, (long long)(s), (long long)(level), (long long)(name), \
+ (long long)(val), (long long)(avalsize))
+/* syscall 119 has been skipped */
+#define __sanitizer_syscall_pre_readv(fd, iovp, iovcnt) \
+ __sanitizer_syscall_pre_impl_readv((long long)(fd), (long long)(iovp), \
+ (long long)(iovcnt))
+#define __sanitizer_syscall_post_readv(res, fd, iovp, iovcnt) \
+ __sanitizer_syscall_post_impl_readv(res, (long long)(fd), (long long)(iovp), \
+ (long long)(iovcnt))
+#define __sanitizer_syscall_pre_writev(fd, iovp, iovcnt) \
+ __sanitizer_syscall_pre_impl_writev((long long)(fd), (long long)(iovp), \
+ (long long)(iovcnt))
+#define __sanitizer_syscall_post_writev(res, fd, iovp, iovcnt) \
+ __sanitizer_syscall_post_impl_writev(res, (long long)(fd), \
+ (long long)(iovp), (long long)(iovcnt))
+#define __sanitizer_syscall_pre_compat_50_settimeofday(tv, tzp) \
+ __sanitizer_syscall_pre_impl_compat_50_settimeofday((long long)(tv), \
+ (long long)(tzp))
+#define __sanitizer_syscall_post_compat_50_settimeofday(res, tv, tzp) \
+ __sanitizer_syscall_post_impl_compat_50_settimeofday(res, (long long)(tv), \
+ (long long)(tzp))
+#define __sanitizer_syscall_pre_fchown(fd, uid, gid) \
+ __sanitizer_syscall_pre_impl_fchown((long long)(fd), (long long)(uid), \
+ (long long)(gid))
+#define __sanitizer_syscall_post_fchown(res, fd, uid, gid) \
+ __sanitizer_syscall_post_impl_fchown(res, (long long)(fd), (long long)(uid), \
+ (long long)(gid))
+#define __sanitizer_syscall_pre_fchmod(fd, mode) \
+ __sanitizer_syscall_pre_impl_fchmod((long long)(fd), (long long)(mode))
+#define __sanitizer_syscall_post_fchmod(res, fd, mode) \
+ __sanitizer_syscall_post_impl_fchmod(res, (long long)(fd), (long long)(mode))
+#define __sanitizer_syscall_pre_compat_43_orecvfrom(s, buf, len, flags, from, \
+ fromlenaddr) \
+ __sanitizer_syscall_pre_impl_compat_43_orecvfrom( \
+ (long long)(s), (long long)(buf), (long long)(len), (long long)(flags), \
+ (long long)(from), (long long)(fromlenaddr))
+#define __sanitizer_syscall_post_compat_43_orecvfrom(res, s, buf, len, flags, \
+ from, fromlenaddr) \
+ __sanitizer_syscall_post_impl_compat_43_orecvfrom( \
+ res, (long long)(s), (long long)(buf), (long long)(len), \
+ (long long)(flags), (long long)(from), (long long)(fromlenaddr))
+#define __sanitizer_syscall_pre_setreuid(ruid, euid) \
+ __sanitizer_syscall_pre_impl_setreuid((long long)(ruid), (long long)(euid))
+#define __sanitizer_syscall_post_setreuid(res, ruid, euid) \
+ __sanitizer_syscall_post_impl_setreuid(res, (long long)(ruid), \
+ (long long)(euid))
+#define __sanitizer_syscall_pre_setregid(rgid, egid) \
+ __sanitizer_syscall_pre_impl_setregid((long long)(rgid), (long long)(egid))
+#define __sanitizer_syscall_post_setregid(res, rgid, egid) \
+ __sanitizer_syscall_post_impl_setregid(res, (long long)(rgid), \
+ (long long)(egid))
+#define __sanitizer_syscall_pre_rename(from, to) \
+ __sanitizer_syscall_pre_impl_rename((long long)(from), (long long)(to))
+#define __sanitizer_syscall_post_rename(res, from, to) \
+ __sanitizer_syscall_post_impl_rename(res, (long long)(from), (long long)(to))
+#define __sanitizer_syscall_pre_compat_43_otruncate(path, length) \
+ __sanitizer_syscall_pre_impl_compat_43_otruncate((long long)(path), \
+ (long long)(length))
+#define __sanitizer_syscall_post_compat_43_otruncate(res, path, length) \
+ __sanitizer_syscall_post_impl_compat_43_otruncate(res, (long long)(path), \
+ (long long)(length))
+#define __sanitizer_syscall_pre_compat_43_oftruncate(fd, length) \
+ __sanitizer_syscall_pre_impl_compat_43_oftruncate((long long)(fd), \
+ (long long)(length))
+#define __sanitizer_syscall_post_compat_43_oftruncate(res, fd, length) \
+ __sanitizer_syscall_post_impl_compat_43_oftruncate(res, (long long)(fd), \
+ (long long)(length))
+#define __sanitizer_syscall_pre_flock(fd, how) \
+ __sanitizer_syscall_pre_impl_flock((long long)(fd), (long long)(how))
+#define __sanitizer_syscall_post_flock(res, fd, how) \
+ __sanitizer_syscall_post_impl_flock(res, (long long)(fd), (long long)(how))
+#define __sanitizer_syscall_pre_mkfifo(path, mode) \
+ __sanitizer_syscall_pre_impl_mkfifo((long long)(path), (long long)(mode))
+#define __sanitizer_syscall_post_mkfifo(res, path, mode) \
+ __sanitizer_syscall_post_impl_mkfifo(res, (long long)(path), \
+ (long long)(mode))
+#define __sanitizer_syscall_pre_sendto(s, buf, len, flags, to, tolen) \
+ __sanitizer_syscall_pre_impl_sendto((long long)(s), (long long)(buf), \
+ (long long)(len), (long long)(flags), \
+ (long long)(to), (long long)(tolen))
+#define __sanitizer_syscall_post_sendto(res, s, buf, len, flags, to, tolen) \
+ __sanitizer_syscall_post_impl_sendto(res, (long long)(s), (long long)(buf), \
+ (long long)(len), (long long)(flags), \
+ (long long)(to), (long long)(tolen))
+#define __sanitizer_syscall_pre_shutdown(s, how) \
+ __sanitizer_syscall_pre_impl_shutdown((long long)(s), (long long)(how))
+#define __sanitizer_syscall_post_shutdown(res, s, how) \
+ __sanitizer_syscall_post_impl_shutdown(res, (long long)(s), (long long)(how))
+#define __sanitizer_syscall_pre_socketpair(domain, type, protocol, rsv) \
+ __sanitizer_syscall_pre_impl_socketpair( \
+ (long long)(domain), (long long)(type), (long long)(protocol), \
+ (long long)(rsv))
+#define __sanitizer_syscall_post_socketpair(res, domain, type, protocol, rsv) \
+ __sanitizer_syscall_post_impl_socketpair( \
+ res, (long long)(domain), (long long)(type), (long long)(protocol), \
+ (long long)(rsv))
+#define __sanitizer_syscall_pre_mkdir(path, mode) \
+ __sanitizer_syscall_pre_impl_mkdir((long long)(path), (long long)(mode))
+#define __sanitizer_syscall_post_mkdir(res, path, mode) \
+ __sanitizer_syscall_post_impl_mkdir(res, (long long)(path), (long long)(mode))
+#define __sanitizer_syscall_pre_rmdir(path) \
+ __sanitizer_syscall_pre_impl_rmdir((long long)(path))
+#define __sanitizer_syscall_post_rmdir(res, path) \
+ __sanitizer_syscall_post_impl_rmdir(res, (long long)(path))
+#define __sanitizer_syscall_pre_compat_50_utimes(path, tptr) \
+ __sanitizer_syscall_pre_impl_compat_50_utimes((long long)(path), \
+ (long long)(tptr))
+#define __sanitizer_syscall_post_compat_50_utimes(res, path, tptr) \
+ __sanitizer_syscall_post_impl_compat_50_utimes(res, (long long)(path), \
+ (long long)(tptr))
+/* syscall 139 has been skipped */
+#define __sanitizer_syscall_pre_compat_50_adjtime(delta, olddelta) \
+ __sanitizer_syscall_pre_impl_compat_50_adjtime((long long)(delta), \
+ (long long)(olddelta))
+#define __sanitizer_syscall_post_compat_50_adjtime(res, delta, olddelta) \
+ __sanitizer_syscall_post_impl_compat_50_adjtime(res, (long long)(delta), \
+ (long long)(olddelta))
+#define __sanitizer_syscall_pre_compat_43_ogetpeername(fdes, asa, alen) \
+ __sanitizer_syscall_pre_impl_compat_43_ogetpeername( \
+ (long long)(fdes), (long long)(asa), (long long)(alen))
+#define __sanitizer_syscall_post_compat_43_ogetpeername(res, fdes, asa, alen) \
+ __sanitizer_syscall_post_impl_compat_43_ogetpeername( \
+ res, (long long)(fdes), (long long)(asa), (long long)(alen))
+#define __sanitizer_syscall_pre_compat_43_ogethostid() \
+ __sanitizer_syscall_pre_impl_compat_43_ogethostid()
+#define __sanitizer_syscall_post_compat_43_ogethostid(res) \
+ __sanitizer_syscall_post_impl_compat_43_ogethostid(res)
+#define __sanitizer_syscall_pre_compat_43_osethostid(hostid) \
+ __sanitizer_syscall_pre_impl_compat_43_osethostid((long long)(hostid))
+#define __sanitizer_syscall_post_compat_43_osethostid(res, hostid) \
+ __sanitizer_syscall_post_impl_compat_43_osethostid(res, (long long)(hostid))
+#define __sanitizer_syscall_pre_compat_43_ogetrlimit(which, rlp) \
+ __sanitizer_syscall_pre_impl_compat_43_ogetrlimit((long long)(which), \
+ (long long)(rlp))
+#define __sanitizer_syscall_post_compat_43_ogetrlimit(res, which, rlp) \
+ __sanitizer_syscall_post_impl_compat_43_ogetrlimit(res, (long long)(which), \
+ (long long)(rlp))
+#define __sanitizer_syscall_pre_compat_43_osetrlimit(which, rlp) \
+ __sanitizer_syscall_pre_impl_compat_43_osetrlimit((long long)(which), \
+ (long long)(rlp))
+#define __sanitizer_syscall_post_compat_43_osetrlimit(res, which, rlp) \
+ __sanitizer_syscall_post_impl_compat_43_osetrlimit(res, (long long)(which), \
+ (long long)(rlp))
+#define __sanitizer_syscall_pre_compat_43_okillpg(pgid, signum) \
+ __sanitizer_syscall_pre_impl_compat_43_okillpg((long long)(pgid), \
+ (long long)(signum))
+#define __sanitizer_syscall_post_compat_43_okillpg(res, pgid, signum) \
+ __sanitizer_syscall_post_impl_compat_43_okillpg(res, (long long)(pgid), \
+ (long long)(signum))
+#define __sanitizer_syscall_pre_setsid() __sanitizer_syscall_pre_impl_setsid()
+#define __sanitizer_syscall_post_setsid(res) \
+ __sanitizer_syscall_post_impl_setsid(res)
+#define __sanitizer_syscall_pre_compat_50_quotactl(path, cmd, uid, arg) \
+ __sanitizer_syscall_pre_impl_compat_50_quotactl( \
+ (long long)(path), (long long)(cmd), (long long)(uid), (long long)(arg))
+#define __sanitizer_syscall_post_compat_50_quotactl(res, path, cmd, uid, arg) \
+ __sanitizer_syscall_post_impl_compat_50_quotactl( \
+ res, (long long)(path), (long long)(cmd), (long long)(uid), \
+ (long long)(arg))
+#define __sanitizer_syscall_pre_compat_43_oquota() \
+ __sanitizer_syscall_pre_impl_compat_43_oquota()
+#define __sanitizer_syscall_post_compat_43_oquota(res) \
+ __sanitizer_syscall_post_impl_compat_43_oquota(res)
+#define __sanitizer_syscall_pre_compat_43_ogetsockname(fdec, asa, alen) \
+ __sanitizer_syscall_pre_impl_compat_43_ogetsockname( \
+ (long long)(fdec), (long long)(asa), (long long)(alen))
+#define __sanitizer_syscall_post_compat_43_ogetsockname(res, fdec, asa, alen) \
+ __sanitizer_syscall_post_impl_compat_43_ogetsockname( \
+ res, (long long)(fdec), (long long)(asa), (long long)(alen))
+/* syscall 151 has been skipped */
+/* syscall 152 has been skipped */
+/* syscall 153 has been skipped */
+/* syscall 154 has been skipped */
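+/*
+ * "skipped" entries correspond to slots in the NetBSD syscall table that
+ * are unimplemented or obsolete; the generator keeps these comments so the
+ * hook list stays aligned with the numbering in syscalls.master.
+ */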
+#define __sanitizer_syscall_pre_nfssvc(flag, argp) \
+ __sanitizer_syscall_pre_impl_nfssvc((long long)(flag), (long long)(argp))
+#define __sanitizer_syscall_post_nfssvc(res, flag, argp) \
+ __sanitizer_syscall_post_impl_nfssvc(res, (long long)(flag), \
+ (long long)(argp))
+#define __sanitizer_syscall_pre_compat_43_ogetdirentries(fd, buf, count, \
+ basep) \
+ __sanitizer_syscall_pre_impl_compat_43_ogetdirentries( \
+ (long long)(fd), (long long)(buf), (long long)(count), \
+ (long long)(basep))
+#define __sanitizer_syscall_post_compat_43_ogetdirentries(res, fd, buf, count, \
+ basep) \
+ __sanitizer_syscall_post_impl_compat_43_ogetdirentries( \
+ res, (long long)(fd), (long long)(buf), (long long)(count), \
+ (long long)(basep))
+#define __sanitizer_syscall_pre_compat_20_statfs(path, buf) \
+ __sanitizer_syscall_pre_impl_compat_20_statfs((long long)(path), \
+ (long long)(buf))
+#define __sanitizer_syscall_post_compat_20_statfs(res, path, buf) \
+ __sanitizer_syscall_post_impl_compat_20_statfs(res, (long long)(path), \
+ (long long)(buf))
+#define __sanitizer_syscall_pre_compat_20_fstatfs(fd, buf) \
+ __sanitizer_syscall_pre_impl_compat_20_fstatfs((long long)(fd), \
+ (long long)(buf))
+#define __sanitizer_syscall_post_compat_20_fstatfs(res, fd, buf) \
+ __sanitizer_syscall_post_impl_compat_20_fstatfs(res, (long long)(fd), \
+ (long long)(buf))
+/* syscall 159 has been skipped */
+/* syscall 160 has been skipped */
+#define __sanitizer_syscall_pre_compat_30_getfh(fname, fhp) \
+ __sanitizer_syscall_pre_impl_compat_30_getfh((long long)(fname), \
+ (long long)(fhp))
+#define __sanitizer_syscall_post_compat_30_getfh(res, fname, fhp) \
+ __sanitizer_syscall_post_impl_compat_30_getfh(res, (long long)(fname), \
+ (long long)(fhp))
+#define __sanitizer_syscall_pre_compat_09_ogetdomainname(domainname, len) \
+ __sanitizer_syscall_pre_impl_compat_09_ogetdomainname( \
+ (long long)(domainname), (long long)(len))
+#define __sanitizer_syscall_post_compat_09_ogetdomainname(res, domainname, \
+ len) \
+ __sanitizer_syscall_post_impl_compat_09_ogetdomainname( \
+ res, (long long)(domainname), (long long)(len))
+#define __sanitizer_syscall_pre_compat_09_osetdomainname(domainname, len) \
+ __sanitizer_syscall_pre_impl_compat_09_osetdomainname( \
+ (long long)(domainname), (long long)(len))
+#define __sanitizer_syscall_post_compat_09_osetdomainname(res, domainname, \
+ len) \
+ __sanitizer_syscall_post_impl_compat_09_osetdomainname( \
+ res, (long long)(domainname), (long long)(len))
+#define __sanitizer_syscall_pre_compat_09_ouname(name) \
+ __sanitizer_syscall_pre_impl_compat_09_ouname((long long)(name))
+#define __sanitizer_syscall_post_compat_09_ouname(res, name) \
+ __sanitizer_syscall_post_impl_compat_09_ouname(res, (long long)(name))
+#define __sanitizer_syscall_pre_sysarch(op, parms) \
+ __sanitizer_syscall_pre_impl_sysarch((long long)(op), (long long)(parms))
+#define __sanitizer_syscall_post_sysarch(res, op, parms) \
+ __sanitizer_syscall_post_impl_sysarch(res, (long long)(op), \
+ (long long)(parms))
+/* syscall 166 has been skipped */
+/* syscall 167 has been skipped */
+/* syscall 168 has been skipped */
+#if !defined(_LP64)
+#define __sanitizer_syscall_pre_compat_10_osemsys(which, a2, a3, a4, a5) \
+ __sanitizer_syscall_pre_impl_compat_10_osemsys( \
+ (long long)(which), (long long)(a2), (long long)(a3), (long long)(a4), \
+ (long long)(a5))
+#define __sanitizer_syscall_post_compat_10_osemsys(res, which, a2, a3, a4, a5) \
+ __sanitizer_syscall_post_impl_compat_10_osemsys( \
+ res, (long long)(which), (long long)(a2), (long long)(a3), \
+ (long long)(a4), (long long)(a5))
+#else
+/* syscall 169 has been skipped */
+#endif
+#if !defined(_LP64)
+#define __sanitizer_syscall_pre_compat_10_omsgsys(which, a2, a3, a4, a5, a6) \
+ __sanitizer_syscall_pre_impl_compat_10_omsgsys( \
+ (long long)(which), (long long)(a2), (long long)(a3), (long long)(a4), \
+ (long long)(a5), (long long)(a6))
+#define __sanitizer_syscall_post_compat_10_omsgsys(res, which, a2, a3, a4, a5, \
+ a6) \
+ __sanitizer_syscall_post_impl_compat_10_omsgsys( \
+ res, (long long)(which), (long long)(a2), (long long)(a3), \
+ (long long)(a4), (long long)(a5), (long long)(a6))
+#else
+/* syscall 170 has been skipped */
+#endif
+#if !defined(_LP64)
+#define __sanitizer_syscall_pre_compat_10_oshmsys(which, a2, a3, a4) \
+ __sanitizer_syscall_pre_impl_compat_10_oshmsys( \
+ (long long)(which), (long long)(a2), (long long)(a3), (long long)(a4))
+#define __sanitizer_syscall_post_compat_10_oshmsys(res, which, a2, a3, a4) \
+ __sanitizer_syscall_post_impl_compat_10_oshmsys( \
+ res, (long long)(which), (long long)(a2), (long long)(a3), \
+ (long long)(a4))
+#else
+/* syscall 171 has been skipped */
+#endif
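+/*
+ * The compat_10 osemsys/omsgsys/oshmsys hooks above exist only on 32-bit
+ * targets: _LP64 platforms never shipped these old SysV IPC multiplexor
+ * entry points, so on 64-bit builds the slots are treated as skipped.
+ */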
+/* syscall 172 has been skipped */
+#define __sanitizer_syscall_pre_pread(fd, buf, nbyte, PAD, offset) \
+ __sanitizer_syscall_pre_impl_pread((long long)(fd), (long long)(buf), \
+ (long long)(nbyte), (long long)(PAD), \
+ (long long)(offset))
+#define __sanitizer_syscall_post_pread(res, fd, buf, nbyte, PAD, offset) \
+ __sanitizer_syscall_post_impl_pread(res, (long long)(fd), (long long)(buf), \
+ (long long)(nbyte), (long long)(PAD), \
+ (long long)(offset))
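+/*
+ * The PAD argument mirrors the explicit alignment padding NetBSD inserts in
+ * the kernel argument layout before a 64-bit off_t; callers pass the pad
+ * slot through unchanged, and it carries no payload of its own.
+ */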
+#define __sanitizer_syscall_pre_pwrite(fd, buf, nbyte, PAD, offset) \
+ __sanitizer_syscall_pre_impl_pwrite((long long)(fd), (long long)(buf), \
+ (long long)(nbyte), (long long)(PAD), \
+ (long long)(offset))
+#define __sanitizer_syscall_post_pwrite(res, fd, buf, nbyte, PAD, offset) \
+ __sanitizer_syscall_post_impl_pwrite(res, (long long)(fd), (long long)(buf), \
+ (long long)(nbyte), (long long)(PAD), \
+ (long long)(offset))
+#define __sanitizer_syscall_pre_compat_30_ntp_gettime(ntvp) \
+ __sanitizer_syscall_pre_impl_compat_30_ntp_gettime((long long)(ntvp))
+#define __sanitizer_syscall_post_compat_30_ntp_gettime(res, ntvp) \
+ __sanitizer_syscall_post_impl_compat_30_ntp_gettime(res, (long long)(ntvp))
+#if defined(NTP) || !defined(_KERNEL_OPT)
+#define __sanitizer_syscall_pre_ntp_adjtime(tp) \
+ __sanitizer_syscall_pre_impl_ntp_adjtime((long long)(tp))
+#define __sanitizer_syscall_post_ntp_adjtime(res, tp) \
+ __sanitizer_syscall_post_impl_ntp_adjtime(res, (long long)(tp))
+#else
+/* syscall 176 has been skipped */
+#endif
+/* syscall 177 has been skipped */
+/* syscall 178 has been skipped */
+/* syscall 179 has been skipped */
+/* syscall 180 has been skipped */
+#define __sanitizer_syscall_pre_setgid(gid) \
+ __sanitizer_syscall_pre_impl_setgid((long long)(gid))
+#define __sanitizer_syscall_post_setgid(res, gid) \
+ __sanitizer_syscall_post_impl_setgid(res, (long long)(gid))
+#define __sanitizer_syscall_pre_setegid(egid) \
+ __sanitizer_syscall_pre_impl_setegid((long long)(egid))
+#define __sanitizer_syscall_post_setegid(res, egid) \
+ __sanitizer_syscall_post_impl_setegid(res, (long long)(egid))
+#define __sanitizer_syscall_pre_seteuid(euid) \
+ __sanitizer_syscall_pre_impl_seteuid((long long)(euid))
+#define __sanitizer_syscall_post_seteuid(res, euid) \
+ __sanitizer_syscall_post_impl_seteuid(res, (long long)(euid))
+#define __sanitizer_syscall_pre_lfs_bmapv(fsidp, blkiov, blkcnt) \
+ __sanitizer_syscall_pre_impl_lfs_bmapv( \
+ (long long)(fsidp), (long long)(blkiov), (long long)(blkcnt))
+#define __sanitizer_syscall_post_lfs_bmapv(res, fsidp, blkiov, blkcnt) \
+ __sanitizer_syscall_post_impl_lfs_bmapv( \
+ res, (long long)(fsidp), (long long)(blkiov), (long long)(blkcnt))
+#define __sanitizer_syscall_pre_lfs_markv(fsidp, blkiov, blkcnt) \
+ __sanitizer_syscall_pre_impl_lfs_markv( \
+ (long long)(fsidp), (long long)(blkiov), (long long)(blkcnt))
+#define __sanitizer_syscall_post_lfs_markv(res, fsidp, blkiov, blkcnt) \
+ __sanitizer_syscall_post_impl_lfs_markv( \
+ res, (long long)(fsidp), (long long)(blkiov), (long long)(blkcnt))
+#define __sanitizer_syscall_pre_lfs_segclean(fsidp, segment) \
+ __sanitizer_syscall_pre_impl_lfs_segclean((long long)(fsidp), \
+ (long long)(segment))
+#define __sanitizer_syscall_post_lfs_segclean(res, fsidp, segment) \
+ __sanitizer_syscall_post_impl_lfs_segclean(res, (long long)(fsidp), \
+ (long long)(segment))
+#define __sanitizer_syscall_pre_compat_50_lfs_segwait(fsidp, tv) \
+ __sanitizer_syscall_pre_impl_compat_50_lfs_segwait((long long)(fsidp), \
+ (long long)(tv))
+#define __sanitizer_syscall_post_compat_50_lfs_segwait(res, fsidp, tv) \
+ __sanitizer_syscall_post_impl_compat_50_lfs_segwait(res, (long long)(fsidp), \
+ (long long)(tv))
+#define __sanitizer_syscall_pre_compat_12_stat12(path, ub) \
+ __sanitizer_syscall_pre_impl_compat_12_stat12((long long)(path), \
+ (long long)(ub))
+#define __sanitizer_syscall_post_compat_12_stat12(res, path, ub) \
+ __sanitizer_syscall_post_impl_compat_12_stat12(res, (long long)(path), \
+ (long long)(ub))
+#define __sanitizer_syscall_pre_compat_12_fstat12(fd, sb) \
+ __sanitizer_syscall_pre_impl_compat_12_fstat12((long long)(fd), \
+ (long long)(sb))
+#define __sanitizer_syscall_post_compat_12_fstat12(res, fd, sb) \
+ __sanitizer_syscall_post_impl_compat_12_fstat12(res, (long long)(fd), \
+ (long long)(sb))
+#define __sanitizer_syscall_pre_compat_12_lstat12(path, ub) \
+ __sanitizer_syscall_pre_impl_compat_12_lstat12((long long)(path), \
+ (long long)(ub))
+#define __sanitizer_syscall_post_compat_12_lstat12(res, path, ub) \
+ __sanitizer_syscall_post_impl_compat_12_lstat12(res, (long long)(path), \
+ (long long)(ub))
+#define __sanitizer_syscall_pre_pathconf(path, name) \
+ __sanitizer_syscall_pre_impl_pathconf((long long)(path), (long long)(name))
+#define __sanitizer_syscall_post_pathconf(res, path, name) \
+ __sanitizer_syscall_post_impl_pathconf(res, (long long)(path), \
+ (long long)(name))
+#define __sanitizer_syscall_pre_fpathconf(fd, name) \
+ __sanitizer_syscall_pre_impl_fpathconf((long long)(fd), (long long)(name))
+#define __sanitizer_syscall_post_fpathconf(res, fd, name) \
+ __sanitizer_syscall_post_impl_fpathconf(res, (long long)(fd), \
+ (long long)(name))
+#define __sanitizer_syscall_pre_getsockopt2(s, level, name, val, avalsize) \
+ __sanitizer_syscall_pre_impl_getsockopt2( \
+ (long long)(s), (long long)(level), (long long)(name), (long long)(val), \
+ (long long)(avalsize))
+#define __sanitizer_syscall_post_getsockopt2(res, s, level, name, val, \
+ avalsize) \
+ __sanitizer_syscall_post_impl_getsockopt2( \
+ res, (long long)(s), (long long)(level), (long long)(name), \
+ (long long)(val), (long long)(avalsize))
+#define __sanitizer_syscall_pre_getrlimit(which, rlp) \
+ __sanitizer_syscall_pre_impl_getrlimit((long long)(which), (long long)(rlp))
+#define __sanitizer_syscall_post_getrlimit(res, which, rlp) \
+ __sanitizer_syscall_post_impl_getrlimit(res, (long long)(which), \
+ (long long)(rlp))
+#define __sanitizer_syscall_pre_setrlimit(which, rlp) \
+ __sanitizer_syscall_pre_impl_setrlimit((long long)(which), (long long)(rlp))
+#define __sanitizer_syscall_post_setrlimit(res, which, rlp) \
+ __sanitizer_syscall_post_impl_setrlimit(res, (long long)(which), \
+ (long long)(rlp))
+#define __sanitizer_syscall_pre_compat_12_getdirentries(fd, buf, count, basep) \
+ __sanitizer_syscall_pre_impl_compat_12_getdirentries( \
+ (long long)(fd), (long long)(buf), (long long)(count), \
+ (long long)(basep))
+#define __sanitizer_syscall_post_compat_12_getdirentries(res, fd, buf, count, \
+ basep) \
+ __sanitizer_syscall_post_impl_compat_12_getdirentries( \
+ res, (long long)(fd), (long long)(buf), (long long)(count), \
+ (long long)(basep))
+#define __sanitizer_syscall_pre_mmap(addr, len, prot, flags, fd, PAD, pos) \
+ __sanitizer_syscall_pre_impl_mmap( \
+ (long long)(addr), (long long)(len), (long long)(prot), \
+ (long long)(flags), (long long)(fd), (long long)(PAD), (long long)(pos))
+#define __sanitizer_syscall_post_mmap(res, addr, len, prot, flags, fd, PAD, \
+ pos) \
+ __sanitizer_syscall_post_impl_mmap( \
+ res, (long long)(addr), (long long)(len), (long long)(prot), \
+ (long long)(flags), (long long)(fd), (long long)(PAD), (long long)(pos))
+#define __sanitizer_syscall_pre___syscall(code, arg0, arg1, arg2, arg3, arg4, \
+ arg5, arg6, arg7) \
+ __sanitizer_syscall_pre_impl___syscall( \
+ (long long)(code), (long long)(arg0), (long long)(arg1), \
+ (long long)(arg2), (long long)(arg3), (long long)(arg4), \
+ (long long)(arg5), (long long)(arg6), (long long)(arg7))
+#define __sanitizer_syscall_post___syscall(res, code, arg0, arg1, arg2, arg3, \
+ arg4, arg5, arg6, arg7) \
+ __sanitizer_syscall_post_impl___syscall( \
+ res, (long long)(code), (long long)(arg0), (long long)(arg1), \
+ (long long)(arg2), (long long)(arg3), (long long)(arg4), \
+ (long long)(arg5), (long long)(arg6), (long long)(arg7))
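+/*
+ * __syscall is the indirect syscall gate: the first argument selects the
+ * syscall number and up to eight further register-sized arguments are
+ * forwarded, which is why these two hooks take the full code/arg0..arg7 set.
+ */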
+#define __sanitizer_syscall_pre_lseek(fd, PAD, offset, whence) \
+ __sanitizer_syscall_pre_impl_lseek((long long)(fd), (long long)(PAD), \
+ (long long)(offset), (long long)(whence))
+#define __sanitizer_syscall_post_lseek(res, fd, PAD, offset, whence) \
+ __sanitizer_syscall_post_impl_lseek(res, (long long)(fd), (long long)(PAD), \
+ (long long)(offset), \
+ (long long)(whence))
+#define __sanitizer_syscall_pre_truncate(path, PAD, length) \
+ __sanitizer_syscall_pre_impl_truncate((long long)(path), (long long)(PAD), \
+ (long long)(length))
+#define __sanitizer_syscall_post_truncate(res, path, PAD, length) \
+ __sanitizer_syscall_post_impl_truncate( \
+ res, (long long)(path), (long long)(PAD), (long long)(length))
+#define __sanitizer_syscall_pre_ftruncate(fd, PAD, length) \
+ __sanitizer_syscall_pre_impl_ftruncate((long long)(fd), (long long)(PAD), \
+ (long long)(length))
+#define __sanitizer_syscall_post_ftruncate(res, fd, PAD, length) \
+ __sanitizer_syscall_post_impl_ftruncate( \
+ res, (long long)(fd), (long long)(PAD), (long long)(length))
+#define __sanitizer_syscall_pre___sysctl(name, namelen, oldv, oldlenp, newv, \
+ newlen) \
+ __sanitizer_syscall_pre_impl___sysctl( \
+ (long long)(name), (long long)(namelen), (long long)(oldv), \
+ (long long)(oldlenp), (long long)(newv), (long long)(newlen))
+#define __sanitizer_syscall_post___sysctl(res, name, namelen, oldv, oldlenp, \
+ newv, newlen) \
+ __sanitizer_syscall_post_impl___sysctl( \
+ res, (long long)(name), (long long)(namelen), (long long)(oldv), \
+ (long long)(oldlenp), (long long)(newv), (long long)(newlen))
+#define __sanitizer_syscall_pre_mlock(addr, len) \
+ __sanitizer_syscall_pre_impl_mlock((long long)(addr), (long long)(len))
+#define __sanitizer_syscall_post_mlock(res, addr, len) \
+ __sanitizer_syscall_post_impl_mlock(res, (long long)(addr), (long long)(len))
+#define __sanitizer_syscall_pre_munlock(addr, len) \
+ __sanitizer_syscall_pre_impl_munlock((long long)(addr), (long long)(len))
+#define __sanitizer_syscall_post_munlock(res, addr, len) \
+ __sanitizer_syscall_post_impl_munlock(res, (long long)(addr), \
+ (long long)(len))
+#define __sanitizer_syscall_pre_undelete(path) \
+ __sanitizer_syscall_pre_impl_undelete((long long)(path))
+#define __sanitizer_syscall_post_undelete(res, path) \
+ __sanitizer_syscall_post_impl_undelete(res, (long long)(path))
+#define __sanitizer_syscall_pre_compat_50_futimes(fd, tptr) \
+ __sanitizer_syscall_pre_impl_compat_50_futimes((long long)(fd), \
+ (long long)(tptr))
+#define __sanitizer_syscall_post_compat_50_futimes(res, fd, tptr) \
+ __sanitizer_syscall_post_impl_compat_50_futimes(res, (long long)(fd), \
+ (long long)(tptr))
+#define __sanitizer_syscall_pre_getpgid(pid) \
+ __sanitizer_syscall_pre_impl_getpgid((long long)(pid))
+#define __sanitizer_syscall_post_getpgid(res, pid) \
+ __sanitizer_syscall_post_impl_getpgid(res, (long long)(pid))
+#define __sanitizer_syscall_pre_reboot(opt, bootstr) \
+ __sanitizer_syscall_pre_impl_reboot((long long)(opt), (long long)(bootstr))
+#define __sanitizer_syscall_post_reboot(res, opt, bootstr) \
+ __sanitizer_syscall_post_impl_reboot(res, (long long)(opt), \
+ (long long)(bootstr))
+#define __sanitizer_syscall_pre_poll(fds, nfds, timeout) \
+ __sanitizer_syscall_pre_impl_poll((long long)(fds), (long long)(nfds), \
+ (long long)(timeout))
+#define __sanitizer_syscall_post_poll(res, fds, nfds, timeout) \
+ __sanitizer_syscall_post_impl_poll(res, (long long)(fds), (long long)(nfds), \
+ (long long)(timeout))
+#define __sanitizer_syscall_pre_afssys(id, a1, a2, a3, a4, a5, a6) \
+ __sanitizer_syscall_pre_impl_afssys( \
+ (long long)(id), (long long)(a1), (long long)(a2), (long long)(a3), \
+ (long long)(a4), (long long)(a5), (long long)(a6))
+#define __sanitizer_syscall_post_afssys(res, id, a1, a2, a3, a4, a5, a6) \
+ __sanitizer_syscall_post_impl_afssys( \
+ res, (long long)(id), (long long)(a1), (long long)(a2), (long long)(a3), \
+ (long long)(a4), (long long)(a5), (long long)(a6))
+/* syscall 211 has been skipped */
+/* syscall 212 has been skipped */
+/* syscall 213 has been skipped */
+/* syscall 214 has been skipped */
+/* syscall 215 has been skipped */
+/* syscall 216 has been skipped */
+/* syscall 217 has been skipped */
+/* syscall 218 has been skipped */
+/* syscall 219 has been skipped */
+#define __sanitizer_syscall_pre_compat_14___semctl(semid, semnum, cmd, arg) \
+ __sanitizer_syscall_pre_impl_compat_14___semctl( \
+ (long long)(semid), (long long)(semnum), (long long)(cmd), \
+ (long long)(arg))
+#define __sanitizer_syscall_post_compat_14___semctl(res, semid, semnum, cmd, \
+ arg) \
+ __sanitizer_syscall_post_impl_compat_14___semctl( \
+ res, (long long)(semid), (long long)(semnum), (long long)(cmd), \
+ (long long)(arg))
+#define __sanitizer_syscall_pre_semget(key, nsems, semflg) \
+ __sanitizer_syscall_pre_impl_semget((long long)(key), (long long)(nsems), \
+ (long long)(semflg))
+#define __sanitizer_syscall_post_semget(res, key, nsems, semflg) \
+ __sanitizer_syscall_post_impl_semget( \
+ res, (long long)(key), (long long)(nsems), (long long)(semflg))
+#define __sanitizer_syscall_pre_semop(semid, sops, nsops) \
+ __sanitizer_syscall_pre_impl_semop((long long)(semid), (long long)(sops), \
+ (long long)(nsops))
+#define __sanitizer_syscall_post_semop(res, semid, sops, nsops) \
+ __sanitizer_syscall_post_impl_semop(res, (long long)(semid), \
+ (long long)(sops), (long long)(nsops))
+#define __sanitizer_syscall_pre_semconfig(flag) \
+ __sanitizer_syscall_pre_impl_semconfig((long long)(flag))
+#define __sanitizer_syscall_post_semconfig(res, flag) \
+ __sanitizer_syscall_post_impl_semconfig(res, (long long)(flag))
+#define __sanitizer_syscall_pre_compat_14_msgctl(msqid, cmd, buf) \
+ __sanitizer_syscall_pre_impl_compat_14_msgctl( \
+ (long long)(msqid), (long long)(cmd), (long long)(buf))
+#define __sanitizer_syscall_post_compat_14_msgctl(res, msqid, cmd, buf) \
+ __sanitizer_syscall_post_impl_compat_14_msgctl( \
+ res, (long long)(msqid), (long long)(cmd), (long long)(buf))
+#define __sanitizer_syscall_pre_msgget(key, msgflg) \
+ __sanitizer_syscall_pre_impl_msgget((long long)(key), (long long)(msgflg))
+#define __sanitizer_syscall_post_msgget(res, key, msgflg) \
+ __sanitizer_syscall_post_impl_msgget(res, (long long)(key), \
+ (long long)(msgflg))
+#define __sanitizer_syscall_pre_msgsnd(msqid, msgp, msgsz, msgflg) \
+ __sanitizer_syscall_pre_impl_msgsnd((long long)(msqid), (long long)(msgp), \
+ (long long)(msgsz), (long long)(msgflg))
+#define __sanitizer_syscall_post_msgsnd(res, msqid, msgp, msgsz, msgflg) \
+ __sanitizer_syscall_post_impl_msgsnd(res, (long long)(msqid), \
+ (long long)(msgp), (long long)(msgsz), \
+ (long long)(msgflg))
+#define __sanitizer_syscall_pre_msgrcv(msqid, msgp, msgsz, msgtyp, msgflg) \
+ __sanitizer_syscall_pre_impl_msgrcv((long long)(msqid), (long long)(msgp), \
+ (long long)(msgsz), (long long)(msgtyp), \
+ (long long)(msgflg))
+#define __sanitizer_syscall_post_msgrcv(res, msqid, msgp, msgsz, msgtyp, \
+ msgflg) \
+ __sanitizer_syscall_post_impl_msgrcv( \
+ res, (long long)(msqid), (long long)(msgp), (long long)(msgsz), \
+ (long long)(msgtyp), (long long)(msgflg))
+#define __sanitizer_syscall_pre_shmat(shmid, shmaddr, shmflg) \
+ __sanitizer_syscall_pre_impl_shmat((long long)(shmid), (long long)(shmaddr), \
+ (long long)(shmflg))
+#define __sanitizer_syscall_post_shmat(res, shmid, shmaddr, shmflg) \
+ __sanitizer_syscall_post_impl_shmat( \
+ res, (long long)(shmid), (long long)(shmaddr), (long long)(shmflg))
+#define __sanitizer_syscall_pre_compat_14_shmctl(shmid, cmd, buf) \
+ __sanitizer_syscall_pre_impl_compat_14_shmctl( \
+ (long long)(shmid), (long long)(cmd), (long long)(buf))
+#define __sanitizer_syscall_post_compat_14_shmctl(res, shmid, cmd, buf) \
+ __sanitizer_syscall_post_impl_compat_14_shmctl( \
+ res, (long long)(shmid), (long long)(cmd), (long long)(buf))
+#define __sanitizer_syscall_pre_shmdt(shmaddr) \
+ __sanitizer_syscall_pre_impl_shmdt((long long)(shmaddr))
+#define __sanitizer_syscall_post_shmdt(res, shmaddr) \
+ __sanitizer_syscall_post_impl_shmdt(res, (long long)(shmaddr))
+#define __sanitizer_syscall_pre_shmget(key, size, shmflg) \
+ __sanitizer_syscall_pre_impl_shmget((long long)(key), (long long)(size), \
+ (long long)(shmflg))
+#define __sanitizer_syscall_post_shmget(res, key, size, shmflg) \
+ __sanitizer_syscall_post_impl_shmget(res, (long long)(key), \
+ (long long)(size), (long long)(shmflg))
+#define __sanitizer_syscall_pre_compat_50_clock_gettime(clock_id, tp) \
+ __sanitizer_syscall_pre_impl_compat_50_clock_gettime((long long)(clock_id), \
+ (long long)(tp))
+#define __sanitizer_syscall_post_compat_50_clock_gettime(res, clock_id, tp) \
+ __sanitizer_syscall_post_impl_compat_50_clock_gettime( \
+ res, (long long)(clock_id), (long long)(tp))
+#define __sanitizer_syscall_pre_compat_50_clock_settime(clock_id, tp) \
+ __sanitizer_syscall_pre_impl_compat_50_clock_settime((long long)(clock_id), \
+ (long long)(tp))
+#define __sanitizer_syscall_post_compat_50_clock_settime(res, clock_id, tp) \
+ __sanitizer_syscall_post_impl_compat_50_clock_settime( \
+ res, (long long)(clock_id), (long long)(tp))
+#define __sanitizer_syscall_pre_compat_50_clock_getres(clock_id, tp) \
+ __sanitizer_syscall_pre_impl_compat_50_clock_getres((long long)(clock_id), \
+ (long long)(tp))
+#define __sanitizer_syscall_post_compat_50_clock_getres(res, clock_id, tp) \
+ __sanitizer_syscall_post_impl_compat_50_clock_getres( \
+ res, (long long)(clock_id), (long long)(tp))
+#define __sanitizer_syscall_pre_timer_create(clock_id, evp, timerid) \
+ __sanitizer_syscall_pre_impl_timer_create( \
+ (long long)(clock_id), (long long)(evp), (long long)(timerid))
+#define __sanitizer_syscall_post_timer_create(res, clock_id, evp, timerid) \
+ __sanitizer_syscall_post_impl_timer_create( \
+ res, (long long)(clock_id), (long long)(evp), (long long)(timerid))
+#define __sanitizer_syscall_pre_timer_delete(timerid) \
+ __sanitizer_syscall_pre_impl_timer_delete((long long)(timerid))
+#define __sanitizer_syscall_post_timer_delete(res, timerid) \
+ __sanitizer_syscall_post_impl_timer_delete(res, (long long)(timerid))
+#define __sanitizer_syscall_pre_compat_50_timer_settime(timerid, flags, value, \
+ ovalue) \
+ __sanitizer_syscall_pre_impl_compat_50_timer_settime( \
+ (long long)(timerid), (long long)(flags), (long long)(value), \
+ (long long)(ovalue))
+#define __sanitizer_syscall_post_compat_50_timer_settime(res, timerid, flags, \
+ value, ovalue) \
+ __sanitizer_syscall_post_impl_compat_50_timer_settime( \
+ res, (long long)(timerid), (long long)(flags), (long long)(value), \
+ (long long)(ovalue))
+#define __sanitizer_syscall_pre_compat_50_timer_gettime(timerid, value) \
+ __sanitizer_syscall_pre_impl_compat_50_timer_gettime((long long)(timerid), \
+ (long long)(value))
+#define __sanitizer_syscall_post_compat_50_timer_gettime(res, timerid, value) \
+ __sanitizer_syscall_post_impl_compat_50_timer_gettime( \
+ res, (long long)(timerid), (long long)(value))
+#define __sanitizer_syscall_pre_timer_getoverrun(timerid) \
+ __sanitizer_syscall_pre_impl_timer_getoverrun((long long)(timerid))
+#define __sanitizer_syscall_post_timer_getoverrun(res, timerid) \
+ __sanitizer_syscall_post_impl_timer_getoverrun(res, (long long)(timerid))
+#define __sanitizer_syscall_pre_compat_50_nanosleep(rqtp, rmtp) \
+ __sanitizer_syscall_pre_impl_compat_50_nanosleep((long long)(rqtp), \
+ (long long)(rmtp))
+#define __sanitizer_syscall_post_compat_50_nanosleep(res, rqtp, rmtp) \
+ __sanitizer_syscall_post_impl_compat_50_nanosleep(res, (long long)(rqtp), \
+ (long long)(rmtp))
+#define __sanitizer_syscall_pre_fdatasync(fd) \
+ __sanitizer_syscall_pre_impl_fdatasync((long long)(fd))
+#define __sanitizer_syscall_post_fdatasync(res, fd) \
+ __sanitizer_syscall_post_impl_fdatasync(res, (long long)(fd))
+#define __sanitizer_syscall_pre_mlockall(flags) \
+ __sanitizer_syscall_pre_impl_mlockall((long long)(flags))
+#define __sanitizer_syscall_post_mlockall(res, flags) \
+ __sanitizer_syscall_post_impl_mlockall(res, (long long)(flags))
+#define __sanitizer_syscall_pre_munlockall() \
+ __sanitizer_syscall_pre_impl_munlockall()
+#define __sanitizer_syscall_post_munlockall(res) \
+ __sanitizer_syscall_post_impl_munlockall(res)
+#define __sanitizer_syscall_pre_compat_50___sigtimedwait(set, info, timeout) \
+ __sanitizer_syscall_pre_impl_compat_50___sigtimedwait( \
+ (long long)(set), (long long)(info), (long long)(timeout))
+#define __sanitizer_syscall_post_compat_50___sigtimedwait(res, set, info, \
+ timeout) \
+ __sanitizer_syscall_post_impl_compat_50___sigtimedwait( \
+ res, (long long)(set), (long long)(info), (long long)(timeout))
+#define __sanitizer_syscall_pre_sigqueueinfo(pid, info) \
+ __sanitizer_syscall_pre_impl_sigqueueinfo((long long)(pid), (long long)(info))
+#define __sanitizer_syscall_post_sigqueueinfo(res, pid, info) \
+ __sanitizer_syscall_post_impl_sigqueueinfo(res, (long long)(pid), \
+ (long long)(info))
+#define __sanitizer_syscall_pre_modctl(cmd, arg) \
+ __sanitizer_syscall_pre_impl_modctl((long long)(cmd), (long long)(arg))
+#define __sanitizer_syscall_post_modctl(res, cmd, arg) \
+ __sanitizer_syscall_post_impl_modctl(res, (long long)(cmd), (long long)(arg))
+#define __sanitizer_syscall_pre__ksem_init(value, idp) \
+ __sanitizer_syscall_pre_impl__ksem_init((long long)(value), (long long)(idp))
+#define __sanitizer_syscall_post__ksem_init(res, value, idp) \
+ __sanitizer_syscall_post_impl__ksem_init(res, (long long)(value), \
+ (long long)(idp))
+#define __sanitizer_syscall_pre__ksem_open(name, oflag, mode, value, idp) \
+ __sanitizer_syscall_pre_impl__ksem_open( \
+ (long long)(name), (long long)(oflag), (long long)(mode), \
+ (long long)(value), (long long)(idp))
+#define __sanitizer_syscall_post__ksem_open(res, name, oflag, mode, value, \
+ idp) \
+ __sanitizer_syscall_post_impl__ksem_open( \
+ res, (long long)(name), (long long)(oflag), (long long)(mode), \
+ (long long)(value), (long long)(idp))
+#define __sanitizer_syscall_pre__ksem_unlink(name) \
+ __sanitizer_syscall_pre_impl__ksem_unlink((long long)(name))
+#define __sanitizer_syscall_post__ksem_unlink(res, name) \
+ __sanitizer_syscall_post_impl__ksem_unlink(res, (long long)(name))
+#define __sanitizer_syscall_pre__ksem_close(id) \
+ __sanitizer_syscall_pre_impl__ksem_close((long long)(id))
+#define __sanitizer_syscall_post__ksem_close(res, id) \
+ __sanitizer_syscall_post_impl__ksem_close(res, (long long)(id))
+#define __sanitizer_syscall_pre__ksem_post(id) \
+ __sanitizer_syscall_pre_impl__ksem_post((long long)(id))
+#define __sanitizer_syscall_post__ksem_post(res, id) \
+ __sanitizer_syscall_post_impl__ksem_post(res, (long long)(id))
+#define __sanitizer_syscall_pre__ksem_wait(id) \
+ __sanitizer_syscall_pre_impl__ksem_wait((long long)(id))
+#define __sanitizer_syscall_post__ksem_wait(res, id) \
+ __sanitizer_syscall_post_impl__ksem_wait(res, (long long)(id))
+#define __sanitizer_syscall_pre__ksem_trywait(id) \
+ __sanitizer_syscall_pre_impl__ksem_trywait((long long)(id))
+#define __sanitizer_syscall_post__ksem_trywait(res, id) \
+ __sanitizer_syscall_post_impl__ksem_trywait(res, (long long)(id))
+#define __sanitizer_syscall_pre__ksem_getvalue(id, value) \
+ __sanitizer_syscall_pre_impl__ksem_getvalue((long long)(id), \
+ (long long)(value))
+#define __sanitizer_syscall_post__ksem_getvalue(res, id, value) \
+ __sanitizer_syscall_post_impl__ksem_getvalue(res, (long long)(id), \
+ (long long)(value))
+#define __sanitizer_syscall_pre__ksem_destroy(id) \
+ __sanitizer_syscall_pre_impl__ksem_destroy((long long)(id))
+#define __sanitizer_syscall_post__ksem_destroy(res, id) \
+ __sanitizer_syscall_post_impl__ksem_destroy(res, (long long)(id))
+#define __sanitizer_syscall_pre__ksem_timedwait(id, abstime) \
+ __sanitizer_syscall_pre_impl__ksem_timedwait((long long)(id), \
+ (long long)(abstime))
+#define __sanitizer_syscall_post__ksem_timedwait(res, id, abstime) \
+ __sanitizer_syscall_post_impl__ksem_timedwait(res, (long long)(id), \
+ (long long)(abstime))
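+/*
+ * The _ksem_* hooks cover NetBSD's kernel-backed semaphore primitives, on
+ * top of which the userland POSIX semaphore API (sem_open(3), sem_wait(3),
+ * and friends) is built.
+ */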
+#define __sanitizer_syscall_pre_mq_open(name, oflag, mode, attr) \
+ __sanitizer_syscall_pre_impl_mq_open((long long)(name), (long long)(oflag), \
+ (long long)(mode), (long long)(attr))
+#define __sanitizer_syscall_post_mq_open(res, name, oflag, mode, attr) \
+ __sanitizer_syscall_post_impl_mq_open(res, (long long)(name), \
+ (long long)(oflag), (long long)(mode), \
+ (long long)(attr))
+#define __sanitizer_syscall_pre_mq_close(mqdes) \
+ __sanitizer_syscall_pre_impl_mq_close((long long)(mqdes))
+#define __sanitizer_syscall_post_mq_close(res, mqdes) \
+ __sanitizer_syscall_post_impl_mq_close(res, (long long)(mqdes))
+#define __sanitizer_syscall_pre_mq_unlink(name) \
+ __sanitizer_syscall_pre_impl_mq_unlink((long long)(name))
+#define __sanitizer_syscall_post_mq_unlink(res, name) \
+ __sanitizer_syscall_post_impl_mq_unlink(res, (long long)(name))
+#define __sanitizer_syscall_pre_mq_getattr(mqdes, mqstat) \
+ __sanitizer_syscall_pre_impl_mq_getattr((long long)(mqdes), \
+ (long long)(mqstat))
+#define __sanitizer_syscall_post_mq_getattr(res, mqdes, mqstat) \
+ __sanitizer_syscall_post_impl_mq_getattr(res, (long long)(mqdes), \
+ (long long)(mqstat))
+#define __sanitizer_syscall_pre_mq_setattr(mqdes, mqstat, omqstat) \
+ __sanitizer_syscall_pre_impl_mq_setattr( \
+ (long long)(mqdes), (long long)(mqstat), (long long)(omqstat))
+#define __sanitizer_syscall_post_mq_setattr(res, mqdes, mqstat, omqstat) \
+ __sanitizer_syscall_post_impl_mq_setattr( \
+ res, (long long)(mqdes), (long long)(mqstat), (long long)(omqstat))
+#define __sanitizer_syscall_pre_mq_notify(mqdes, notification) \
+ __sanitizer_syscall_pre_impl_mq_notify((long long)(mqdes), \
+ (long long)(notification))
+#define __sanitizer_syscall_post_mq_notify(res, mqdes, notification) \
+ __sanitizer_syscall_post_impl_mq_notify(res, (long long)(mqdes), \
+ (long long)(notification))
+#define __sanitizer_syscall_pre_mq_send(mqdes, msg_ptr, msg_len, msg_prio) \
+ __sanitizer_syscall_pre_impl_mq_send( \
+ (long long)(mqdes), (long long)(msg_ptr), (long long)(msg_len), \
+ (long long)(msg_prio))
+#define __sanitizer_syscall_post_mq_send(res, mqdes, msg_ptr, msg_len, \
+ msg_prio) \
+ __sanitizer_syscall_post_impl_mq_send( \
+ res, (long long)(mqdes), (long long)(msg_ptr), (long long)(msg_len), \
+ (long long)(msg_prio))
+#define __sanitizer_syscall_pre_mq_receive(mqdes, msg_ptr, msg_len, msg_prio) \
+ __sanitizer_syscall_pre_impl_mq_receive( \
+ (long long)(mqdes), (long long)(msg_ptr), (long long)(msg_len), \
+ (long long)(msg_prio))
+#define __sanitizer_syscall_post_mq_receive(res, mqdes, msg_ptr, msg_len, \
+ msg_prio) \
+ __sanitizer_syscall_post_impl_mq_receive( \
+ res, (long long)(mqdes), (long long)(msg_ptr), (long long)(msg_len), \
+ (long long)(msg_prio))
+#define __sanitizer_syscall_pre_compat_50_mq_timedsend( \
+ mqdes, msg_ptr, msg_len, msg_prio, abs_timeout) \
+ __sanitizer_syscall_pre_impl_compat_50_mq_timedsend( \
+ (long long)(mqdes), (long long)(msg_ptr), (long long)(msg_len), \
+ (long long)(msg_prio), (long long)(abs_timeout))
+#define __sanitizer_syscall_post_compat_50_mq_timedsend( \
+ res, mqdes, msg_ptr, msg_len, msg_prio, abs_timeout) \
+ __sanitizer_syscall_post_impl_compat_50_mq_timedsend( \
+ res, (long long)(mqdes), (long long)(msg_ptr), (long long)(msg_len), \
+ (long long)(msg_prio), (long long)(abs_timeout))
+#define __sanitizer_syscall_pre_compat_50_mq_timedreceive( \
+ mqdes, msg_ptr, msg_len, msg_prio, abs_timeout) \
+ __sanitizer_syscall_pre_impl_compat_50_mq_timedreceive( \
+ (long long)(mqdes), (long long)(msg_ptr), (long long)(msg_len), \
+ (long long)(msg_prio), (long long)(abs_timeout))
+#define __sanitizer_syscall_post_compat_50_mq_timedreceive( \
+ res, mqdes, msg_ptr, msg_len, msg_prio, abs_timeout) \
+ __sanitizer_syscall_post_impl_compat_50_mq_timedreceive( \
+ res, (long long)(mqdes), (long long)(msg_ptr), (long long)(msg_len), \
+ (long long)(msg_prio), (long long)(abs_timeout))
+/* syscall 267 has been skipped */
+/* syscall 268 has been skipped */
+/* syscall 269 has been skipped */
+#define __sanitizer_syscall_pre___posix_rename(from, to) \
+ __sanitizer_syscall_pre_impl___posix_rename((long long)(from), \
+ (long long)(to))
+#define __sanitizer_syscall_post___posix_rename(res, from, to) \
+ __sanitizer_syscall_post_impl___posix_rename(res, (long long)(from), \
+ (long long)(to))
+#define __sanitizer_syscall_pre_swapctl(cmd, arg, misc) \
+ __sanitizer_syscall_pre_impl_swapctl((long long)(cmd), (long long)(arg), \
+ (long long)(misc))
+#define __sanitizer_syscall_post_swapctl(res, cmd, arg, misc) \
+ __sanitizer_syscall_post_impl_swapctl(res, (long long)(cmd), \
+ (long long)(arg), (long long)(misc))
+#define __sanitizer_syscall_pre_compat_30_getdents(fd, buf, count) \
+ __sanitizer_syscall_pre_impl_compat_30_getdents( \
+ (long long)(fd), (long long)(buf), (long long)(count))
+#define __sanitizer_syscall_post_compat_30_getdents(res, fd, buf, count) \
+ __sanitizer_syscall_post_impl_compat_30_getdents( \
+ res, (long long)(fd), (long long)(buf), (long long)(count))
+#define __sanitizer_syscall_pre_minherit(addr, len, inherit) \
+ __sanitizer_syscall_pre_impl_minherit((long long)(addr), (long long)(len), \
+ (long long)(inherit))
+#define __sanitizer_syscall_post_minherit(res, addr, len, inherit) \
+ __sanitizer_syscall_post_impl_minherit( \
+ res, (long long)(addr), (long long)(len), (long long)(inherit))
+#define __sanitizer_syscall_pre_lchmod(path, mode) \
+ __sanitizer_syscall_pre_impl_lchmod((long long)(path), (long long)(mode))
+#define __sanitizer_syscall_post_lchmod(res, path, mode) \
+ __sanitizer_syscall_post_impl_lchmod(res, (long long)(path), \
+ (long long)(mode))
+#define __sanitizer_syscall_pre_lchown(path, uid, gid) \
+ __sanitizer_syscall_pre_impl_lchown((long long)(path), (long long)(uid), \
+ (long long)(gid))
+#define __sanitizer_syscall_post_lchown(res, path, uid, gid) \
+ __sanitizer_syscall_post_impl_lchown(res, (long long)(path), \
+ (long long)(uid), (long long)(gid))
+#define __sanitizer_syscall_pre_compat_50_lutimes(path, tptr) \
+ __sanitizer_syscall_pre_impl_compat_50_lutimes((long long)(path), \
+ (long long)(tptr))
+#define __sanitizer_syscall_post_compat_50_lutimes(res, path, tptr) \
+ __sanitizer_syscall_post_impl_compat_50_lutimes(res, (long long)(path), \
+ (long long)(tptr))
+#define __sanitizer_syscall_pre___msync13(addr, len, flags) \
+ __sanitizer_syscall_pre_impl___msync13((long long)(addr), (long long)(len), \
+ (long long)(flags))
+#define __sanitizer_syscall_post___msync13(res, addr, len, flags) \
+ __sanitizer_syscall_post_impl___msync13( \
+ res, (long long)(addr), (long long)(len), (long long)(flags))
+#define __sanitizer_syscall_pre_compat_30___stat13(path, ub) \
+ __sanitizer_syscall_pre_impl_compat_30___stat13((long long)(path), \
+ (long long)(ub))
+#define __sanitizer_syscall_post_compat_30___stat13(res, path, ub) \
+ __sanitizer_syscall_post_impl_compat_30___stat13(res, (long long)(path), \
+ (long long)(ub))
+#define __sanitizer_syscall_pre_compat_30___fstat13(fd, sb) \
+ __sanitizer_syscall_pre_impl_compat_30___fstat13((long long)(fd), \
+ (long long)(sb))
+#define __sanitizer_syscall_post_compat_30___fstat13(res, fd, sb) \
+ __sanitizer_syscall_post_impl_compat_30___fstat13(res, (long long)(fd), \
+ (long long)(sb))
+#define __sanitizer_syscall_pre_compat_30___lstat13(path, ub) \
+ __sanitizer_syscall_pre_impl_compat_30___lstat13((long long)(path), \
+ (long long)(ub))
+#define __sanitizer_syscall_post_compat_30___lstat13(res, path, ub) \
+ __sanitizer_syscall_post_impl_compat_30___lstat13(res, (long long)(path), \
+ (long long)(ub))
+#define __sanitizer_syscall_pre___sigaltstack14(nss, oss) \
+ __sanitizer_syscall_pre_impl___sigaltstack14((long long)(nss), \
+ (long long)(oss))
+#define __sanitizer_syscall_post___sigaltstack14(res, nss, oss) \
+ __sanitizer_syscall_post_impl___sigaltstack14(res, (long long)(nss), \
+ (long long)(oss))
+#define __sanitizer_syscall_pre___vfork14() \
+ __sanitizer_syscall_pre_impl___vfork14()
+#define __sanitizer_syscall_post___vfork14(res) \
+ __sanitizer_syscall_post_impl___vfork14(res)
+#define __sanitizer_syscall_pre___posix_chown(path, uid, gid) \
+ __sanitizer_syscall_pre_impl___posix_chown( \
+ (long long)(path), (long long)(uid), (long long)(gid))
+#define __sanitizer_syscall_post___posix_chown(res, path, uid, gid) \
+ __sanitizer_syscall_post_impl___posix_chown( \
+ res, (long long)(path), (long long)(uid), (long long)(gid))
+#define __sanitizer_syscall_pre___posix_fchown(fd, uid, gid) \
+ __sanitizer_syscall_pre_impl___posix_fchown( \
+ (long long)(fd), (long long)(uid), (long long)(gid))
+#define __sanitizer_syscall_post___posix_fchown(res, fd, uid, gid) \
+ __sanitizer_syscall_post_impl___posix_fchown( \
+ res, (long long)(fd), (long long)(uid), (long long)(gid))
+#define __sanitizer_syscall_pre___posix_lchown(path, uid, gid) \
+ __sanitizer_syscall_pre_impl___posix_lchown( \
+ (long long)(path), (long long)(uid), (long long)(gid))
+#define __sanitizer_syscall_post___posix_lchown(res, path, uid, gid) \
+ __sanitizer_syscall_post_impl___posix_lchown( \
+ res, (long long)(path), (long long)(uid), (long long)(gid))
+#define __sanitizer_syscall_pre_getsid(pid) \
+ __sanitizer_syscall_pre_impl_getsid((long long)(pid))
+#define __sanitizer_syscall_post_getsid(res, pid) \
+ __sanitizer_syscall_post_impl_getsid(res, (long long)(pid))
+#define __sanitizer_syscall_pre___clone(flags, stack) \
+ __sanitizer_syscall_pre_impl___clone((long long)(flags), (long long)(stack))
+#define __sanitizer_syscall_post___clone(res, flags, stack) \
+ __sanitizer_syscall_post_impl___clone(res, (long long)(flags), \
+ (long long)(stack))
+#define __sanitizer_syscall_pre_fktrace(fd, ops, facs, pid) \
+ __sanitizer_syscall_pre_impl_fktrace((long long)(fd), (long long)(ops), \
+ (long long)(facs), (long long)(pid))
+#define __sanitizer_syscall_post_fktrace(res, fd, ops, facs, pid) \
+ __sanitizer_syscall_post_impl_fktrace(res, (long long)(fd), \
+ (long long)(ops), (long long)(facs), \
+ (long long)(pid))
+#define __sanitizer_syscall_pre_preadv(fd, iovp, iovcnt, PAD, offset) \
+ __sanitizer_syscall_pre_impl_preadv((long long)(fd), (long long)(iovp), \
+ (long long)(iovcnt), (long long)(PAD), \
+ (long long)(offset))
+#define __sanitizer_syscall_post_preadv(res, fd, iovp, iovcnt, PAD, offset) \
+ __sanitizer_syscall_post_impl_preadv(res, (long long)(fd), \
+ (long long)(iovp), (long long)(iovcnt), \
+ (long long)(PAD), (long long)(offset))
+#define __sanitizer_syscall_pre_pwritev(fd, iovp, iovcnt, PAD, offset) \
+ __sanitizer_syscall_pre_impl_pwritev((long long)(fd), (long long)(iovp), \
+ (long long)(iovcnt), (long long)(PAD), \
+ (long long)(offset))
+#define __sanitizer_syscall_post_pwritev(res, fd, iovp, iovcnt, PAD, offset) \
+ __sanitizer_syscall_post_impl_pwritev( \
+ res, (long long)(fd), (long long)(iovp), (long long)(iovcnt), \
+ (long long)(PAD), (long long)(offset))
+#define __sanitizer_syscall_pre_compat_16___sigaction14(signum, nsa, osa) \
+ __sanitizer_syscall_pre_impl_compat_16___sigaction14( \
+ (long long)(signum), (long long)(nsa), (long long)(osa))
+#define __sanitizer_syscall_post_compat_16___sigaction14(res, signum, nsa, \
+ osa) \
+ __sanitizer_syscall_post_impl_compat_16___sigaction14( \
+ res, (long long)(signum), (long long)(nsa), (long long)(osa))
+#define __sanitizer_syscall_pre___sigpending14(set) \
+ __sanitizer_syscall_pre_impl___sigpending14((long long)(set))
+#define __sanitizer_syscall_post___sigpending14(res, set) \
+ __sanitizer_syscall_post_impl___sigpending14(res, (long long)(set))
+#define __sanitizer_syscall_pre___sigprocmask14(how, set, oset) \
+ __sanitizer_syscall_pre_impl___sigprocmask14( \
+ (long long)(how), (long long)(set), (long long)(oset))
+#define __sanitizer_syscall_post___sigprocmask14(res, how, set, oset) \
+ __sanitizer_syscall_post_impl___sigprocmask14( \
+ res, (long long)(how), (long long)(set), (long long)(oset))
+#define __sanitizer_syscall_pre___sigsuspend14(set) \
+ __sanitizer_syscall_pre_impl___sigsuspend14((long long)(set))
+#define __sanitizer_syscall_post___sigsuspend14(res, set) \
+ __sanitizer_syscall_post_impl___sigsuspend14(res, (long long)(set))
+#define __sanitizer_syscall_pre_compat_16___sigreturn14(sigcntxp) \
+ __sanitizer_syscall_pre_impl_compat_16___sigreturn14((long long)(sigcntxp))
+#define __sanitizer_syscall_post_compat_16___sigreturn14(res, sigcntxp) \
+ __sanitizer_syscall_post_impl_compat_16___sigreturn14(res, \
+ (long long)(sigcntxp))
+#define __sanitizer_syscall_pre___getcwd(bufp, length) \
+ __sanitizer_syscall_pre_impl___getcwd((long long)(bufp), (long long)(length))
+#define __sanitizer_syscall_post___getcwd(res, bufp, length) \
+ __sanitizer_syscall_post_impl___getcwd(res, (long long)(bufp), \
+ (long long)(length))
+#define __sanitizer_syscall_pre_fchroot(fd) \
+ __sanitizer_syscall_pre_impl_fchroot((long long)(fd))
+#define __sanitizer_syscall_post_fchroot(res, fd) \
+ __sanitizer_syscall_post_impl_fchroot(res, (long long)(fd))
+#define __sanitizer_syscall_pre_compat_30_fhopen(fhp, flags) \
+ __sanitizer_syscall_pre_impl_compat_30_fhopen((long long)(fhp), \
+ (long long)(flags))
+#define __sanitizer_syscall_post_compat_30_fhopen(res, fhp, flags) \
+ __sanitizer_syscall_post_impl_compat_30_fhopen(res, (long long)(fhp), \
+ (long long)(flags))
+#define __sanitizer_syscall_pre_compat_30_fhstat(fhp, sb) \
+ __sanitizer_syscall_pre_impl_compat_30_fhstat((long long)(fhp), \
+ (long long)(sb))
+#define __sanitizer_syscall_post_compat_30_fhstat(res, fhp, sb) \
+ __sanitizer_syscall_post_impl_compat_30_fhstat(res, (long long)(fhp), \
+ (long long)(sb))
+#define __sanitizer_syscall_pre_compat_20_fhstatfs(fhp, buf) \
+ __sanitizer_syscall_pre_impl_compat_20_fhstatfs((long long)(fhp), \
+ (long long)(buf))
+#define __sanitizer_syscall_post_compat_20_fhstatfs(res, fhp, buf) \
+ __sanitizer_syscall_post_impl_compat_20_fhstatfs(res, (long long)(fhp), \
+ (long long)(buf))
+#define __sanitizer_syscall_pre_compat_50_____semctl13(semid, semnum, cmd, \
+ arg) \
+ __sanitizer_syscall_pre_impl_compat_50_____semctl13( \
+ (long long)(semid), (long long)(semnum), (long long)(cmd), \
+ (long long)(arg))
+#define __sanitizer_syscall_post_compat_50_____semctl13(res, semid, semnum, \
+ cmd, arg) \
+ __sanitizer_syscall_post_impl_compat_50_____semctl13( \
+ res, (long long)(semid), (long long)(semnum), (long long)(cmd), \
+ (long long)(arg))
+#define __sanitizer_syscall_pre_compat_50___msgctl13(msqid, cmd, buf) \
+ __sanitizer_syscall_pre_impl_compat_50___msgctl13( \
+ (long long)(msqid), (long long)(cmd), (long long)(buf))
+#define __sanitizer_syscall_post_compat_50___msgctl13(res, msqid, cmd, buf) \
+ __sanitizer_syscall_post_impl_compat_50___msgctl13( \
+ res, (long long)(msqid), (long long)(cmd), (long long)(buf))
+#define __sanitizer_syscall_pre_compat_50___shmctl13(shmid, cmd, buf) \
+ __sanitizer_syscall_pre_impl_compat_50___shmctl13( \
+ (long long)(shmid), (long long)(cmd), (long long)(buf))
+#define __sanitizer_syscall_post_compat_50___shmctl13(res, shmid, cmd, buf) \
+ __sanitizer_syscall_post_impl_compat_50___shmctl13( \
+ res, (long long)(shmid), (long long)(cmd), (long long)(buf))
+#define __sanitizer_syscall_pre_lchflags(path, flags) \
+ __sanitizer_syscall_pre_impl_lchflags((long long)(path), (long long)(flags))
+#define __sanitizer_syscall_post_lchflags(res, path, flags) \
+ __sanitizer_syscall_post_impl_lchflags(res, (long long)(path), \
+ (long long)(flags))
+#define __sanitizer_syscall_pre_issetugid() \
+ __sanitizer_syscall_pre_impl_issetugid()
+#define __sanitizer_syscall_post_issetugid(res) \
+ __sanitizer_syscall_post_impl_issetugid(res)
+#define __sanitizer_syscall_pre_utrace(label, addr, len) \
+ __sanitizer_syscall_pre_impl_utrace((long long)(label), (long long)(addr), \
+ (long long)(len))
+#define __sanitizer_syscall_post_utrace(res, label, addr, len) \
+ __sanitizer_syscall_post_impl_utrace(res, (long long)(label), \
+ (long long)(addr), (long long)(len))
+#define __sanitizer_syscall_pre_getcontext(ucp) \
+ __sanitizer_syscall_pre_impl_getcontext((long long)(ucp))
+#define __sanitizer_syscall_post_getcontext(res, ucp) \
+ __sanitizer_syscall_post_impl_getcontext(res, (long long)(ucp))
+#define __sanitizer_syscall_pre_setcontext(ucp) \
+ __sanitizer_syscall_pre_impl_setcontext((long long)(ucp))
+#define __sanitizer_syscall_post_setcontext(res, ucp) \
+ __sanitizer_syscall_post_impl_setcontext(res, (long long)(ucp))
+#define __sanitizer_syscall_pre__lwp_create(ucp, flags, new_lwp) \
+ __sanitizer_syscall_pre_impl__lwp_create( \
+ (long long)(ucp), (long long)(flags), (long long)(new_lwp))
+#define __sanitizer_syscall_post__lwp_create(res, ucp, flags, new_lwp) \
+ __sanitizer_syscall_post_impl__lwp_create( \
+ res, (long long)(ucp), (long long)(flags), (long long)(new_lwp))
+#define __sanitizer_syscall_pre__lwp_exit() \
+ __sanitizer_syscall_pre_impl__lwp_exit()
+#define __sanitizer_syscall_post__lwp_exit(res) \
+ __sanitizer_syscall_post_impl__lwp_exit(res)
+#define __sanitizer_syscall_pre__lwp_self() \
+ __sanitizer_syscall_pre_impl__lwp_self()
+#define __sanitizer_syscall_post__lwp_self(res) \
+ __sanitizer_syscall_post_impl__lwp_self(res)
+#define __sanitizer_syscall_pre__lwp_wait(wait_for, departed) \
+ __sanitizer_syscall_pre_impl__lwp_wait((long long)(wait_for), \
+ (long long)(departed))
+#define __sanitizer_syscall_post__lwp_wait(res, wait_for, departed) \
+ __sanitizer_syscall_post_impl__lwp_wait(res, (long long)(wait_for), \
+ (long long)(departed))
+#define __sanitizer_syscall_pre__lwp_suspend(target) \
+ __sanitizer_syscall_pre_impl__lwp_suspend((long long)(target))
+#define __sanitizer_syscall_post__lwp_suspend(res, target) \
+ __sanitizer_syscall_post_impl__lwp_suspend(res, (long long)(target))
+#define __sanitizer_syscall_pre__lwp_continue(target) \
+ __sanitizer_syscall_pre_impl__lwp_continue((long long)(target))
+#define __sanitizer_syscall_post__lwp_continue(res, target) \
+ __sanitizer_syscall_post_impl__lwp_continue(res, (long long)(target))
+#define __sanitizer_syscall_pre__lwp_wakeup(target) \
+ __sanitizer_syscall_pre_impl__lwp_wakeup((long long)(target))
+#define __sanitizer_syscall_post__lwp_wakeup(res, target) \
+ __sanitizer_syscall_post_impl__lwp_wakeup(res, (long long)(target))
+#define __sanitizer_syscall_pre__lwp_getprivate() \
+ __sanitizer_syscall_pre_impl__lwp_getprivate()
+#define __sanitizer_syscall_post__lwp_getprivate(res) \
+ __sanitizer_syscall_post_impl__lwp_getprivate(res)
+#define __sanitizer_syscall_pre__lwp_setprivate(ptr) \
+ __sanitizer_syscall_pre_impl__lwp_setprivate((long long)(ptr))
+#define __sanitizer_syscall_post__lwp_setprivate(res, ptr) \
+ __sanitizer_syscall_post_impl__lwp_setprivate(res, (long long)(ptr))
+#define __sanitizer_syscall_pre__lwp_kill(target, signo) \
+ __sanitizer_syscall_pre_impl__lwp_kill((long long)(target), \
+ (long long)(signo))
+#define __sanitizer_syscall_post__lwp_kill(res, target, signo) \
+ __sanitizer_syscall_post_impl__lwp_kill(res, (long long)(target), \
+ (long long)(signo))
+#define __sanitizer_syscall_pre__lwp_detach(target) \
+ __sanitizer_syscall_pre_impl__lwp_detach((long long)(target))
+#define __sanitizer_syscall_post__lwp_detach(res, target) \
+ __sanitizer_syscall_post_impl__lwp_detach(res, (long long)(target))
+#define __sanitizer_syscall_pre_compat_50__lwp_park(ts, unpark, hint, \
+ unparkhint) \
+ __sanitizer_syscall_pre_impl_compat_50__lwp_park( \
+ (long long)(ts), (long long)(unpark), (long long)(hint), \
+ (long long)(unparkhint))
+#define __sanitizer_syscall_post_compat_50__lwp_park(res, ts, unpark, hint, \
+ unparkhint) \
+ __sanitizer_syscall_post_impl_compat_50__lwp_park( \
+ res, (long long)(ts), (long long)(unpark), (long long)(hint), \
+ (long long)(unparkhint))
+#define __sanitizer_syscall_pre__lwp_unpark(target, hint) \
+ __sanitizer_syscall_pre_impl__lwp_unpark((long long)(target), \
+ (long long)(hint))
+#define __sanitizer_syscall_post__lwp_unpark(res, target, hint) \
+ __sanitizer_syscall_post_impl__lwp_unpark(res, (long long)(target), \
+ (long long)(hint))
+#define __sanitizer_syscall_pre__lwp_unpark_all(targets, ntargets, hint) \
+ __sanitizer_syscall_pre_impl__lwp_unpark_all( \
+ (long long)(targets), (long long)(ntargets), (long long)(hint))
+#define __sanitizer_syscall_post__lwp_unpark_all(res, targets, ntargets, hint) \
+ __sanitizer_syscall_post_impl__lwp_unpark_all( \
+ res, (long long)(targets), (long long)(ntargets), (long long)(hint))
+#define __sanitizer_syscall_pre__lwp_setname(target, name) \
+ __sanitizer_syscall_pre_impl__lwp_setname((long long)(target), \
+ (long long)(name))
+#define __sanitizer_syscall_post__lwp_setname(res, target, name) \
+ __sanitizer_syscall_post_impl__lwp_setname(res, (long long)(target), \
+ (long long)(name))
+#define __sanitizer_syscall_pre__lwp_getname(target, name, len) \
+ __sanitizer_syscall_pre_impl__lwp_getname( \
+ (long long)(target), (long long)(name), (long long)(len))
+#define __sanitizer_syscall_post__lwp_getname(res, target, name, len) \
+ __sanitizer_syscall_post_impl__lwp_getname( \
+ res, (long long)(target), (long long)(name), (long long)(len))
+#define __sanitizer_syscall_pre__lwp_ctl(features, address) \
+ __sanitizer_syscall_pre_impl__lwp_ctl((long long)(features), \
+ (long long)(address))
+#define __sanitizer_syscall_post__lwp_ctl(res, features, address) \
+ __sanitizer_syscall_post_impl__lwp_ctl(res, (long long)(features), \
+ (long long)(address))
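+/*
+ * The _lwp_* hooks track NetBSD's native light-weight process (kernel
+ * thread) syscalls, the primitives underneath libpthread.
+ */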
+/* syscall 326 has been skipped */
+/* syscall 327 has been skipped */
+/* syscall 328 has been skipped */
+/* syscall 329 has been skipped */
+#define __sanitizer_syscall_pre_compat_60_sa_register(newv, oldv, flags, \
+ stackinfo_offset) \
+ __sanitizer_syscall_pre_impl_compat_60_sa_register( \
+ (long long)(newv), (long long)(oldv), (long long)(flags), \
+ (long long)(stackinfo_offset))
+#define __sanitizer_syscall_post_compat_60_sa_register(res, newv, oldv, flags, \
+ stackinfo_offset) \
+ __sanitizer_syscall_post_impl_compat_60_sa_register( \
+ res, (long long)(newv), (long long)(oldv), (long long)(flags), \
+ (long long)(stackinfo_offset))
+#define __sanitizer_syscall_pre_compat_60_sa_stacks(num, stacks) \
+ __sanitizer_syscall_pre_impl_compat_60_sa_stacks((long long)(num), \
+ (long long)(stacks))
+#define __sanitizer_syscall_post_compat_60_sa_stacks(res, num, stacks) \
+ __sanitizer_syscall_post_impl_compat_60_sa_stacks(res, (long long)(num), \
+ (long long)(stacks))
+#define __sanitizer_syscall_pre_compat_60_sa_enable() \
+ __sanitizer_syscall_pre_impl_compat_60_sa_enable()
+#define __sanitizer_syscall_post_compat_60_sa_enable(res) \
+ __sanitizer_syscall_post_impl_compat_60_sa_enable(res)
+#define __sanitizer_syscall_pre_compat_60_sa_setconcurrency(concurrency) \
+ __sanitizer_syscall_pre_impl_compat_60_sa_setconcurrency( \
+ (long long)(concurrency))
+#define __sanitizer_syscall_post_compat_60_sa_setconcurrency(res, concurrency) \
+ __sanitizer_syscall_post_impl_compat_60_sa_setconcurrency( \
+ res, (long long)(concurrency))
+#define __sanitizer_syscall_pre_compat_60_sa_yield() \
+ __sanitizer_syscall_pre_impl_compat_60_sa_yield()
+#define __sanitizer_syscall_post_compat_60_sa_yield(res) \
+ __sanitizer_syscall_post_impl_compat_60_sa_yield(res)
+#define __sanitizer_syscall_pre_compat_60_sa_preempt(sa_id) \
+ __sanitizer_syscall_pre_impl_compat_60_sa_preempt((long long)(sa_id))
+#define __sanitizer_syscall_post_compat_60_sa_preempt(res, sa_id) \
+ __sanitizer_syscall_post_impl_compat_60_sa_preempt(res, (long long)(sa_id))
+/* syscall 336 has been skipped */
+/* syscall 337 has been skipped */
+/* syscall 338 has been skipped */
+/* syscall 339 has been skipped */
+#define __sanitizer_syscall_pre___sigaction_sigtramp(signum, nsa, osa, tramp, \
+ vers) \
+ __sanitizer_syscall_pre_impl___sigaction_sigtramp( \
+ (long long)(signum), (long long)(nsa), (long long)(osa), \
+ (long long)(tramp), (long long)(vers))
+#define __sanitizer_syscall_post___sigaction_sigtramp(res, signum, nsa, osa, \
+ tramp, vers) \
+ __sanitizer_syscall_post_impl___sigaction_sigtramp( \
+ res, (long long)(signum), (long long)(nsa), (long long)(osa), \
+ (long long)(tramp), (long long)(vers))
+/* syscall 341 has been skipped */
+/* syscall 342 has been skipped */
+#define __sanitizer_syscall_pre_rasctl(addr, len, op) \
+ __sanitizer_syscall_pre_impl_rasctl((long long)(addr), (long long)(len), \
+ (long long)(op))
+#define __sanitizer_syscall_post_rasctl(res, addr, len, op) \
+ __sanitizer_syscall_post_impl_rasctl(res, (long long)(addr), \
+ (long long)(len), (long long)(op))
+#define __sanitizer_syscall_pre_kqueue() __sanitizer_syscall_pre_impl_kqueue()
+#define __sanitizer_syscall_post_kqueue(res) \
+ __sanitizer_syscall_post_impl_kqueue(res)
+#define __sanitizer_syscall_pre_compat_50_kevent(fd, changelist, nchanges, \
+ eventlist, nevents, timeout) \
+ __sanitizer_syscall_pre_impl_compat_50_kevent( \
+ (long long)(fd), (long long)(changelist), (long long)(nchanges), \
+ (long long)(eventlist), (long long)(nevents), (long long)(timeout))
+#define __sanitizer_syscall_post_compat_50_kevent( \
+ res, fd, changelist, nchanges, eventlist, nevents, timeout) \
+ __sanitizer_syscall_post_impl_compat_50_kevent( \
+ res, (long long)(fd), (long long)(changelist), (long long)(nchanges), \
+ (long long)(eventlist), (long long)(nevents), (long long)(timeout))
+#define __sanitizer_syscall_pre__sched_setparam(pid, lid, policy, params) \
+ __sanitizer_syscall_pre_impl__sched_setparam( \
+ (long long)(pid), (long long)(lid), (long long)(policy), \
+ (long long)(params))
+#define __sanitizer_syscall_post__sched_setparam(res, pid, lid, policy, \
+ params) \
+ __sanitizer_syscall_post_impl__sched_setparam( \
+ res, (long long)(pid), (long long)(lid), (long long)(policy), \
+ (long long)(params))
+#define __sanitizer_syscall_pre__sched_getparam(pid, lid, policy, params) \
+ __sanitizer_syscall_pre_impl__sched_getparam( \
+ (long long)(pid), (long long)(lid), (long long)(policy), \
+ (long long)(params))
+#define __sanitizer_syscall_post__sched_getparam(res, pid, lid, policy, \
+ params) \
+ __sanitizer_syscall_post_impl__sched_getparam( \
+ res, (long long)(pid), (long long)(lid), (long long)(policy), \
+ (long long)(params))
+#define __sanitizer_syscall_pre__sched_setaffinity(pid, lid, size, cpuset) \
+ __sanitizer_syscall_pre_impl__sched_setaffinity( \
+ (long long)(pid), (long long)(lid), (long long)(size), \
+ (long long)(cpuset))
+#define __sanitizer_syscall_post__sched_setaffinity(res, pid, lid, size, \
+ cpuset) \
+ __sanitizer_syscall_post_impl__sched_setaffinity( \
+ res, (long long)(pid), (long long)(lid), (long long)(size), \
+ (long long)(cpuset))
+#define __sanitizer_syscall_pre__sched_getaffinity(pid, lid, size, cpuset) \
+ __sanitizer_syscall_pre_impl__sched_getaffinity( \
+ (long long)(pid), (long long)(lid), (long long)(size), \
+ (long long)(cpuset))
+#define __sanitizer_syscall_post__sched_getaffinity(res, pid, lid, size, \
+ cpuset) \
+ __sanitizer_syscall_post_impl__sched_getaffinity( \
+ res, (long long)(pid), (long long)(lid), (long long)(size), \
+ (long long)(cpuset))
+#define __sanitizer_syscall_pre_sched_yield() \
+ __sanitizer_syscall_pre_impl_sched_yield()
+#define __sanitizer_syscall_post_sched_yield(res) \
+ __sanitizer_syscall_post_impl_sched_yield(res)
+#define __sanitizer_syscall_pre__sched_protect(priority) \
+ __sanitizer_syscall_pre_impl__sched_protect((long long)(priority))
+#define __sanitizer_syscall_post__sched_protect(res, priority) \
+ __sanitizer_syscall_post_impl__sched_protect(res, (long long)(priority))
+/* syscall 352 has been skipped */
+/* syscall 353 has been skipped */
+#define __sanitizer_syscall_pre_fsync_range(fd, flags, start, length) \
+ __sanitizer_syscall_pre_impl_fsync_range( \
+ (long long)(fd), (long long)(flags), (long long)(start), \
+ (long long)(length))
+#define __sanitizer_syscall_post_fsync_range(res, fd, flags, start, length) \
+ __sanitizer_syscall_post_impl_fsync_range( \
+ res, (long long)(fd), (long long)(flags), (long long)(start), \
+ (long long)(length))
+#define __sanitizer_syscall_pre_uuidgen(store, count) \
+ __sanitizer_syscall_pre_impl_uuidgen((long long)(store), (long long)(count))
+#define __sanitizer_syscall_post_uuidgen(res, store, count) \
+ __sanitizer_syscall_post_impl_uuidgen(res, (long long)(store), \
+ (long long)(count))
+#define __sanitizer_syscall_pre_getvfsstat(buf, bufsize, flags) \
+ __sanitizer_syscall_pre_impl_getvfsstat( \
+ (long long)(buf), (long long)(bufsize), (long long)(flags))
+#define __sanitizer_syscall_post_getvfsstat(res, buf, bufsize, flags) \
+ __sanitizer_syscall_post_impl_getvfsstat( \
+ res, (long long)(buf), (long long)(bufsize), (long long)(flags))
+#define __sanitizer_syscall_pre_statvfs1(path, buf, flags) \
+ __sanitizer_syscall_pre_impl_statvfs1((long long)(path), (long long)(buf), \
+ (long long)(flags))
+#define __sanitizer_syscall_post_statvfs1(res, path, buf, flags) \
+ __sanitizer_syscall_post_impl_statvfs1(res, (long long)(path), \
+ (long long)(buf), (long long)(flags))
+#define __sanitizer_syscall_pre_fstatvfs1(fd, buf, flags) \
+ __sanitizer_syscall_pre_impl_fstatvfs1((long long)(fd), (long long)(buf), \
+ (long long)(flags))
+#define __sanitizer_syscall_post_fstatvfs1(res, fd, buf, flags) \
+ __sanitizer_syscall_post_impl_fstatvfs1( \
+ res, (long long)(fd), (long long)(buf), (long long)(flags))
+#define __sanitizer_syscall_pre_compat_30_fhstatvfs1(fhp, buf, flags) \
+ __sanitizer_syscall_pre_impl_compat_30_fhstatvfs1( \
+ (long long)(fhp), (long long)(buf), (long long)(flags))
+#define __sanitizer_syscall_post_compat_30_fhstatvfs1(res, fhp, buf, flags) \
+ __sanitizer_syscall_post_impl_compat_30_fhstatvfs1( \
+ res, (long long)(fhp), (long long)(buf), (long long)(flags))
+#define __sanitizer_syscall_pre_extattrctl(path, cmd, filename, attrnamespace, \
+ attrname) \
+ __sanitizer_syscall_pre_impl_extattrctl( \
+ (long long)(path), (long long)(cmd), (long long)(filename), \
+ (long long)(attrnamespace), (long long)(attrname))
+#define __sanitizer_syscall_post_extattrctl(res, path, cmd, filename, \
+ attrnamespace, attrname) \
+ __sanitizer_syscall_post_impl_extattrctl( \
+ res, (long long)(path), (long long)(cmd), (long long)(filename), \
+ (long long)(attrnamespace), (long long)(attrname))
+#define __sanitizer_syscall_pre_extattr_set_file(path, attrnamespace, \
+ attrname, data, nbytes) \
+ __sanitizer_syscall_pre_impl_extattr_set_file( \
+ (long long)(path), (long long)(attrnamespace), (long long)(attrname), \
+ (long long)(data), (long long)(nbytes))
+#define __sanitizer_syscall_post_extattr_set_file(res, path, attrnamespace, \
+ attrname, data, nbytes) \
+ __sanitizer_syscall_post_impl_extattr_set_file( \
+ res, (long long)(path), (long long)(attrnamespace), \
+ (long long)(attrname), (long long)(data), (long long)(nbytes))
+#define __sanitizer_syscall_pre_extattr_get_file(path, attrnamespace, \
+ attrname, data, nbytes) \
+ __sanitizer_syscall_pre_impl_extattr_get_file( \
+ (long long)(path), (long long)(attrnamespace), (long long)(attrname), \
+ (long long)(data), (long long)(nbytes))
+#define __sanitizer_syscall_post_extattr_get_file(res, path, attrnamespace, \
+ attrname, data, nbytes) \
+ __sanitizer_syscall_post_impl_extattr_get_file( \
+ res, (long long)(path), (long long)(attrnamespace), \
+ (long long)(attrname), (long long)(data), (long long)(nbytes))
+#define __sanitizer_syscall_pre_extattr_delete_file(path, attrnamespace, \
+ attrname) \
+ __sanitizer_syscall_pre_impl_extattr_delete_file( \
+ (long long)(path), (long long)(attrnamespace), (long long)(attrname))
+#define __sanitizer_syscall_post_extattr_delete_file(res, path, attrnamespace, \
+ attrname) \
+ __sanitizer_syscall_post_impl_extattr_delete_file( \
+ res, (long long)(path), (long long)(attrnamespace), \
+ (long long)(attrname))
+#define __sanitizer_syscall_pre_extattr_set_fd(fd, attrnamespace, attrname, \
+ data, nbytes) \
+ __sanitizer_syscall_pre_impl_extattr_set_fd( \
+ (long long)(fd), (long long)(attrnamespace), (long long)(attrname), \
+ (long long)(data), (long long)(nbytes))
+#define __sanitizer_syscall_post_extattr_set_fd(res, fd, attrnamespace, \
+ attrname, data, nbytes) \
+ __sanitizer_syscall_post_impl_extattr_set_fd( \
+ res, (long long)(fd), (long long)(attrnamespace), (long long)(attrname), \
+ (long long)(data), (long long)(nbytes))
+#define __sanitizer_syscall_pre_extattr_get_fd(fd, attrnamespace, attrname, \
+ data, nbytes) \
+ __sanitizer_syscall_pre_impl_extattr_get_fd( \
+ (long long)(fd), (long long)(attrnamespace), (long long)(attrname), \
+ (long long)(data), (long long)(nbytes))
+#define __sanitizer_syscall_post_extattr_get_fd(res, fd, attrnamespace, \
+ attrname, data, nbytes) \
+ __sanitizer_syscall_post_impl_extattr_get_fd( \
+ res, (long long)(fd), (long long)(attrnamespace), (long long)(attrname), \
+ (long long)(data), (long long)(nbytes))
+#define __sanitizer_syscall_pre_extattr_delete_fd(fd, attrnamespace, attrname) \
+ __sanitizer_syscall_pre_impl_extattr_delete_fd( \
+ (long long)(fd), (long long)(attrnamespace), (long long)(attrname))
+#define __sanitizer_syscall_post_extattr_delete_fd(res, fd, attrnamespace, \
+ attrname) \
+ __sanitizer_syscall_post_impl_extattr_delete_fd( \
+ res, (long long)(fd), (long long)(attrnamespace), (long long)(attrname))
+#define __sanitizer_syscall_pre_extattr_set_link(path, attrnamespace, \
+ attrname, data, nbytes) \
+ __sanitizer_syscall_pre_impl_extattr_set_link( \
+ (long long)(path), (long long)(attrnamespace), (long long)(attrname), \
+ (long long)(data), (long long)(nbytes))
+#define __sanitizer_syscall_post_extattr_set_link(res, path, attrnamespace, \
+ attrname, data, nbytes) \
+ __sanitizer_syscall_post_impl_extattr_set_link( \
+ res, (long long)(path), (long long)(attrnamespace), \
+ (long long)(attrname), (long long)(data), (long long)(nbytes))
+#define __sanitizer_syscall_pre_extattr_get_link(path, attrnamespace, \
+ attrname, data, nbytes) \
+ __sanitizer_syscall_pre_impl_extattr_get_link( \
+ (long long)(path), (long long)(attrnamespace), (long long)(attrname), \
+ (long long)(data), (long long)(nbytes))
+#define __sanitizer_syscall_post_extattr_get_link(res, path, attrnamespace, \
+ attrname, data, nbytes) \
+ __sanitizer_syscall_post_impl_extattr_get_link( \
+ res, (long long)(path), (long long)(attrnamespace), \
+ (long long)(attrname), (long long)(data), (long long)(nbytes))
+#define __sanitizer_syscall_pre_extattr_delete_link(path, attrnamespace, \
+ attrname) \
+ __sanitizer_syscall_pre_impl_extattr_delete_link( \
+ (long long)(path), (long long)(attrnamespace), (long long)(attrname))
+#define __sanitizer_syscall_post_extattr_delete_link(res, path, attrnamespace, \
+ attrname) \
+ __sanitizer_syscall_post_impl_extattr_delete_link( \
+ res, (long long)(path), (long long)(attrnamespace), \
+ (long long)(attrname))
+#define __sanitizer_syscall_pre_extattr_list_fd(fd, attrnamespace, data, \
+ nbytes) \
+ __sanitizer_syscall_pre_impl_extattr_list_fd( \
+ (long long)(fd), (long long)(attrnamespace), (long long)(data), \
+ (long long)(nbytes))
+#define __sanitizer_syscall_post_extattr_list_fd(res, fd, attrnamespace, data, \
+ nbytes) \
+ __sanitizer_syscall_post_impl_extattr_list_fd( \
+ res, (long long)(fd), (long long)(attrnamespace), (long long)(data), \
+ (long long)(nbytes))
+#define __sanitizer_syscall_pre_extattr_list_file(path, attrnamespace, data, \
+ nbytes) \
+ __sanitizer_syscall_pre_impl_extattr_list_file( \
+ (long long)(path), (long long)(attrnamespace), (long long)(data), \
+ (long long)(nbytes))
+#define __sanitizer_syscall_post_extattr_list_file(res, path, attrnamespace, \
+ data, nbytes) \
+ __sanitizer_syscall_post_impl_extattr_list_file( \
+ res, (long long)(path), (long long)(attrnamespace), (long long)(data), \
+ (long long)(nbytes))
+#define __sanitizer_syscall_pre_extattr_list_link(path, attrnamespace, data, \
+ nbytes) \
+ __sanitizer_syscall_pre_impl_extattr_list_link( \
+ (long long)(path), (long long)(attrnamespace), (long long)(data), \
+ (long long)(nbytes))
+#define __sanitizer_syscall_post_extattr_list_link(res, path, attrnamespace, \
+ data, nbytes) \
+ __sanitizer_syscall_post_impl_extattr_list_link( \
+ res, (long long)(path), (long long)(attrnamespace), (long long)(data), \
+ (long long)(nbytes))
+#define __sanitizer_syscall_pre_compat_50_pselect(nd, in, ou, ex, ts, mask) \
+ __sanitizer_syscall_pre_impl_compat_50_pselect( \
+ (long long)(nd), (long long)(in), (long long)(ou), (long long)(ex), \
+ (long long)(ts), (long long)(mask))
+#define __sanitizer_syscall_post_compat_50_pselect(res, nd, in, ou, ex, ts, \
+ mask) \
+ __sanitizer_syscall_post_impl_compat_50_pselect( \
+ res, (long long)(nd), (long long)(in), (long long)(ou), (long long)(ex), \
+ (long long)(ts), (long long)(mask))
+#define __sanitizer_syscall_pre_compat_50_pollts(fds, nfds, ts, mask) \
+ __sanitizer_syscall_pre_impl_compat_50_pollts( \
+ (long long)(fds), (long long)(nfds), (long long)(ts), (long long)(mask))
+#define __sanitizer_syscall_post_compat_50_pollts(res, fds, nfds, ts, mask) \
+ __sanitizer_syscall_post_impl_compat_50_pollts( \
+ res, (long long)(fds), (long long)(nfds), (long long)(ts), \
+ (long long)(mask))
+#define __sanitizer_syscall_pre_setxattr(path, name, value, size, flags) \
+ __sanitizer_syscall_pre_impl_setxattr((long long)(path), (long long)(name), \
+ (long long)(value), (long long)(size), \
+ (long long)(flags))
+#define __sanitizer_syscall_post_setxattr(res, path, name, value, size, flags) \
+ __sanitizer_syscall_post_impl_setxattr( \
+ res, (long long)(path), (long long)(name), (long long)(value), \
+ (long long)(size), (long long)(flags))
+#define __sanitizer_syscall_pre_lsetxattr(path, name, value, size, flags) \
+ __sanitizer_syscall_pre_impl_lsetxattr( \
+ (long long)(path), (long long)(name), (long long)(value), \
+ (long long)(size), (long long)(flags))
+#define __sanitizer_syscall_post_lsetxattr(res, path, name, value, size, \
+ flags) \
+ __sanitizer_syscall_post_impl_lsetxattr( \
+ res, (long long)(path), (long long)(name), (long long)(value), \
+ (long long)(size), (long long)(flags))
+#define __sanitizer_syscall_pre_fsetxattr(fd, name, value, size, flags) \
+ __sanitizer_syscall_pre_impl_fsetxattr( \
+ (long long)(fd), (long long)(name), (long long)(value), \
+ (long long)(size), (long long)(flags))
+#define __sanitizer_syscall_post_fsetxattr(res, fd, name, value, size, flags) \
+ __sanitizer_syscall_post_impl_fsetxattr( \
+ res, (long long)(fd), (long long)(name), (long long)(value), \
+ (long long)(size), (long long)(flags))
+#define __sanitizer_syscall_pre_getxattr(path, name, value, size) \
+ __sanitizer_syscall_pre_impl_getxattr((long long)(path), (long long)(name), \
+ (long long)(value), (long long)(size))
+#define __sanitizer_syscall_post_getxattr(res, path, name, value, size) \
+ __sanitizer_syscall_post_impl_getxattr( \
+ res, (long long)(path), (long long)(name), (long long)(value), \
+ (long long)(size))
+#define __sanitizer_syscall_pre_lgetxattr(path, name, value, size) \
+ __sanitizer_syscall_pre_impl_lgetxattr((long long)(path), (long long)(name), \
+ (long long)(value), \
+ (long long)(size))
+#define __sanitizer_syscall_post_lgetxattr(res, path, name, value, size) \
+ __sanitizer_syscall_post_impl_lgetxattr( \
+ res, (long long)(path), (long long)(name), (long long)(value), \
+ (long long)(size))
+#define __sanitizer_syscall_pre_fgetxattr(fd, name, value, size) \
+ __sanitizer_syscall_pre_impl_fgetxattr((long long)(fd), (long long)(name), \
+ (long long)(value), \
+ (long long)(size))
+#define __sanitizer_syscall_post_fgetxattr(res, fd, name, value, size) \
+ __sanitizer_syscall_post_impl_fgetxattr( \
+ res, (long long)(fd), (long long)(name), (long long)(value), \
+ (long long)(size))
+#define __sanitizer_syscall_pre_listxattr(path, list, size) \
+ __sanitizer_syscall_pre_impl_listxattr((long long)(path), (long long)(list), \
+ (long long)(size))
+#define __sanitizer_syscall_post_listxattr(res, path, list, size) \
+ __sanitizer_syscall_post_impl_listxattr( \
+ res, (long long)(path), (long long)(list), (long long)(size))
+#define __sanitizer_syscall_pre_llistxattr(path, list, size) \
+ __sanitizer_syscall_pre_impl_llistxattr( \
+ (long long)(path), (long long)(list), (long long)(size))
+#define __sanitizer_syscall_post_llistxattr(res, path, list, size) \
+ __sanitizer_syscall_post_impl_llistxattr( \
+ res, (long long)(path), (long long)(list), (long long)(size))
+#define __sanitizer_syscall_pre_flistxattr(fd, list, size) \
+ __sanitizer_syscall_pre_impl_flistxattr((long long)(fd), (long long)(list), \
+ (long long)(size))
+#define __sanitizer_syscall_post_flistxattr(res, fd, list, size) \
+ __sanitizer_syscall_post_impl_flistxattr( \
+ res, (long long)(fd), (long long)(list), (long long)(size))
+#define __sanitizer_syscall_pre_removexattr(path, name) \
+ __sanitizer_syscall_pre_impl_removexattr((long long)(path), (long long)(name))
+#define __sanitizer_syscall_post_removexattr(res, path, name) \
+ __sanitizer_syscall_post_impl_removexattr(res, (long long)(path), \
+ (long long)(name))
+#define __sanitizer_syscall_pre_lremovexattr(path, name) \
+ __sanitizer_syscall_pre_impl_lremovexattr((long long)(path), \
+ (long long)(name))
+#define __sanitizer_syscall_post_lremovexattr(res, path, name) \
+ __sanitizer_syscall_post_impl_lremovexattr(res, (long long)(path), \
+ (long long)(name))
+#define __sanitizer_syscall_pre_fremovexattr(fd, name) \
+ __sanitizer_syscall_pre_impl_fremovexattr((long long)(fd), (long long)(name))
+#define __sanitizer_syscall_post_fremovexattr(res, fd, name) \
+ __sanitizer_syscall_post_impl_fremovexattr(res, (long long)(fd), \
+ (long long)(name))
+#define __sanitizer_syscall_pre_compat_50___stat30(path, ub) \
+ __sanitizer_syscall_pre_impl_compat_50___stat30((long long)(path), \
+ (long long)(ub))
+#define __sanitizer_syscall_post_compat_50___stat30(res, path, ub) \
+ __sanitizer_syscall_post_impl_compat_50___stat30(res, (long long)(path), \
+ (long long)(ub))
+#define __sanitizer_syscall_pre_compat_50___fstat30(fd, sb) \
+ __sanitizer_syscall_pre_impl_compat_50___fstat30((long long)(fd), \
+ (long long)(sb))
+#define __sanitizer_syscall_post_compat_50___fstat30(res, fd, sb) \
+ __sanitizer_syscall_post_impl_compat_50___fstat30(res, (long long)(fd), \
+ (long long)(sb))
+#define __sanitizer_syscall_pre_compat_50___lstat30(path, ub) \
+ __sanitizer_syscall_pre_impl_compat_50___lstat30((long long)(path), \
+ (long long)(ub))
+#define __sanitizer_syscall_post_compat_50___lstat30(res, path, ub) \
+ __sanitizer_syscall_post_impl_compat_50___lstat30(res, (long long)(path), \
+ (long long)(ub))
+#define __sanitizer_syscall_pre___getdents30(fd, buf, count) \
+ __sanitizer_syscall_pre_impl___getdents30((long long)(fd), (long long)(buf), \
+ (long long)(count))
+#define __sanitizer_syscall_post___getdents30(res, fd, buf, count) \
+ __sanitizer_syscall_post_impl___getdents30( \
+ res, (long long)(fd), (long long)(buf), (long long)(count))
+#define __sanitizer_syscall_pre_posix_fadvise() \
+  __sanitizer_syscall_pre_impl_posix_fadvise()
+#define __sanitizer_syscall_post_posix_fadvise(res) \
+  __sanitizer_syscall_post_impl_posix_fadvise(res)
+#define __sanitizer_syscall_pre_compat_30___fhstat30(fhp, sb) \
+ __sanitizer_syscall_pre_impl_compat_30___fhstat30((long long)(fhp), \
+ (long long)(sb))
+#define __sanitizer_syscall_post_compat_30___fhstat30(res, fhp, sb) \
+ __sanitizer_syscall_post_impl_compat_30___fhstat30(res, (long long)(fhp), \
+ (long long)(sb))
+#define __sanitizer_syscall_pre_compat_50___ntp_gettime30(ntvp) \
+ __sanitizer_syscall_pre_impl_compat_50___ntp_gettime30((long long)(ntvp))
+#define __sanitizer_syscall_post_compat_50___ntp_gettime30(res, ntvp) \
+ __sanitizer_syscall_post_impl_compat_50___ntp_gettime30(res, \
+ (long long)(ntvp))
+#define __sanitizer_syscall_pre___socket30(domain, type, protocol) \
+ __sanitizer_syscall_pre_impl___socket30( \
+ (long long)(domain), (long long)(type), (long long)(protocol))
+#define __sanitizer_syscall_post___socket30(res, domain, type, protocol) \
+ __sanitizer_syscall_post_impl___socket30( \
+ res, (long long)(domain), (long long)(type), (long long)(protocol))
+#define __sanitizer_syscall_pre___getfh30(fname, fhp, fh_size) \
+ __sanitizer_syscall_pre_impl___getfh30((long long)(fname), (long long)(fhp), \
+ (long long)(fh_size))
+#define __sanitizer_syscall_post___getfh30(res, fname, fhp, fh_size) \
+ __sanitizer_syscall_post_impl___getfh30( \
+ res, (long long)(fname), (long long)(fhp), (long long)(fh_size))
+#define __sanitizer_syscall_pre___fhopen40(fhp, fh_size, flags) \
+ __sanitizer_syscall_pre_impl___fhopen40( \
+ (long long)(fhp), (long long)(fh_size), (long long)(flags))
+#define __sanitizer_syscall_post___fhopen40(res, fhp, fh_size, flags) \
+ __sanitizer_syscall_post_impl___fhopen40( \
+ res, (long long)(fhp), (long long)(fh_size), (long long)(flags))
+#define __sanitizer_syscall_pre___fhstatvfs140(fhp, fh_size, buf, flags) \
+ __sanitizer_syscall_pre_impl___fhstatvfs140( \
+ (long long)(fhp), (long long)(fh_size), (long long)(buf), \
+ (long long)(flags))
+#define __sanitizer_syscall_post___fhstatvfs140(res, fhp, fh_size, buf, flags) \
+ __sanitizer_syscall_post_impl___fhstatvfs140( \
+ res, (long long)(fhp), (long long)(fh_size), (long long)(buf), \
+ (long long)(flags))
+#define __sanitizer_syscall_pre_compat_50___fhstat40(fhp, fh_size, sb) \
+ __sanitizer_syscall_pre_impl_compat_50___fhstat40( \
+ (long long)(fhp), (long long)(fh_size), (long long)(sb))
+#define __sanitizer_syscall_post_compat_50___fhstat40(res, fhp, fh_size, sb) \
+ __sanitizer_syscall_post_impl_compat_50___fhstat40( \
+ res, (long long)(fhp), (long long)(fh_size), (long long)(sb))
+#define __sanitizer_syscall_pre_aio_cancel(fildes, aiocbp) \
+ __sanitizer_syscall_pre_impl_aio_cancel((long long)(fildes), \
+ (long long)(aiocbp))
+#define __sanitizer_syscall_post_aio_cancel(res, fildes, aiocbp) \
+ __sanitizer_syscall_post_impl_aio_cancel(res, (long long)(fildes), \
+ (long long)(aiocbp))
+#define __sanitizer_syscall_pre_aio_error(aiocbp) \
+ __sanitizer_syscall_pre_impl_aio_error((long long)(aiocbp))
+#define __sanitizer_syscall_post_aio_error(res, aiocbp) \
+ __sanitizer_syscall_post_impl_aio_error(res, (long long)(aiocbp))
+#define __sanitizer_syscall_pre_aio_fsync(op, aiocbp) \
+ __sanitizer_syscall_pre_impl_aio_fsync((long long)(op), (long long)(aiocbp))
+#define __sanitizer_syscall_post_aio_fsync(res, op, aiocbp) \
+ __sanitizer_syscall_post_impl_aio_fsync(res, (long long)(op), \
+ (long long)(aiocbp))
+#define __sanitizer_syscall_pre_aio_read(aiocbp) \
+ __sanitizer_syscall_pre_impl_aio_read((long long)(aiocbp))
+#define __sanitizer_syscall_post_aio_read(res, aiocbp) \
+ __sanitizer_syscall_post_impl_aio_read(res, (long long)(aiocbp))
+#define __sanitizer_syscall_pre_aio_return(aiocbp) \
+ __sanitizer_syscall_pre_impl_aio_return((long long)(aiocbp))
+#define __sanitizer_syscall_post_aio_return(res, aiocbp) \
+ __sanitizer_syscall_post_impl_aio_return(res, (long long)(aiocbp))
+#define __sanitizer_syscall_pre_compat_50_aio_suspend(list, nent, timeout) \
+ __sanitizer_syscall_pre_impl_compat_50_aio_suspend( \
+ (long long)(list), (long long)(nent), (long long)(timeout))
+#define __sanitizer_syscall_post_compat_50_aio_suspend(res, list, nent, \
+ timeout) \
+ __sanitizer_syscall_post_impl_compat_50_aio_suspend( \
+ res, (long long)(list), (long long)(nent), (long long)(timeout))
+#define __sanitizer_syscall_pre_aio_write(aiocbp) \
+ __sanitizer_syscall_pre_impl_aio_write((long long)(aiocbp))
+#define __sanitizer_syscall_post_aio_write(res, aiocbp) \
+ __sanitizer_syscall_post_impl_aio_write(res, (long long)(aiocbp))
+#define __sanitizer_syscall_pre_lio_listio(mode, list, nent, sig) \
+ __sanitizer_syscall_pre_impl_lio_listio((long long)(mode), \
+ (long long)(list), \
+ (long long)(nent), (long long)(sig))
+#define __sanitizer_syscall_post_lio_listio(res, mode, list, nent, sig) \
+ __sanitizer_syscall_post_impl_lio_listio( \
+ res, (long long)(mode), (long long)(list), (long long)(nent), \
+ (long long)(sig))
+/* syscall 407 has been skipped */
+/* syscall 408 has been skipped */
+/* syscall 409 has been skipped */
+#define __sanitizer_syscall_pre___mount50(type, path, flags, data, data_len) \
+ __sanitizer_syscall_pre_impl___mount50( \
+ (long long)(type), (long long)(path), (long long)(flags), \
+ (long long)(data), (long long)(data_len))
+#define __sanitizer_syscall_post___mount50(res, type, path, flags, data, \
+ data_len) \
+ __sanitizer_syscall_post_impl___mount50( \
+ res, (long long)(type), (long long)(path), (long long)(flags), \
+ (long long)(data), (long long)(data_len))
+#define __sanitizer_syscall_pre_mremap(old_address, old_size, new_address, \
+ new_size, flags) \
+ __sanitizer_syscall_pre_impl_mremap( \
+ (long long)(old_address), (long long)(old_size), \
+ (long long)(new_address), (long long)(new_size), (long long)(flags))
+#define __sanitizer_syscall_post_mremap(res, old_address, old_size, \
+ new_address, new_size, flags) \
+ __sanitizer_syscall_post_impl_mremap( \
+ res, (long long)(old_address), (long long)(old_size), \
+ (long long)(new_address), (long long)(new_size), (long long)(flags))
+#define __sanitizer_syscall_pre_pset_create(psid) \
+ __sanitizer_syscall_pre_impl_pset_create((long long)(psid))
+#define __sanitizer_syscall_post_pset_create(res, psid) \
+ __sanitizer_syscall_post_impl_pset_create(res, (long long)(psid))
+#define __sanitizer_syscall_pre_pset_destroy(psid) \
+ __sanitizer_syscall_pre_impl_pset_destroy((long long)(psid))
+#define __sanitizer_syscall_post_pset_destroy(res, psid) \
+ __sanitizer_syscall_post_impl_pset_destroy(res, (long long)(psid))
+#define __sanitizer_syscall_pre_pset_assign(psid, cpuid, opsid) \
+ __sanitizer_syscall_pre_impl_pset_assign( \
+ (long long)(psid), (long long)(cpuid), (long long)(opsid))
+#define __sanitizer_syscall_post_pset_assign(res, psid, cpuid, opsid) \
+ __sanitizer_syscall_post_impl_pset_assign( \
+ res, (long long)(psid), (long long)(cpuid), (long long)(opsid))
+#define __sanitizer_syscall_pre__pset_bind(idtype, first_id, second_id, psid, \
+ opsid) \
+ __sanitizer_syscall_pre_impl__pset_bind( \
+ (long long)(idtype), (long long)(first_id), (long long)(second_id), \
+ (long long)(psid), (long long)(opsid))
+#define __sanitizer_syscall_post__pset_bind(res, idtype, first_id, second_id, \
+ psid, opsid) \
+ __sanitizer_syscall_post_impl__pset_bind( \
+ res, (long long)(idtype), (long long)(first_id), (long long)(second_id), \
+ (long long)(psid), (long long)(opsid))
+#define __sanitizer_syscall_pre___posix_fadvise50(fd, PAD, offset, len, \
+ advice) \
+ __sanitizer_syscall_pre_impl___posix_fadvise50( \
+ (long long)(fd), (long long)(PAD), (long long)(offset), \
+ (long long)(len), (long long)(advice))
+#define __sanitizer_syscall_post___posix_fadvise50(res, fd, PAD, offset, len, \
+ advice) \
+ __sanitizer_syscall_post_impl___posix_fadvise50( \
+ res, (long long)(fd), (long long)(PAD), (long long)(offset), \
+ (long long)(len), (long long)(advice))
+#define __sanitizer_syscall_pre___select50(nd, in, ou, ex, tv) \
+ __sanitizer_syscall_pre_impl___select50((long long)(nd), (long long)(in), \
+ (long long)(ou), (long long)(ex), \
+ (long long)(tv))
+#define __sanitizer_syscall_post___select50(res, nd, in, ou, ex, tv) \
+ __sanitizer_syscall_post_impl___select50(res, (long long)(nd), \
+ (long long)(in), (long long)(ou), \
+ (long long)(ex), (long long)(tv))
+#define __sanitizer_syscall_pre___gettimeofday50(tp, tzp) \
+ __sanitizer_syscall_pre_impl___gettimeofday50((long long)(tp), \
+ (long long)(tzp))
+#define __sanitizer_syscall_post___gettimeofday50(res, tp, tzp) \
+ __sanitizer_syscall_post_impl___gettimeofday50(res, (long long)(tp), \
+ (long long)(tzp))
+#define __sanitizer_syscall_pre___settimeofday50(tv, tzp) \
+ __sanitizer_syscall_pre_impl___settimeofday50((long long)(tv), \
+ (long long)(tzp))
+#define __sanitizer_syscall_post___settimeofday50(res, tv, tzp) \
+ __sanitizer_syscall_post_impl___settimeofday50(res, (long long)(tv), \
+ (long long)(tzp))
+#define __sanitizer_syscall_pre___utimes50(path, tptr) \
+ __sanitizer_syscall_pre_impl___utimes50((long long)(path), (long long)(tptr))
+#define __sanitizer_syscall_post___utimes50(res, path, tptr) \
+ __sanitizer_syscall_post_impl___utimes50(res, (long long)(path), \
+ (long long)(tptr))
+#define __sanitizer_syscall_pre___adjtime50(delta, olddelta) \
+ __sanitizer_syscall_pre_impl___adjtime50((long long)(delta), \
+ (long long)(olddelta))
+#define __sanitizer_syscall_post___adjtime50(res, delta, olddelta) \
+ __sanitizer_syscall_post_impl___adjtime50(res, (long long)(delta), \
+ (long long)(olddelta))
+#define __sanitizer_syscall_pre___lfs_segwait50(fsidp, tv) \
+ __sanitizer_syscall_pre_impl___lfs_segwait50((long long)(fsidp), \
+ (long long)(tv))
+#define __sanitizer_syscall_post___lfs_segwait50(res, fsidp, tv) \
+ __sanitizer_syscall_post_impl___lfs_segwait50(res, (long long)(fsidp), \
+ (long long)(tv))
+#define __sanitizer_syscall_pre___futimes50(fd, tptr) \
+ __sanitizer_syscall_pre_impl___futimes50((long long)(fd), (long long)(tptr))
+#define __sanitizer_syscall_post___futimes50(res, fd, tptr) \
+ __sanitizer_syscall_post_impl___futimes50(res, (long long)(fd), \
+ (long long)(tptr))
+#define __sanitizer_syscall_pre___lutimes50(path, tptr) \
+ __sanitizer_syscall_pre_impl___lutimes50((long long)(path), (long long)(tptr))
+#define __sanitizer_syscall_post___lutimes50(res, path, tptr) \
+ __sanitizer_syscall_post_impl___lutimes50(res, (long long)(path), \
+ (long long)(tptr))
+#define __sanitizer_syscall_pre___setitimer50(which, itv, oitv) \
+ __sanitizer_syscall_pre_impl___setitimer50( \
+ (long long)(which), (long long)(itv), (long long)(oitv))
+#define __sanitizer_syscall_post___setitimer50(res, which, itv, oitv) \
+ __sanitizer_syscall_post_impl___setitimer50( \
+ res, (long long)(which), (long long)(itv), (long long)(oitv))
+#define __sanitizer_syscall_pre___getitimer50(which, itv) \
+ __sanitizer_syscall_pre_impl___getitimer50((long long)(which), \
+ (long long)(itv))
+#define __sanitizer_syscall_post___getitimer50(res, which, itv) \
+ __sanitizer_syscall_post_impl___getitimer50(res, (long long)(which), \
+ (long long)(itv))
+#define __sanitizer_syscall_pre___clock_gettime50(clock_id, tp) \
+ __sanitizer_syscall_pre_impl___clock_gettime50((long long)(clock_id), \
+ (long long)(tp))
+#define __sanitizer_syscall_post___clock_gettime50(res, clock_id, tp) \
+ __sanitizer_syscall_post_impl___clock_gettime50(res, (long long)(clock_id), \
+ (long long)(tp))
+#define __sanitizer_syscall_pre___clock_settime50(clock_id, tp) \
+ __sanitizer_syscall_pre_impl___clock_settime50((long long)(clock_id), \
+ (long long)(tp))
+#define __sanitizer_syscall_post___clock_settime50(res, clock_id, tp) \
+ __sanitizer_syscall_post_impl___clock_settime50(res, (long long)(clock_id), \
+ (long long)(tp))
+#define __sanitizer_syscall_pre___clock_getres50(clock_id, tp) \
+ __sanitizer_syscall_pre_impl___clock_getres50((long long)(clock_id), \
+ (long long)(tp))
+#define __sanitizer_syscall_post___clock_getres50(res, clock_id, tp) \
+ __sanitizer_syscall_post_impl___clock_getres50(res, (long long)(clock_id), \
+ (long long)(tp))
+#define __sanitizer_syscall_pre___nanosleep50(rqtp, rmtp) \
+ __sanitizer_syscall_pre_impl___nanosleep50((long long)(rqtp), \
+ (long long)(rmtp))
+#define __sanitizer_syscall_post___nanosleep50(res, rqtp, rmtp) \
+ __sanitizer_syscall_post_impl___nanosleep50(res, (long long)(rqtp), \
+ (long long)(rmtp))
+#define __sanitizer_syscall_pre_____sigtimedwait50(set, info, timeout) \
+ __sanitizer_syscall_pre_impl_____sigtimedwait50( \
+ (long long)(set), (long long)(info), (long long)(timeout))
+#define __sanitizer_syscall_post_____sigtimedwait50(res, set, info, timeout) \
+ __sanitizer_syscall_post_impl_____sigtimedwait50( \
+ res, (long long)(set), (long long)(info), (long long)(timeout))
+#define __sanitizer_syscall_pre___mq_timedsend50(mqdes, msg_ptr, msg_len, \
+ msg_prio, abs_timeout) \
+ __sanitizer_syscall_pre_impl___mq_timedsend50( \
+ (long long)(mqdes), (long long)(msg_ptr), (long long)(msg_len), \
+ (long long)(msg_prio), (long long)(abs_timeout))
+#define __sanitizer_syscall_post___mq_timedsend50( \
+ res, mqdes, msg_ptr, msg_len, msg_prio, abs_timeout) \
+ __sanitizer_syscall_post_impl___mq_timedsend50( \
+ res, (long long)(mqdes), (long long)(msg_ptr), (long long)(msg_len), \
+ (long long)(msg_prio), (long long)(abs_timeout))
+#define __sanitizer_syscall_pre___mq_timedreceive50(mqdes, msg_ptr, msg_len, \
+ msg_prio, abs_timeout) \
+ __sanitizer_syscall_pre_impl___mq_timedreceive50( \
+ (long long)(mqdes), (long long)(msg_ptr), (long long)(msg_len), \
+ (long long)(msg_prio), (long long)(abs_timeout))
+#define __sanitizer_syscall_post___mq_timedreceive50( \
+ res, mqdes, msg_ptr, msg_len, msg_prio, abs_timeout) \
+ __sanitizer_syscall_post_impl___mq_timedreceive50( \
+ res, (long long)(mqdes), (long long)(msg_ptr), (long long)(msg_len), \
+ (long long)(msg_prio), (long long)(abs_timeout))
+#define __sanitizer_syscall_pre_compat_60__lwp_park(ts, unpark, hint, \
+ unparkhint) \
+ __sanitizer_syscall_pre_impl_compat_60__lwp_park( \
+ (long long)(ts), (long long)(unpark), (long long)(hint), \
+ (long long)(unparkhint))
+#define __sanitizer_syscall_post_compat_60__lwp_park(res, ts, unpark, hint, \
+ unparkhint) \
+ __sanitizer_syscall_post_impl_compat_60__lwp_park( \
+ res, (long long)(ts), (long long)(unpark), (long long)(hint), \
+ (long long)(unparkhint))
+#define __sanitizer_syscall_pre___kevent50(fd, changelist, nchanges, \
+ eventlist, nevents, timeout) \
+ __sanitizer_syscall_pre_impl___kevent50( \
+ (long long)(fd), (long long)(changelist), (long long)(nchanges), \
+ (long long)(eventlist), (long long)(nevents), (long long)(timeout))
+#define __sanitizer_syscall_post___kevent50(res, fd, changelist, nchanges, \
+ eventlist, nevents, timeout) \
+ __sanitizer_syscall_post_impl___kevent50( \
+ res, (long long)(fd), (long long)(changelist), (long long)(nchanges), \
+ (long long)(eventlist), (long long)(nevents), (long long)(timeout))
+#define __sanitizer_syscall_pre___pselect50(nd, in, ou, ex, ts, mask) \
+ __sanitizer_syscall_pre_impl___pselect50((long long)(nd), (long long)(in), \
+ (long long)(ou), (long long)(ex), \
+ (long long)(ts), (long long)(mask))
+#define __sanitizer_syscall_post___pselect50(res, nd, in, ou, ex, ts, mask) \
+ __sanitizer_syscall_post_impl___pselect50( \
+ res, (long long)(nd), (long long)(in), (long long)(ou), (long long)(ex), \
+ (long long)(ts), (long long)(mask))
+#define __sanitizer_syscall_pre___pollts50(fds, nfds, ts, mask) \
+ __sanitizer_syscall_pre_impl___pollts50((long long)(fds), (long long)(nfds), \
+ (long long)(ts), (long long)(mask))
+#define __sanitizer_syscall_post___pollts50(res, fds, nfds, ts, mask) \
+ __sanitizer_syscall_post_impl___pollts50(res, (long long)(fds), \
+ (long long)(nfds), (long long)(ts), \
+ (long long)(mask))
+#define __sanitizer_syscall_pre___aio_suspend50(list, nent, timeout) \
+ __sanitizer_syscall_pre_impl___aio_suspend50( \
+ (long long)(list), (long long)(nent), (long long)(timeout))
+#define __sanitizer_syscall_post___aio_suspend50(res, list, nent, timeout) \
+ __sanitizer_syscall_post_impl___aio_suspend50( \
+ res, (long long)(list), (long long)(nent), (long long)(timeout))
+#define __sanitizer_syscall_pre___stat50(path, ub) \
+ __sanitizer_syscall_pre_impl___stat50((long long)(path), (long long)(ub))
+#define __sanitizer_syscall_post___stat50(res, path, ub) \
+ __sanitizer_syscall_post_impl___stat50(res, (long long)(path), \
+ (long long)(ub))
+#define __sanitizer_syscall_pre___fstat50(fd, sb) \
+ __sanitizer_syscall_pre_impl___fstat50((long long)(fd), (long long)(sb))
+#define __sanitizer_syscall_post___fstat50(res, fd, sb) \
+ __sanitizer_syscall_post_impl___fstat50(res, (long long)(fd), (long long)(sb))
+#define __sanitizer_syscall_pre___lstat50(path, ub) \
+ __sanitizer_syscall_pre_impl___lstat50((long long)(path), (long long)(ub))
+#define __sanitizer_syscall_post___lstat50(res, path, ub) \
+ __sanitizer_syscall_post_impl___lstat50(res, (long long)(path), \
+ (long long)(ub))
+#define __sanitizer_syscall_pre_____semctl50(semid, semnum, cmd, arg) \
+ __sanitizer_syscall_pre_impl_____semctl50( \
+ (long long)(semid), (long long)(semnum), (long long)(cmd), \
+ (long long)(arg))
+#define __sanitizer_syscall_post_____semctl50(res, semid, semnum, cmd, arg) \
+ __sanitizer_syscall_post_impl_____semctl50( \
+ res, (long long)(semid), (long long)(semnum), (long long)(cmd), \
+ (long long)(arg))
+#define __sanitizer_syscall_pre___shmctl50(shmid, cmd, buf) \
+ __sanitizer_syscall_pre_impl___shmctl50((long long)(shmid), \
+ (long long)(cmd), (long long)(buf))
+#define __sanitizer_syscall_post___shmctl50(res, shmid, cmd, buf) \
+ __sanitizer_syscall_post_impl___shmctl50(res, (long long)(shmid), \
+ (long long)(cmd), (long long)(buf))
+#define __sanitizer_syscall_pre___msgctl50(msqid, cmd, buf) \
+ __sanitizer_syscall_pre_impl___msgctl50((long long)(msqid), \
+ (long long)(cmd), (long long)(buf))
+#define __sanitizer_syscall_post___msgctl50(res, msqid, cmd, buf) \
+ __sanitizer_syscall_post_impl___msgctl50(res, (long long)(msqid), \
+ (long long)(cmd), (long long)(buf))
+#define __sanitizer_syscall_pre___getrusage50(who, rusage) \
+ __sanitizer_syscall_pre_impl___getrusage50((long long)(who), \
+ (long long)(rusage))
+#define __sanitizer_syscall_post___getrusage50(res, who, rusage) \
+ __sanitizer_syscall_post_impl___getrusage50(res, (long long)(who), \
+ (long long)(rusage))
+#define __sanitizer_syscall_pre___timer_settime50(timerid, flags, value, \
+ ovalue) \
+ __sanitizer_syscall_pre_impl___timer_settime50( \
+ (long long)(timerid), (long long)(flags), (long long)(value), \
+ (long long)(ovalue))
+#define __sanitizer_syscall_post___timer_settime50(res, timerid, flags, value, \
+ ovalue) \
+ __sanitizer_syscall_post_impl___timer_settime50( \
+ res, (long long)(timerid), (long long)(flags), (long long)(value), \
+ (long long)(ovalue))
+#define __sanitizer_syscall_pre___timer_gettime50(timerid, value) \
+ __sanitizer_syscall_pre_impl___timer_gettime50((long long)(timerid), \
+ (long long)(value))
+#define __sanitizer_syscall_post___timer_gettime50(res, timerid, value) \
+ __sanitizer_syscall_post_impl___timer_gettime50(res, (long long)(timerid), \
+ (long long)(value))
+#if defined(NTP) || !defined(_KERNEL_OPT)
+#define __sanitizer_syscall_pre___ntp_gettime50(ntvp) \
+ __sanitizer_syscall_pre_impl___ntp_gettime50((long long)(ntvp))
+#define __sanitizer_syscall_post___ntp_gettime50(res, ntvp) \
+ __sanitizer_syscall_post_impl___ntp_gettime50(res, (long long)(ntvp))
+#else
+/* syscall 448 has been skipped */
+#endif
+#define __sanitizer_syscall_pre___wait450(pid, status, options, rusage) \
+ __sanitizer_syscall_pre_impl___wait450( \
+ (long long)(pid), (long long)(status), (long long)(options), \
+ (long long)(rusage))
+#define __sanitizer_syscall_post___wait450(res, pid, status, options, rusage) \
+ __sanitizer_syscall_post_impl___wait450( \
+ res, (long long)(pid), (long long)(status), (long long)(options), \
+ (long long)(rusage))
+#define __sanitizer_syscall_pre___mknod50(path, mode, dev) \
+ __sanitizer_syscall_pre_impl___mknod50((long long)(path), (long long)(mode), \
+ (long long)(dev))
+#define __sanitizer_syscall_post___mknod50(res, path, mode, dev) \
+ __sanitizer_syscall_post_impl___mknod50(res, (long long)(path), \
+ (long long)(mode), (long long)(dev))
+#define __sanitizer_syscall_pre___fhstat50(fhp, fh_size, sb) \
+ __sanitizer_syscall_pre_impl___fhstat50( \
+ (long long)(fhp), (long long)(fh_size), (long long)(sb))
+#define __sanitizer_syscall_post___fhstat50(res, fhp, fh_size, sb) \
+ __sanitizer_syscall_post_impl___fhstat50( \
+ res, (long long)(fhp), (long long)(fh_size), (long long)(sb))
+/* syscall 452 has been skipped */
+#define __sanitizer_syscall_pre_pipe2(fildes, flags) \
+ __sanitizer_syscall_pre_impl_pipe2((long long)(fildes), (long long)(flags))
+#define __sanitizer_syscall_post_pipe2(res, fildes, flags) \
+ __sanitizer_syscall_post_impl_pipe2(res, (long long)(fildes), \
+ (long long)(flags))
+#define __sanitizer_syscall_pre_dup3(from, to, flags) \
+ __sanitizer_syscall_pre_impl_dup3((long long)(from), (long long)(to), \
+ (long long)(flags))
+#define __sanitizer_syscall_post_dup3(res, from, to, flags) \
+ __sanitizer_syscall_post_impl_dup3(res, (long long)(from), (long long)(to), \
+ (long long)(flags))
+#define __sanitizer_syscall_pre_kqueue1(flags) \
+ __sanitizer_syscall_pre_impl_kqueue1((long long)(flags))
+#define __sanitizer_syscall_post_kqueue1(res, flags) \
+ __sanitizer_syscall_post_impl_kqueue1(res, (long long)(flags))
+#define __sanitizer_syscall_pre_paccept(s, name, anamelen, mask, flags) \
+ __sanitizer_syscall_pre_impl_paccept((long long)(s), (long long)(name), \
+ (long long)(anamelen), \
+ (long long)(mask), (long long)(flags))
+#define __sanitizer_syscall_post_paccept(res, s, name, anamelen, mask, flags) \
+ __sanitizer_syscall_post_impl_paccept( \
+ res, (long long)(s), (long long)(name), (long long)(anamelen), \
+ (long long)(mask), (long long)(flags))
+#define __sanitizer_syscall_pre_linkat(fd1, name1, fd2, name2, flags) \
+ __sanitizer_syscall_pre_impl_linkat((long long)(fd1), (long long)(name1), \
+ (long long)(fd2), (long long)(name2), \
+ (long long)(flags))
+#define __sanitizer_syscall_post_linkat(res, fd1, name1, fd2, name2, flags) \
+ __sanitizer_syscall_post_impl_linkat(res, (long long)(fd1), \
+ (long long)(name1), (long long)(fd2), \
+ (long long)(name2), (long long)(flags))
+#define __sanitizer_syscall_pre_renameat(fromfd, from, tofd, to) \
+ __sanitizer_syscall_pre_impl_renameat((long long)(fromfd), \
+ (long long)(from), (long long)(tofd), \
+ (long long)(to))
+#define __sanitizer_syscall_post_renameat(res, fromfd, from, tofd, to) \
+ __sanitizer_syscall_post_impl_renameat(res, (long long)(fromfd), \
+ (long long)(from), (long long)(tofd), \
+ (long long)(to))
+#define __sanitizer_syscall_pre_mkfifoat(fd, path, mode) \
+ __sanitizer_syscall_pre_impl_mkfifoat((long long)(fd), (long long)(path), \
+ (long long)(mode))
+#define __sanitizer_syscall_post_mkfifoat(res, fd, path, mode) \
+ __sanitizer_syscall_post_impl_mkfifoat(res, (long long)(fd), \
+ (long long)(path), (long long)(mode))
+#define __sanitizer_syscall_pre_mknodat(fd, path, mode, PAD, dev) \
+ __sanitizer_syscall_pre_impl_mknodat((long long)(fd), (long long)(path), \
+ (long long)(mode), (long long)(PAD), \
+ (long long)(dev))
+#define __sanitizer_syscall_post_mknodat(res, fd, path, mode, PAD, dev) \
+ __sanitizer_syscall_post_impl_mknodat(res, (long long)(fd), \
+ (long long)(path), (long long)(mode), \
+ (long long)(PAD), (long long)(dev))
+#define __sanitizer_syscall_pre_mkdirat(fd, path, mode) \
+ __sanitizer_syscall_pre_impl_mkdirat((long long)(fd), (long long)(path), \
+ (long long)(mode))
+#define __sanitizer_syscall_post_mkdirat(res, fd, path, mode) \
+ __sanitizer_syscall_post_impl_mkdirat(res, (long long)(fd), \
+ (long long)(path), (long long)(mode))
+#define __sanitizer_syscall_pre_faccessat(fd, path, amode, flag) \
+ __sanitizer_syscall_pre_impl_faccessat((long long)(fd), (long long)(path), \
+ (long long)(amode), \
+ (long long)(flag))
+#define __sanitizer_syscall_post_faccessat(res, fd, path, amode, flag) \
+ __sanitizer_syscall_post_impl_faccessat( \
+ res, (long long)(fd), (long long)(path), (long long)(amode), \
+ (long long)(flag))
+#define __sanitizer_syscall_pre_fchmodat(fd, path, mode, flag) \
+ __sanitizer_syscall_pre_impl_fchmodat((long long)(fd), (long long)(path), \
+ (long long)(mode), (long long)(flag))
+#define __sanitizer_syscall_post_fchmodat(res, fd, path, mode, flag) \
+ __sanitizer_syscall_post_impl_fchmodat(res, (long long)(fd), \
+ (long long)(path), (long long)(mode), \
+ (long long)(flag))
+#define __sanitizer_syscall_pre_fchownat(fd, path, owner, group, flag) \
+ __sanitizer_syscall_pre_impl_fchownat((long long)(fd), (long long)(path), \
+ (long long)(owner), \
+ (long long)(group), (long long)(flag))
+#define __sanitizer_syscall_post_fchownat(res, fd, path, owner, group, flag) \
+ __sanitizer_syscall_post_impl_fchownat( \
+ res, (long long)(fd), (long long)(path), (long long)(owner), \
+ (long long)(group), (long long)(flag))
+#define __sanitizer_syscall_pre_fexecve(fd, argp, envp) \
+ __sanitizer_syscall_pre_impl_fexecve((long long)(fd), (long long)(argp), \
+ (long long)(envp))
+#define __sanitizer_syscall_post_fexecve(res, fd, argp, envp) \
+ __sanitizer_syscall_post_impl_fexecve(res, (long long)(fd), \
+ (long long)(argp), (long long)(envp))
+#define __sanitizer_syscall_pre_fstatat(fd, path, buf, flag) \
+ __sanitizer_syscall_pre_impl_fstatat((long long)(fd), (long long)(path), \
+ (long long)(buf), (long long)(flag))
+#define __sanitizer_syscall_post_fstatat(res, fd, path, buf, flag) \
+ __sanitizer_syscall_post_impl_fstatat(res, (long long)(fd), \
+ (long long)(path), (long long)(buf), \
+ (long long)(flag))
+#define __sanitizer_syscall_pre_utimensat(fd, path, tptr, flag) \
+ __sanitizer_syscall_pre_impl_utimensat((long long)(fd), (long long)(path), \
+ (long long)(tptr), (long long)(flag))
+#define __sanitizer_syscall_post_utimensat(res, fd, path, tptr, flag) \
+ __sanitizer_syscall_post_impl_utimensat( \
+ res, (long long)(fd), (long long)(path), (long long)(tptr), \
+ (long long)(flag))
+#define __sanitizer_syscall_pre_openat(fd, path, oflags, mode) \
+ __sanitizer_syscall_pre_impl_openat((long long)(fd), (long long)(path), \
+ (long long)(oflags), (long long)(mode))
+#define __sanitizer_syscall_post_openat(res, fd, path, oflags, mode) \
+ __sanitizer_syscall_post_impl_openat(res, (long long)(fd), \
+ (long long)(path), (long long)(oflags), \
+ (long long)(mode))
+#define __sanitizer_syscall_pre_readlinkat(fd, path, buf, bufsize) \
+ __sanitizer_syscall_pre_impl_readlinkat((long long)(fd), (long long)(path), \
+ (long long)(buf), \
+ (long long)(bufsize))
+#define __sanitizer_syscall_post_readlinkat(res, fd, path, buf, bufsize) \
+ __sanitizer_syscall_post_impl_readlinkat( \
+ res, (long long)(fd), (long long)(path), (long long)(buf), \
+ (long long)(bufsize))
+#define __sanitizer_syscall_pre_symlinkat(path1, fd, path2) \
+ __sanitizer_syscall_pre_impl_symlinkat((long long)(path1), (long long)(fd), \
+ (long long)(path2))
+#define __sanitizer_syscall_post_symlinkat(res, path1, fd, path2) \
+ __sanitizer_syscall_post_impl_symlinkat(res, (long long)(path1), \
+ (long long)(fd), (long long)(path2))
+#define __sanitizer_syscall_pre_unlinkat(fd, path, flag) \
+ __sanitizer_syscall_pre_impl_unlinkat((long long)(fd), (long long)(path), \
+ (long long)(flag))
+#define __sanitizer_syscall_post_unlinkat(res, fd, path, flag) \
+ __sanitizer_syscall_post_impl_unlinkat(res, (long long)(fd), \
+ (long long)(path), (long long)(flag))
+#define __sanitizer_syscall_pre_futimens(fd, tptr) \
+ __sanitizer_syscall_pre_impl_futimens((long long)(fd), (long long)(tptr))
+#define __sanitizer_syscall_post_futimens(res, fd, tptr) \
+ __sanitizer_syscall_post_impl_futimens(res, (long long)(fd), \
+ (long long)(tptr))
+#define __sanitizer_syscall_pre___quotactl(path, args) \
+ __sanitizer_syscall_pre_impl___quotactl((long long)(path), (long long)(args))
+#define __sanitizer_syscall_post___quotactl(res, path, args) \
+ __sanitizer_syscall_post_impl___quotactl(res, (long long)(path), \
+ (long long)(args))
+#define __sanitizer_syscall_pre_posix_spawn(pid, path, file_actions, attrp, \
+ argv, envp) \
+ __sanitizer_syscall_pre_impl_posix_spawn( \
+ (long long)(pid), (long long)(path), (long long)(file_actions), \
+ (long long)(attrp), (long long)(argv), (long long)(envp))
+#define __sanitizer_syscall_post_posix_spawn(res, pid, path, file_actions, \
+ attrp, argv, envp) \
+ __sanitizer_syscall_post_impl_posix_spawn( \
+ res, (long long)(pid), (long long)(path), (long long)(file_actions), \
+ (long long)(attrp), (long long)(argv), (long long)(envp))
+#define __sanitizer_syscall_pre_recvmmsg(s, mmsg, vlen, flags, timeout) \
+ __sanitizer_syscall_pre_impl_recvmmsg((long long)(s), (long long)(mmsg), \
+ (long long)(vlen), (long long)(flags), \
+ (long long)(timeout))
+#define __sanitizer_syscall_post_recvmmsg(res, s, mmsg, vlen, flags, timeout) \
+ __sanitizer_syscall_post_impl_recvmmsg( \
+ res, (long long)(s), (long long)(mmsg), (long long)(vlen), \
+ (long long)(flags), (long long)(timeout))
+#define __sanitizer_syscall_pre_sendmmsg(s, mmsg, vlen, flags) \
+ __sanitizer_syscall_pre_impl_sendmmsg((long long)(s), (long long)(mmsg), \
+ (long long)(vlen), (long long)(flags))
+#define __sanitizer_syscall_post_sendmmsg(res, s, mmsg, vlen, flags) \
+ __sanitizer_syscall_post_impl_sendmmsg(res, (long long)(s), \
+ (long long)(mmsg), (long long)(vlen), \
+ (long long)(flags))
+#define __sanitizer_syscall_pre_clock_nanosleep(clock_id, flags, rqtp, rmtp) \
+ __sanitizer_syscall_pre_impl_clock_nanosleep( \
+ (long long)(clock_id), (long long)(flags), (long long)(rqtp), \
+ (long long)(rmtp))
+#define __sanitizer_syscall_post_clock_nanosleep(res, clock_id, flags, rqtp, \
+ rmtp) \
+ __sanitizer_syscall_post_impl_clock_nanosleep( \
+ res, (long long)(clock_id), (long long)(flags), (long long)(rqtp), \
+ (long long)(rmtp))
+#define __sanitizer_syscall_pre____lwp_park60(clock_id, flags, ts, unpark, \
+ hint, unparkhint) \
+ __sanitizer_syscall_pre_impl____lwp_park60( \
+ (long long)(clock_id), (long long)(flags), (long long)(ts), \
+ (long long)(unpark), (long long)(hint), (long long)(unparkhint))
+#define __sanitizer_syscall_post____lwp_park60(res, clock_id, flags, ts, \
+ unpark, hint, unparkhint) \
+ __sanitizer_syscall_post_impl____lwp_park60( \
+ res, (long long)(clock_id), (long long)(flags), (long long)(ts), \
+ (long long)(unpark), (long long)(hint), (long long)(unparkhint))
+#define __sanitizer_syscall_pre_posix_fallocate(fd, PAD, pos, len) \
+ __sanitizer_syscall_pre_impl_posix_fallocate( \
+ (long long)(fd), (long long)(PAD), (long long)(pos), (long long)(len))
+#define __sanitizer_syscall_post_posix_fallocate(res, fd, PAD, pos, len) \
+ __sanitizer_syscall_post_impl_posix_fallocate( \
+ res, (long long)(fd), (long long)(PAD), (long long)(pos), \
+ (long long)(len))
+#define __sanitizer_syscall_pre_fdiscard(fd, PAD, pos, len) \
+ __sanitizer_syscall_pre_impl_fdiscard((long long)(fd), (long long)(PAD), \
+ (long long)(pos), (long long)(len))
+#define __sanitizer_syscall_post_fdiscard(res, fd, PAD, pos, len) \
+ __sanitizer_syscall_post_impl_fdiscard(res, (long long)(fd), \
+ (long long)(PAD), (long long)(pos), \
+ (long long)(len))
+#define __sanitizer_syscall_pre_wait6(idtype, id, status, options, wru, info) \
+ __sanitizer_syscall_pre_impl_wait6( \
+ (long long)(idtype), (long long)(id), (long long)(status), \
+ (long long)(options), (long long)(wru), (long long)(info))
+#define __sanitizer_syscall_post_wait6(res, idtype, id, status, options, wru, \
+ info) \
+ __sanitizer_syscall_post_impl_wait6( \
+ res, (long long)(idtype), (long long)(id), (long long)(status), \
+ (long long)(options), (long long)(wru), (long long)(info))
+#define __sanitizer_syscall_pre_clock_getcpuclockid2(idtype, id, clock_id) \
+ __sanitizer_syscall_pre_impl_clock_getcpuclockid2( \
+ (long long)(idtype), (long long)(id), (long long)(clock_id))
+#define __sanitizer_syscall_post_clock_getcpuclockid2(res, idtype, id, \
+ clock_id) \
+ __sanitizer_syscall_post_impl_clock_getcpuclockid2( \
+ res, (long long)(idtype), (long long)(id), (long long)(clock_id))
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// Private declarations. Do not call directly from user code. Use macros above.
+
+// DO NOT EDIT! THIS FILE HAS BEEN GENERATED!
+
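+// A minimal usage sketch (not part of the generated interface): callers are
+// expected to bracket the raw syscall with the matching pre/post macros
+// defined above. The wrapper my_pipe2() below is hypothetical, assuming
+// NetBSD's syscall(2) and SYS_pipe2 from <sys/syscall.h>:
+//
+//   #include <sys/syscall.h>
+//   #include <unistd.h>
+//
+//   int my_pipe2(int fildes[2], int flags) {
+//     __sanitizer_syscall_pre_pipe2(fildes, flags);        // check inputs
+//     int res = syscall(SYS_pipe2, fildes, flags);         // raw syscall
+//     __sanitizer_syscall_post_pipe2(res, fildes, flags);  // record outputs
+//     return res;
+//   }
+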
+void __sanitizer_syscall_pre_impl_syscall(long long code, long long arg0,
+ long long arg1, long long arg2,
+ long long arg3, long long arg4,
+ long long arg5, long long arg6,
+ long long arg7);
+void __sanitizer_syscall_post_impl_syscall(long long res, long long code,
+ long long arg0, long long arg1,
+ long long arg2, long long arg3,
+ long long arg4, long long arg5,
+ long long arg6, long long arg7);
+void __sanitizer_syscall_pre_impl_exit(long long rval);
+void __sanitizer_syscall_post_impl_exit(long long res, long long rval);
+void __sanitizer_syscall_pre_impl_fork(void);
+void __sanitizer_syscall_post_impl_fork(long long res);
+void __sanitizer_syscall_pre_impl_read(long long fd, long long buf,
+ long long nbyte);
+void __sanitizer_syscall_post_impl_read(long long res, long long fd,
+ long long buf, long long nbyte);
+void __sanitizer_syscall_pre_impl_write(long long fd, long long buf,
+ long long nbyte);
+void __sanitizer_syscall_post_impl_write(long long res, long long fd,
+ long long buf, long long nbyte);
+void __sanitizer_syscall_pre_impl_open(long long path, long long flags,
+ long long mode);
+void __sanitizer_syscall_post_impl_open(long long res, long long path,
+ long long flags, long long mode);
+void __sanitizer_syscall_pre_impl_close(long long fd);
+void __sanitizer_syscall_post_impl_close(long long res, long long fd);
+void __sanitizer_syscall_pre_impl_compat_50_wait4(long long pid,
+ long long status,
+ long long options,
+ long long rusage);
+void __sanitizer_syscall_post_impl_compat_50_wait4(long long res, long long pid,
+ long long status,
+ long long options,
+ long long rusage);
+void __sanitizer_syscall_pre_impl_compat_43_ocreat(long long path,
+ long long mode);
+void __sanitizer_syscall_post_impl_compat_43_ocreat(long long res,
+ long long path,
+ long long mode);
+void __sanitizer_syscall_pre_impl_link(long long path, long long link);
+void __sanitizer_syscall_post_impl_link(long long res, long long path,
+ long long link);
+void __sanitizer_syscall_pre_impl_unlink(long long path);
+void __sanitizer_syscall_post_impl_unlink(long long res, long long path);
+/* syscall 11 has been skipped */
+void __sanitizer_syscall_pre_impl_chdir(long long path);
+void __sanitizer_syscall_post_impl_chdir(long long res, long long path);
+void __sanitizer_syscall_pre_impl_fchdir(long long fd);
+void __sanitizer_syscall_post_impl_fchdir(long long res, long long fd);
+void __sanitizer_syscall_pre_impl_compat_50_mknod(long long path,
+ long long mode,
+ long long dev);
+void __sanitizer_syscall_post_impl_compat_50_mknod(long long res,
+ long long path,
+ long long mode,
+ long long dev);
+void __sanitizer_syscall_pre_impl_chmod(long long path, long long mode);
+void __sanitizer_syscall_post_impl_chmod(long long res, long long path,
+ long long mode);
+void __sanitizer_syscall_pre_impl_chown(long long path, long long uid,
+ long long gid);
+void __sanitizer_syscall_post_impl_chown(long long res, long long path,
+ long long uid, long long gid);
+void __sanitizer_syscall_pre_impl_break(long long nsize);
+void __sanitizer_syscall_post_impl_break(long long res, long long nsize);
+void __sanitizer_syscall_pre_impl_compat_20_getfsstat(long long buf,
+ long long bufsize,
+ long long flags);
+void __sanitizer_syscall_post_impl_compat_20_getfsstat(long long res,
+ long long buf,
+ long long bufsize,
+ long long flags);
+void __sanitizer_syscall_pre_impl_compat_43_olseek(long long fd,
+ long long offset,
+ long long whence);
+void __sanitizer_syscall_post_impl_compat_43_olseek(long long res, long long fd,
+ long long offset,
+ long long whence);
+void __sanitizer_syscall_pre_impl_getpid(void);
+void __sanitizer_syscall_post_impl_getpid(long long res);
+void __sanitizer_syscall_pre_impl_compat_40_mount(long long type,
+ long long path,
+ long long flags,
+ long long data);
+void __sanitizer_syscall_post_impl_compat_40_mount(long long res,
+ long long type,
+ long long path,
+ long long flags,
+ long long data);
+void __sanitizer_syscall_pre_impl_unmount(long long path, long long flags);
+void __sanitizer_syscall_post_impl_unmount(long long res, long long path,
+ long long flags);
+void __sanitizer_syscall_pre_impl_setuid(long long uid);
+void __sanitizer_syscall_post_impl_setuid(long long res, long long uid);
+void __sanitizer_syscall_pre_impl_getuid(void);
+void __sanitizer_syscall_post_impl_getuid(long long res);
+void __sanitizer_syscall_pre_impl_geteuid(void);
+void __sanitizer_syscall_post_impl_geteuid(long long res);
+void __sanitizer_syscall_pre_impl_ptrace(long long req, long long pid,
+ long long addr, long long data);
+void __sanitizer_syscall_post_impl_ptrace(long long res, long long req,
+ long long pid, long long addr,
+ long long data);
+void __sanitizer_syscall_pre_impl_recvmsg(long long s, long long msg,
+ long long flags);
+void __sanitizer_syscall_post_impl_recvmsg(long long res, long long s,
+ long long msg, long long flags);
+void __sanitizer_syscall_pre_impl_sendmsg(long long s, long long msg,
+ long long flags);
+void __sanitizer_syscall_post_impl_sendmsg(long long res, long long s,
+ long long msg, long long flags);
+void __sanitizer_syscall_pre_impl_recvfrom(long long s, long long buf,
+ long long len, long long flags,
+ long long from,
+ long long fromlenaddr);
+void __sanitizer_syscall_post_impl_recvfrom(long long res, long long s,
+ long long buf, long long len,
+ long long flags, long long from,
+ long long fromlenaddr);
+void __sanitizer_syscall_pre_impl_accept(long long s, long long name,
+ long long anamelen);
+void __sanitizer_syscall_post_impl_accept(long long res, long long s,
+ long long name, long long anamelen);
+void __sanitizer_syscall_pre_impl_getpeername(long long fdes, long long asa,
+ long long alen);
+void __sanitizer_syscall_post_impl_getpeername(long long res, long long fdes,
+ long long asa, long long alen);
+void __sanitizer_syscall_pre_impl_getsockname(long long fdes, long long asa,
+ long long alen);
+void __sanitizer_syscall_post_impl_getsockname(long long res, long long fdes,
+ long long asa, long long alen);
+void __sanitizer_syscall_pre_impl_access(long long path, long long flags);
+void __sanitizer_syscall_post_impl_access(long long res, long long path,
+ long long flags);
+void __sanitizer_syscall_pre_impl_chflags(long long path, long long flags);
+void __sanitizer_syscall_post_impl_chflags(long long res, long long path,
+ long long flags);
+void __sanitizer_syscall_pre_impl_fchflags(long long fd, long long flags);
+void __sanitizer_syscall_post_impl_fchflags(long long res, long long fd,
+ long long flags);
+void __sanitizer_syscall_pre_impl_sync(void);
+void __sanitizer_syscall_post_impl_sync(long long res);
+void __sanitizer_syscall_pre_impl_kill(long long pid, long long signum);
+void __sanitizer_syscall_post_impl_kill(long long res, long long pid,
+ long long signum);
+void __sanitizer_syscall_pre_impl_compat_43_stat43(long long path,
+ long long ub);
+void __sanitizer_syscall_post_impl_compat_43_stat43(long long res,
+ long long path,
+ long long ub);
+void __sanitizer_syscall_pre_impl_getppid(void);
+void __sanitizer_syscall_post_impl_getppid(long long res);
+void __sanitizer_syscall_pre_impl_compat_43_lstat43(long long path,
+ long long ub);
+void __sanitizer_syscall_post_impl_compat_43_lstat43(long long res,
+ long long path,
+ long long ub);
+void __sanitizer_syscall_pre_impl_dup(long long fd);
+void __sanitizer_syscall_post_impl_dup(long long res, long long fd);
+void __sanitizer_syscall_pre_impl_pipe(void);
+void __sanitizer_syscall_post_impl_pipe(long long res);
+void __sanitizer_syscall_pre_impl_getegid(void);
+void __sanitizer_syscall_post_impl_getegid(long long res);
+void __sanitizer_syscall_pre_impl_profil(long long samples, long long size,
+ long long offset, long long scale);
+void __sanitizer_syscall_post_impl_profil(long long res, long long samples,
+ long long size, long long offset,
+ long long scale);
+void __sanitizer_syscall_pre_impl_ktrace(long long fname, long long ops,
+ long long facs, long long pid);
+void __sanitizer_syscall_post_impl_ktrace(long long res, long long fname,
+ long long ops, long long facs,
+ long long pid);
+void __sanitizer_syscall_pre_impl_compat_13_sigaction13(long long signum,
+ long long nsa,
+ long long osa);
+void __sanitizer_syscall_post_impl_compat_13_sigaction13(long long res,
+ long long signum,
+ long long nsa,
+ long long osa);
+void __sanitizer_syscall_pre_impl_getgid(void);
+void __sanitizer_syscall_post_impl_getgid(long long res);
+void __sanitizer_syscall_pre_impl_compat_13_sigprocmask13(long long how,
+ long long mask);
+void __sanitizer_syscall_post_impl_compat_13_sigprocmask13(long long res,
+ long long how,
+ long long mask);
+void __sanitizer_syscall_pre_impl___getlogin(long long namebuf,
+ long long namelen);
+void __sanitizer_syscall_post_impl___getlogin(long long res, long long namebuf,
+ long long namelen);
+void __sanitizer_syscall_pre_impl___setlogin(long long namebuf);
+void __sanitizer_syscall_post_impl___setlogin(long long res, long long namebuf);
+void __sanitizer_syscall_pre_impl_acct(long long path);
+void __sanitizer_syscall_post_impl_acct(long long res, long long path);
+void __sanitizer_syscall_pre_impl_compat_13_sigpending13(void);
+void __sanitizer_syscall_post_impl_compat_13_sigpending13(long long res);
+void __sanitizer_syscall_pre_impl_compat_13_sigaltstack13(long long nss,
+ long long oss);
+void __sanitizer_syscall_post_impl_compat_13_sigaltstack13(long long res,
+ long long nss,
+ long long oss);
+void __sanitizer_syscall_pre_impl_ioctl(long long fd, long long com,
+ long long data);
+void __sanitizer_syscall_post_impl_ioctl(long long res, long long fd,
+ long long com, long long data);
+void __sanitizer_syscall_pre_impl_compat_12_oreboot(long long opt);
+void __sanitizer_syscall_post_impl_compat_12_oreboot(long long res,
+ long long opt);
+void __sanitizer_syscall_pre_impl_revoke(long long path);
+void __sanitizer_syscall_post_impl_revoke(long long res, long long path);
+void __sanitizer_syscall_pre_impl_symlink(long long path, long long link);
+void __sanitizer_syscall_post_impl_symlink(long long res, long long path,
+ long long link);
+void __sanitizer_syscall_pre_impl_readlink(long long path, long long buf,
+ long long count);
+void __sanitizer_syscall_post_impl_readlink(long long res, long long path,
+ long long buf, long long count);
+void __sanitizer_syscall_pre_impl_execve(long long path, long long argp,
+ long long envp);
+void __sanitizer_syscall_post_impl_execve(long long res, long long path,
+ long long argp, long long envp);
+void __sanitizer_syscall_pre_impl_umask(long long newmask);
+void __sanitizer_syscall_post_impl_umask(long long res, long long newmask);
+void __sanitizer_syscall_pre_impl_chroot(long long path);
+void __sanitizer_syscall_post_impl_chroot(long long res, long long path);
+void __sanitizer_syscall_pre_impl_compat_43_fstat43(long long fd, long long sb);
+void __sanitizer_syscall_post_impl_compat_43_fstat43(long long res,
+ long long fd,
+ long long sb);
+void __sanitizer_syscall_pre_impl_compat_43_ogetkerninfo(long long op,
+ long long where,
+ long long size,
+ long long arg);
+void __sanitizer_syscall_post_impl_compat_43_ogetkerninfo(long long res,
+ long long op,
+ long long where,
+ long long size,
+ long long arg);
+void __sanitizer_syscall_pre_impl_compat_43_ogetpagesize(void);
+void __sanitizer_syscall_post_impl_compat_43_ogetpagesize(long long res);
+void __sanitizer_syscall_pre_impl_compat_12_msync(long long addr,
+ long long len);
+void __sanitizer_syscall_post_impl_compat_12_msync(long long res,
+ long long addr,
+ long long len);
+void __sanitizer_syscall_pre_impl_vfork(void);
+void __sanitizer_syscall_post_impl_vfork(long long res);
+/* syscall 67 has been skipped */
+/* syscall 68 has been skipped */
+/* syscall 69 has been skipped */
+/* syscall 70 has been skipped */
+void __sanitizer_syscall_pre_impl_compat_43_ommap(long long addr, long long len,
+ long long prot,
+ long long flags, long long fd,
+ long long pos);
+void __sanitizer_syscall_post_impl_compat_43_ommap(
+ long long res, long long addr, long long len, long long prot,
+ long long flags, long long fd, long long pos);
+void __sanitizer_syscall_pre_impl_vadvise(long long anom);
+void __sanitizer_syscall_post_impl_vadvise(long long res, long long anom);
+void __sanitizer_syscall_pre_impl_munmap(long long addr, long long len);
+void __sanitizer_syscall_post_impl_munmap(long long res, long long addr,
+ long long len);
+void __sanitizer_syscall_pre_impl_mprotect(long long addr, long long len,
+ long long prot);
+void __sanitizer_syscall_post_impl_mprotect(long long res, long long addr,
+ long long len, long long prot);
+void __sanitizer_syscall_pre_impl_madvise(long long addr, long long len,
+ long long behav);
+void __sanitizer_syscall_post_impl_madvise(long long res, long long addr,
+ long long len, long long behav);
+/* syscall 76 has been skipped */
+/* syscall 77 has been skipped */
+void __sanitizer_syscall_pre_impl_mincore(long long addr, long long len,
+ long long vec);
+void __sanitizer_syscall_post_impl_mincore(long long res, long long addr,
+ long long len, long long vec);
+void __sanitizer_syscall_pre_impl_getgroups(long long gidsetsize,
+ long long gidset);
+void __sanitizer_syscall_post_impl_getgroups(long long res,
+ long long gidsetsize,
+ long long gidset);
+void __sanitizer_syscall_pre_impl_setgroups(long long gidsetsize,
+ long long gidset);
+void __sanitizer_syscall_post_impl_setgroups(long long res,
+ long long gidsetsize,
+ long long gidset);
+void __sanitizer_syscall_pre_impl_getpgrp(void);
+void __sanitizer_syscall_post_impl_getpgrp(long long res);
+void __sanitizer_syscall_pre_impl_setpgid(long long pid, long long pgid);
+void __sanitizer_syscall_post_impl_setpgid(long long res, long long pid,
+ long long pgid);
+void __sanitizer_syscall_pre_impl_compat_50_setitimer(long long which,
+ long long itv,
+ long long oitv);
+void __sanitizer_syscall_post_impl_compat_50_setitimer(long long res,
+ long long which,
+ long long itv,
+ long long oitv);
+void __sanitizer_syscall_pre_impl_compat_43_owait(void);
+void __sanitizer_syscall_post_impl_compat_43_owait(long long res);
+void __sanitizer_syscall_pre_impl_compat_12_oswapon(long long name);
+void __sanitizer_syscall_post_impl_compat_12_oswapon(long long res,
+ long long name);
+void __sanitizer_syscall_pre_impl_compat_50_getitimer(long long which,
+ long long itv);
+void __sanitizer_syscall_post_impl_compat_50_getitimer(long long res,
+ long long which,
+ long long itv);
+void __sanitizer_syscall_pre_impl_compat_43_ogethostname(long long hostname,
+ long long len);
+void __sanitizer_syscall_post_impl_compat_43_ogethostname(long long res,
+ long long hostname,
+ long long len);
+void __sanitizer_syscall_pre_impl_compat_43_osethostname(long long hostname,
+ long long len);
+void __sanitizer_syscall_post_impl_compat_43_osethostname(long long res,
+ long long hostname,
+ long long len);
+void __sanitizer_syscall_pre_impl_compat_43_ogetdtablesize(void);
+void __sanitizer_syscall_post_impl_compat_43_ogetdtablesize(long long res);
+void __sanitizer_syscall_pre_impl_dup2(long long from, long long to);
+void __sanitizer_syscall_post_impl_dup2(long long res, long long from,
+ long long to);
+/* syscall 91 has been skipped */
+void __sanitizer_syscall_pre_impl_fcntl(long long fd, long long cmd,
+ long long arg);
+void __sanitizer_syscall_post_impl_fcntl(long long res, long long fd,
+ long long cmd, long long arg);
+void __sanitizer_syscall_pre_impl_compat_50_select(long long nd, long long in,
+ long long ou, long long ex,
+ long long tv);
+void __sanitizer_syscall_post_impl_compat_50_select(long long res, long long nd,
+ long long in, long long ou,
+ long long ex, long long tv);
+/* syscall 94 has been skipped */
+void __sanitizer_syscall_pre_impl_fsync(long long fd);
+void __sanitizer_syscall_post_impl_fsync(long long res, long long fd);
+void __sanitizer_syscall_pre_impl_setpriority(long long which, long long who,
+ long long prio);
+void __sanitizer_syscall_post_impl_setpriority(long long res, long long which,
+ long long who, long long prio);
+void __sanitizer_syscall_pre_impl_compat_30_socket(long long domain,
+ long long type,
+ long long protocol);
+void __sanitizer_syscall_post_impl_compat_30_socket(long long res,
+ long long domain,
+ long long type,
+ long long protocol);
+void __sanitizer_syscall_pre_impl_connect(long long s, long long name,
+ long long namelen);
+void __sanitizer_syscall_post_impl_connect(long long res, long long s,
+ long long name, long long namelen);
+void __sanitizer_syscall_pre_impl_compat_43_oaccept(long long s, long long name,
+ long long anamelen);
+void __sanitizer_syscall_post_impl_compat_43_oaccept(long long res, long long s,
+ long long name,
+ long long anamelen);
+void __sanitizer_syscall_pre_impl_getpriority(long long which, long long who);
+void __sanitizer_syscall_post_impl_getpriority(long long res, long long which,
+ long long who);
+void __sanitizer_syscall_pre_impl_compat_43_osend(long long s, long long buf,
+ long long len,
+ long long flags);
+void __sanitizer_syscall_post_impl_compat_43_osend(long long res, long long s,
+ long long buf, long long len,
+ long long flags);
+void __sanitizer_syscall_pre_impl_compat_43_orecv(long long s, long long buf,
+ long long len,
+ long long flags);
+void __sanitizer_syscall_post_impl_compat_43_orecv(long long res, long long s,
+ long long buf, long long len,
+ long long flags);
+void __sanitizer_syscall_pre_impl_compat_13_sigreturn13(long long sigcntxp);
+void __sanitizer_syscall_post_impl_compat_13_sigreturn13(long long res,
+ long long sigcntxp);
+void __sanitizer_syscall_pre_impl_bind(long long s, long long name,
+ long long namelen);
+void __sanitizer_syscall_post_impl_bind(long long res, long long s,
+ long long name, long long namelen);
+void __sanitizer_syscall_pre_impl_setsockopt(long long s, long long level,
+ long long name, long long val,
+ long long valsize);
+void __sanitizer_syscall_post_impl_setsockopt(long long res, long long s,
+ long long level, long long name,
+ long long val, long long valsize);
+void __sanitizer_syscall_pre_impl_listen(long long s, long long backlog);
+void __sanitizer_syscall_post_impl_listen(long long res, long long s,
+ long long backlog);
+/* syscall 107 has been skipped */
+void __sanitizer_syscall_pre_impl_compat_43_osigvec(long long signum,
+ long long nsv,
+ long long osv);
+void __sanitizer_syscall_post_impl_compat_43_osigvec(long long res,
+ long long signum,
+ long long nsv,
+ long long osv);
+void __sanitizer_syscall_pre_impl_compat_43_osigblock(long long mask);
+void __sanitizer_syscall_post_impl_compat_43_osigblock(long long res,
+ long long mask);
+void __sanitizer_syscall_pre_impl_compat_43_osigsetmask(long long mask);
+void __sanitizer_syscall_post_impl_compat_43_osigsetmask(long long res,
+ long long mask);
+void __sanitizer_syscall_pre_impl_compat_13_sigsuspend13(long long mask);
+void __sanitizer_syscall_post_impl_compat_13_sigsuspend13(long long res,
+ long long mask);
+void __sanitizer_syscall_pre_impl_compat_43_osigstack(long long nss,
+ long long oss);
+void __sanitizer_syscall_post_impl_compat_43_osigstack(long long res,
+ long long nss,
+ long long oss);
+void __sanitizer_syscall_pre_impl_compat_43_orecvmsg(long long s, long long msg,
+ long long flags);
+void __sanitizer_syscall_post_impl_compat_43_orecvmsg(long long res,
+ long long s,
+ long long msg,
+ long long flags);
+void __sanitizer_syscall_pre_impl_compat_43_osendmsg(long long s, long long msg,
+ long long flags);
+void __sanitizer_syscall_post_impl_compat_43_osendmsg(long long res,
+ long long s,
+ long long msg,
+ long long flags);
+/* syscall 115 has been skipped */
+void __sanitizer_syscall_pre_impl_compat_50_gettimeofday(long long tp,
+ long long tzp);
+void __sanitizer_syscall_post_impl_compat_50_gettimeofday(long long res,
+ long long tp,
+ long long tzp);
+void __sanitizer_syscall_pre_impl_compat_50_getrusage(long long who,
+ long long rusage);
+void __sanitizer_syscall_post_impl_compat_50_getrusage(long long res,
+ long long who,
+ long long rusage);
+void __sanitizer_syscall_pre_impl_getsockopt(long long s, long long level,
+ long long name, long long val,
+ long long avalsize);
+void __sanitizer_syscall_post_impl_getsockopt(long long res, long long s,
+ long long level, long long name,
+ long long val,
+ long long avalsize);
+/* syscall 119 has been skipped */
+void __sanitizer_syscall_pre_impl_readv(long long fd, long long iovp,
+ long long iovcnt);
+void __sanitizer_syscall_post_impl_readv(long long res, long long fd,
+ long long iovp, long long iovcnt);
+void __sanitizer_syscall_pre_impl_writev(long long fd, long long iovp,
+ long long iovcnt);
+void __sanitizer_syscall_post_impl_writev(long long res, long long fd,
+ long long iovp, long long iovcnt);
+void __sanitizer_syscall_pre_impl_compat_50_settimeofday(long long tv,
+ long long tzp);
+void __sanitizer_syscall_post_impl_compat_50_settimeofday(long long res,
+ long long tv,
+ long long tzp);
+void __sanitizer_syscall_pre_impl_fchown(long long fd, long long uid,
+ long long gid);
+void __sanitizer_syscall_post_impl_fchown(long long res, long long fd,
+ long long uid, long long gid);
+void __sanitizer_syscall_pre_impl_fchmod(long long fd, long long mode);
+void __sanitizer_syscall_post_impl_fchmod(long long res, long long fd,
+ long long mode);
+void __sanitizer_syscall_pre_impl_compat_43_orecvfrom(
+ long long s, long long buf, long long len, long long flags, long long from,
+ long long fromlenaddr);
+void __sanitizer_syscall_post_impl_compat_43_orecvfrom(
+ long long res, long long s, long long buf, long long len, long long flags,
+ long long from, long long fromlenaddr);
+void __sanitizer_syscall_pre_impl_setreuid(long long ruid, long long euid);
+void __sanitizer_syscall_post_impl_setreuid(long long res, long long ruid,
+ long long euid);
+void __sanitizer_syscall_pre_impl_setregid(long long rgid, long long egid);
+void __sanitizer_syscall_post_impl_setregid(long long res, long long rgid,
+ long long egid);
+void __sanitizer_syscall_pre_impl_rename(long long from, long long to);
+void __sanitizer_syscall_post_impl_rename(long long res, long long from,
+ long long to);
+void __sanitizer_syscall_pre_impl_compat_43_otruncate(long long path,
+ long long length);
+void __sanitizer_syscall_post_impl_compat_43_otruncate(long long res,
+ long long path,
+ long long length);
+void __sanitizer_syscall_pre_impl_compat_43_oftruncate(long long fd,
+ long long length);
+void __sanitizer_syscall_post_impl_compat_43_oftruncate(long long res,
+ long long fd,
+ long long length);
+void __sanitizer_syscall_pre_impl_flock(long long fd, long long how);
+void __sanitizer_syscall_post_impl_flock(long long res, long long fd,
+ long long how);
+void __sanitizer_syscall_pre_impl_mkfifo(long long path, long long mode);
+void __sanitizer_syscall_post_impl_mkfifo(long long res, long long path,
+ long long mode);
+void __sanitizer_syscall_pre_impl_sendto(long long s, long long buf,
+ long long len, long long flags,
+ long long to, long long tolen);
+void __sanitizer_syscall_post_impl_sendto(long long res, long long s,
+ long long buf, long long len,
+ long long flags, long long to,
+ long long tolen);
+void __sanitizer_syscall_pre_impl_shutdown(long long s, long long how);
+void __sanitizer_syscall_post_impl_shutdown(long long res, long long s,
+ long long how);
+void __sanitizer_syscall_pre_impl_socketpair(long long domain, long long type,
+ long long protocol, long long rsv);
+void __sanitizer_syscall_post_impl_socketpair(long long res, long long domain,
+ long long type,
+ long long protocol,
+ long long rsv);
+void __sanitizer_syscall_pre_impl_mkdir(long long path, long long mode);
+void __sanitizer_syscall_post_impl_mkdir(long long res, long long path,
+ long long mode);
+void __sanitizer_syscall_pre_impl_rmdir(long long path);
+void __sanitizer_syscall_post_impl_rmdir(long long res, long long path);
+void __sanitizer_syscall_pre_impl_compat_50_utimes(long long path,
+ long long tptr);
+void __sanitizer_syscall_post_impl_compat_50_utimes(long long res,
+ long long path,
+ long long tptr);
+/* syscall 139 has been skipped */
+void __sanitizer_syscall_pre_impl_compat_50_adjtime(long long delta,
+ long long olddelta);
+void __sanitizer_syscall_post_impl_compat_50_adjtime(long long res,
+ long long delta,
+ long long olddelta);
+void __sanitizer_syscall_pre_impl_compat_43_ogetpeername(long long fdes,
+ long long asa,
+ long long alen);
+void __sanitizer_syscall_post_impl_compat_43_ogetpeername(long long res,
+ long long fdes,
+ long long asa,
+ long long alen);
+void __sanitizer_syscall_pre_impl_compat_43_ogethostid(void);
+void __sanitizer_syscall_post_impl_compat_43_ogethostid(long long res);
+void __sanitizer_syscall_pre_impl_compat_43_osethostid(long long hostid);
+void __sanitizer_syscall_post_impl_compat_43_osethostid(long long res,
+ long long hostid);
+void __sanitizer_syscall_pre_impl_compat_43_ogetrlimit(long long which,
+ long long rlp);
+void __sanitizer_syscall_post_impl_compat_43_ogetrlimit(long long res,
+ long long which,
+ long long rlp);
+void __sanitizer_syscall_pre_impl_compat_43_osetrlimit(long long which,
+ long long rlp);
+void __sanitizer_syscall_post_impl_compat_43_osetrlimit(long long res,
+ long long which,
+ long long rlp);
+void __sanitizer_syscall_pre_impl_compat_43_okillpg(long long pgid,
+ long long signum);
+void __sanitizer_syscall_post_impl_compat_43_okillpg(long long res,
+ long long pgid,
+ long long signum);
+void __sanitizer_syscall_pre_impl_setsid(void);
+void __sanitizer_syscall_post_impl_setsid(long long res);
+void __sanitizer_syscall_pre_impl_compat_50_quotactl(long long path,
+ long long cmd,
+ long long uid,
+ long long arg);
+void __sanitizer_syscall_post_impl_compat_50_quotactl(
+ long long res, long long path, long long cmd, long long uid, long long arg);
+void __sanitizer_syscall_pre_impl_compat_43_oquota(void);
+void __sanitizer_syscall_post_impl_compat_43_oquota(long long res);
+void __sanitizer_syscall_pre_impl_compat_43_ogetsockname(long long fdec,
+ long long asa,
+ long long alen);
+void __sanitizer_syscall_post_impl_compat_43_ogetsockname(long long res,
+ long long fdec,
+ long long asa,
+ long long alen);
+/* syscall 151 has been skipped */
+/* syscall 152 has been skipped */
+/* syscall 153 has been skipped */
+/* syscall 154 has been skipped */
+void __sanitizer_syscall_pre_impl_nfssvc(long long flag, long long argp);
+void __sanitizer_syscall_post_impl_nfssvc(long long res, long long flag,
+ long long argp);
+void __sanitizer_syscall_pre_impl_compat_43_ogetdirentries(long long fd,
+ long long buf,
+ long long count,
+ long long basep);
+void __sanitizer_syscall_post_impl_compat_43_ogetdirentries(long long res,
+ long long fd,
+ long long buf,
+ long long count,
+ long long basep);
+void __sanitizer_syscall_pre_impl_compat_20_statfs(long long path,
+ long long buf);
+void __sanitizer_syscall_post_impl_compat_20_statfs(long long res,
+ long long path,
+ long long buf);
+void __sanitizer_syscall_pre_impl_compat_20_fstatfs(long long fd,
+ long long buf);
+void __sanitizer_syscall_post_impl_compat_20_fstatfs(long long res,
+ long long fd,
+ long long buf);
+/* syscall 159 has been skipped */
+/* syscall 160 has been skipped */
+void __sanitizer_syscall_pre_impl_compat_30_getfh(long long fname,
+ long long fhp);
+void __sanitizer_syscall_post_impl_compat_30_getfh(long long res,
+ long long fname,
+ long long fhp);
+void __sanitizer_syscall_pre_impl_compat_09_ogetdomainname(long long domainname,
+ long long len);
+void __sanitizer_syscall_post_impl_compat_09_ogetdomainname(
+ long long res, long long domainname, long long len);
+void __sanitizer_syscall_pre_impl_compat_09_osetdomainname(long long domainname,
+ long long len);
+void __sanitizer_syscall_post_impl_compat_09_osetdomainname(
+ long long res, long long domainname, long long len);
+void __sanitizer_syscall_pre_impl_compat_09_ouname(long long name);
+void __sanitizer_syscall_post_impl_compat_09_ouname(long long res,
+ long long name);
+void __sanitizer_syscall_pre_impl_sysarch(long long op, long long parms);
+void __sanitizer_syscall_post_impl_sysarch(long long res, long long op,
+ long long parms);
+/* syscall 166 has been skipped */
+/* syscall 167 has been skipped */
+/* syscall 168 has been skipped */
+#if !defined(_LP64)
+void __sanitizer_syscall_pre_impl_compat_10_osemsys(long long which,
+ long long a2, long long a3,
+ long long a4, long long a5);
+void __sanitizer_syscall_post_impl_compat_10_osemsys(long long res,
+ long long which,
+ long long a2, long long a3,
+ long long a4,
+ long long a5);
+#else
+/* syscall 169 has been skipped */
+#endif
+#if !defined(_LP64)
+void __sanitizer_syscall_pre_impl_compat_10_omsgsys(long long which,
+ long long a2, long long a3,
+ long long a4, long long a5,
+ long long a6);
+void __sanitizer_syscall_post_impl_compat_10_omsgsys(long long res,
+ long long which,
+ long long a2, long long a3,
+ long long a4, long long a5,
+ long long a6);
+#else
+/* syscall 170 has been skipped */
+#endif
+#if !defined(_LP64)
+void __sanitizer_syscall_pre_impl_compat_10_oshmsys(long long which,
+ long long a2, long long a3,
+ long long a4);
+void __sanitizer_syscall_post_impl_compat_10_oshmsys(long long res,
+ long long which,
+ long long a2, long long a3,
+ long long a4);
+#else
+/* syscall 171 has been skipped */
+#endif
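+/*
+ * The three #if !defined(_LP64) blocks above cover the compat_10 SysV IPC
+ * multiplexers (osemsys, omsgsys, oshmsys), which appear to exist only on
+ * 32-bit (ILP32) targets; the matching #else branches simply skip those
+ * syscall numbers (169-171) on LP64 targets.
+ */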
+/* syscall 172 has been skipped */
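+/*
+ * In the declarations below (pread, pwrite, and later lseek, truncate,
+ * ftruncate, mmap, preadv, pwritev), the PAD argument reflects explicit
+ * alignment padding that the kernel ABI inserts before a 64-bit off_t
+ * argument on 32-bit platforms; it carries no data of its own.
+ */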
+void __sanitizer_syscall_pre_impl_pread(long long fd, long long buf,
+ long long nbyte, long long PAD,
+ long long offset);
+void __sanitizer_syscall_post_impl_pread(long long res, long long fd,
+ long long buf, long long nbyte,
+ long long PAD, long long offset);
+void __sanitizer_syscall_pre_impl_pwrite(long long fd, long long buf,
+ long long nbyte, long long PAD,
+ long long offset);
+void __sanitizer_syscall_post_impl_pwrite(long long res, long long fd,
+ long long buf, long long nbyte,
+ long long PAD, long long offset);
+void __sanitizer_syscall_pre_impl_compat_30_ntp_gettime(long long ntvp);
+void __sanitizer_syscall_post_impl_compat_30_ntp_gettime(long long res,
+ long long ntvp);
+#if defined(NTP) || !defined(_KERNEL_OPT)
+void __sanitizer_syscall_pre_impl_ntp_adjtime(long long tp);
+void __sanitizer_syscall_post_impl_ntp_adjtime(long long res, long long tp);
+#else
+/* syscall 176 has been skipped */
+#endif
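+/*
+ * ntp_adjtime above is guarded because it is a kernel-option-dependent
+ * syscall: the hook appears to be declared when the NTP option is enabled,
+ * or in userland builds where _KERNEL_OPT is not defined and the option
+ * state is unknown; otherwise syscall 176 is skipped.
+ */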
+/* syscall 177 has been skipped */
+/* syscall 178 has been skipped */
+/* syscall 179 has been skipped */
+/* syscall 180 has been skipped */
+void __sanitizer_syscall_pre_impl_setgid(long long gid);
+void __sanitizer_syscall_post_impl_setgid(long long res, long long gid);
+void __sanitizer_syscall_pre_impl_setegid(long long egid);
+void __sanitizer_syscall_post_impl_setegid(long long res, long long egid);
+void __sanitizer_syscall_pre_impl_seteuid(long long euid);
+void __sanitizer_syscall_post_impl_seteuid(long long res, long long euid);
+void __sanitizer_syscall_pre_impl_lfs_bmapv(long long fsidp, long long blkiov,
+ long long blkcnt);
+void __sanitizer_syscall_post_impl_lfs_bmapv(long long res, long long fsidp,
+ long long blkiov,
+ long long blkcnt);
+void __sanitizer_syscall_pre_impl_lfs_markv(long long fsidp, long long blkiov,
+ long long blkcnt);
+void __sanitizer_syscall_post_impl_lfs_markv(long long res, long long fsidp,
+ long long blkiov,
+ long long blkcnt);
+void __sanitizer_syscall_pre_impl_lfs_segclean(long long fsidp,
+ long long segment);
+void __sanitizer_syscall_post_impl_lfs_segclean(long long res, long long fsidp,
+ long long segment);
+void __sanitizer_syscall_pre_impl_compat_50_lfs_segwait(long long fsidp,
+ long long tv);
+void __sanitizer_syscall_post_impl_compat_50_lfs_segwait(long long res,
+ long long fsidp,
+ long long tv);
+void __sanitizer_syscall_pre_impl_compat_12_stat12(long long path,
+ long long ub);
+void __sanitizer_syscall_post_impl_compat_12_stat12(long long res,
+ long long path,
+ long long ub);
+void __sanitizer_syscall_pre_impl_compat_12_fstat12(long long fd, long long sb);
+void __sanitizer_syscall_post_impl_compat_12_fstat12(long long res,
+ long long fd,
+ long long sb);
+void __sanitizer_syscall_pre_impl_compat_12_lstat12(long long path,
+ long long ub);
+void __sanitizer_syscall_post_impl_compat_12_lstat12(long long res,
+ long long path,
+ long long ub);
+void __sanitizer_syscall_pre_impl_pathconf(long long path, long long name);
+void __sanitizer_syscall_post_impl_pathconf(long long res, long long path,
+ long long name);
+void __sanitizer_syscall_pre_impl_fpathconf(long long fd, long long name);
+void __sanitizer_syscall_post_impl_fpathconf(long long res, long long fd,
+ long long name);
+void __sanitizer_syscall_pre_impl_getsockopt2(long long s, long long level,
+ long long name, long long val,
+ long long avalsize);
+void __sanitizer_syscall_post_impl_getsockopt2(long long res, long long s,
+ long long level, long long name,
+ long long val,
+ long long avalsize);
+void __sanitizer_syscall_pre_impl_getrlimit(long long which, long long rlp);
+void __sanitizer_syscall_post_impl_getrlimit(long long res, long long which,
+ long long rlp);
+void __sanitizer_syscall_pre_impl_setrlimit(long long which, long long rlp);
+void __sanitizer_syscall_post_impl_setrlimit(long long res, long long which,
+ long long rlp);
+void __sanitizer_syscall_pre_impl_compat_12_getdirentries(long long fd,
+ long long buf,
+ long long count,
+ long long basep);
+void __sanitizer_syscall_post_impl_compat_12_getdirentries(long long res,
+ long long fd,
+ long long buf,
+ long long count,
+ long long basep);
+void __sanitizer_syscall_pre_impl_mmap(long long addr, long long len,
+ long long prot, long long flags,
+ long long fd, long long PAD,
+ long long pos);
+void __sanitizer_syscall_post_impl_mmap(long long res, long long addr,
+ long long len, long long prot,
+ long long flags, long long fd,
+ long long PAD, long long pos);
+void __sanitizer_syscall_pre_impl___syscall(long long code, long long arg0,
+ long long arg1, long long arg2,
+ long long arg3, long long arg4,
+ long long arg5, long long arg6,
+ long long arg7);
+void __sanitizer_syscall_post_impl___syscall(long long res, long long code,
+ long long arg0, long long arg1,
+ long long arg2, long long arg3,
+ long long arg4, long long arg5,
+ long long arg6, long long arg7);
+void __sanitizer_syscall_pre_impl_lseek(long long fd, long long PAD,
+ long long offset, long long whence);
+void __sanitizer_syscall_post_impl_lseek(long long res, long long fd,
+ long long PAD, long long offset,
+ long long whence);
+void __sanitizer_syscall_pre_impl_truncate(long long path, long long PAD,
+ long long length);
+void __sanitizer_syscall_post_impl_truncate(long long res, long long path,
+ long long PAD, long long length);
+void __sanitizer_syscall_pre_impl_ftruncate(long long fd, long long PAD,
+ long long length);
+void __sanitizer_syscall_post_impl_ftruncate(long long res, long long fd,
+ long long PAD, long long length);
+void __sanitizer_syscall_pre_impl___sysctl(long long name, long long namelen,
+ long long oldv, long long oldlenp,
+ long long newv, long long newlen);
+void __sanitizer_syscall_post_impl___sysctl(long long res, long long name,
+ long long namelen, long long oldv,
+ long long oldlenp, long long newv,
+ long long newlen);
+void __sanitizer_syscall_pre_impl_mlock(long long addr, long long len);
+void __sanitizer_syscall_post_impl_mlock(long long res, long long addr,
+ long long len);
+void __sanitizer_syscall_pre_impl_munlock(long long addr, long long len);
+void __sanitizer_syscall_post_impl_munlock(long long res, long long addr,
+ long long len);
+void __sanitizer_syscall_pre_impl_undelete(long long path);
+void __sanitizer_syscall_post_impl_undelete(long long res, long long path);
+void __sanitizer_syscall_pre_impl_compat_50_futimes(long long fd,
+ long long tptr);
+void __sanitizer_syscall_post_impl_compat_50_futimes(long long res,
+ long long fd,
+ long long tptr);
+void __sanitizer_syscall_pre_impl_getpgid(long long pid);
+void __sanitizer_syscall_post_impl_getpgid(long long res, long long pid);
+void __sanitizer_syscall_pre_impl_reboot(long long opt, long long bootstr);
+void __sanitizer_syscall_post_impl_reboot(long long res, long long opt,
+ long long bootstr);
+void __sanitizer_syscall_pre_impl_poll(long long fds, long long nfds,
+ long long timeout);
+void __sanitizer_syscall_post_impl_poll(long long res, long long fds,
+ long long nfds, long long timeout);
+void __sanitizer_syscall_pre_impl_afssys(long long id, long long a1,
+ long long a2, long long a3,
+ long long a4, long long a5,
+ long long a6);
+void __sanitizer_syscall_post_impl_afssys(long long res, long long id,
+ long long a1, long long a2,
+ long long a3, long long a4,
+ long long a5, long long a6);
+/* syscall 211 has been skipped */
+/* syscall 212 has been skipped */
+/* syscall 213 has been skipped */
+/* syscall 214 has been skipped */
+/* syscall 215 has been skipped */
+/* syscall 216 has been skipped */
+/* syscall 217 has been skipped */
+/* syscall 218 has been skipped */
+/* syscall 219 has been skipped */
+void __sanitizer_syscall_pre_impl_compat_14___semctl(long long semid,
+ long long semnum,
+ long long cmd,
+ long long arg);
+void __sanitizer_syscall_post_impl_compat_14___semctl(long long res,
+ long long semid,
+ long long semnum,
+ long long cmd,
+ long long arg);
+void __sanitizer_syscall_pre_impl_semget(long long key, long long nsems,
+ long long semflg);
+void __sanitizer_syscall_post_impl_semget(long long res, long long key,
+ long long nsems, long long semflg);
+void __sanitizer_syscall_pre_impl_semop(long long semid, long long sops,
+ long long nsops);
+void __sanitizer_syscall_post_impl_semop(long long res, long long semid,
+ long long sops, long long nsops);
+void __sanitizer_syscall_pre_impl_semconfig(long long flag);
+void __sanitizer_syscall_post_impl_semconfig(long long res, long long flag);
+void __sanitizer_syscall_pre_impl_compat_14_msgctl(long long msqid,
+ long long cmd,
+ long long buf);
+void __sanitizer_syscall_post_impl_compat_14_msgctl(long long res,
+ long long msqid,
+ long long cmd,
+ long long buf);
+void __sanitizer_syscall_pre_impl_msgget(long long key, long long msgflg);
+void __sanitizer_syscall_post_impl_msgget(long long res, long long key,
+ long long msgflg);
+void __sanitizer_syscall_pre_impl_msgsnd(long long msqid, long long msgp,
+ long long msgsz, long long msgflg);
+void __sanitizer_syscall_post_impl_msgsnd(long long res, long long msqid,
+ long long msgp, long long msgsz,
+ long long msgflg);
+void __sanitizer_syscall_pre_impl_msgrcv(long long msqid, long long msgp,
+ long long msgsz, long long msgtyp,
+ long long msgflg);
+void __sanitizer_syscall_post_impl_msgrcv(long long res, long long msqid,
+ long long msgp, long long msgsz,
+ long long msgtyp, long long msgflg);
+void __sanitizer_syscall_pre_impl_shmat(long long shmid, long long shmaddr,
+ long long shmflg);
+void __sanitizer_syscall_post_impl_shmat(long long res, long long shmid,
+ long long shmaddr, long long shmflg);
+void __sanitizer_syscall_pre_impl_compat_14_shmctl(long long shmid,
+ long long cmd,
+ long long buf);
+void __sanitizer_syscall_post_impl_compat_14_shmctl(long long res,
+ long long shmid,
+ long long cmd,
+ long long buf);
+void __sanitizer_syscall_pre_impl_shmdt(long long shmaddr);
+void __sanitizer_syscall_post_impl_shmdt(long long res, long long shmaddr);
+void __sanitizer_syscall_pre_impl_shmget(long long key, long long size,
+ long long shmflg);
+void __sanitizer_syscall_post_impl_shmget(long long res, long long key,
+ long long size, long long shmflg);
+void __sanitizer_syscall_pre_impl_compat_50_clock_gettime(long long clock_id,
+ long long tp);
+void __sanitizer_syscall_post_impl_compat_50_clock_gettime(long long res,
+ long long clock_id,
+ long long tp);
+void __sanitizer_syscall_pre_impl_compat_50_clock_settime(long long clock_id,
+ long long tp);
+void __sanitizer_syscall_post_impl_compat_50_clock_settime(long long res,
+ long long clock_id,
+ long long tp);
+void __sanitizer_syscall_pre_impl_compat_50_clock_getres(long long clock_id,
+ long long tp);
+void __sanitizer_syscall_post_impl_compat_50_clock_getres(long long res,
+ long long clock_id,
+ long long tp);
+void __sanitizer_syscall_pre_impl_timer_create(long long clock_id,
+ long long evp,
+ long long timerid);
+void __sanitizer_syscall_post_impl_timer_create(long long res,
+ long long clock_id,
+ long long evp,
+ long long timerid);
+void __sanitizer_syscall_pre_impl_timer_delete(long long timerid);
+void __sanitizer_syscall_post_impl_timer_delete(long long res,
+ long long timerid);
+void __sanitizer_syscall_pre_impl_compat_50_timer_settime(long long timerid,
+ long long flags,
+ long long value,
+ long long ovalue);
+void __sanitizer_syscall_post_impl_compat_50_timer_settime(long long res,
+ long long timerid,
+ long long flags,
+ long long value,
+ long long ovalue);
+void __sanitizer_syscall_pre_impl_compat_50_timer_gettime(long long timerid,
+ long long value);
+void __sanitizer_syscall_post_impl_compat_50_timer_gettime(long long res,
+ long long timerid,
+ long long value);
+void __sanitizer_syscall_pre_impl_timer_getoverrun(long long timerid);
+void __sanitizer_syscall_post_impl_timer_getoverrun(long long res,
+ long long timerid);
+void __sanitizer_syscall_pre_impl_compat_50_nanosleep(long long rqtp,
+ long long rmtp);
+void __sanitizer_syscall_post_impl_compat_50_nanosleep(long long res,
+ long long rqtp,
+ long long rmtp);
+void __sanitizer_syscall_pre_impl_fdatasync(long long fd);
+void __sanitizer_syscall_post_impl_fdatasync(long long res, long long fd);
+void __sanitizer_syscall_pre_impl_mlockall(long long flags);
+void __sanitizer_syscall_post_impl_mlockall(long long res, long long flags);
+void __sanitizer_syscall_pre_impl_munlockall(void);
+void __sanitizer_syscall_post_impl_munlockall(long long res);
+void __sanitizer_syscall_pre_impl_compat_50___sigtimedwait(long long set,
+ long long info,
+ long long timeout);
+void __sanitizer_syscall_post_impl_compat_50___sigtimedwait(long long res,
+ long long set,
+ long long info,
+ long long timeout);
+void __sanitizer_syscall_pre_impl_sigqueueinfo(long long pid, long long info);
+void __sanitizer_syscall_post_impl_sigqueueinfo(long long res, long long pid,
+ long long info);
+void __sanitizer_syscall_pre_impl_modctl(long long cmd, long long arg);
+void __sanitizer_syscall_post_impl_modctl(long long res, long long cmd,
+ long long arg);
+void __sanitizer_syscall_pre_impl__ksem_init(long long value, long long idp);
+void __sanitizer_syscall_post_impl__ksem_init(long long res, long long value,
+ long long idp);
+void __sanitizer_syscall_pre_impl__ksem_open(long long name, long long oflag,
+ long long mode, long long value,
+ long long idp);
+void __sanitizer_syscall_post_impl__ksem_open(long long res, long long name,
+ long long oflag, long long mode,
+ long long value, long long idp);
+void __sanitizer_syscall_pre_impl__ksem_unlink(long long name);
+void __sanitizer_syscall_post_impl__ksem_unlink(long long res, long long name);
+void __sanitizer_syscall_pre_impl__ksem_close(long long id);
+void __sanitizer_syscall_post_impl__ksem_close(long long res, long long id);
+void __sanitizer_syscall_pre_impl__ksem_post(long long id);
+void __sanitizer_syscall_post_impl__ksem_post(long long res, long long id);
+void __sanitizer_syscall_pre_impl__ksem_wait(long long id);
+void __sanitizer_syscall_post_impl__ksem_wait(long long res, long long id);
+void __sanitizer_syscall_pre_impl__ksem_trywait(long long id);
+void __sanitizer_syscall_post_impl__ksem_trywait(long long res, long long id);
+void __sanitizer_syscall_pre_impl__ksem_getvalue(long long id, long long value);
+void __sanitizer_syscall_post_impl__ksem_getvalue(long long res, long long id,
+ long long value);
+void __sanitizer_syscall_pre_impl__ksem_destroy(long long id);
+void __sanitizer_syscall_post_impl__ksem_destroy(long long res, long long id);
+void __sanitizer_syscall_pre_impl__ksem_timedwait(long long id,
+ long long abstime);
+void __sanitizer_syscall_post_impl__ksem_timedwait(long long res, long long id,
+ long long abstime);
+void __sanitizer_syscall_pre_impl_mq_open(long long name, long long oflag,
+ long long mode, long long attr);
+void __sanitizer_syscall_post_impl_mq_open(long long res, long long name,
+ long long oflag, long long mode,
+ long long attr);
+void __sanitizer_syscall_pre_impl_mq_close(long long mqdes);
+void __sanitizer_syscall_post_impl_mq_close(long long res, long long mqdes);
+void __sanitizer_syscall_pre_impl_mq_unlink(long long name);
+void __sanitizer_syscall_post_impl_mq_unlink(long long res, long long name);
+void __sanitizer_syscall_pre_impl_mq_getattr(long long mqdes, long long mqstat);
+void __sanitizer_syscall_post_impl_mq_getattr(long long res, long long mqdes,
+ long long mqstat);
+void __sanitizer_syscall_pre_impl_mq_setattr(long long mqdes, long long mqstat,
+ long long omqstat);
+void __sanitizer_syscall_post_impl_mq_setattr(long long res, long long mqdes,
+ long long mqstat,
+ long long omqstat);
+void __sanitizer_syscall_pre_impl_mq_notify(long long mqdes,
+ long long notification);
+void __sanitizer_syscall_post_impl_mq_notify(long long res, long long mqdes,
+ long long notification);
+void __sanitizer_syscall_pre_impl_mq_send(long long mqdes, long long msg_ptr,
+ long long msg_len,
+ long long msg_prio);
+void __sanitizer_syscall_post_impl_mq_send(long long res, long long mqdes,
+ long long msg_ptr, long long msg_len,
+ long long msg_prio);
+void __sanitizer_syscall_pre_impl_mq_receive(long long mqdes, long long msg_ptr,
+ long long msg_len,
+ long long msg_prio);
+void __sanitizer_syscall_post_impl_mq_receive(long long res, long long mqdes,
+ long long msg_ptr,
+ long long msg_len,
+ long long msg_prio);
+void __sanitizer_syscall_pre_impl_compat_50_mq_timedsend(long long mqdes,
+ long long msg_ptr,
+ long long msg_len,
+ long long msg_prio,
+ long long abs_timeout);
+void __sanitizer_syscall_post_impl_compat_50_mq_timedsend(
+ long long res, long long mqdes, long long msg_ptr, long long msg_len,
+ long long msg_prio, long long abs_timeout);
+void __sanitizer_syscall_pre_impl_compat_50_mq_timedreceive(
+ long long mqdes, long long msg_ptr, long long msg_len, long long msg_prio,
+ long long abs_timeout);
+void __sanitizer_syscall_post_impl_compat_50_mq_timedreceive(
+ long long res, long long mqdes, long long msg_ptr, long long msg_len,
+ long long msg_prio, long long abs_timeout);
+/* syscall 267 has been skipped */
+/* syscall 268 has been skipped */
+/* syscall 269 has been skipped */
+void __sanitizer_syscall_pre_impl___posix_rename(long long from, long long to);
+void __sanitizer_syscall_post_impl___posix_rename(long long res, long long from,
+ long long to);
+void __sanitizer_syscall_pre_impl_swapctl(long long cmd, long long arg,
+ long long misc);
+void __sanitizer_syscall_post_impl_swapctl(long long res, long long cmd,
+ long long arg, long long misc);
+void __sanitizer_syscall_pre_impl_compat_30_getdents(long long fd,
+ long long buf,
+ long long count);
+void __sanitizer_syscall_post_impl_compat_30_getdents(long long res,
+ long long fd,
+ long long buf,
+ long long count);
+void __sanitizer_syscall_pre_impl_minherit(long long addr, long long len,
+ long long inherit);
+void __sanitizer_syscall_post_impl_minherit(long long res, long long addr,
+ long long len, long long inherit);
+void __sanitizer_syscall_pre_impl_lchmod(long long path, long long mode);
+void __sanitizer_syscall_post_impl_lchmod(long long res, long long path,
+ long long mode);
+void __sanitizer_syscall_pre_impl_lchown(long long path, long long uid,
+ long long gid);
+void __sanitizer_syscall_post_impl_lchown(long long res, long long path,
+ long long uid, long long gid);
+void __sanitizer_syscall_pre_impl_compat_50_lutimes(long long path,
+ long long tptr);
+void __sanitizer_syscall_post_impl_compat_50_lutimes(long long res,
+ long long path,
+ long long tptr);
+void __sanitizer_syscall_pre_impl___msync13(long long addr, long long len,
+ long long flags);
+void __sanitizer_syscall_post_impl___msync13(long long res, long long addr,
+ long long len, long long flags);
+void __sanitizer_syscall_pre_impl_compat_30___stat13(long long path,
+ long long ub);
+void __sanitizer_syscall_post_impl_compat_30___stat13(long long res,
+ long long path,
+ long long ub);
+void __sanitizer_syscall_pre_impl_compat_30___fstat13(long long fd,
+ long long sb);
+void __sanitizer_syscall_post_impl_compat_30___fstat13(long long res,
+ long long fd,
+ long long sb);
+void __sanitizer_syscall_pre_impl_compat_30___lstat13(long long path,
+ long long ub);
+void __sanitizer_syscall_post_impl_compat_30___lstat13(long long res,
+ long long path,
+ long long ub);
+void __sanitizer_syscall_pre_impl___sigaltstack14(long long nss, long long oss);
+void __sanitizer_syscall_post_impl___sigaltstack14(long long res, long long nss,
+ long long oss);
+void __sanitizer_syscall_pre_impl___vfork14(void);
+void __sanitizer_syscall_post_impl___vfork14(long long res);
+void __sanitizer_syscall_pre_impl___posix_chown(long long path, long long uid,
+ long long gid);
+void __sanitizer_syscall_post_impl___posix_chown(long long res, long long path,
+ long long uid, long long gid);
+void __sanitizer_syscall_pre_impl___posix_fchown(long long fd, long long uid,
+ long long gid);
+void __sanitizer_syscall_post_impl___posix_fchown(long long res, long long fd,
+ long long uid, long long gid);
+void __sanitizer_syscall_pre_impl___posix_lchown(long long path, long long uid,
+ long long gid);
+void __sanitizer_syscall_post_impl___posix_lchown(long long res, long long path,
+ long long uid, long long gid);
+void __sanitizer_syscall_pre_impl_getsid(long long pid);
+void __sanitizer_syscall_post_impl_getsid(long long res, long long pid);
+void __sanitizer_syscall_pre_impl___clone(long long flags, long long stack);
+void __sanitizer_syscall_post_impl___clone(long long res, long long flags,
+ long long stack);
+void __sanitizer_syscall_pre_impl_fktrace(long long fd, long long ops,
+ long long facs, long long pid);
+void __sanitizer_syscall_post_impl_fktrace(long long res, long long fd,
+ long long ops, long long facs,
+ long long pid);
+void __sanitizer_syscall_pre_impl_preadv(long long fd, long long iovp,
+ long long iovcnt, long long PAD,
+ long long offset);
+void __sanitizer_syscall_post_impl_preadv(long long res, long long fd,
+ long long iovp, long long iovcnt,
+ long long PAD, long long offset);
+void __sanitizer_syscall_pre_impl_pwritev(long long fd, long long iovp,
+ long long iovcnt, long long PAD,
+ long long offset);
+void __sanitizer_syscall_post_impl_pwritev(long long res, long long fd,
+ long long iovp, long long iovcnt,
+ long long PAD, long long offset);
+void __sanitizer_syscall_pre_impl_compat_16___sigaction14(long long signum,
+ long long nsa,
+ long long osa);
+void __sanitizer_syscall_post_impl_compat_16___sigaction14(long long res,
+ long long signum,
+ long long nsa,
+ long long osa);
+void __sanitizer_syscall_pre_impl___sigpending14(long long set);
+void __sanitizer_syscall_post_impl___sigpending14(long long res, long long set);
+void __sanitizer_syscall_pre_impl___sigprocmask14(long long how, long long set,
+ long long oset);
+void __sanitizer_syscall_post_impl___sigprocmask14(long long res, long long how,
+ long long set,
+ long long oset);
+void __sanitizer_syscall_pre_impl___sigsuspend14(long long set);
+void __sanitizer_syscall_post_impl___sigsuspend14(long long res, long long set);
+void __sanitizer_syscall_pre_impl_compat_16___sigreturn14(long long sigcntxp);
+void __sanitizer_syscall_post_impl_compat_16___sigreturn14(long long res,
+ long long sigcntxp);
+void __sanitizer_syscall_pre_impl___getcwd(long long bufp, long long length);
+void __sanitizer_syscall_post_impl___getcwd(long long res, long long bufp,
+ long long length);
+void __sanitizer_syscall_pre_impl_fchroot(long long fd);
+void __sanitizer_syscall_post_impl_fchroot(long long res, long long fd);
+void __sanitizer_syscall_pre_impl_compat_30_fhopen(long long fhp,
+ long long flags);
+void __sanitizer_syscall_post_impl_compat_30_fhopen(long long res,
+ long long fhp,
+ long long flags);
+void __sanitizer_syscall_pre_impl_compat_30_fhstat(long long fhp, long long sb);
+void __sanitizer_syscall_post_impl_compat_30_fhstat(long long res,
+ long long fhp,
+ long long sb);
+void __sanitizer_syscall_pre_impl_compat_20_fhstatfs(long long fhp,
+ long long buf);
+void __sanitizer_syscall_post_impl_compat_20_fhstatfs(long long res,
+ long long fhp,
+ long long buf);
+void __sanitizer_syscall_pre_impl_compat_50_____semctl13(long long semid,
+ long long semnum,
+ long long cmd,
+ long long arg);
+void __sanitizer_syscall_post_impl_compat_50_____semctl13(long long res,
+ long long semid,
+ long long semnum,
+ long long cmd,
+ long long arg);
+void __sanitizer_syscall_pre_impl_compat_50___msgctl13(long long msqid,
+ long long cmd,
+ long long buf);
+void __sanitizer_syscall_post_impl_compat_50___msgctl13(long long res,
+ long long msqid,
+ long long cmd,
+ long long buf);
+void __sanitizer_syscall_pre_impl_compat_50___shmctl13(long long shmid,
+ long long cmd,
+ long long buf);
+void __sanitizer_syscall_post_impl_compat_50___shmctl13(long long res,
+ long long shmid,
+ long long cmd,
+ long long buf);
+void __sanitizer_syscall_pre_impl_lchflags(long long path, long long flags);
+void __sanitizer_syscall_post_impl_lchflags(long long res, long long path,
+ long long flags);
+void __sanitizer_syscall_pre_impl_issetugid(void);
+void __sanitizer_syscall_post_impl_issetugid(long long res);
+void __sanitizer_syscall_pre_impl_utrace(long long label, long long addr,
+ long long len);
+void __sanitizer_syscall_post_impl_utrace(long long res, long long label,
+ long long addr, long long len);
+void __sanitizer_syscall_pre_impl_getcontext(long long ucp);
+void __sanitizer_syscall_post_impl_getcontext(long long res, long long ucp);
+void __sanitizer_syscall_pre_impl_setcontext(long long ucp);
+void __sanitizer_syscall_post_impl_setcontext(long long res, long long ucp);
+void __sanitizer_syscall_pre_impl__lwp_create(long long ucp, long long flags,
+ long long new_lwp);
+void __sanitizer_syscall_post_impl__lwp_create(long long res, long long ucp,
+ long long flags,
+ long long new_lwp);
+void __sanitizer_syscall_pre_impl__lwp_exit(void);
+void __sanitizer_syscall_post_impl__lwp_exit(long long res);
+void __sanitizer_syscall_pre_impl__lwp_self(void);
+void __sanitizer_syscall_post_impl__lwp_self(long long res);
+void __sanitizer_syscall_pre_impl__lwp_wait(long long wait_for,
+ long long departed);
+void __sanitizer_syscall_post_impl__lwp_wait(long long res, long long wait_for,
+ long long departed);
+void __sanitizer_syscall_pre_impl__lwp_suspend(long long target);
+void __sanitizer_syscall_post_impl__lwp_suspend(long long res,
+ long long target);
+void __sanitizer_syscall_pre_impl__lwp_continue(long long target);
+void __sanitizer_syscall_post_impl__lwp_continue(long long res,
+ long long target);
+void __sanitizer_syscall_pre_impl__lwp_wakeup(long long target);
+void __sanitizer_syscall_post_impl__lwp_wakeup(long long res, long long target);
+void __sanitizer_syscall_pre_impl__lwp_getprivate(void);
+void __sanitizer_syscall_post_impl__lwp_getprivate(long long res);
+void __sanitizer_syscall_pre_impl__lwp_setprivate(long long ptr);
+void __sanitizer_syscall_post_impl__lwp_setprivate(long long res,
+ long long ptr);
+void __sanitizer_syscall_pre_impl__lwp_kill(long long target, long long signo);
+void __sanitizer_syscall_post_impl__lwp_kill(long long res, long long target,
+ long long signo);
+void __sanitizer_syscall_pre_impl__lwp_detach(long long target);
+void __sanitizer_syscall_post_impl__lwp_detach(long long res, long long target);
+void __sanitizer_syscall_pre_impl_compat_50__lwp_park(long long ts,
+ long long unpark,
+ long long hint,
+ long long unparkhint);
+void __sanitizer_syscall_post_impl_compat_50__lwp_park(long long res,
+ long long ts,
+ long long unpark,
+ long long hint,
+ long long unparkhint);
+void __sanitizer_syscall_pre_impl__lwp_unpark(long long target, long long hint);
+void __sanitizer_syscall_post_impl__lwp_unpark(long long res, long long target,
+ long long hint);
+void __sanitizer_syscall_pre_impl__lwp_unpark_all(long long targets,
+ long long ntargets,
+ long long hint);
+void __sanitizer_syscall_post_impl__lwp_unpark_all(long long res,
+ long long targets,
+ long long ntargets,
+ long long hint);
+void __sanitizer_syscall_pre_impl__lwp_setname(long long target,
+ long long name);
+void __sanitizer_syscall_post_impl__lwp_setname(long long res, long long target,
+ long long name);
+void __sanitizer_syscall_pre_impl__lwp_getname(long long target, long long name,
+ long long len);
+void __sanitizer_syscall_post_impl__lwp_getname(long long res, long long target,
+ long long name, long long len);
+void __sanitizer_syscall_pre_impl__lwp_ctl(long long features,
+ long long address);
+void __sanitizer_syscall_post_impl__lwp_ctl(long long res, long long features,
+ long long address);
+/* syscall 326 has been skipped */
+/* syscall 327 has been skipped */
+/* syscall 328 has been skipped */
+/* syscall 329 has been skipped */
+void __sanitizer_syscall_pre_impl_compat_60_sa_register(
+ long long newv, long long oldv, long long flags,
+ long long stackinfo_offset);
+void __sanitizer_syscall_post_impl_compat_60_sa_register(
+ long long res, long long newv, long long oldv, long long flags,
+ long long stackinfo_offset);
+void __sanitizer_syscall_pre_impl_compat_60_sa_stacks(long long num,
+ long long stacks);
+void __sanitizer_syscall_post_impl_compat_60_sa_stacks(long long res,
+ long long num,
+ long long stacks);
+void __sanitizer_syscall_pre_impl_compat_60_sa_enable(void);
+void __sanitizer_syscall_post_impl_compat_60_sa_enable(long long res);
+void __sanitizer_syscall_pre_impl_compat_60_sa_setconcurrency(
+ long long concurrency);
+void __sanitizer_syscall_post_impl_compat_60_sa_setconcurrency(
+ long long res, long long concurrency);
+void __sanitizer_syscall_pre_impl_compat_60_sa_yield(void);
+void __sanitizer_syscall_post_impl_compat_60_sa_yield(long long res);
+void __sanitizer_syscall_pre_impl_compat_60_sa_preempt(long long sa_id);
+void __sanitizer_syscall_post_impl_compat_60_sa_preempt(long long res,
+ long long sa_id);
+/* syscall 336 has been skipped */
+/* syscall 337 has been skipped */
+/* syscall 338 has been skipped */
+/* syscall 339 has been skipped */
+void __sanitizer_syscall_pre_impl___sigaction_sigtramp(long long signum,
+ long long nsa,
+ long long osa,
+ long long tramp,
+ long long vers);
+void __sanitizer_syscall_post_impl___sigaction_sigtramp(
+ long long res, long long signum, long long nsa, long long osa,
+ long long tramp, long long vers);
+/* syscall 341 has been skipped */
+/* syscall 342 has been skipped */
+void __sanitizer_syscall_pre_impl_rasctl(long long addr, long long len,
+ long long op);
+void __sanitizer_syscall_post_impl_rasctl(long long res, long long addr,
+ long long len, long long op);
+void __sanitizer_syscall_pre_impl_kqueue(void);
+void __sanitizer_syscall_post_impl_kqueue(long long res);
+void __sanitizer_syscall_pre_impl_compat_50_kevent(
+ long long fd, long long changelist, long long nchanges, long long eventlist,
+ long long nevents, long long timeout);
+void __sanitizer_syscall_post_impl_compat_50_kevent(
+ long long res, long long fd, long long changelist, long long nchanges,
+ long long eventlist, long long nevents, long long timeout);
+void __sanitizer_syscall_pre_impl__sched_setparam(long long pid, long long lid,
+ long long policy,
+ long long params);
+void __sanitizer_syscall_post_impl__sched_setparam(long long res, long long pid,
+ long long lid,
+ long long policy,
+ long long params);
+void __sanitizer_syscall_pre_impl__sched_getparam(long long pid, long long lid,
+ long long policy,
+ long long params);
+void __sanitizer_syscall_post_impl__sched_getparam(long long res, long long pid,
+ long long lid,
+ long long policy,
+ long long params);
+void __sanitizer_syscall_pre_impl__sched_setaffinity(long long pid,
+ long long lid,
+ long long size,
+ long long cpuset);
+void __sanitizer_syscall_post_impl__sched_setaffinity(long long res,
+ long long pid,
+ long long lid,
+ long long size,
+ long long cpuset);
+void __sanitizer_syscall_pre_impl__sched_getaffinity(long long pid,
+ long long lid,
+ long long size,
+ long long cpuset);
+void __sanitizer_syscall_post_impl__sched_getaffinity(long long res,
+ long long pid,
+ long long lid,
+ long long size,
+ long long cpuset);
+void __sanitizer_syscall_pre_impl_sched_yield(void);
+void __sanitizer_syscall_post_impl_sched_yield(long long res);
+void __sanitizer_syscall_pre_impl__sched_protect(long long priority);
+void __sanitizer_syscall_post_impl__sched_protect(long long res,
+ long long priority);
+/* syscall 352 has been skipped */
+/* syscall 353 has been skipped */
+void __sanitizer_syscall_pre_impl_fsync_range(long long fd, long long flags,
+ long long start,
+ long long length);
+void __sanitizer_syscall_post_impl_fsync_range(long long res, long long fd,
+ long long flags, long long start,
+ long long length);
+void __sanitizer_syscall_pre_impl_uuidgen(long long store, long long count);
+void __sanitizer_syscall_post_impl_uuidgen(long long res, long long store,
+ long long count);
+void __sanitizer_syscall_pre_impl_getvfsstat(long long buf, long long bufsize,
+ long long flags);
+void __sanitizer_syscall_post_impl_getvfsstat(long long res, long long buf,
+ long long bufsize,
+ long long flags);
+void __sanitizer_syscall_pre_impl_statvfs1(long long path, long long buf,
+ long long flags);
+void __sanitizer_syscall_post_impl_statvfs1(long long res, long long path,
+ long long buf, long long flags);
+void __sanitizer_syscall_pre_impl_fstatvfs1(long long fd, long long buf,
+ long long flags);
+void __sanitizer_syscall_post_impl_fstatvfs1(long long res, long long fd,
+ long long buf, long long flags);
+void __sanitizer_syscall_pre_impl_compat_30_fhstatvfs1(long long fhp,
+ long long buf,
+ long long flags);
+void __sanitizer_syscall_post_impl_compat_30_fhstatvfs1(long long res,
+ long long fhp,
+ long long buf,
+ long long flags);
+void __sanitizer_syscall_pre_impl_extattrctl(long long path, long long cmd,
+ long long filename,
+ long long attrnamespace,
+ long long attrname);
+void __sanitizer_syscall_post_impl_extattrctl(long long res, long long path,
+ long long cmd, long long filename,
+ long long attrnamespace,
+ long long attrname);
+void __sanitizer_syscall_pre_impl_extattr_set_file(long long path,
+ long long attrnamespace,
+ long long attrname,
+ long long data,
+ long long nbytes);
+void __sanitizer_syscall_post_impl_extattr_set_file(
+ long long res, long long path, long long attrnamespace, long long attrname,
+ long long data, long long nbytes);
+void __sanitizer_syscall_pre_impl_extattr_get_file(long long path,
+ long long attrnamespace,
+ long long attrname,
+ long long data,
+ long long nbytes);
+void __sanitizer_syscall_post_impl_extattr_get_file(
+ long long res, long long path, long long attrnamespace, long long attrname,
+ long long data, long long nbytes);
+void __sanitizer_syscall_pre_impl_extattr_delete_file(long long path,
+ long long attrnamespace,
+ long long attrname);
+void __sanitizer_syscall_post_impl_extattr_delete_file(long long res,
+ long long path,
+ long long attrnamespace,
+ long long attrname);
+void __sanitizer_syscall_pre_impl_extattr_set_fd(long long fd,
+ long long attrnamespace,
+ long long attrname,
+ long long data,
+ long long nbytes);
+void __sanitizer_syscall_post_impl_extattr_set_fd(long long res, long long fd,
+ long long attrnamespace,
+ long long attrname,
+ long long data,
+ long long nbytes);
+void __sanitizer_syscall_pre_impl_extattr_get_fd(long long fd,
+ long long attrnamespace,
+ long long attrname,
+ long long data,
+ long long nbytes);
+void __sanitizer_syscall_post_impl_extattr_get_fd(long long res, long long fd,
+ long long attrnamespace,
+ long long attrname,
+ long long data,
+ long long nbytes);
+void __sanitizer_syscall_pre_impl_extattr_delete_fd(long long fd,
+ long long attrnamespace,
+ long long attrname);
+void __sanitizer_syscall_post_impl_extattr_delete_fd(long long res,
+ long long fd,
+ long long attrnamespace,
+ long long attrname);
+void __sanitizer_syscall_pre_impl_extattr_set_link(long long path,
+ long long attrnamespace,
+ long long attrname,
+ long long data,
+ long long nbytes);
+void __sanitizer_syscall_post_impl_extattr_set_link(
+ long long res, long long path, long long attrnamespace, long long attrname,
+ long long data, long long nbytes);
+void __sanitizer_syscall_pre_impl_extattr_get_link(long long path,
+ long long attrnamespace,
+ long long attrname,
+ long long data,
+ long long nbytes);
+void __sanitizer_syscall_post_impl_extattr_get_link(
+ long long res, long long path, long long attrnamespace, long long attrname,
+ long long data, long long nbytes);
+void __sanitizer_syscall_pre_impl_extattr_delete_link(long long path,
+ long long attrnamespace,
+ long long attrname);
+void __sanitizer_syscall_post_impl_extattr_delete_link(long long res,
+ long long path,
+ long long attrnamespace,
+ long long attrname);
+void __sanitizer_syscall_pre_impl_extattr_list_fd(long long fd,
+ long long attrnamespace,
+ long long data,
+ long long nbytes);
+void __sanitizer_syscall_post_impl_extattr_list_fd(long long res, long long fd,
+ long long attrnamespace,
+ long long data,
+ long long nbytes);
+void __sanitizer_syscall_pre_impl_extattr_list_file(long long path,
+ long long attrnamespace,
+ long long data,
+ long long nbytes);
+void __sanitizer_syscall_post_impl_extattr_list_file(long long res,
+ long long path,
+ long long attrnamespace,
+ long long data,
+ long long nbytes);
+void __sanitizer_syscall_pre_impl_extattr_list_link(long long path,
+ long long attrnamespace,
+ long long data,
+ long long nbytes);
+void __sanitizer_syscall_post_impl_extattr_list_link(long long res,
+ long long path,
+ long long attrnamespace,
+ long long data,
+ long long nbytes);
+void __sanitizer_syscall_pre_impl_compat_50_pselect(long long nd, long long in,
+ long long ou, long long ex,
+ long long ts,
+ long long mask);
+void __sanitizer_syscall_post_impl_compat_50_pselect(long long res,
+ long long nd, long long in,
+ long long ou, long long ex,
+ long long ts,
+ long long mask);
+void __sanitizer_syscall_pre_impl_compat_50_pollts(long long fds,
+ long long nfds, long long ts,
+ long long mask);
+void __sanitizer_syscall_post_impl_compat_50_pollts(
+ long long res, long long fds, long long nfds, long long ts, long long mask);
+void __sanitizer_syscall_pre_impl_setxattr(long long path, long long name,
+ long long value, long long size,
+ long long flags);
+void __sanitizer_syscall_post_impl_setxattr(long long res, long long path,
+ long long name, long long value,
+ long long size, long long flags);
+void __sanitizer_syscall_pre_impl_lsetxattr(long long path, long long name,
+ long long value, long long size,
+ long long flags);
+void __sanitizer_syscall_post_impl_lsetxattr(long long res, long long path,
+ long long name, long long value,
+ long long size, long long flags);
+void __sanitizer_syscall_pre_impl_fsetxattr(long long fd, long long name,
+ long long value, long long size,
+ long long flags);
+void __sanitizer_syscall_post_impl_fsetxattr(long long res, long long fd,
+ long long name, long long value,
+ long long size, long long flags);
+void __sanitizer_syscall_pre_impl_getxattr(long long path, long long name,
+ long long value, long long size);
+void __sanitizer_syscall_post_impl_getxattr(long long res, long long path,
+ long long name, long long value,
+ long long size);
+void __sanitizer_syscall_pre_impl_lgetxattr(long long path, long long name,
+ long long value, long long size);
+void __sanitizer_syscall_post_impl_lgetxattr(long long res, long long path,
+ long long name, long long value,
+ long long size);
+void __sanitizer_syscall_pre_impl_fgetxattr(long long fd, long long name,
+ long long value, long long size);
+void __sanitizer_syscall_post_impl_fgetxattr(long long res, long long fd,
+ long long name, long long value,
+ long long size);
+void __sanitizer_syscall_pre_impl_listxattr(long long path, long long list,
+ long long size);
+void __sanitizer_syscall_post_impl_listxattr(long long res, long long path,
+ long long list, long long size);
+void __sanitizer_syscall_pre_impl_llistxattr(long long path, long long list,
+ long long size);
+void __sanitizer_syscall_post_impl_llistxattr(long long res, long long path,
+ long long list, long long size);
+void __sanitizer_syscall_pre_impl_flistxattr(long long fd, long long list,
+ long long size);
+void __sanitizer_syscall_post_impl_flistxattr(long long res, long long fd,
+ long long list, long long size);
+void __sanitizer_syscall_pre_impl_removexattr(long long path, long long name);
+void __sanitizer_syscall_post_impl_removexattr(long long res, long long path,
+ long long name);
+void __sanitizer_syscall_pre_impl_lremovexattr(long long path, long long name);
+void __sanitizer_syscall_post_impl_lremovexattr(long long res, long long path,
+ long long name);
+void __sanitizer_syscall_pre_impl_fremovexattr(long long fd, long long name);
+void __sanitizer_syscall_post_impl_fremovexattr(long long res, long long fd,
+ long long name);
+void __sanitizer_syscall_pre_impl_compat_50___stat30(long long path,
+ long long ub);
+void __sanitizer_syscall_post_impl_compat_50___stat30(long long res,
+ long long path,
+ long long ub);
+void __sanitizer_syscall_pre_impl_compat_50___fstat30(long long fd,
+ long long sb);
+void __sanitizer_syscall_post_impl_compat_50___fstat30(long long res,
+ long long fd,
+ long long sb);
+void __sanitizer_syscall_pre_impl_compat_50___lstat30(long long path,
+ long long ub);
+void __sanitizer_syscall_post_impl_compat_50___lstat30(long long res,
+ long long path,
+ long long ub);
+void __sanitizer_syscall_pre_impl___getdents30(long long fd, long long buf,
+ long long count);
+void __sanitizer_syscall_post_impl___getdents30(long long res, long long fd,
+ long long buf, long long count);
+void __sanitizer_syscall_pre_impl_posix_fadvise(long long);
+void __sanitizer_syscall_post_impl_posix_fadvise(long long res, long long);
+void __sanitizer_syscall_pre_impl_compat_30___fhstat30(long long fhp,
+ long long sb);
+void __sanitizer_syscall_post_impl_compat_30___fhstat30(long long res,
+ long long fhp,
+ long long sb);
+void __sanitizer_syscall_pre_impl_compat_50___ntp_gettime30(long long ntvp);
+void __sanitizer_syscall_post_impl_compat_50___ntp_gettime30(long long res,
+ long long ntvp);
+void __sanitizer_syscall_pre_impl___socket30(long long domain, long long type,
+ long long protocol);
+void __sanitizer_syscall_post_impl___socket30(long long res, long long domain,
+ long long type,
+ long long protocol);
+void __sanitizer_syscall_pre_impl___getfh30(long long fname, long long fhp,
+ long long fh_size);
+void __sanitizer_syscall_post_impl___getfh30(long long res, long long fname,
+ long long fhp, long long fh_size);
+void __sanitizer_syscall_pre_impl___fhopen40(long long fhp, long long fh_size,
+ long long flags);
+void __sanitizer_syscall_post_impl___fhopen40(long long res, long long fhp,
+ long long fh_size,
+ long long flags);
+void __sanitizer_syscall_pre_impl___fhstatvfs140(long long fhp,
+ long long fh_size,
+ long long buf,
+ long long flags);
+void __sanitizer_syscall_post_impl___fhstatvfs140(long long res, long long fhp,
+ long long fh_size,
+ long long buf,
+ long long flags);
+void __sanitizer_syscall_pre_impl_compat_50___fhstat40(long long fhp,
+ long long fh_size,
+ long long sb);
+void __sanitizer_syscall_post_impl_compat_50___fhstat40(long long res,
+ long long fhp,
+ long long fh_size,
+ long long sb);
+void __sanitizer_syscall_pre_impl_aio_cancel(long long fildes,
+ long long aiocbp);
+void __sanitizer_syscall_post_impl_aio_cancel(long long res, long long fildes,
+ long long aiocbp);
+void __sanitizer_syscall_pre_impl_aio_error(long long aiocbp);
+void __sanitizer_syscall_post_impl_aio_error(long long res, long long aiocbp);
+void __sanitizer_syscall_pre_impl_aio_fsync(long long op, long long aiocbp);
+void __sanitizer_syscall_post_impl_aio_fsync(long long res, long long op,
+ long long aiocbp);
+void __sanitizer_syscall_pre_impl_aio_read(long long aiocbp);
+void __sanitizer_syscall_post_impl_aio_read(long long res, long long aiocbp);
+void __sanitizer_syscall_pre_impl_aio_return(long long aiocbp);
+void __sanitizer_syscall_post_impl_aio_return(long long res, long long aiocbp);
+void __sanitizer_syscall_pre_impl_compat_50_aio_suspend(long long list,
+ long long nent,
+ long long timeout);
+void __sanitizer_syscall_post_impl_compat_50_aio_suspend(long long res,
+ long long list,
+ long long nent,
+ long long timeout);
+void __sanitizer_syscall_pre_impl_aio_write(long long aiocbp);
+void __sanitizer_syscall_post_impl_aio_write(long long res, long long aiocbp);
+void __sanitizer_syscall_pre_impl_lio_listio(long long mode, long long list,
+ long long nent, long long sig);
+void __sanitizer_syscall_post_impl_lio_listio(long long res, long long mode,
+ long long list, long long nent,
+ long long sig);
+/* syscall 407 has been skipped */
+/* syscall 408 has been skipped */
+/* syscall 409 has been skipped */
+void __sanitizer_syscall_pre_impl___mount50(long long type, long long path,
+ long long flags, long long data,
+ long long data_len);
+void __sanitizer_syscall_post_impl___mount50(long long res, long long type,
+ long long path, long long flags,
+ long long data,
+ long long data_len);
+void __sanitizer_syscall_pre_impl_mremap(long long old_address,
+ long long old_size,
+ long long new_address,
+ long long new_size, long long flags);
+void __sanitizer_syscall_post_impl_mremap(long long res, long long old_address,
+ long long old_size,
+ long long new_address,
+ long long new_size, long long flags);
+void __sanitizer_syscall_pre_impl_pset_create(long long psid);
+void __sanitizer_syscall_post_impl_pset_create(long long res, long long psid);
+void __sanitizer_syscall_pre_impl_pset_destroy(long long psid);
+void __sanitizer_syscall_post_impl_pset_destroy(long long res, long long psid);
+void __sanitizer_syscall_pre_impl_pset_assign(long long psid, long long cpuid,
+ long long opsid);
+void __sanitizer_syscall_post_impl_pset_assign(long long res, long long psid,
+ long long cpuid,
+ long long opsid);
+void __sanitizer_syscall_pre_impl__pset_bind(long long idtype,
+ long long first_id,
+ long long second_id,
+ long long psid, long long opsid);
+void __sanitizer_syscall_post_impl__pset_bind(long long res, long long idtype,
+ long long first_id,
+ long long second_id,
+ long long psid, long long opsid);
+void __sanitizer_syscall_pre_impl___posix_fadvise50(long long fd, long long PAD,
+ long long offset,
+ long long len,
+ long long advice);
+void __sanitizer_syscall_post_impl___posix_fadvise50(
+ long long res, long long fd, long long PAD, long long offset, long long len,
+ long long advice);
+void __sanitizer_syscall_pre_impl___select50(long long nd, long long in,
+ long long ou, long long ex,
+ long long tv);
+void __sanitizer_syscall_post_impl___select50(long long res, long long nd,
+ long long in, long long ou,
+ long long ex, long long tv);
+void __sanitizer_syscall_pre_impl___gettimeofday50(long long tp, long long tzp);
+void __sanitizer_syscall_post_impl___gettimeofday50(long long res, long long tp,
+ long long tzp);
+void __sanitizer_syscall_pre_impl___settimeofday50(long long tv, long long tzp);
+void __sanitizer_syscall_post_impl___settimeofday50(long long res, long long tv,
+ long long tzp);
+void __sanitizer_syscall_pre_impl___utimes50(long long path, long long tptr);
+void __sanitizer_syscall_post_impl___utimes50(long long res, long long path,
+ long long tptr);
+void __sanitizer_syscall_pre_impl___adjtime50(long long delta,
+ long long olddelta);
+void __sanitizer_syscall_post_impl___adjtime50(long long res, long long delta,
+ long long olddelta);
+void __sanitizer_syscall_pre_impl___lfs_segwait50(long long fsidp,
+ long long tv);
+void __sanitizer_syscall_post_impl___lfs_segwait50(long long res,
+ long long fsidp,
+ long long tv);
+void __sanitizer_syscall_pre_impl___futimes50(long long fd, long long tptr);
+void __sanitizer_syscall_post_impl___futimes50(long long res, long long fd,
+ long long tptr);
+void __sanitizer_syscall_pre_impl___lutimes50(long long path, long long tptr);
+void __sanitizer_syscall_post_impl___lutimes50(long long res, long long path,
+ long long tptr);
+void __sanitizer_syscall_pre_impl___setitimer50(long long which, long long itv,
+ long long oitv);
+void __sanitizer_syscall_post_impl___setitimer50(long long res, long long which,
+ long long itv, long long oitv);
+void __sanitizer_syscall_pre_impl___getitimer50(long long which, long long itv);
+void __sanitizer_syscall_post_impl___getitimer50(long long res, long long which,
+ long long itv);
+void __sanitizer_syscall_pre_impl___clock_gettime50(long long clock_id,
+ long long tp);
+void __sanitizer_syscall_post_impl___clock_gettime50(long long res,
+ long long clock_id,
+ long long tp);
+void __sanitizer_syscall_pre_impl___clock_settime50(long long clock_id,
+ long long tp);
+void __sanitizer_syscall_post_impl___clock_settime50(long long res,
+ long long clock_id,
+ long long tp);
+void __sanitizer_syscall_pre_impl___clock_getres50(long long clock_id,
+ long long tp);
+void __sanitizer_syscall_post_impl___clock_getres50(long long res,
+ long long clock_id,
+ long long tp);
+void __sanitizer_syscall_pre_impl___nanosleep50(long long rqtp, long long rmtp);
+void __sanitizer_syscall_post_impl___nanosleep50(long long res, long long rqtp,
+ long long rmtp);
+void __sanitizer_syscall_pre_impl_____sigtimedwait50(long long set,
+ long long info,
+ long long timeout);
+void __sanitizer_syscall_post_impl_____sigtimedwait50(long long res,
+ long long set,
+ long long info,
+ long long timeout);
+void __sanitizer_syscall_pre_impl___mq_timedsend50(long long mqdes,
+ long long msg_ptr,
+ long long msg_len,
+ long long msg_prio,
+ long long abs_timeout);
+void __sanitizer_syscall_post_impl___mq_timedsend50(
+ long long res, long long mqdes, long long msg_ptr, long long msg_len,
+ long long msg_prio, long long abs_timeout);
+void __sanitizer_syscall_pre_impl___mq_timedreceive50(long long mqdes,
+ long long msg_ptr,
+ long long msg_len,
+ long long msg_prio,
+ long long abs_timeout);
+void __sanitizer_syscall_post_impl___mq_timedreceive50(
+ long long res, long long mqdes, long long msg_ptr, long long msg_len,
+ long long msg_prio, long long abs_timeout);
+void __sanitizer_syscall_pre_impl_compat_60__lwp_park(long long ts,
+ long long unpark,
+ long long hint,
+ long long unparkhint);
+void __sanitizer_syscall_post_impl_compat_60__lwp_park(long long res,
+ long long ts,
+ long long unpark,
+ long long hint,
+ long long unparkhint);
+void __sanitizer_syscall_pre_impl___kevent50(long long fd, long long changelist,
+ long long nchanges,
+ long long eventlist,
+ long long nevents,
+ long long timeout);
+void __sanitizer_syscall_post_impl___kevent50(
+ long long res, long long fd, long long changelist, long long nchanges,
+ long long eventlist, long long nevents, long long timeout);
+void __sanitizer_syscall_pre_impl___pselect50(long long nd, long long in,
+ long long ou, long long ex,
+ long long ts, long long mask);
+void __sanitizer_syscall_post_impl___pselect50(long long res, long long nd,
+ long long in, long long ou,
+ long long ex, long long ts,
+ long long mask);
+void __sanitizer_syscall_pre_impl___pollts50(long long fds, long long nfds,
+ long long ts, long long mask);
+void __sanitizer_syscall_post_impl___pollts50(long long res, long long fds,
+ long long nfds, long long ts,
+ long long mask);
+void __sanitizer_syscall_pre_impl___aio_suspend50(long long list,
+ long long nent,
+ long long timeout);
+void __sanitizer_syscall_post_impl___aio_suspend50(long long res,
+ long long list,
+ long long nent,
+ long long timeout);
+void __sanitizer_syscall_pre_impl___stat50(long long path, long long ub);
+void __sanitizer_syscall_post_impl___stat50(long long res, long long path,
+ long long ub);
+void __sanitizer_syscall_pre_impl___fstat50(long long fd, long long sb);
+void __sanitizer_syscall_post_impl___fstat50(long long res, long long fd,
+ long long sb);
+void __sanitizer_syscall_pre_impl___lstat50(long long path, long long ub);
+void __sanitizer_syscall_post_impl___lstat50(long long res, long long path,
+ long long ub);
+void __sanitizer_syscall_pre_impl_____semctl50(long long semid,
+ long long semnum, long long cmd,
+ long long arg);
+void __sanitizer_syscall_post_impl_____semctl50(long long res, long long semid,
+ long long semnum, long long cmd,
+ long long arg);
+void __sanitizer_syscall_pre_impl___shmctl50(long long shmid, long long cmd,
+ long long buf);
+void __sanitizer_syscall_post_impl___shmctl50(long long res, long long shmid,
+ long long cmd, long long buf);
+void __sanitizer_syscall_pre_impl___msgctl50(long long msqid, long long cmd,
+ long long buf);
+void __sanitizer_syscall_post_impl___msgctl50(long long res, long long msqid,
+ long long cmd, long long buf);
+void __sanitizer_syscall_pre_impl___getrusage50(long long who,
+ long long rusage);
+void __sanitizer_syscall_post_impl___getrusage50(long long res, long long who,
+ long long rusage);
+void __sanitizer_syscall_pre_impl___timer_settime50(long long timerid,
+ long long flags,
+ long long value,
+ long long ovalue);
+void __sanitizer_syscall_post_impl___timer_settime50(long long res,
+ long long timerid,
+ long long flags,
+ long long value,
+ long long ovalue);
+void __sanitizer_syscall_pre_impl___timer_gettime50(long long timerid,
+ long long value);
+void __sanitizer_syscall_post_impl___timer_gettime50(long long res,
+ long long timerid,
+ long long value);
+#if defined(NTP) || !defined(_KERNEL_OPT)
+void __sanitizer_syscall_pre_impl___ntp_gettime50(long long ntvp);
+void __sanitizer_syscall_post_impl___ntp_gettime50(long long res,
+ long long ntvp);
+#else
+/* syscall 448 has been skipped */
+#endif
+void __sanitizer_syscall_pre_impl___wait450(long long pid, long long status,
+ long long options,
+ long long rusage);
+void __sanitizer_syscall_post_impl___wait450(long long res, long long pid,
+ long long status,
+ long long options,
+ long long rusage);
+void __sanitizer_syscall_pre_impl___mknod50(long long path, long long mode,
+ long long dev);
+void __sanitizer_syscall_post_impl___mknod50(long long res, long long path,
+ long long mode, long long dev);
+void __sanitizer_syscall_pre_impl___fhstat50(long long fhp, long long fh_size,
+ long long sb);
+void __sanitizer_syscall_post_impl___fhstat50(long long res, long long fhp,
+ long long fh_size, long long sb);
+/* syscall 452 has been skipped */
+void __sanitizer_syscall_pre_impl_pipe2(long long fildes, long long flags);
+void __sanitizer_syscall_post_impl_pipe2(long long res, long long fildes,
+ long long flags);
+void __sanitizer_syscall_pre_impl_dup3(long long from, long long to,
+ long long flags);
+void __sanitizer_syscall_post_impl_dup3(long long res, long long from,
+ long long to, long long flags);
+void __sanitizer_syscall_pre_impl_kqueue1(long long flags);
+void __sanitizer_syscall_post_impl_kqueue1(long long res, long long flags);
+void __sanitizer_syscall_pre_impl_paccept(long long s, long long name,
+ long long anamelen, long long mask,
+ long long flags);
+void __sanitizer_syscall_post_impl_paccept(long long res, long long s,
+ long long name, long long anamelen,
+ long long mask, long long flags);
+void __sanitizer_syscall_pre_impl_linkat(long long fd1, long long name1,
+ long long fd2, long long name2,
+ long long flags);
+void __sanitizer_syscall_post_impl_linkat(long long res, long long fd1,
+ long long name1, long long fd2,
+ long long name2, long long flags);
+void __sanitizer_syscall_pre_impl_renameat(long long fromfd, long long from,
+ long long tofd, long long to);
+void __sanitizer_syscall_post_impl_renameat(long long res, long long fromfd,
+ long long from, long long tofd,
+ long long to);
+void __sanitizer_syscall_pre_impl_mkfifoat(long long fd, long long path,
+ long long mode);
+void __sanitizer_syscall_post_impl_mkfifoat(long long res, long long fd,
+ long long path, long long mode);
+void __sanitizer_syscall_pre_impl_mknodat(long long fd, long long path,
+ long long mode, long long PAD,
+ long long dev);
+void __sanitizer_syscall_post_impl_mknodat(long long res, long long fd,
+ long long path, long long mode,
+ long long PAD, long long dev);
+void __sanitizer_syscall_pre_impl_mkdirat(long long fd, long long path,
+ long long mode);
+void __sanitizer_syscall_post_impl_mkdirat(long long res, long long fd,
+ long long path, long long mode);
+void __sanitizer_syscall_pre_impl_faccessat(long long fd, long long path,
+ long long amode, long long flag);
+void __sanitizer_syscall_post_impl_faccessat(long long res, long long fd,
+ long long path, long long amode,
+ long long flag);
+void __sanitizer_syscall_pre_impl_fchmodat(long long fd, long long path,
+ long long mode, long long flag);
+void __sanitizer_syscall_post_impl_fchmodat(long long res, long long fd,
+ long long path, long long mode,
+ long long flag);
+void __sanitizer_syscall_pre_impl_fchownat(long long fd, long long path,
+ long long owner, long long group,
+ long long flag);
+void __sanitizer_syscall_post_impl_fchownat(long long res, long long fd,
+ long long path, long long owner,
+ long long group, long long flag);
+void __sanitizer_syscall_pre_impl_fexecve(long long fd, long long argp,
+ long long envp);
+void __sanitizer_syscall_post_impl_fexecve(long long res, long long fd,
+ long long argp, long long envp);
+void __sanitizer_syscall_pre_impl_fstatat(long long fd, long long path,
+ long long buf, long long flag);
+void __sanitizer_syscall_post_impl_fstatat(long long res, long long fd,
+ long long path, long long buf,
+ long long flag);
+void __sanitizer_syscall_pre_impl_utimensat(long long fd, long long path,
+ long long tptr, long long flag);
+void __sanitizer_syscall_post_impl_utimensat(long long res, long long fd,
+ long long path, long long tptr,
+ long long flag);
+void __sanitizer_syscall_pre_impl_openat(long long fd, long long path,
+ long long oflags, long long mode);
+void __sanitizer_syscall_post_impl_openat(long long res, long long fd,
+ long long path, long long oflags,
+ long long mode);
+void __sanitizer_syscall_pre_impl_readlinkat(long long fd, long long path,
+ long long buf, long long bufsize);
+void __sanitizer_syscall_post_impl_readlinkat(long long res, long long fd,
+ long long path, long long buf,
+ long long bufsize);
+void __sanitizer_syscall_pre_impl_symlinkat(long long path1, long long fd,
+ long long path2);
+void __sanitizer_syscall_post_impl_symlinkat(long long res, long long path1,
+ long long fd, long long path2);
+void __sanitizer_syscall_pre_impl_unlinkat(long long fd, long long path,
+ long long flag);
+void __sanitizer_syscall_post_impl_unlinkat(long long res, long long fd,
+ long long path, long long flag);
+void __sanitizer_syscall_pre_impl_futimens(long long fd, long long tptr);
+void __sanitizer_syscall_post_impl_futimens(long long res, long long fd,
+ long long tptr);
+void __sanitizer_syscall_pre_impl___quotactl(long long path, long long args);
+void __sanitizer_syscall_post_impl___quotactl(long long res, long long path,
+ long long args);
+void __sanitizer_syscall_pre_impl_posix_spawn(long long pid, long long path,
+ long long file_actions,
+ long long attrp, long long argv,
+ long long envp);
+void __sanitizer_syscall_post_impl_posix_spawn(long long res, long long pid,
+ long long path,
+ long long file_actions,
+ long long attrp, long long argv,
+ long long envp);
+void __sanitizer_syscall_pre_impl_recvmmsg(long long s, long long mmsg,
+ long long vlen, long long flags,
+ long long timeout);
+void __sanitizer_syscall_post_impl_recvmmsg(long long res, long long s,
+ long long mmsg, long long vlen,
+ long long flags, long long timeout);
+void __sanitizer_syscall_pre_impl_sendmmsg(long long s, long long mmsg,
+ long long vlen, long long flags);
+void __sanitizer_syscall_post_impl_sendmmsg(long long res, long long s,
+ long long mmsg, long long vlen,
+ long long flags);
+void __sanitizer_syscall_pre_impl_clock_nanosleep(long long clock_id,
+ long long flags,
+ long long rqtp,
+ long long rmtp);
+void __sanitizer_syscall_post_impl_clock_nanosleep(long long res,
+ long long clock_id,
+ long long flags,
+ long long rqtp,
+ long long rmtp);
+void __sanitizer_syscall_pre_impl____lwp_park60(long long clock_id,
+ long long flags, long long ts,
+ long long unpark,
+ long long hint,
+ long long unparkhint);
+void __sanitizer_syscall_post_impl____lwp_park60(
+ long long res, long long clock_id, long long flags, long long ts,
+ long long unpark, long long hint, long long unparkhint);
+void __sanitizer_syscall_pre_impl_posix_fallocate(long long fd, long long PAD,
+ long long pos, long long len);
+void __sanitizer_syscall_post_impl_posix_fallocate(long long res, long long fd,
+ long long PAD, long long pos,
+ long long len);
+void __sanitizer_syscall_pre_impl_fdiscard(long long fd, long long PAD,
+ long long pos, long long len);
+void __sanitizer_syscall_post_impl_fdiscard(long long res, long long fd,
+ long long PAD, long long pos,
+ long long len);
+void __sanitizer_syscall_pre_impl_wait6(long long idtype, long long id,
+ long long status, long long options,
+ long long wru, long long info);
+void __sanitizer_syscall_post_impl_wait6(long long res, long long idtype,
+ long long id, long long status,
+ long long options, long long wru,
+ long long info);
+void __sanitizer_syscall_pre_impl_clock_getcpuclockid2(long long idtype,
+ long long id,
+ long long clock_id);
+void __sanitizer_syscall_post_impl_clock_getcpuclockid2(long long res,
+ long long idtype,
+ long long id,
+ long long clock_id);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+// DO NOT EDIT! THIS FILE HAS BEEN GENERATED!
+
+#endif // SANITIZER_NETBSD_SYSCALL_HOOKS_H
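A rough, hypothetical sketch of how these generated hooks are driven: a syscall wrapper brackets the raw call with the matching pre/post impl pair, passing every argument through as long long. The do_openat wrapper below is illustrative only (interceptors would normally reach these impl functions through the convenience macros defined earlier in this header), but the hook signatures match the openat declarations above.

  #include <fcntl.h>
  #include <stdint.h>
  #include <sanitizer/netbsd_syscall_hooks.h>

  // Sketch only: bracket a raw openat(2) call with the generated hooks.
  static int do_openat(int fd, const char *path, int oflags, int mode) {
    __sanitizer_syscall_pre_impl_openat(
        (long long)fd, (long long)(uintptr_t)path, (long long)oflags,
        (long long)mode);
    int res = openat(fd, path, oflags, mode);
    __sanitizer_syscall_post_impl_openat(
        (long long)res, (long long)fd, (long long)(uintptr_t)path,
        (long long)oflags, (long long)mode);
    return res;
  }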
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/include/sanitizer/netbsd_syscall_hooks.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/include/sanitizer/scudo_interface.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/include/sanitizer/scudo_interface.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/include/sanitizer/scudo_interface.h (revision 351984)
@@ -0,0 +1,38 @@
+//===-- sanitizer/scudo_interface.h -----------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+/// Public Scudo interface header.
+//
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_SCUDO_INTERFACE_H_
+#define SANITIZER_SCUDO_INTERFACE_H_
+
+#include <sanitizer/common_interface_defs.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+  // This function may optionally be provided by the user and should return
+  // a string containing Scudo runtime options. See scudo_flags.h for details.
+  const char* __scudo_default_options(void);
+
+  // This function allows setting the RSS limit at runtime, as either the
+  // hard limit (HardLimit=1) or the soft limit (HardLimit=0). The limit can
+  // be removed by setting LimitMb to 0. This function's parameters should be
+  // fully trusted to avoid security mishaps.
+ void __scudo_set_rss_limit(size_t LimitMb, int HardLimit);
+
+ // This function outputs various allocator statistics for both the Primary
+ // and Secondary allocators, including memory usage, number of allocations
+ // and deallocations.
+ void __scudo_print_stats(void);
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // SANITIZER_SCUDO_INTERFACE_H_
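A minimal sketch of how an application might use these three hooks; the option string below is illustrative only, so consult scudo_flags.h for the flags actually supported.

  #include <sanitizer/scudo_interface.h>

  // Sketch only: supply default Scudo options at startup. The flag names
  // in the string are examples, not a verified configuration.
  extern "C" const char *__scudo_default_options(void) {
    return "DeleteSizeMismatch=0:QuarantineSizeKb=128";
  }

  int main() {
    __scudo_set_rss_limit(2048, /*HardLimit=*/0);  // 2 GB soft RSS limit.
    // ... application work ...
    __scudo_print_stats();  // Dump Primary/Secondary allocator statistics.
    return 0;
  }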
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/include/sanitizer/scudo_interface.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/include/sanitizer/tsan_interface.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/include/sanitizer/tsan_interface.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/include/sanitizer/tsan_interface.h (revision 351984)
@@ -0,0 +1,161 @@
+//===-- tsan_interface.h ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Public interface header for TSan.
+//===----------------------------------------------------------------------===//
+#ifndef SANITIZER_TSAN_INTERFACE_H
+#define SANITIZER_TSAN_INTERFACE_H
+
+#include <sanitizer/common_interface_defs.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// __tsan_acquire establishes a happens-before relation with a preceding
+// __tsan_release on the same address.
+void __tsan_acquire(void *addr);
+void __tsan_release(void *addr);
+
+// Annotations for custom mutexes.
+// The annotations allow TSan to produce better reports (with sets of locked
+// mutexes), detect more types of bugs (e.g. mutex misuse, races between
+// lock/unlock and destruction, and potential deadlocks), and improve precision
+// and performance (by ignoring individual atomic operations in mutex code).
+// The downside is that annotated mutex code itself is not checked for
+// correctness.
+
+// Mutex creation flags are passed to the __tsan_mutex_create annotation.
+// If a mutex has no constructor and __tsan_mutex_create is not called,
+// the flags may be passed to the __tsan_mutex_pre_lock/__tsan_mutex_post_lock
+// annotations instead.
+
+// The mutex has static storage duration and a no-op constructor and
+// destructor. This effectively makes TSan ignore the destroy annotation.
+const unsigned __tsan_mutex_linker_init = 1 << 0;
+// Mutex is write reentrant.
+const unsigned __tsan_mutex_write_reentrant = 1 << 1;
+// Mutex is read reentrant.
+const unsigned __tsan_mutex_read_reentrant = 1 << 2;
+// Mutex does not have static storage duration, and must not be used after
+// its destructor runs. The opposite of __tsan_mutex_linker_init.
+// If this flag is passed to __tsan_mutex_destroy, then the destruction
+// is ignored unless this flag was previously set on the mutex.
+const unsigned __tsan_mutex_not_static = 1 << 8;
+
+// Mutex operation flags:
+
+// Denotes read lock operation.
+const unsigned __tsan_mutex_read_lock = 1 << 3;
+// Denotes try lock operation.
+const unsigned __tsan_mutex_try_lock = 1 << 4;
+// Denotes that a try lock operation has failed to acquire the mutex.
+const unsigned __tsan_mutex_try_lock_failed = 1 << 5;
+// Denotes that the lock operation acquires multiple recursion levels.
+// The number of levels is passed in the 'recursion' parameter.
+// This is useful for annotating e.g. Java builtin monitors, whose wait
+// operation releases all recursive acquisitions of the mutex.
+const unsigned __tsan_mutex_recursive_lock = 1 << 6;
+// Denotes that the unlock operation releases all recursion levels.
+// The number of released levels is returned and must later be passed to
+// the corresponding __tsan_mutex_post_lock annotation.
+const unsigned __tsan_mutex_recursive_unlock = 1 << 7;
+
+// Annotate creation of a mutex.
+// Supported flags: mutex creation flags.
+void __tsan_mutex_create(void *addr, unsigned flags);
+
+// Annotate destruction of a mutex.
+// Supported flags:
+// - __tsan_mutex_linker_init
+// - __tsan_mutex_not_static
+void __tsan_mutex_destroy(void *addr, unsigned flags);
+
+// Annotate start of lock operation.
+// Supported flags:
+// - __tsan_mutex_read_lock
+// - __tsan_mutex_try_lock
+// - all mutex creation flags
+void __tsan_mutex_pre_lock(void *addr, unsigned flags);
+
+// Annotate end of lock operation.
+// Supported flags:
+// - __tsan_mutex_read_lock (must match __tsan_mutex_pre_lock)
+// - __tsan_mutex_try_lock (must match __tsan_mutex_pre_lock)
+// - __tsan_mutex_try_lock_failed
+// - __tsan_mutex_recursive_lock
+// - all mutex creation flags
+void __tsan_mutex_post_lock(void *addr, unsigned flags, int recursion);
+
+// Annotate start of unlock operation.
+// Supported flags:
+// - __tsan_mutex_read_lock
+// - __tsan_mutex_recursive_unlock
+int __tsan_mutex_pre_unlock(void *addr, unsigned flags);
+
+// Annotate end of unlock operation.
+// Supported flags:
+// - __tsan_mutex_read_lock (must match __tsan_mutex_pre_unlock)
+void __tsan_mutex_post_unlock(void *addr, unsigned flags);
+
+// Annotate start/end of notify/signal/broadcast operation.
+// Supported flags: none.
+void __tsan_mutex_pre_signal(void *addr, unsigned flags);
+void __tsan_mutex_post_signal(void *addr, unsigned flags);
+
+// Annotate start/end of a region of code where a lock/unlock/signal operation
+// diverts to do something else unrelated to the mutex. This can be used to
+// annotate, for example, calls into a cooperative scheduler or contention
+// profiling code.
+// These annotations must be called only from within
+// __tsan_mutex_pre/post_lock, __tsan_mutex_pre/post_unlock,
+// __tsan_mutex_pre/post_signal regions.
+// Supported flags: none.
+void __tsan_mutex_pre_divert(void *addr, unsigned flags);
+void __tsan_mutex_post_divert(void *addr, unsigned flags);
+
+// External race detection API.
+// Can be used by non-instrumented libraries to detect when their objects are
+// being used in an unsafe manner.
+// - __tsan_external_read/__tsan_external_write annotate the logical reads
+//   and writes of the object at the specified address. 'caller_pc' should
+//   be the PC of the library user, which the library can obtain with e.g.
+//   `__builtin_return_address(0)`.
+// - __tsan_external_register_tag registers a 'tag' with the specified name,
+//   which is later used in read/write annotations to denote the object type.
+// - __tsan_external_assign_tag can optionally mark a heap object with a tag.
+void *__tsan_external_register_tag(const char *object_type);
+void __tsan_external_register_header(void *tag, const char *header);
+void __tsan_external_assign_tag(void *addr, void *tag);
+void __tsan_external_read(void *addr, void *caller_pc, void *tag);
+void __tsan_external_write(void *addr, void *caller_pc, void *tag);
+
+// Fiber switching API.
+//  - A TSan context for a fiber can be created by __tsan_create_fiber
+//    and freed by __tsan_destroy_fiber.
+//  - The TSan context of the current fiber or thread can be obtained
+//    by calling __tsan_get_current_fiber.
+//  - __tsan_switch_to_fiber should be called immediately before switching
+//    to a fiber, e.g. before a call to swapcontext.
+//  - A fiber's name can be set by __tsan_set_fiber_name.
+void *__tsan_get_current_fiber(void);
+void *__tsan_create_fiber(unsigned flags);
+void __tsan_destroy_fiber(void *fiber);
+void __tsan_switch_to_fiber(void *fiber, unsigned flags);
+void __tsan_set_fiber_name(void *fiber, const char *name);
+
+// Flags for __tsan_switch_to_fiber:
+// Do not establish a happens-before relation between fibers.
+const unsigned __tsan_switch_to_fiber_no_sync = 1 << 0;
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // SANITIZER_TSAN_INTERFACE_H
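A minimal sketch of the mutex annotations above, applied to a hypothetical test-and-set spinlock (the SpinLock type is illustrative and not part of this header):

  #include <atomic>
  #include <sanitizer/tsan_interface.h>

  // Sketch only: annotate a spinlock so TSan models it as a mutex and
  // ignores the raw atomics inside the lock implementation.
  class SpinLock {
   public:
    SpinLock() { __tsan_mutex_create(this, __tsan_mutex_not_static); }
    ~SpinLock() { __tsan_mutex_destroy(this, 0); }

    void Lock() {
      __tsan_mutex_pre_lock(this, 0);
      while (locked_.exchange(true, std::memory_order_acquire)) {
      }
      __tsan_mutex_post_lock(this, 0, /*recursion=*/0);
    }

    void Unlock() {
      __tsan_mutex_pre_unlock(this, 0);  // Return value unused: non-recursive.
      locked_.store(false, std::memory_order_release);
      __tsan_mutex_post_unlock(this, 0);
    }

   private:
    std::atomic<bool> locked_{false};
  };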
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/include/sanitizer/tsan_interface.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
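And a sketch of the fiber API from the same header, paired with swapcontext from <ucontext.h>; context setup and error handling are omitted, and all names below are illustrative.

  #include <ucontext.h>
  #include <sanitizer/tsan_interface.h>

  // Sketch only: main_ctx/fiber_ctx are assumed to be initialized elsewhere
  // (e.g. via getcontext/makecontext).
  static ucontext_t main_ctx, fiber_ctx;
  static void *fiber_tsan;

  static void setup_fiber() {
    fiber_tsan = __tsan_create_fiber(0);
    __tsan_set_fiber_name(fiber_tsan, "worker-fiber");
  }

  static void switch_to_fiber() {
    // Notify TSan immediately before the actual context switch.
    __tsan_switch_to_fiber(fiber_tsan, 0);
    swapcontext(&main_ctx, &fiber_ctx);
  }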
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/include/sanitizer/tsan_interface_atomic.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/include/sanitizer/tsan_interface_atomic.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/include/sanitizer/tsan_interface_atomic.h (revision 351984)
@@ -0,0 +1,221 @@
+//===-- tsan_interface_atomic.h ---------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of ThreadSanitizer (TSan), a race detector.
+//
+// Public interface header for TSan atomics.
+//===----------------------------------------------------------------------===//
+#ifndef TSAN_INTERFACE_ATOMIC_H
+#define TSAN_INTERFACE_ATOMIC_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef char __tsan_atomic8;
+typedef short __tsan_atomic16; // NOLINT
+typedef int __tsan_atomic32;
+typedef long __tsan_atomic64; // NOLINT
+#if defined(__SIZEOF_INT128__) \
+ || (__clang_major__ * 100 + __clang_minor__ >= 302)
+__extension__ typedef __int128 __tsan_atomic128;
+# define __TSAN_HAS_INT128 1
+#else
+# define __TSAN_HAS_INT128 0
+#endif
+
+// Part of ABI, do not change.
+// https://github.com/llvm/llvm-project/blob/master/libcxx/include/atomic
+typedef enum {
+ __tsan_memory_order_relaxed,
+ __tsan_memory_order_consume,
+ __tsan_memory_order_acquire,
+ __tsan_memory_order_release,
+ __tsan_memory_order_acq_rel,
+ __tsan_memory_order_seq_cst
+} __tsan_memory_order;
+
+__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8 *a,
+ __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16 *a,
+ __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32 *a,
+ __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64 *a,
+ __tsan_memory_order mo);
+#if __TSAN_HAS_INT128
+__tsan_atomic128 __tsan_atomic128_load(const volatile __tsan_atomic128 *a,
+ __tsan_memory_order mo);
+#endif
+
+void __tsan_atomic8_store(volatile __tsan_atomic8 *a, __tsan_atomic8 v,
+ __tsan_memory_order mo);
+void __tsan_atomic16_store(volatile __tsan_atomic16 *a, __tsan_atomic16 v,
+ __tsan_memory_order mo);
+void __tsan_atomic32_store(volatile __tsan_atomic32 *a, __tsan_atomic32 v,
+ __tsan_memory_order mo);
+void __tsan_atomic64_store(volatile __tsan_atomic64 *a, __tsan_atomic64 v,
+ __tsan_memory_order mo);
+#if __TSAN_HAS_INT128
+void __tsan_atomic128_store(volatile __tsan_atomic128 *a, __tsan_atomic128 v,
+ __tsan_memory_order mo);
+#endif
+
+__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8 *a,
+ __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16 *a,
+ __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32 *a,
+ __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64 *a,
+ __tsan_atomic64 v, __tsan_memory_order mo);
+#if __TSAN_HAS_INT128
+__tsan_atomic128 __tsan_atomic128_exchange(volatile __tsan_atomic128 *a,
+ __tsan_atomic128 v, __tsan_memory_order mo);
+#endif
+
+__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8 *a,
+ __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16 *a,
+ __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32 *a,
+ __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64 *a,
+ __tsan_atomic64 v, __tsan_memory_order mo);
+#if __TSAN_HAS_INT128
+__tsan_atomic128 __tsan_atomic128_fetch_add(volatile __tsan_atomic128 *a,
+ __tsan_atomic128 v, __tsan_memory_order mo);
+#endif
+
+__tsan_atomic8 __tsan_atomic8_fetch_sub(volatile __tsan_atomic8 *a,
+ __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_fetch_sub(volatile __tsan_atomic16 *a,
+ __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_fetch_sub(volatile __tsan_atomic32 *a,
+ __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_fetch_sub(volatile __tsan_atomic64 *a,
+ __tsan_atomic64 v, __tsan_memory_order mo);
+#if __TSAN_HAS_INT128
+__tsan_atomic128 __tsan_atomic128_fetch_sub(volatile __tsan_atomic128 *a,
+ __tsan_atomic128 v, __tsan_memory_order mo);
+#endif
+
+__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8 *a,
+ __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16 *a,
+ __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32 *a,
+ __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64 *a,
+ __tsan_atomic64 v, __tsan_memory_order mo);
+#if __TSAN_HAS_INT128
+__tsan_atomic128 __tsan_atomic128_fetch_and(volatile __tsan_atomic128 *a,
+ __tsan_atomic128 v, __tsan_memory_order mo);
+#endif
+
+__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8 *a,
+ __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16 *a,
+ __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32 *a,
+ __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64 *a,
+ __tsan_atomic64 v, __tsan_memory_order mo);
+#if __TSAN_HAS_INT128
+__tsan_atomic128 __tsan_atomic128_fetch_or(volatile __tsan_atomic128 *a,
+ __tsan_atomic128 v, __tsan_memory_order mo);
+#endif
+
+__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8 *a,
+ __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16 *a,
+ __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32 *a,
+ __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64 *a,
+ __tsan_atomic64 v, __tsan_memory_order mo);
+#if __TSAN_HAS_INT128
+__tsan_atomic128 __tsan_atomic128_fetch_xor(volatile __tsan_atomic128 *a,
+ __tsan_atomic128 v, __tsan_memory_order mo);
+#endif
+
+__tsan_atomic8 __tsan_atomic8_fetch_nand(volatile __tsan_atomic8 *a,
+ __tsan_atomic8 v, __tsan_memory_order mo);
+__tsan_atomic16 __tsan_atomic16_fetch_nand(volatile __tsan_atomic16 *a,
+ __tsan_atomic16 v, __tsan_memory_order mo);
+__tsan_atomic32 __tsan_atomic32_fetch_nand(volatile __tsan_atomic32 *a,
+ __tsan_atomic32 v, __tsan_memory_order mo);
+__tsan_atomic64 __tsan_atomic64_fetch_nand(volatile __tsan_atomic64 *a,
+ __tsan_atomic64 v, __tsan_memory_order mo);
+#if __TSAN_HAS_INT128
+__tsan_atomic128 __tsan_atomic128_fetch_nand(volatile __tsan_atomic128 *a,
+ __tsan_atomic128 v, __tsan_memory_order mo);
+#endif
+
+int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8 *a,
+ __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo);
+int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16 *a,
+ __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo);
+int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32 *a,
+ __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo);
+int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64 *a,
+ __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo);
+#if __TSAN_HAS_INT128
+int __tsan_atomic128_compare_exchange_weak(volatile __tsan_atomic128 *a,
+ __tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo);
+#endif
+
+int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8 *a,
+ __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo);
+int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16 *a,
+ __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo);
+int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32 *a,
+ __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo);
+int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64 *a,
+ __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo);
+#if __TSAN_HAS_INT128
+int __tsan_atomic128_compare_exchange_strong(volatile __tsan_atomic128 *a,
+ __tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
+ __tsan_memory_order fail_mo);
+#endif
+
+__tsan_atomic8 __tsan_atomic8_compare_exchange_val(
+ volatile __tsan_atomic8 *a, __tsan_atomic8 c, __tsan_atomic8 v,
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
+__tsan_atomic16 __tsan_atomic16_compare_exchange_val(
+ volatile __tsan_atomic16 *a, __tsan_atomic16 c, __tsan_atomic16 v,
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
+__tsan_atomic32 __tsan_atomic32_compare_exchange_val(
+ volatile __tsan_atomic32 *a, __tsan_atomic32 c, __tsan_atomic32 v,
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
+__tsan_atomic64 __tsan_atomic64_compare_exchange_val(
+ volatile __tsan_atomic64 *a, __tsan_atomic64 c, __tsan_atomic64 v,
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
+#if __TSAN_HAS_INT128
+__tsan_atomic128 __tsan_atomic128_compare_exchange_val(
+ volatile __tsan_atomic128 *a, __tsan_atomic128 c, __tsan_atomic128 v,
+ __tsan_memory_order mo, __tsan_memory_order fail_mo);
+#endif
+
+void __tsan_atomic_thread_fence(__tsan_memory_order mo);
+void __tsan_atomic_signal_fence(__tsan_memory_order mo);
+
+#ifdef __cplusplus
+} // extern "C"
+#endif
+
+#endif // TSAN_INTERFACE_ATOMIC_H
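These entry points are normally emitted by the compiler for instrumented atomic operations, but they can also be called directly; a minimal sketch of a shared counter built on the declarations above:

  #include <sanitizer/tsan_interface_atomic.h>

  // Sketch only: a counter on the TSan atomic ABI. fetch_add returns the
  // previous value, mirroring __atomic_fetch_add.
  static volatile __tsan_atomic64 g_counter;

  static __tsan_atomic64 bump_counter(void) {
    return __tsan_atomic64_fetch_add(&g_counter, 1,
                                     __tsan_memory_order_relaxed);
  }

  static __tsan_atomic64 read_counter(void) {
    return __tsan_atomic64_load(&g_counter, __tsan_memory_order_acquire);
  }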
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/include/sanitizer/tsan_interface_atomic.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/include/xray/xray_interface.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/include/xray/xray_interface.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/include/xray/xray_interface.h (revision 351984)
@@ -0,0 +1,130 @@
+//===- xray_interface.h -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+// APIs for controlling XRay functionality explicitly.
+//===----------------------------------------------------------------------===//
+
+#ifndef XRAY_XRAY_INTERFACE_H
+#define XRAY_XRAY_INTERFACE_H
+
+#include <cstddef>
+#include <cstdint>
+
+extern "C" {
+
+/// Synchronize this with AsmPrinter::SledKind in LLVM.
+enum XRayEntryType {
+ ENTRY = 0,
+ EXIT = 1,
+ TAIL = 2,
+ LOG_ARGS_ENTRY = 3,
+ CUSTOM_EVENT = 4,
+ TYPED_EVENT = 5,
+};
+
+/// Provide a function to invoke when instrumentation points are hit. This is
+/// a user-visible control surface that overrides the default implementation.
+/// The function provided should take the following arguments:
+///
+///   - function id: an identifier for the function; this id is generated by
+///                  XRay; the mapping between the function id and the actual
+///                  function pointer is available through __xray_table.
+///   - entry type: identifies what kind of instrumentation point was
+///                 encountered (function entry, function exit, etc.). See the
+///                 enum XRayEntryType for more details.
+///
+/// The user handler must correctly handle spurious calls that occur after it
+/// is removed or replaced with another handler, because it would be too
+/// costly for the XRay runtime to prevent them.
+/// To prevent circular calling, the handler function itself and all its
+/// direct and indirect callees must not be instrumented with XRay, which can
+/// be achieved by marking them all with: __attribute__((xray_never_instrument))
+///
+/// Returns 1 on success, 0 on error.
+extern int __xray_set_handler(void (*entry)(int32_t, XRayEntryType));
+
+/// This removes whatever the currently provided handler is. Returns 1 on
+/// success, 0 on error.
+extern int __xray_remove_handler();
+
+/// Use XRay to log the first argument of each (instrumented) function call.
+/// When this function returns, all threads will have observed the effect and
+/// will start logging their subsequent affected function calls (if patched).
+///
+/// Returns 1 on success, 0 on error.
+extern int __xray_set_handler_arg1(void (*entry)(int32_t, XRayEntryType,
+ uint64_t));
+
+/// Disables the XRay handler used to log first arguments of function calls.
+/// Returns 1 on success, 0 on error.
+extern int __xray_remove_handler_arg1();
+
+/// Provide a function to invoke when XRay encounters a custom event.
+extern int __xray_set_customevent_handler(void (*entry)(void *, std::size_t));
+
+/// This removes whatever the currently provided custom event handler is.
+/// Returns 1 on success, 0 on error.
+extern int __xray_remove_customevent_handler();
+
+/// Set a handler for xray typed event logging. The first parameter is a type
+/// identifier, the second is a payload, and the third is the payload size.
+extern int __xray_set_typedevent_handler(void (*entry)(uint16_t, const void *,
+ std::size_t));
+
+/// Removes the currently set typed event handler.
+/// Returns 1 on success, 0 on error.
+extern int __xray_remove_typedevent_handler();
+
+extern uint16_t __xray_register_event_type(const char *event_type);
+
+enum XRayPatchingStatus {
+ NOT_INITIALIZED = 0,
+ SUCCESS = 1,
+ ONGOING = 2,
+ FAILED = 3,
+};
+
+/// This tells XRay to patch the instrumentation points. See XRayPatchingStatus
+/// for possible result values.
+extern XRayPatchingStatus __xray_patch();
+
+/// Reverses the effect of __xray_patch(). See XRayPatchingStatus for possible
+/// result values.
+extern XRayPatchingStatus __xray_unpatch();
+
+/// This patches a specific function id. See XRayPatchingStatus for possible
+/// result values.
+extern XRayPatchingStatus __xray_patch_function(int32_t FuncId);
+
+/// This unpatches a specific function id. See XRayPatchingStatus for possible
+/// result values.
+extern XRayPatchingStatus __xray_unpatch_function(int32_t FuncId);
+
+/// This function returns the address of the function with the given valid
+/// function id. We return 0 if we encounter any error, even though 0 may also
+/// be a valid function address.
+extern uintptr_t __xray_function_address(int32_t FuncId);
+
+/// This function returns the maximum valid function id. Returns 0 if we
+/// encounter errors (when there are no instrumented functions, etc.).
+extern size_t __xray_max_function_id();
+
+/// Initialize the required XRay data structures. This is useful in cases where
+/// users want to control precisely when the XRay instrumentation data
+/// structures are initialized, for example when the XRay library is built with
+/// the XRAY_NO_PREINIT preprocessor definition.
+///
+/// Calling __xray_init() more than once is safe across multiple threads.
+extern void __xray_init();
+
+} // end extern "C"
+
+#endif // XRAY_XRAY_INTERFACE_H
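A minimal sketch of driving this API end to end: install a handler, patch the sleds, and tear everything down. The handler body is illustrative; note the xray_never_instrument attribute required by the comment on __xray_set_handler.

  #include <cstdio>
  #include <xray/xray_interface.h>

  // Sketch only: print function entries. Must not itself be instrumented.
  __attribute__((xray_never_instrument))
  static void MyHandler(int32_t FuncId, XRayEntryType Type) {
    if (Type == XRayEntryType::ENTRY)
      std::printf("enter function id %d\n", FuncId);
  }

  int main() {
    if (!__xray_set_handler(MyHandler))
      return 1;
    if (__xray_patch() != XRayPatchingStatus::SUCCESS)
      return 1;
    // ... instrumented work ...
    __xray_unpatch();
    __xray_remove_handler();
    return 0;
  }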
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/include/xray/xray_interface.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/include/xray/xray_log_interface.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/include/xray/xray_log_interface.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/include/xray/xray_log_interface.h (revision 351984)
@@ -0,0 +1,357 @@
+//===-- xray_log_interface.h ----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a function call tracing system.
+//
+// APIs for installing a new logging implementation.
+//
+//===----------------------------------------------------------------------===//
+///
+/// XRay allows users to implement their own logging handlers and install them
+/// to replace the default runtime-controllable implementation that comes with
+/// compiler-rt/xray. The "flight data recorder" (FDR) mode implementation uses
+/// this API to install itself in an XRay-enabled binary. See
+/// compiler-rt/lib/xray_fdr_logging.{h,cc} for details of that implementation.
+///
+/// The high-level usage pattern for these APIs looks like the following:
+///
+///   // We choose the mode which we'd like to install, and check whether this
+///   // has succeeded. Each mode has its own set of supported flags, outside
+///   // of the global XRay configuration options that are defined in the
+///   // XRAY_OPTIONS environment variable.
+/// auto select_status = __xray_log_select_mode("xray-fdr");
+/// if (select_status != XRayLogRegisterStatus::XRAY_REGISTRATION_OK) {
+/// // This failed; we should not proceed with attempting to initialize
+/// // the currently selected mode.
+/// return;
+/// }
+///
+/// // Once that's done, we can now attempt to configure the implementation.
+/// // To do this, we provide the string flags configuration for the mode.
+/// auto config_status = __xray_log_init_mode(
+/// "xray-fdr", "verbosity=1 some_flag=1 another_flag=2");
+/// if (config_status != XRayLogInitStatus::XRAY_LOG_INITIALIZED) {
+/// // deal with the error here, if there is one.
+/// }
+///
+/// // When the log implementation has had the chance to initialize, we can
+/// // now patch the instrumentation points. Note that we could have patched
+/// // the instrumentation points first, but there's no strict ordering to
+/// // these operations.
+/// auto patch_status = __xray_patch();
+/// if (patch_status != XRayPatchingStatus::SUCCESS) {
+/// // deal with the error here, if it is an error.
+/// }
+///
+/// // If we want to stop the implementation, we can then finalize it (before
+/// // optionally flushing the log).
+/// auto fin_status = __xray_log_finalize();
+/// if (fin_status != XRayLogInitStatus::XRAY_LOG_FINALIZED) {
+/// // deal with the error here, if it is an error.
+/// }
+///
+/// // We can optionally wait before flushing the log to give other threads a
+/// // chance to see that the implementation is already finalized. Also, at
+/// // this point we can optionally unpatch the instrumentation points to
+/// // reduce overheads at runtime.
+/// auto unpatch_status = __xray_unpatch();
+/// if (unpatch_status != XRayPatchingStatus::SUCCESS) {
+/// // deal with the error here, if it is an error.
+/// }
+///
+/// // If there are logs or data to be flushed somewhere, we can do so only
+/// // after we've finalized the log. Some implementations may not actually
+/// // have anything to log (it might keep the data in memory, or periodically
+/// // be logging the data anyway).
+/// auto flush_status = __xray_log_flushLog();
+/// if (flush_status != XRayLogFlushStatus::XRAY_LOG_FLUSHED) {
+/// // deal with the error here, if it is an error.
+/// }
+///
+/// // Alternatively, we can go through the buffers ourselves without
+/// // relying on the implementations' flushing semantics (if the
+/// // implementation supports exporting this data directly).
+/// auto MyBufferProcessor = +[](const char* mode, XRayBuffer buffer) {
+/// // Check the "mode" to see if it's something we know how to handle...
+/// // and/or do something with an XRayBuffer instance.
+/// };
+/// auto process_status = __xray_log_process_buffers(MyBufferProcessor);
+/// if (process_status != XRayLogFlushStatus::XRAY_LOG_FLUSHED) {
+/// // deal with the error here, if it is an error.
+/// }
+///
+/// NOTE: Before calling __xray_patch() again, consider re-initializing the
+/// implementation first. Some implementations might stay in an "off" state when
+/// they are finalized, while some might be in an invalid/unknown state.
+///
+#ifndef XRAY_XRAY_LOG_INTERFACE_H
+#define XRAY_XRAY_LOG_INTERFACE_H
+
+#include "xray/xray_interface.h"
+#include <stddef.h>
+
+extern "C" {
+
+/// This enum defines the valid states the logging implementation can be in.
+enum XRayLogInitStatus {
+ /// The default state is uninitialized, and in case there were errors in the
+ /// initialization, the implementation MUST return XRAY_LOG_UNINITIALIZED.
+ XRAY_LOG_UNINITIALIZED = 0,
+
+ /// Some implementations support multi-stage init (or asynchronous init), and
+ /// may return XRAY_LOG_INITIALIZING to signal callers of the API that
+ /// there's an ongoing initialization routine running. This allows
+ /// implementations to support concurrent threads attempting to initialize,
+ /// while only signalling success in one.
+ XRAY_LOG_INITIALIZING = 1,
+
+ /// When an implementation is done initializing, it MUST return
+ /// XRAY_LOG_INITIALIZED. When users call `__xray_patch()`, they are
+ /// guaranteed that the implementation installed with
+ /// `__xray_set_log_impl(...)` has been initialized.
+ XRAY_LOG_INITIALIZED = 2,
+
+ /// Some implementations might support multi-stage finalization (or
+ /// asynchronous finalization), and may return XRAY_LOG_FINALIZING to signal
+ /// callers of the API that there's an ongoing finalization routine running.
+ /// This allows implementations to support concurrent threads attempting to
+ /// finalize, while only signalling success/completion in one.
+ XRAY_LOG_FINALIZING = 3,
+
+ /// When an implementation is done finalizing, it MUST return
+ /// XRAY_LOG_FINALIZED. It is up to the implementation to determine what the
+ /// semantics of a finalized implementation are. Some implementations might
+ /// allow re-initialization once the log is finalized, while some might always
+ /// be on (in which case finalization is a no-op).
+ XRAY_LOG_FINALIZED = 4,
+};
+
+/// This enum allows an implementation to signal the state of log-flushing
+/// operations initiated via `__xray_log_flushLog()`.
+enum XRayLogFlushStatus {
+ XRAY_LOG_NOT_FLUSHING = 0,
+ XRAY_LOG_FLUSHING = 1,
+ XRAY_LOG_FLUSHED = 2,
+};
+
+/// This enum indicates the installation state of a logging implementation,
+/// when associating a mode with a particular logging implementation through
+/// `__xray_log_register_mode(...)` or `__xray_log_select_mode(...)`.
+enum XRayLogRegisterStatus {
+ XRAY_REGISTRATION_OK = 0,
+ XRAY_DUPLICATE_MODE = 1,
+ XRAY_MODE_NOT_FOUND = 2,
+ XRAY_INCOMPLETE_IMPL = 3,
+};
+
+/// A valid XRay logging implementation MUST provide all of the function
+/// pointers in XRayLogImpl when being installed through `__xray_set_log_impl`.
+/// To be precise, ALL the function pointers MUST NOT be nullptr.
+struct XRayLogImpl {
+ /// The log initialization routine provided by the implementation, always
+ /// provided with the following parameters:
+ ///
+ /// - buffer size (unused)
+ /// - maximum number of buffers (unused)
+ /// - a pointer to an argument struct that the implementation MUST handle
+ /// - the size of the argument struct
+ ///
+ /// See XRayLogInitStatus for details on what the implementation MUST return
+ /// when called.
+ ///
+ /// If the implementation needs to install handlers aside from the 0-argument
+ /// function call handler, it MUST do so in this initialization handler.
+ ///
+ /// See xray_interface.h for available handler installation routines.
+ XRayLogInitStatus (*log_init)(size_t, size_t, void *, size_t);
+
+ /// The log finalization routine provided by the implementation.
+ ///
+ /// See XRayLogInitStatus for details on what the implementation MUST return
+ /// when called.
+ XRayLogInitStatus (*log_finalize)();
+
+ /// The 0-argument function call handler. XRay logging implementations MUST
+ /// always have a handler for function entry and exit events. In case the
+ /// implementation wants to support arg1 (or other future extensions to XRay
+ /// logging), those handlers MUST be installed from the 'log_init' routine.
+ ///
+ /// Because we didn't want to change the ABI of this struct, the arg1 handler
+ /// may be silently overwritten during initialization as well.
+ void (*handle_arg0)(int32_t, XRayEntryType);
+
+ /// The log implementation provided routine for when __xray_log_flushLog() is
+ /// called.
+ ///
+ /// See XRayLogFlushStatus for details on what the implementation MUST return
+ /// when called.
+ XRayLogFlushStatus (*flush_log)();
+};
+
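+/// As a minimal sketch of a conforming implementation (the my_* names are
+/// hypothetical, not part of this interface):
+///
+///   XRayLogInitStatus my_init(size_t, size_t, void *Config, size_t) {
+///     // Parse Config, set up buffers, install an arg1 handler if needed.
+///     return XRayLogInitStatus::XRAY_LOG_INITIALIZED;
+///   }
+///   XRayLogInitStatus my_finalize() {
+///     return XRayLogInitStatus::XRAY_LOG_FINALIZED;
+///   }
+///   void my_handle_arg0(int32_t FuncId, XRayEntryType Entry) {
+///     // Record the entry/exit event somewhere.
+///   }
+///   XRayLogFlushStatus my_flush() {
+///     return XRayLogFlushStatus::XRAY_LOG_FLUSHED;
+///   }
+///
+///   XRayLogImpl Impl{my_init, my_finalize, my_handle_arg0, my_flush};
+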
+/// DEPRECATED: Use the mode registration workflow instead with
+/// __xray_log_register_mode(...) and __xray_log_select_mode(...). See the
+/// documentation for those functions.
+///
+/// This function installs a new logging implementation that XRay will use. In
+/// case there are any nullptr members in Impl, XRay will *uninstall any
+/// existing implementations*. It does NOT patch the instrumentation points.
+///
+/// NOTE: This function does NOT attempt to finalize the currently installed
+/// implementation. Use with caution.
+///
+/// It is guaranteed safe to call this function in the following states:
+///
+/// - When the implementation is UNINITIALIZED.
+/// - When the implementation is FINALIZED.
+/// - When there is no current implementation installed.
+///
+/// It is logging-implementation-defined what happens when this function is
+/// called while in any other state.
+void __xray_set_log_impl(XRayLogImpl Impl);
+
+/// This function registers a logging implementation against a "mode"
+/// identifier. This allows multiple modes to be registered, and chosen at
+/// runtime using the same mode identifier through
+/// `__xray_log_select_mode(...)`.
+///
+/// We treat the Mode identifier as a null-terminated byte string; it is the
+/// identifier used when retrieving the log impl.
+///
+/// Returns:
+/// - XRAY_REGISTRATION_OK on success.
+/// - XRAY_DUPLICATE_MODE when an implementation is already associated with
+/// the provided Mode; does not update the already-registered
+/// implementation.
+XRayLogRegisterStatus __xray_log_register_mode(const char *Mode,
+ XRayLogImpl Impl);
+
+/// This function selects the implementation associated with Mode that has been
+/// registered through __xray_log_register_mode(...) and installs that
+/// implementation (as if through calling __xray_set_log_impl(...)). The same
+/// caveats apply to __xray_log_select_mode(...) as with
+/// __xray_set_log_impl(...).
+///
+/// Returns:
+/// - XRAY_REGISTRATION_OK on success.
+/// - XRAY_MODE_NOT_FOUND if there is no implementation associated with Mode;
+/// does not update the currently installed implementation.
+XRayLogRegisterStatus __xray_log_select_mode(const char *Mode);
+
+/// Returns an identifier for the currently selected XRay mode chosen through
+/// the __xray_log_select_mode(...) function call. Returns nullptr if there is
+/// no currently installed mode.
+const char *__xray_log_get_current_mode();
+
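+/// As a sketch, registration and selection compose like this (reusing a
+/// hypothetical Impl value such as the one above):
+///
+///   if (__xray_log_register_mode("my-mode", Impl) ==
+///           XRayLogRegisterStatus::XRAY_REGISTRATION_OK &&
+///       __xray_log_select_mode("my-mode") ==
+///           XRayLogRegisterStatus::XRAY_REGISTRATION_OK) {
+///     // "my-mode" is now the installed implementation.
+///   }
+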
+/// This function removes the currently installed implementation. It will also
+/// uninstall any handlers that have been previously installed. It does NOT
+/// unpatch the instrumentation points.
+///
+/// NOTE: This function does NOT attempt to finalize the currently installed
+/// implementation. Use with caution.
+///
+/// It is guaranteed safe to call this function in the following states:
+///
+/// - When the implementation is UNINITIALIZED.
+/// - When the implementation is FINALIZED.
+/// - When there is no current implementation installed.
+///
+/// It is logging-implementation-defined what happens when this function is
+/// called while in any other state.
+void __xray_remove_log_impl();
+
+/// DEPRECATED: Use __xray_log_init_mode() instead, and provide all the options
+/// in string form.
+/// Invokes the installed implementation initialization routine. See
+/// XRayLogInitStatus for what the return values mean.
+XRayLogInitStatus __xray_log_init(size_t BufferSize, size_t MaxBuffers,
+ void *Args, size_t ArgsSize);
+
+/// Invokes the installed initialization routine, which *must* support the
+/// string-based form.
+///
+/// NOTE: When this API is used, we still invoke the installed initialization
+/// routine, but we will call it with the following convention to signal that we
+/// are using the string form:
+///
+/// - BufferSize = 0
+/// - MaxBuffers = 0
+/// - ArgsSize = 0
+/// - Args will be the pointer to the character buffer representing the
+/// configuration.
+///
+/// FIXME: Updating the XRayLogImpl struct is an ABI breaking change. When we
+/// are ready to make a breaking change, we should clean this up appropriately.
+XRayLogInitStatus __xray_log_init_mode(const char *Mode, const char *Config);
+
+/// Like __xray_log_init_mode(...), this version allows for providing
+/// configurations that might not be null-terminated strings. It operates
+/// like __xray_log_init_mode, with the exception that |ArgsSize| is set to
+/// |ConfigSize|.
+XRayLogInitStatus __xray_log_init_mode_bin(const char *Mode, const char *Config,
+ size_t ConfigSize);
+
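+/// For instance, a caller holding a configuration that is not null-terminated
+/// might write (a sketch; the mode name is illustrative):
+///
+///   const char Config[] = {'v', 'e', 'r', 'b', 'o', 's', 'i', 't', 'y',
+///                          '=', '1'};
+///   auto Status =
+///       __xray_log_init_mode_bin("my-mode", Config, sizeof(Config));
+///   if (Status != XRayLogInitStatus::XRAY_LOG_INITIALIZED) {
+///     // Handle the initialization failure.
+///   }
+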
+/// Invokes the installed implementation finalization routine. See
+/// XRayLogInitStatus for what the return values mean.
+XRayLogInitStatus __xray_log_finalize();
+
+/// Invokes the installed implementation's log-flushing routine. See
+/// XRayLogFlushStatus for what the return values mean.
+XRayLogFlushStatus __xray_log_flushLog();
+
+/// An XRayBuffer represents a section of memory which can be treated by log
+/// processing functions as bytes stored in the logging implementation's
+/// buffers.
+struct XRayBuffer {
+ const void *Data;
+ size_t Size;
+};
+
+/// Registers an iterator function which takes an XRayBuffer argument, then
+/// returns another XRayBuffer representing the next buffer. When the Iterator
+/// function returns an empty XRayBuffer (Data = nullptr, Size = 0), this
+/// signifies the end of the buffers.
+///
+/// The first invocation of this Iterator function will always take an empty
+/// XRayBuffer (Data = nullptr, Size = 0).
+void __xray_log_set_buffer_iterator(XRayBuffer (*Iterator)(XRayBuffer));
+
+/// Removes the currently registered buffer iterator function.
+void __xray_log_remove_buffer_iterator();
+
+/// Invokes the provided handler to process data maintained by the logging
+/// implementation. The handler is given raw access to the data available in
+/// memory from the logging implementation. The callback function must:
+///
+/// 1) Not modify the data, to avoid running into undefined behaviour.
+///
+/// 2) Either know the data layout, or treat the data as raw bytes for later
+/// interpretation.
+///
+/// This API is best used in place of the `__xray_log_flushLog()` implementation
+/// above to enable the caller to provide an alternative means of extracting the
+/// data from the XRay implementation.
+///
+/// Implementations MUST then provide:
+///
+/// 1) A function that will return an XRayBuffer. Returning an "empty"
+/// XRayBuffer signifies that there are no more buffers to be processed. This
+/// function should be registered through the
+/// `__xray_log_set_buffer_iterator(...)` function.
+///
+/// 2) Its own means of converting data it holds in memory into an XRayBuffer
+/// structure.
+///
+/// See XRayLogFlushStatus for what the return values mean.
+///
+XRayLogFlushStatus __xray_log_process_buffers(void (*Processor)(const char *,
+ XRayBuffer));
+
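+/// As an illustrative sketch of both sides (GetFirstBuffer and GetNextBuffer
+/// are hypothetical helpers inside the implementation):
+///
+///   // Implementation side: hand out buffers one at a time.
+///   XRayBuffer NextBuffer(XRayBuffer Previous) {
+///     // The first call receives {nullptr, 0}; return {nullptr, 0} once all
+///     // buffers have been handed out.
+///     return Previous.Data == nullptr ? GetFirstBuffer()
+///                                     : GetNextBuffer(Previous);
+///   }
+///   __xray_log_set_buffer_iterator(NextBuffer);
+///
+///   // Caller side: treat each buffer as raw bytes and write it out.
+///   void Dump(const char *Mode, XRayBuffer B) {
+///     fwrite(B.Data, 1, B.Size, stderr);
+///   }
+///   auto Status = __xray_log_process_buffers(Dump);
+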
+} // extern "C"
+
+#endif // XRAY_XRAY_LOG_INTERFACE_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/include/xray/xray_log_interface.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/include/xray/xray_records.h
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/include/xray/xray_records.h (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/include/xray/xray_records.h (revision 351984)
@@ -0,0 +1,134 @@
+//===-- xray_records.h ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of XRay, a dynamic runtime instrumentation system.
+//
+// This header exposes some record types useful for the XRay in-memory logging
+// implementation.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef XRAY_XRAY_RECORDS_H
+#define XRAY_XRAY_RECORDS_H
+
+#include <cstdint>
+
+namespace __xray {
+
+enum FileTypes {
+ NAIVE_LOG = 0,
+ FDR_LOG = 1,
+};
+
+// FDR mode's use of the union field in the XRayFileHeader.
+struct alignas(16) FdrAdditionalHeaderData {
+ uint64_t ThreadBufferSize;
+};
+
+static_assert(sizeof(FdrAdditionalHeaderData) == 16,
+ "FdrAdditionalHeaderData != 16 bytes");
+
+// This data structure is used to describe the contents of the file. We use this
+// for versioning the supported XRay file formats.
+struct alignas(32) XRayFileHeader {
+ uint16_t Version = 0;
+
+ // The type of file we're writing out. See the FileTypes enum for more
+ // information. This allows different implementations of the XRay logging to
+ // have different files for different information being stored.
+ uint16_t Type = 0;
+
+ // What follows is a set of flags that indicate useful things to know when
+ // reading the data in the file.
+ bool ConstantTSC : 1;
+ bool NonstopTSC : 1;
+
+ // The frequency at which the TSC increases, in cycles per second.
+ alignas(8) uint64_t CycleFrequency = 0;
+
+ union {
+ char FreeForm[16];
+ // The current civil-time timestamp, as retrieved from 'clock_gettime'. This
+ // allows readers of the file to determine when the file was created or
+ // written.
+ struct timespec TS;
+
+ struct FdrAdditionalHeaderData FdrData;
+ };
+} __attribute__((packed));
+
+static_assert(sizeof(XRayFileHeader) == 32, "XRayFileHeader != 32 bytes");
+
+enum RecordTypes {
+ NORMAL = 0,
+ ARG_PAYLOAD = 1,
+};
+
+struct alignas(32) XRayRecord {
+ // This is the type of the record being written. We use 16 bits to allow us to
+ // treat this as a discriminant, and so that the first 4 bytes get packed
+ // properly. See RecordTypes for more supported types.
+ uint16_t RecordType = RecordTypes::NORMAL;
+
+ // The CPU where the thread is running. We assume number of CPUs <= 256.
+ uint8_t CPU = 0;
+
+ // The type of the event. One of the following:
+ // ENTER = 0
+ // EXIT = 1
+ // TAIL_EXIT = 2
+ // ENTER_ARG = 3
+ uint8_t Type = 0;
+
+ // The function ID for the record.
+ int32_t FuncId = 0;
+
+ // Get the full 8 bytes of the TSC when we get the log record.
+ uint64_t TSC = 0;
+
+ // The thread ID for the currently running thread.
+ uint32_t TId = 0;
+
+ // The ID of the process that is currently running.
+ uint32_t PId = 0;
+
+ // Use some bytes at the end of the record for buffers.
+ char Buffer[8] = {};
+} __attribute__((packed));
+
+static_assert(sizeof(XRayRecord) == 32, "XRayRecord != 32 bytes");
+
+struct alignas(32) XRayArgPayload {
+ // We use the same 16 bits as a discriminant for the records in the log here
+ // too, and so that the first 4 bytes are packed properly.
+ uint16_t RecordType = RecordTypes::ARG_PAYLOAD;
+
+ // Add a few bytes to pad.
+ uint8_t Padding[2] = {};
+
+ // The function ID for the record.
+ int32_t FuncId = 0;
+
+ // The thread ID for the currently running thread.
+ uint32_t TId = 0;
+
+ // The ID of the process that is currently running.
+ uint32_t PId = 0;
+
+ // The argument payload.
+ uint64_t Arg = 0;
+
+ // The rest of this record ought to be left as padding.
+ uint8_t TailPadding[8] = {};
+} __attribute__((packed));
+
+static_assert(sizeof(XRayArgPayload) == 32, "XRayArgPayload != 32 bytes");
+
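+// As an illustrative sketch (not part of this header), a reader of a
+// NAIVE_LOG file might consume it as an XRayFileHeader followed by a stream
+// of 32-byte records, dispatching on the leading RecordType discriminant;
+// `In` is assumed to be a std::istream over the log file:
+//
+//   XRayFileHeader H;
+//   In.read(reinterpret_cast<char *>(&H), sizeof(H));
+//   char Buf[32];
+//   while (In.read(Buf, sizeof(Buf))) {
+//     uint16_t RT;
+//     __builtin_memcpy(&RT, Buf, sizeof(RT));
+//     if (RT == RecordTypes::NORMAL) {
+//       // Interpret Buf as an XRayRecord.
+//     } else if (RT == RecordTypes::ARG_PAYLOAD) {
+//       // Interpret Buf as an XRayArgPayload.
+//     }
+//   }
+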
+} // namespace __xray
+
+#endif // XRAY_XRAY_RECORDS_H
Property changes on: vendor/compiler-rt/compiler-rt-release_90-r371301/include/xray/xray_records.h
___________________________________________________________________
Added: svn:eol-style
## -0,0 +1 ##
+native
\ No newline at end of property
Added: svn:mime-type
## -0,0 +1 ##
+text/plain
\ No newline at end of property
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/CREDITS.TXT
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/CREDITS.TXT (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/CREDITS.TXT (revision 351984)
@@ -0,0 +1,36 @@
+This file is a partial list of people who have contributed to the LLVM/CompilerRT
+project. If you have contributed a patch or made some other contribution to
+LLVM/CompilerRT, please submit a patch to this file to add yourself, and it will be
+done!
+
+The list is sorted by surname and formatted to allow easy grepping and
+beautification by scripts. The fields are: name (N), email (E), web-address
+(W), PGP key ID and fingerprint (P), description (D), and snail-mail address
+(S).
+
+N: Craig van Vliet
+E: cvanvliet@auroraux.org
+W: http://www.auroraux.org
+D: Code style and Readability fixes.
+
+N: Edward O'Callaghan
+E: eocallaghan@auroraux.org
+W: http://www.auroraux.org
+D: CMake'ify Compiler-RT build system
+D: Maintain Solaris & AuroraUX ports of Compiler-RT
+
+N: Howard Hinnant
+E: hhinnant@apple.com
+D: Architect and primary author of compiler-rt
+
+N: Guan-Hong Liu
+E: koviankevin@hotmail.com
+D: IEEE Quad-precision functions
+
+N: Joerg Sonnenberger
+E: joerg@NetBSD.org
+D: Maintains NetBSD port.
+
+N: Matt Thomas
+E: matt@NetBSD.org
+D: ARM improvements.
Index: vendor/compiler-rt/compiler-rt-release_90-r371301/README.txt
===================================================================
--- vendor/compiler-rt/compiler-rt-release_90-r371301/README.txt (nonexistent)
+++ vendor/compiler-rt/compiler-rt-release_90-r371301/README.txt (revision 351984)
@@ -0,0 +1,11 @@
+Compiler-RT
+================================
+
+This directory and its subdirectories contain source code for the compiler
+support routines.
+
+Compiler-RT is open source software. You may freely distribute it under the
+terms of the license agreement found in LICENSE.txt.
+
+================================
+